Compare commits
1305 commits: last-befor… → wip-fronte…
.arcconfig (Normal file, 6 changed lines)
@@ -0,0 +1,6 @@
{
    "project_id" : "Pillar Server",
    "conduit_uri" : "https://developer.blender.org/",
    "git.default-relative-commit" : "origin/master",
    "arc.land.update.default" : "rebase"
}
.gitignore (vendored, 20 changed lines)
@@ -6,14 +6,26 @@
*.ropeproject*
*.swp

/pillar/config_local.py
config_local.py

.ropeproject/*

/pillar/application/static/storage/
/build
/.cache
/pillar/pillar.egg-info/
/pillar/google_app.json
/*.egg-info/
profile.stats
/dump/
/.eggs

/node_modules
/.sass-cache
*.css.map
*.js.map

/translations/*/LC_MESSAGES/*.mo

pillar/web/static/assets/css/*.css
pillar/web/static/assets/js/*.min.js
pillar/web/static/storage/
pillar/web/static/uploads/
pillar/web/templates/
README.md (Normal file, 78 changed lines)
@@ -0,0 +1,78 @@
Pillar
======

This is the latest iteration on the Attract project. We are building a unified
framework called Pillar. Pillar will combine Blender Cloud and Attract. You
can see Pillar in action on the [Blender Cloud](https://cloud.bender.org).

## Custom fonts

The icons on the website are drawn using a custom font, stored in
[pillar/web/static/font](pillar/web/static/font).
This font is generated via [Fontello](http://fontello.com/) by uploading
[pillar/web/static/font/config.json](pillar/web/static/font/config.json).

Note that we only use the WOFF and WOFF2 formats, and discard the others
supplied by Fontello.

After replacing the font files & `config.json`, edit the Fontello-supplied
`font.css` to remove all font formats except `woff` and `woff2`. Then upload
it to [css2sass](http://css2sass.herokuapp.com/) to convert it to SASS, and
place it in [src/styles/font-pillar.sass](src/styles/font-pillar.sass).

Don't forget to Gulp!


## Installation

Make sure your /data directory exists and is writable by the current user.
Alternatively, provide a `pillar/config_local.py` that changes the relevant
settings.

```
git clone git@git.blender.org:pillar-python-sdk.git ../pillar-python-sdk
pip install -e ../pillar-python-sdk
pip install -U -r requirements.txt
pip install -e .
```

## HDRi viewer

The HDRi viewer uses [Google VRView](https://github.com/googlevr/vrview). To upgrade,
get those files:

* [three.min.js](https://raw.githubusercontent.com/googlevr/vrview/master/build/three.min.js)
* [embed.min.js](https://raw.githubusercontent.com/googlevr/vrview/master/build/embed.min.js)
* [loading.gif](https://raw.githubusercontent.com/googlevr/vrview/master/images/loading.gif)

and place them in `pillar/web/static/assets/vrview`. Replace `images/loading.gif` in `embed.min.js` with `static/pillar/assets/vrview/loading.gif`.

You may also want to compare their
[index.html](https://raw.githubusercontent.com/googlevr/vrview/master/index.html) to our
`src/templates/vrview.pug`.

When on a HDRi page with the viewer embedded, use this JavaScript code to find the current
yaw: `vrview_window.contentWindow.yaw()`. This can be passed as `default_yaw` parameter to
the iframe.

## Celery

Pillar requires [Celery](http://www.celeryproject.org/) for background task processing. This in
turn requires a backend and a broker, for which the default Pillar configuration uses Redis and
RabbitMQ.

You can run the Celery Worker using `manage.py celery worker`.

Find other Celery operations with the `manage.py celery` command.

## Translations

If the language you want to support doesn't exist, you need to run: `translations init es_AR`.

Every time a new string is marked for translation you need to update the entire catalog: `translations update`

And once more strings are translated, you need to compile the translations: `translations compile`

*To mark strings strings for translations in Python scripts you need to
wrap them with the `flask_babel.gettext` function.
For .pug templates wrap them with `_()`.*
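As an aside to the Translations section of the README above: marking a string in Python means wrapping it with `flask_babel.gettext`, which is what `translations update` picks up. A minimal sketch — the blueprint, route, and message below are illustrative assumptions, not part of this changeset:

```python
# Hypothetical example of marking a string for translation with flask_babel,
# as described in the README above.
from flask import Blueprint
from flask_babel import gettext as _

example = Blueprint('example', __name__)  # illustrative blueprint, not from this diff


@example.route('/greeting')
def greeting() -> str:
    # Wrapped in gettext, this string ends up in the catalog after
    # "translations update" and is compiled with "translations compile".
    return _('Welcome to Pillar!')
```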
@@ -1,3 +1,3 @@
-#!/bin/bash
+#!/bin/bash -ex

-mongodump -h localhost:27018 -d eve --out dump/$(date +'%Y-%m-%d-%H%M') --excludeCollection tokens
+mongodump -h localhost:27018 -d cloud --out dump/$(date +'%Y-%m-%d-%H%M') --excludeCollection tokens --excludeCollection flamenco_task_logs
deploy.sh (57 changed lines)
@@ -1,57 +0,0 @@
#!/bin/bash -e

# Deploys the current production branch to the production machine.

PROJECT_NAME="pillar"
DOCKER_NAME="pillar"
REMOTE_ROOT="/data/git/${PROJECT_NAME}"

SSH="ssh -o ClearAllForwardings=yes cloud.blender.org"
ROOT="$(dirname "$(readlink -f "$0")")"
cd ${ROOT}

# Check that we're on production branch.
if [ $(git rev-parse --abbrev-ref HEAD) != "production" ]; then
    echo "You are NOT on the production branch, refusing to deploy." >&2
    exit 1
fi

# Check that production branch has been pushed.
if [ -n "$(git log origin/production..production --oneline)" ]; then
    echo "WARNING: not all changes to the production branch have been pushed."
    echo "Press [ENTER] to continue deploying current origin/production, CTRL+C to abort."
    read dummy
fi

# SSH to cloud to pull all files in
echo "==================================================================="
echo "UPDATING FILES ON ${PROJECT_NAME}"
${SSH} git -C ${REMOTE_ROOT} fetch origin production
${SSH} git -C ${REMOTE_ROOT} log origin/production..production --oneline
${SSH} git -C ${REMOTE_ROOT} merge --ff-only origin/production

# Update the virtualenv
${SSH} -t docker exec ${DOCKER_NAME} /data/venv/bin/pip install -U -r ${REMOTE_ROOT}/requirements.txt --exists-action w

# Notify Bugsnag of this new deploy.
echo
echo "==================================================================="
GIT_REVISION=$(${SSH} git -C ${REMOTE_ROOT} describe --always)
echo "Notifying Bugsnag of this new deploy of revision ${GIT_REVISION}."
BUGSNAG_API_KEY=$(${SSH} python -c "\"import sys; sys.path.append('${REMOTE_ROOT}/${PROJECT_NAME}'); import config_local; print(config_local.BUGSNAG_API_KEY)\"")
curl --data "apiKey=${BUGSNAG_API_KEY}&revision=${GIT_REVISION}" https://notify.bugsnag.com/deploy
echo

# Wait for [ENTER] to restart the server
echo
echo "==================================================================="
echo "NOTE: If you want to edit config_local.py on the server, do so now."
echo "NOTE: Press [ENTER] to continue and restart the server process."
read dummy
${SSH} docker exec ${DOCKER_NAME} kill -HUP 1
echo "Server process restarted"

echo
echo "==================================================================="
echo "Deploy of ${PROJECT_NAME} is done."
echo "==================================================================="
@@ -1,17 +0,0 @@
#!/usr/bin/env bash

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

echo $DIR

if [[ $1 == 'pro' || $1 == 'dev' ]]; then
    # Copy requirements.txt into pro folder
    cp ../requirements.txt $1/requirements.txt
    # Build image
    docker build -t armadillica/pillar_$1 $1
    # Remove requirements.txt
    rm $1/requirements.txt

else
    echo "POS. Your options are 'pro' or 'dev'"
fi
@@ -1,48 +0,0 @@
FROM ubuntu:14.04
MAINTAINER Francesco Siddi <francesco@blender.org>

RUN apt-get update && apt-get install -y \
    python \
    python-dev \
    python-pip \
    vim \
    nano \
    zlib1g-dev \
    libjpeg-dev \
    python-crypto \
    python-openssl \
    libssl-dev \
    libffi-dev \
    software-properties-common \
    git

RUN add-apt-repository ppa:mc3man/trusty-media \
    && apt-get update && apt-get install -y \
    ffmpeg

RUN mkdir -p /data/git/pillar \
    && mkdir -p /data/storage/shared \
    && mkdir -p /data/storage/pillar \
    && mkdir -p /data/config \
    && mkdir -p /data/storage/logs

RUN pip install virtualenv \
    && virtualenv /data/venv

ENV PIP_PACKAGES_VERSION = 2
ADD requirements.txt /requirements.txt

RUN . /data/venv/bin/activate && pip install -r /requirements.txt

VOLUME /data/git/pillar
VOLUME /data/config
VOLUME /data/storage/shared
VOLUME /data/storage/pillar

ENV MONGO_HOST mongo_pillar

EXPOSE 5000

ADD runserver.sh /runserver.sh

ENTRYPOINT ["bash", "/runserver.sh"]
@@ -1,3 +0,0 @@
#!/bin/bash

. /data/venv/bin/activate && python /data/git/pillar/pillar/manage.py runserver
@@ -1,47 +0,0 @@
<VirtualHost *:80>
    # The ServerName directive sets the request scheme, hostname and port that
    # the server uses to identify itself. This is used when creating
    # redirection URLs. In the context of virtual hosts, the ServerName
    # specifies what hostname must appear in the request's Host: header to
    # match this virtual host. For the default virtual host (this file) this
    # value is not decisive as it is used as a last resort host regardless.
    # However, you must set it for any further virtual host explicitly.
    #ServerName 127.0.0.1

    # EnableSendfile on
    XSendFile on
    XSendFilePath /data/storage/pillar

    ServerAdmin webmaster@localhost
    DocumentRoot /var/www/html

    # Available loglevels: trace8, ..., trace1, debug, info, notice, warn,
    # error, crit, alert, emerg.
    # It is also possible to configure the loglevel for particular
    # modules, e.g.
    #LogLevel info ssl:warn

    ErrorLog ${APACHE_LOG_DIR}/error.log
    CustomLog ${APACHE_LOG_DIR}/access.log combined

    # For most configuration files from conf-available/, which are
    # enabled or disabled at a global level, it is possible to
    # include a line for only one particular virtual host. For example the
    # following line enables the CGI configuration for this host only
    # after it has been globally disabled with "a2disconf".
    #Include conf-available/serve-cgi-bin.conf

    WSGIDaemonProcess pillar
    WSGIPassAuthorization On

    WSGIScriptAlias / /data/git/pillar/pillar/runserver.wsgi \
        process-group=pillar application-group=%{GLOBAL}

    <Directory /data/git/pillar/pillar>
        <Files runserver.wsgi>
            Require all granted
        </Files>
    </Directory>
</VirtualHost>

# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
@@ -1,61 +0,0 @@
FROM ubuntu:14.04
MAINTAINER Francesco Siddi <francesco@blender.org>

RUN apt-get update && apt-get install -y \
    python \
    python-dev \
    python-pip \
    vim \
    nano \
    zlib1g-dev \
    libjpeg-dev \
    python-crypto \
    python-openssl \
    libssl-dev \
    libffi-dev \
    software-properties-common \
    apache2-mpm-event \
    libapache2-mod-wsgi \
    libapache2-mod-xsendfile \
    git

RUN add-apt-repository ppa:mc3man/trusty-media \
    && apt-get update && apt-get install -y \
    ffmpeg

RUN mkdir -p /data/git/pillar \
    && mkdir -p /data/storage/shared \
    && mkdir -p /data/storage/pillar \
    && mkdir -p /data/config \
    && mkdir -p /data/storage/logs

ENV APACHE_RUN_USER www-data
ENV APACHE_RUN_GROUP www-data
ENV APACHE_LOG_DIR /var/log/apache2
ENV APACHE_PID_FILE /var/run/apache2.pid
ENV APACHE_RUN_DIR /var/run/apache2
ENV APACHE_LOCK_DIR /var/lock/apache2

RUN mkdir -p $APACHE_RUN_DIR $APACHE_LOCK_DIR $APACHE_LOG_DIR

RUN pip install virtualenv \
    && virtualenv /data/venv

ENV PIP_PACKAGES_VERSION = 2
ADD requirements.txt /requirements.txt

RUN . /data/venv/bin/activate \
    && pip install -r /requirements.txt

VOLUME /data/git/pillar
VOLUME /data/config
VOLUME /data/storage/shared
VOLUME /data/storage/pillar

ENV MONGO_HOST mongo_pillar

EXPOSE 80

ADD 000-default.conf /etc/apache2/sites-available/000-default.conf

CMD ["/usr/sbin/apache2", "-D", "FOREGROUND"]
gulp (Executable file, 19 changed lines)
@@ -0,0 +1,19 @@
#!/bin/bash -ex

GULP=./node_modules/.bin/gulp

function install() {
    npm install
    touch $GULP  # installer doesn't always touch this after a build, so we do.
}

# Rebuild Gulp if missing or outdated.
[ -e $GULP ] || install
[ gulpfile.js -nt $GULP ] && install

if [ "$1" == "watch" ]; then
    # Treat "gulp watch" as "gulp && gulp watch"
    $GULP
fi

exec $GULP "$@"
gulpfile.js (Normal file, 140 changed lines)
@@ -0,0 +1,140 @@
var argv = require('minimist')(process.argv.slice(2));
var autoprefixer = require('gulp-autoprefixer');
var cache = require('gulp-cached');
var chmod = require('gulp-chmod');
var concat = require('gulp-concat');
var git = require('gulp-git');
var gulpif = require('gulp-if');
var gulp = require('gulp');
var livereload = require('gulp-livereload');
var plumber = require('gulp-plumber');
var pug = require('gulp-pug');
var rename = require('gulp-rename');
var sass = require('gulp-sass');
var sourcemaps = require('gulp-sourcemaps');
var uglify = require('gulp-uglify');

var enabled = {
    uglify: argv.production,
    maps: argv.production,
    failCheck: !argv.production,
    prettyPug: !argv.production,
    cachify: !argv.production,
    cleanup: argv.production,
};

var destination = {
    css: 'pillar/web/static/assets/css',
    pug: 'pillar/web/templates',
    js: 'pillar/web/static/assets/js',
}


/* CSS */
gulp.task('styles', function() {
    gulp.src('src/styles/**/*.sass')
        .pipe(gulpif(enabled.failCheck, plumber()))
        .pipe(gulpif(enabled.maps, sourcemaps.init()))
        .pipe(sass({
            outputStyle: 'compressed'}
            ))
        .pipe(autoprefixer("last 3 versions"))
        .pipe(gulpif(enabled.maps, sourcemaps.write(".")))
        .pipe(gulp.dest(destination.css))
        .pipe(gulpif(argv.livereload, livereload()));
});


/* Templates - Pug */
gulp.task('templates', function() {
    gulp.src('src/templates/**/*.pug')
        .pipe(gulpif(enabled.failCheck, plumber()))
        .pipe(gulpif(enabled.cachify, cache('templating')))
        .pipe(pug({
            pretty: enabled.prettyPug
        }))
        .pipe(gulp.dest(destination.pug))
        .pipe(gulpif(argv.livereload, livereload()));
});


/* Individual Uglified Scripts */
gulp.task('scripts', function() {
    gulp.src('src/scripts/*.js')
        .pipe(gulpif(enabled.failCheck, plumber()))
        .pipe(gulpif(enabled.cachify, cache('scripting')))
        .pipe(gulpif(enabled.maps, sourcemaps.init()))
        .pipe(gulpif(enabled.uglify, uglify()))
        .pipe(rename({suffix: '.min'}))
        .pipe(gulpif(enabled.maps, sourcemaps.write(".")))
        .pipe(chmod(644))
        .pipe(gulp.dest(destination.js))
        .pipe(gulpif(argv.livereload, livereload()));
});


/* Collection of scripts in src/scripts/tutti/ to merge into tutti.min.js */
/* Since it's always loaded, it's only for functions that we want site-wide */
gulp.task('scripts_concat_tutti', function() {
    gulp.src('src/scripts/tutti/**/*.js')
        .pipe(gulpif(enabled.failCheck, plumber()))
        .pipe(gulpif(enabled.maps, sourcemaps.init()))
        .pipe(concat("tutti.min.js"))
        .pipe(gulpif(enabled.uglify, uglify()))
        .pipe(gulpif(enabled.maps, sourcemaps.write(".")))
        .pipe(chmod(644))
        .pipe(gulp.dest(destination.js))
        .pipe(gulpif(argv.livereload, livereload()));
});

gulp.task('scripts_concat_markdown', function() {
    gulp.src('src/scripts/markdown/**/*.js')
        .pipe(gulpif(enabled.failCheck, plumber()))
        .pipe(gulpif(enabled.maps, sourcemaps.init()))
        .pipe(concat("markdown.min.js"))
        .pipe(gulpif(enabled.uglify, uglify()))
        .pipe(gulpif(enabled.maps, sourcemaps.write(".")))
        .pipe(chmod(644))
        .pipe(gulp.dest(destination.js))
        .pipe(gulpif(argv.livereload, livereload()));
});


// While developing, run 'gulp watch'
gulp.task('watch',function() {
    // Only listen for live reloads if ran with --livereload
    if (argv.livereload){
        livereload.listen();
    }

    gulp.watch('src/styles/**/*.sass',['styles']);
    gulp.watch('src/templates/**/*.pug',['templates']);
    gulp.watch('src/scripts/*.js',['scripts']);
    gulp.watch('src/scripts/tutti/**/*.js',['scripts_concat_tutti']);
    gulp.watch('src/scripts/markdown/**/*.js',['scripts_concat_markdown']);
});

// Erases all generated files in output directories.
gulp.task('cleanup', function() {
    var paths = [];
    for (attr in destination) {
        paths.push(destination[attr]);
    }

    git.clean({ args: '-f -X ' + paths.join(' ') }, function (err) {
        if(err) throw err;
    });

});


// Run 'gulp' to build everything at once
var tasks = [];
if (enabled.cleanup) tasks.push('cleanup');
gulp.task('default', tasks.concat([
    'styles',
    'templates',
    'scripts',
    'scripts_concat_tutti',
    'scripts_concat_markdown',
]));
package-lock.json (generated, Normal file, 5452 changed lines)
File diff suppressed because it is too large.
package.json (Normal file, 26 changed lines)
@@ -0,0 +1,26 @@
{
    "name": "pillar",
    "license": "GPL-2.0+",
    "author": "Blender Institute",
    "repository": {
        "type": "git",
        "url": "https://github.com/armadillica/pillar.git"
    },
    "devDependencies": {
        "gulp": "~3.9.1",
        "gulp-autoprefixer": "~2.3.1",
        "gulp-cached": "~1.1.0",
        "gulp-chmod": "~1.3.0",
        "gulp-concat": "~2.6.0",
        "gulp-if": "^2.0.1",
        "gulp-git": "~2.4.2",
        "gulp-livereload": "~3.8.1",
        "gulp-plumber": "~1.1.0",
        "gulp-pug": "~3.2.0",
        "gulp-rename": "~1.2.2",
        "gulp-sass": "~2.3.1",
        "gulp-sourcemaps": "~1.6.0",
        "gulp-uglify": "~1.5.3",
        "minimist": "^1.2.0"
    }
}
921
pillar/__init__.py
Normal file
921
pillar/__init__.py
Normal file
@@ -0,0 +1,921 @@
|
||||
"""Pillar server."""
|
||||
|
||||
import collections
|
||||
import contextlib
|
||||
import copy
|
||||
import json
|
||||
import logging
|
||||
import logging.config
|
||||
import subprocess
|
||||
import tempfile
|
||||
import typing
|
||||
import os
|
||||
import os.path
|
||||
import pathlib
|
||||
|
||||
import jinja2
|
||||
from eve import Eve
|
||||
import flask
|
||||
from flask import g, render_template, request
|
||||
from flask_babel import Babel, gettext as _
|
||||
from flask.templating import TemplateNotFound
|
||||
import pymongo.database
|
||||
from werkzeug.local import LocalProxy
|
||||
|
||||
|
||||
# Declare pillar.current_app before importing other Pillar modules.
|
||||
def _get_current_app():
|
||||
"""Returns the current application."""
|
||||
|
||||
return flask.current_app
|
||||
|
||||
|
||||
current_app: 'PillarServer' = LocalProxy(_get_current_app)
|
||||
"""the current app, annotated as PillarServer"""
|
||||
|
||||
from pillar.api import custom_field_validation
|
||||
from pillar.api.utils import authentication
|
||||
import pillar.web.jinja
|
||||
|
||||
from . import api
|
||||
from . import web
|
||||
from . import auth
|
||||
from . import sentry_extra
|
||||
import pillar.api.organizations
|
||||
|
||||
empty_settings = {
|
||||
# Use a random URL prefix when booting Eve, to ensure that any
|
||||
# Flask route that's registered *before* we load our own config
|
||||
# won't interfere with Pillar itself.
|
||||
'URL_PREFIX': 'pieQui4vah9euwieFai6naivaV4thahchoochiiwazieBe5o',
|
||||
'DOMAIN': {},
|
||||
}
|
||||
|
||||
|
||||
class ConfigurationMissingError(SystemExit):
|
||||
"""Raised when a vital configuration key is missing.
|
||||
|
||||
Causes Python to exit.
|
||||
"""
|
||||
|
||||
|
||||
class BlinkerCompatibleEve(Eve):
|
||||
"""Workaround for https://github.com/pyeve/eve/issues/1087"""
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name in {"im_self", "im_func"}:
|
||||
raise AttributeError("type object '%s' has no attribute '%s'" %
|
||||
(self.__class__.__name__, name))
|
||||
return super().__getattr__(name)
|
||||
|
||||
|
||||
class PillarServer(BlinkerCompatibleEve):
|
||||
def __init__(self, app_root, **kwargs):
|
||||
from .extension import PillarExtension
|
||||
from celery import Celery
|
||||
from flask_wtf.csrf import CSRFProtect
|
||||
|
||||
kwargs.setdefault('validator', custom_field_validation.ValidateCustomFields)
|
||||
super(PillarServer, self).__init__(settings=empty_settings, **kwargs)
|
||||
|
||||
# mapping from extension name to extension object.
|
||||
map_type = typing.MutableMapping[str, PillarExtension]
|
||||
self.pillar_extensions: map_type = collections.OrderedDict()
|
||||
self.pillar_extensions_template_paths = [] # list of paths
|
||||
|
||||
# The default roles Pillar uses. Will probably all move to extensions at some point.
|
||||
self._user_roles: typing.Set[str] = {
|
||||
'demo', 'admin', 'subscriber', 'homeproject',
|
||||
'protected', 'org-subscriber', 'video-encoder',
|
||||
'service', 'badger', 'svner',
|
||||
}
|
||||
self._user_roles_indexable: typing.Set[str] = {'demo', 'admin', 'subscriber'}
|
||||
|
||||
# Mapping from role name to capabilities given to that role.
|
||||
self._user_caps: typing.MutableMapping[str, typing.FrozenSet[str]] = \
|
||||
collections.defaultdict(frozenset)
|
||||
|
||||
self.app_root = os.path.abspath(app_root)
|
||||
self._load_flask_config()
|
||||
self._config_logging()
|
||||
|
||||
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
|
||||
self.log.info('Creating new instance from %r', self.app_root)
|
||||
|
||||
self._config_url_map()
|
||||
self._config_auth_token_hmac_key()
|
||||
self._config_tempdirs()
|
||||
self._config_git()
|
||||
|
||||
self.sentry: typing.Optional[sentry_extra.PillarSentry] = None
|
||||
self._config_sentry()
|
||||
self._config_google_cloud_storage()
|
||||
|
||||
self.algolia_index_users = None
|
||||
self.algolia_index_nodes = None
|
||||
self.algolia_client = None
|
||||
self._config_algolia()
|
||||
|
||||
self.encoding_service_client = None
|
||||
self._config_encoding_backend()
|
||||
|
||||
try:
|
||||
self.settings = os.environ['EVE_SETTINGS']
|
||||
except KeyError:
|
||||
self.settings = os.path.join(os.path.dirname(os.path.abspath(__file__)),
|
||||
'api', 'eve_settings.py')
|
||||
# self.settings = self.config['EVE_SETTINGS_PATH']
|
||||
self.load_config()
|
||||
self._validate_config()
|
||||
|
||||
# Configure authentication
|
||||
self.login_manager = auth.config_login_manager(self)
|
||||
|
||||
self._config_caching()
|
||||
|
||||
self._config_translations()
|
||||
|
||||
# Celery itself is configured after all extensions have loaded.
|
||||
self.celery: Celery = None
|
||||
|
||||
self.org_manager = pillar.api.organizations.OrgManager()
|
||||
|
||||
self.before_first_request(self.setup_db_indices)
|
||||
|
||||
# Make CSRF protection available to the application. By default it is
|
||||
# disabled on all endpoints. More info at WTF_CSRF_CHECK_DEFAULT in config.py
|
||||
self.csrf = CSRFProtect(self)
|
||||
|
||||
def _validate_config(self):
|
||||
if not self.config.get('SECRET_KEY'):
|
||||
raise ConfigurationMissingError('SECRET_KEY configuration key is missing')
|
||||
|
||||
server_name = self.config.get('SERVER_NAME')
|
||||
if not server_name:
|
||||
raise ConfigurationMissingError('SERVER_NAME configuration key is missing, should be a '
|
||||
'FQDN with TLD')
|
||||
if server_name != 'localhost' and '.' not in server_name:
|
||||
raise ConfigurationMissingError('SERVER_NAME should contain a FQDN with TLD')
|
||||
|
||||
def _load_flask_config(self):
|
||||
# Load configuration from different sources, to make it easy to override
|
||||
# settings with secrets, as well as for development & testing.
|
||||
self.config.from_pyfile(os.path.join(os.path.dirname(__file__), 'config.py'), silent=False)
|
||||
self.config.from_pyfile(os.path.join(self.app_root, 'config.py'), silent=True)
|
||||
self.config.from_pyfile(os.path.join(self.app_root, 'config_local.py'), silent=True)
|
||||
from_envvar = os.environ.get('PILLAR_CONFIG')
|
||||
if from_envvar:
|
||||
# Don't use from_envvar, as we want different behaviour. If the envvar
|
||||
# is not set, it's fine (i.e. silent=True), but if it is set and the
|
||||
# configfile doesn't exist, it should error out (i.e. silent=False).
|
||||
self.config.from_pyfile(from_envvar, silent=False)
|
||||
|
||||
def _config_logging(self):
|
||||
# Configure logging
|
||||
logging.config.dictConfig(self.config['LOGGING'])
|
||||
log = logging.getLogger(__name__)
|
||||
if self.config['DEBUG']:
|
||||
log.info('Pillar starting, debug=%s', self.config['DEBUG'])
|
||||
|
||||
def _config_url_map(self):
|
||||
"""Extend Flask url_map with our own converters."""
|
||||
import secrets, re
|
||||
from . import flask_extra
|
||||
|
||||
if not self.config.get('STATIC_FILE_HASH'):
|
||||
self.log.warning('STATIC_FILE_HASH is empty, generating random one')
|
||||
h = re.sub(r'[_.~-]', '', secrets.token_urlsafe())[:8]
|
||||
self.config['STATIC_FILE_HASH'] = h
|
||||
|
||||
self.url_map.converters['hashed_path'] = flask_extra.HashedPathConverter
|
||||
|
||||
def _config_auth_token_hmac_key(self):
|
||||
"""Load AUTH_TOKEN_HMAC_KEY, falling back to SECRET_KEY."""
|
||||
|
||||
hmac_key = self.config.get('AUTH_TOKEN_HMAC_KEY')
|
||||
if not hmac_key:
|
||||
self.log.warning('AUTH_TOKEN_HMAC_KEY not set, falling back to SECRET_KEY')
|
||||
hmac_key = self.config['AUTH_TOKEN_HMAC_KEY'] = self.config['SECRET_KEY']
|
||||
|
||||
if isinstance(hmac_key, str):
|
||||
self.log.warning('Converting AUTH_TOKEN_HMAC_KEY to bytes')
|
||||
self.config['AUTH_TOKEN_HMAC_KEY'] = hmac_key.encode('utf8')
|
||||
|
||||
def _config_tempdirs(self):
|
||||
storage_dir = self.config['STORAGE_DIR']
|
||||
if not os.path.exists(storage_dir):
|
||||
self.log.info('Creating storage directory %r', storage_dir)
|
||||
os.makedirs(storage_dir)
|
||||
|
||||
# Set the TMP environment variable to manage where uploads are stored.
|
||||
# These are all used by tempfile.mkstemp(), but we don't knwow in whic
|
||||
# order. As such, we remove all used variables but the one we set.
|
||||
tempfile.tempdir = storage_dir
|
||||
os.environ['TMP'] = storage_dir
|
||||
os.environ.pop('TEMP', None)
|
||||
os.environ.pop('TMPDIR', None)
|
||||
|
||||
def _config_git(self):
|
||||
# Get the Git hash
|
||||
try:
|
||||
git_cmd = ['git', '-C', self.app_root, 'describe', '--always']
|
||||
description = subprocess.check_output(git_cmd)
|
||||
self.config['GIT_REVISION'] = description.strip()
|
||||
except (subprocess.CalledProcessError, OSError) as ex:
|
||||
self.log.warning('Unable to run "git describe" to get git revision: %s', ex)
|
||||
self.config['GIT_REVISION'] = 'unknown'
|
||||
self.log.info('Git revision %r', self.config['GIT_REVISION'])
|
||||
|
||||
def _config_sentry(self):
|
||||
# TODO(Sybren): keep Sentry unconfigured when running CLI commands.
|
||||
sentry_dsn = self.config.get('SENTRY_CONFIG', {}).get('dsn')
|
||||
if self.config.get('TESTING') or sentry_dsn in {'', '-set-in-config-local-'}:
|
||||
self.log.warning('Sentry NOT configured.')
|
||||
self.sentry = None
|
||||
return
|
||||
|
||||
self.sentry = sentry_extra.PillarSentry(
|
||||
self, logging=True, level=logging.WARNING,
|
||||
logging_exclusions=('werkzeug',))
|
||||
self.log.debug('Sentry setup complete')
|
||||
|
||||
def _config_google_cloud_storage(self):
|
||||
# Google Cloud project
|
||||
try:
|
||||
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = \
|
||||
self.config['GCLOUD_APP_CREDENTIALS']
|
||||
except KeyError:
|
||||
raise ConfigurationMissingError('GCLOUD_APP_CREDENTIALS configuration is missing')
|
||||
|
||||
# Storage backend (GCS)
|
||||
try:
|
||||
os.environ['GCLOUD_PROJECT'] = self.config['GCLOUD_PROJECT']
|
||||
except KeyError:
|
||||
raise ConfigurationMissingError('GCLOUD_PROJECT configuration value is missing')
|
||||
|
||||
def _config_algolia(self):
|
||||
# Algolia search
|
||||
if 'algolia' not in self.config['SEARCH_BACKENDS']:
|
||||
return
|
||||
|
||||
from algoliasearch import algoliasearch
|
||||
|
||||
client = algoliasearch.Client(self.config['ALGOLIA_USER'],
|
||||
self.config['ALGOLIA_API_KEY'])
|
||||
self.algolia_client = client
|
||||
self.algolia_index_users = client.init_index(self.config['ALGOLIA_INDEX_USERS'])
|
||||
self.algolia_index_nodes = client.init_index(self.config['ALGOLIA_INDEX_NODES'])
|
||||
|
||||
def _config_encoding_backend(self):
|
||||
# Encoding backend
|
||||
if self.config['ENCODING_BACKEND'] != 'zencoder':
|
||||
self.log.warning('Encoding backend %r not supported, no video encoding possible!',
|
||||
self.config['ENCODING_BACKEND'])
|
||||
return
|
||||
|
||||
self.log.info('Setting up video encoding backend %r',
|
||||
self.config['ENCODING_BACKEND'])
|
||||
|
||||
from zencoder import Zencoder
|
||||
self.encoding_service_client = Zencoder(self.config['ZENCODER_API_KEY'])
|
||||
|
||||
def _config_caching(self):
|
||||
from flask_cache import Cache
|
||||
self.cache = Cache(self)
|
||||
|
||||
def set_languages(self, translations_folder: pathlib.Path):
|
||||
"""Set the supported languages based on translations folders
|
||||
|
||||
English is an optional language included by default, since we will
|
||||
never have a translations folder for it.
|
||||
"""
|
||||
self.default_locale = self.config['DEFAULT_LOCALE']
|
||||
self.config['BABEL_DEFAULT_LOCALE'] = self.default_locale
|
||||
|
||||
# Determine available languages.
|
||||
languages = list()
|
||||
|
||||
# The available languages will be determined based on available
|
||||
# translations in the //translations/ folder. The exception is (American) English
|
||||
# since all the text is originally in English already.
|
||||
# That said, if rare occasions we may want to never show
|
||||
# the site in English.
|
||||
|
||||
if self.config['SUPPORT_ENGLISH']:
|
||||
languages.append('en_US')
|
||||
|
||||
base_path = pathlib.Path(self.app_root) / 'translations'
|
||||
|
||||
if not base_path.is_dir():
|
||||
self.log.debug('Project has no translations folder: %s', base_path)
|
||||
else:
|
||||
languages.extend(i.name for i in base_path.iterdir() if i.is_dir())
|
||||
|
||||
# Use set for quicker lookup
|
||||
self.languages = set(languages)
|
||||
|
||||
self.log.info('Available languages: %s', ', '.join(self.languages))
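# Illustration (assumed folder layout): with SUPPORT_ENGLISH = True and a
# translations/ folder containing 'nl_NL/' and 'pt_BR/' subfolders,
# self.languages becomes {'en_US', 'nl_NL', 'pt_BR'}.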
|
||||
|
||||
def _config_translations(self):
|
||||
"""
|
||||
Initialize translations variable.
|
||||
|
||||
BABEL_TRANSLATION_DIRECTORIES points to the folder with the compiled
translation files. Extension folders are appended to it, separated by ';'.
|
||||
"""
|
||||
self.log.info('Configure translations')
|
||||
translations_path = pathlib.Path(__file__).parents[1].joinpath('translations')
|
||||
|
||||
self.config['BABEL_TRANSLATION_DIRECTORIES'] = str(translations_path)
|
||||
babel = Babel(self)
|
||||
|
||||
self.set_languages(translations_path)
|
||||
|
||||
# get_locale() is registered as a callback for locale selection.
|
||||
# That prevents the function from being garbage collected.
|
||||
@babel.localeselector
|
||||
def get_locale() -> str:
|
||||
"""
|
||||
Callback runs before each request to give us a chance to choose the
|
||||
language to use when producing its response.
|
||||
|
||||
We set g.locale to be able to access it from the template pages.
|
||||
We still need to return it explicitly, since this function is
|
||||
called as part of the babel translation framework.
|
||||
|
||||
We use the 'Accept-Language' request header to match the available
translations against the languages the user's browser supports.
|
||||
"""
|
||||
locale = request.accept_languages.best_match(
|
||||
self.languages, self.default_locale)
|
||||
g.locale = locale
|
||||
return locale
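# Sketch of the selection: whichever entry of self.languages best matches the
# request's Accept-Language header wins; if nothing matches (or the header is
# absent), best_match() falls back to self.default_locale, e.g. 'en_US'.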
|
||||
|
||||
def load_extension(self, pillar_extension, url_prefix):
|
||||
from .extension import PillarExtension
|
||||
|
||||
if not isinstance(pillar_extension, PillarExtension):
|
||||
if self.config.get('DEBUG'):
|
||||
for cls in type(pillar_extension).mro():
|
||||
self.log.error('class %42r (%i) is %42r (%i): %s',
|
||||
cls, id(cls), PillarExtension, id(PillarExtension),
|
||||
cls is PillarExtension)
|
||||
raise AssertionError('Extension has wrong type %r' % type(pillar_extension))
|
||||
self.log.info('Loading extension %s', pillar_extension.name)
|
||||
|
||||
# Remember this extension, and disallow duplicates.
|
||||
if pillar_extension.name in self.pillar_extensions:
|
||||
raise ValueError('Extension with name %s already loaded' % pillar_extension.name)
|
||||
self.pillar_extensions[pillar_extension.name] = pillar_extension
|
||||
|
||||
# Load extension Flask configuration
|
||||
for key, value in pillar_extension.flask_config().items():
|
||||
self.config.setdefault(key, value)
|
||||
|
||||
# Load extension blueprint(s)
|
||||
for blueprint in pillar_extension.blueprints():
|
||||
if blueprint.url_prefix:
|
||||
if not url_prefix:
|
||||
# If we registered the extension with url_prefix=None
|
||||
url_prefix = ''
|
||||
blueprint_prefix = url_prefix + blueprint.url_prefix
|
||||
else:
|
||||
blueprint_prefix = url_prefix
|
||||
self.register_blueprint(blueprint, url_prefix=blueprint_prefix)
|
||||
|
||||
# Load template paths
|
||||
tpath = pillar_extension.template_path
|
||||
if tpath:
|
||||
self.log.info('Extension %s: adding template path %s',
|
||||
pillar_extension.name, tpath)
|
||||
if not os.path.exists(tpath):
|
||||
raise ValueError('Template path %s for extension %s does not exist.'
                 % (tpath, pillar_extension.name))
|
||||
self.pillar_extensions_template_paths.append(tpath)
|
||||
|
||||
# Load extension Eve settings
|
||||
eve_settings = pillar_extension.eve_settings()
|
||||
|
||||
if 'DOMAIN' in eve_settings:
|
||||
pillar_ext_prefix = pillar_extension.name + '_'
|
||||
pillar_url_prefix = pillar_extension.name + '/'
|
||||
for key, collection in eve_settings['DOMAIN'].items():
|
||||
assert key.startswith(pillar_ext_prefix), \
|
||||
'Eve collection names of %s MUST start with %r' % \
|
||||
(pillar_extension.name, pillar_ext_prefix)
|
||||
url = key.replace(pillar_ext_prefix, pillar_url_prefix)
|
||||
|
||||
collection.setdefault('datasource', {}).setdefault('source', key)
|
||||
collection.setdefault('url', url)
|
||||
|
||||
self.config['DOMAIN'].update(eve_settings['DOMAIN'])
|
||||
|
||||
# Configure the extension translations
|
||||
trpath = pillar_extension.translations_path
|
||||
if not trpath:
|
||||
self.log.debug('Extension %s does not have a translations folder',
|
||||
pillar_extension.name)
|
||||
return
|
||||
|
||||
self.log.info('Extension %s: adding translations path %s',
|
||||
pillar_extension.name, trpath)
|
||||
|
||||
# Babel requires semi-colon string separation
|
||||
self.config['BABEL_TRANSLATION_DIRECTORIES'] += ';' + str(trpath)
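# Minimal sketch (hypothetical extension, illustrative only) of what
# load_extension() consumes; the method names follow the PillarExtension
# interface used above, everything else is made up:
#
#   class DemoExtension(PillarExtension):
#       name = 'demo'
#
#       def flask_config(self) -> dict:
#           return {'DEMO_GREETING': 'hi'}
#
#       def blueprints(self) -> list:
#           return [demo_blueprint]  # blueprint with url_prefix='/demo'
#
#       def eve_settings(self) -> dict:
#           # Eve collection names MUST start with '<name>_', as asserted above.
#           return {'DOMAIN': {'demo_items': {'schema': {'text': {'type': 'string'}}}}}
#
#   app.load_extension(DemoExtension(), url_prefix='/demo')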
|
||||
|
||||
def _config_jinja_env(self):
|
||||
# Start with the extensions...
|
||||
paths_list = [
|
||||
jinja2.FileSystemLoader(path)
|
||||
for path in reversed(self.pillar_extensions_template_paths)
|
||||
]
|
||||
|
||||
# ...then load Pillar paths.
|
||||
pillar_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
parent_theme_path = os.path.join(pillar_dir, 'web', 'templates')
|
||||
current_path = os.path.join(self.app_root, 'templates')
|
||||
paths_list += [
|
||||
jinja2.FileSystemLoader(current_path),
|
||||
jinja2.FileSystemLoader(parent_theme_path),
|
||||
self.jinja_loader
|
||||
]
|
||||
# Set up a custom loader, so that Jinja searches for a theme file first
|
||||
# in the current theme dir, and if it fails it searches in the default
|
||||
# location.
|
||||
custom_jinja_loader = jinja2.ChoiceLoader(paths_list)
|
||||
self.jinja_loader = custom_jinja_loader
|
||||
|
||||
pillar.web.jinja.setup_jinja_env(self.jinja_env, self.config)
|
||||
|
||||
# Register context processors from extensions
|
||||
for ext in self.pillar_extensions.values():
|
||||
if not ext.has_context_processor:
|
||||
continue
|
||||
|
||||
self.log.debug('Registering context processor for %s', ext.name)
|
||||
self.context_processor(ext.context_processor)
|
||||
|
||||
def _config_static_dirs(self):
|
||||
# Setup static folder for the instanced app
|
||||
self.static_folder = os.path.join(self.app_root, 'static')
|
||||
|
||||
# Setup static folder for Pillar
|
||||
pillar_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
pillar_static_folder = os.path.join(pillar_dir, 'web', 'static')
|
||||
self.register_static_file_endpoint('/static/pillar', 'static_pillar', pillar_static_folder)
|
||||
|
||||
# Setup static folders for extensions
|
||||
for name, ext in self.pillar_extensions.items():
|
||||
if not ext.static_path:
|
||||
continue
|
||||
self.register_static_file_endpoint('/static/%s' % name,
|
||||
'static_%s' % name,
|
||||
ext.static_path)
|
||||
|
||||
def _config_celery(self):
|
||||
from celery import Celery
|
||||
|
||||
self.log.info('Configuring Celery')
|
||||
|
||||
# Pillar-defined Celery task modules:
|
||||
celery_task_modules = [
|
||||
'pillar.celery.tasks',
|
||||
'pillar.celery.search_index_tasks',
|
||||
'pillar.celery.file_link_tasks',
|
||||
'pillar.celery.email_tasks',
|
||||
]
|
||||
|
||||
# Allow Pillar extensions to define their own Celery tasks.
|
||||
for extension in self.pillar_extensions.values():
|
||||
celery_task_modules.extend(extension.celery_task_modules)
|
||||
|
||||
self.celery = Celery(
|
||||
'pillar.celery',
|
||||
backend=self.config['CELERY_BACKEND'],
|
||||
broker=self.config['CELERY_BROKER'],
|
||||
include=celery_task_modules,
|
||||
task_track_started=True,
|
||||
result_expires=3600,
|
||||
)
|
||||
|
||||
# This configures the Celery task scheduler in such a way that we don't
|
||||
# have to import the pillar.celery.XXX modules. Remember to run
|
||||
# 'manage.py celery beat' too, otherwise those will never run.
|
||||
beat_schedule = self.config.get('CELERY_BEAT_SCHEDULE')
|
||||
if beat_schedule:
|
||||
self.celery.conf.beat_schedule = beat_schedule
|
||||
|
||||
self.log.info('Pinging Celery workers')
|
||||
self.log.info('Response: %s', self.celery.control.ping())
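# Example beat schedule (hypothetical task name and interval), set via the
# CELERY_BEAT_SCHEDULE config value read above:
#   CELERY_BEAT_SCHEDULE = {
#       'regenerate-expired-file-links': {
#           'task': 'pillar.celery.file_link_tasks.regenerate_all_expired_links',
#           'schedule': 600,  # seconds
#       },
#   }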
|
||||
|
||||
def _config_user_roles(self):
|
||||
"""Gathers all user roles from extensions.
|
||||
|
||||
The union of all user roles can be obtained from self.user_roles.
|
||||
"""
|
||||
|
||||
for extension in self.pillar_extensions.values():
|
||||
indexed_but_not_defined = extension.user_roles_indexable - extension.user_roles
|
||||
if indexed_but_not_defined:
|
||||
raise ValueError('Extension %s has roles %s indexable but not in user_roles'
                 % (extension.name, indexed_but_not_defined))
|
||||
|
||||
self._user_roles.update(extension.user_roles)
|
||||
self._user_roles_indexable.update(extension.user_roles_indexable)
|
||||
|
||||
self.log.info('Loaded %i user roles from extensions, %i of which are indexable',
|
||||
len(self._user_roles), len(self._user_roles_indexable))
|
||||
|
||||
def _config_user_caps(self):
|
||||
"""Merges all capability settings from app config and extensions."""
|
||||
|
||||
app_caps = collections.defaultdict(frozenset, **self.config['USER_CAPABILITIES'])
|
||||
|
||||
for extension in self.pillar_extensions.values():
|
||||
ext_caps = extension.user_caps
|
||||
|
||||
for role, caps in ext_caps.items():
|
||||
union_caps = frozenset(app_caps[role] | caps)
|
||||
app_caps[role] = union_caps
|
||||
|
||||
self._user_caps = app_caps
|
||||
|
||||
if self.log.isEnabledFor(logging.DEBUG):
|
||||
import pprint
|
||||
self.log.debug('Configured user capabilities: %s', pprint.pformat(self._user_caps))
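# Illustration (made-up role and capability names): if the app config has
#   USER_CAPABILITIES = {'subscriber': {'subscribe'}}
# and an extension's user_caps contains {'subscriber': {'attract-use'}}, the
# merged self._user_caps maps 'subscriber' to frozenset({'subscribe', 'attract-use'}).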
|
||||
|
||||
def register_static_file_endpoint(self, url_prefix, endpoint_name, static_folder):
|
||||
from pillar.web.staticfile import PillarStaticFile
|
||||
|
||||
view_func = PillarStaticFile.as_view(endpoint_name, static_folder=static_folder)
|
||||
self.add_url_rule(f'{url_prefix}/<hashed_path:filename>', view_func=view_func)
|
||||
|
||||
def process_extensions(self):
|
||||
"""This is about Eve extensions, not Pillar extensions."""
|
||||
|
||||
# Re-initialise Eve after we allowed Pillar submodules to be loaded.
|
||||
# EVIL STARTS HERE. It just copies part of the Eve.__init__() method.
|
||||
self.set_defaults()
|
||||
self.validate_config()
|
||||
self.validate_domain_struct()
|
||||
|
||||
self._init_url_rules()
|
||||
self._init_media_endpoint()
|
||||
self._init_schema_endpoint()
|
||||
|
||||
if self.config['OPLOG'] is True:
|
||||
self._init_oplog()
|
||||
|
||||
domain_copy = copy.deepcopy(self.config['DOMAIN'])
|
||||
for resource, settings in domain_copy.items():
|
||||
self.register_resource(resource, settings)
|
||||
|
||||
self.register_error_handlers()
|
||||
# EVIL ENDS HERE. No guarantees, though.
|
||||
|
||||
self.finish_startup()
|
||||
|
||||
def register_error_handlers(self):
|
||||
super(PillarServer, self).register_error_handlers()
|
||||
|
||||
# Register error handlers per code.
|
||||
for code in (403, 404, 412, 500):
|
||||
self.register_error_handler(code, self.pillar_error_handler)
|
||||
|
||||
# Register error handlers per exception.
|
||||
from pillarsdk import exceptions as sdk_exceptions
|
||||
|
||||
sdk_handlers = [
|
||||
(sdk_exceptions.UnauthorizedAccess, self.handle_sdk_unauth),
|
||||
(sdk_exceptions.ForbiddenAccess, self.handle_sdk_forbidden),
|
||||
(sdk_exceptions.ResourceNotFound, self.handle_sdk_resource_not_found),
|
||||
(sdk_exceptions.ResourceInvalid, self.handle_sdk_resource_invalid),
|
||||
(sdk_exceptions.MethodNotAllowed, self.handle_sdk_method_not_allowed),
|
||||
(sdk_exceptions.PreconditionFailed, self.handle_sdk_precondition_failed),
|
||||
]
|
||||
|
||||
for (eclass, handler) in sdk_handlers:
|
||||
self.register_error_handler(eclass, handler)
|
||||
|
||||
def handle_sdk_unauth(self, error):
|
||||
"""Global exception handling for pillarsdk UnauthorizedAccess
|
||||
Currently the API is fully locked down, so we need to constantly
check for user authorization.
|
||||
"""
|
||||
|
||||
return flask.redirect(flask.url_for('users.login'))
|
||||
|
||||
def handle_sdk_forbidden(self, error):
|
||||
self.log.info('Forwarding ForbiddenAccess exception to client: %s', error, exc_info=True)
|
||||
error.code = 403
|
||||
return self.pillar_error_handler(error)
|
||||
|
||||
def handle_sdk_resource_not_found(self, error):
|
||||
self.log.info('Forwarding ResourceNotFound exception to client: %s', error, exc_info=True)
|
||||
|
||||
content = getattr(error, 'content', None)
|
||||
if content:
|
||||
try:
|
||||
error_content = json.loads(content)
|
||||
except ValueError:
|
||||
error_content = None
|
||||
|
||||
if error_content and error_content.get('_deleted', False):
|
||||
# This document used to exist, but doesn't any more. Let the user know.
|
||||
doc_name = error_content.get('name')
|
||||
node_type = error_content.get('node_type')
|
||||
if node_type:
|
||||
node_type = node_type.replace('_', ' ').title()
|
||||
if doc_name:
|
||||
description = '%s "%s" was deleted.' % (node_type, doc_name)
|
||||
else:
|
||||
description = 'This %s was deleted.' % (node_type,)
|
||||
else:
|
||||
if doc_name:
|
||||
description = '"%s" was deleted.' % doc_name
|
||||
else:
|
||||
description = None
|
||||
|
||||
error.description = description
|
||||
|
||||
error.code = 404
|
||||
return self.pillar_error_handler(error)
|
||||
|
||||
def handle_sdk_precondition_failed(self, error):
|
||||
self.log.info('Forwarding PreconditionFailed exception to client: %s', error)
|
||||
|
||||
error.code = 412
|
||||
return self.pillar_error_handler(error)
|
||||
|
||||
def handle_sdk_resource_invalid(self, error):
|
||||
self.log.info('Forwarding ResourceInvalid exception to client: %s', error, exc_info=True)
|
||||
|
||||
# Raising a Werkzeug 422 exception doesn't work, as Flask turns it into a 500.
|
||||
return _('The submitted data could not be validated.'), 422
|
||||
|
||||
def handle_sdk_method_not_allowed(self, error):
|
||||
"""Forwards 405 Method Not Allowed to the client.
|
||||
|
||||
This is actually not fair, as a 405 between Pillar and Pillar-Web
|
||||
doesn't imply that the request the client did on Pillar-Web is not
|
||||
allowed. However, it does allow us to debug this if it happens, by
|
||||
watching for 405s in the browser.
|
||||
"""
|
||||
from flask import request
|
||||
|
||||
self.log.info('Forwarding MethodNotAllowed exception to client: %s', error, exc_info=True)
|
||||
self.log.info('HTTP Referer is %r', request.referrer)
|
||||
|
||||
# Raising a Werkzeug 405 exception doesn't work, as Flask turns it into a 500.
|
||||
return 'The requested HTTP method is not allowed on this URL.', 405
|
||||
|
||||
def pillar_error_handler(self, error_ob):
|
||||
|
||||
# 'error_ob' can be any exception. If it's not a Werkzeug exception,
|
||||
# handle it as a 500.
|
||||
if not hasattr(error_ob, 'code'):
|
||||
error_ob.code = 500
|
||||
if not hasattr(error_ob, 'description'):
|
||||
error_ob.description = str(error_ob)
|
||||
|
||||
if request.full_path.startswith('/%s/' % self.config['URL_PREFIX']):
|
||||
from pillar.api.utils import jsonify
|
||||
# This is an API request, so respond in JSON.
|
||||
return jsonify({
|
||||
'_status': 'ERR',
|
||||
'_code': error_ob.code,
|
||||
'_message': error_ob.description,
|
||||
}, status=error_ob.code)
|
||||
|
||||
# See whether we should return an embedded page or a regular one.
|
||||
if request.is_xhr:
|
||||
fname = 'errors/%i_embed.html' % error_ob.code
|
||||
else:
|
||||
fname = 'errors/%i.html' % error_ob.code
|
||||
|
||||
# Also handle the case where we didn't create a template for this error.
|
||||
try:
|
||||
return render_template(fname, description=error_ob.description), error_ob.code
|
||||
except TemplateNotFound:
|
||||
self.log.warning('Error template %s for code %i not found',
|
||||
fname, error_ob.code)
|
||||
return render_template('errors/500.html'), error_ob.code
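# Example JSON body produced for an API request (hypothetical values):
#   {"_status": "ERR", "_code": 404, "_message": "Asset \"Suzanne\" was deleted."}
# Non-API requests get the errors/<code>.html (or _embed.html) template instead.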
|
||||
|
||||
def finish_startup(self):
|
||||
self.log.info('Using MongoDB database %r', self.config['MONGO_DBNAME'])
|
||||
|
||||
self._config_celery()
|
||||
|
||||
api.setup_app(self)
|
||||
web.setup_app(self)
|
||||
|
||||
authentication.setup_app(self)
|
||||
|
||||
for ext in self.pillar_extensions.values():
|
||||
self.log.info('Setting up extension %s', ext.name)
|
||||
ext.setup_app(self)
|
||||
|
||||
self._config_jinja_env()
|
||||
self._config_static_dirs()
|
||||
self._config_user_roles()
|
||||
self._config_user_caps()
|
||||
|
||||
# Only enable this when debugging.
|
||||
# self._list_routes()
|
||||
|
||||
def setup_db_indices(self):
|
||||
"""Adds missing database indices.
|
||||
|
||||
This does NOT drop and recreate existing indices,
|
||||
nor does it reconfigure existing indices.
|
||||
If you want that, drop them manually first.
|
||||
"""
|
||||
|
||||
self.log.debug('Adding any missing database indices.')
|
||||
|
||||
import pymongo
|
||||
|
||||
db = self.data.driver.db
|
||||
|
||||
coll = db['tokens']
|
||||
coll.create_index([('user', pymongo.ASCENDING)])
|
||||
coll.create_index([('token', pymongo.ASCENDING)])
|
||||
coll.create_index([('token_hashed', pymongo.ASCENDING)])
|
||||
|
||||
coll = db['notifications']
|
||||
coll.create_index([('user', pymongo.ASCENDING)])
|
||||
|
||||
coll = db['activities-subscriptions']
|
||||
coll.create_index([('context_object', pymongo.ASCENDING)])
|
||||
|
||||
coll = db['nodes']
|
||||
# This index is used for queries on project, and for queries on
|
||||
# the combination (project, node type).
|
||||
coll.create_index([('project', pymongo.ASCENDING),
|
||||
('node_type', pymongo.ASCENDING)])
|
||||
coll.create_index([('parent', pymongo.ASCENDING)])
|
||||
coll.create_index([('short_code', pymongo.ASCENDING)],
|
||||
sparse=True, unique=True)
|
||||
# Used for latest assets & comments
|
||||
coll.create_index([('properties.status', pymongo.ASCENDING),
|
||||
('node_type', pymongo.ASCENDING),
|
||||
('_created', pymongo.DESCENDING)])
|
||||
|
||||
coll = db['projects']
|
||||
# This index is used for statistics, and for fetching public projects.
|
||||
coll.create_index([('is_private', pymongo.ASCENDING)])
|
||||
coll.create_index([('category', pymongo.ASCENDING)])
|
||||
|
||||
coll = db['organizations']
|
||||
coll.create_index([('ip_ranges.start', pymongo.ASCENDING)])
|
||||
coll.create_index([('ip_ranges.end', pymongo.ASCENDING)])
|
||||
self.log.debug('Created database indices')
|
||||
|
||||
def register_api_blueprint(self, blueprint, url_prefix):
|
||||
# TODO: use Eve config variable instead of hard-coded '/api'
|
||||
self.register_blueprint(blueprint, url_prefix='/api' + url_prefix)
|
||||
|
||||
def make_header(self, username, subclient_id=''):
|
||||
"""Returns a Basic HTTP Authentication header value."""
|
||||
import base64
|
||||
|
||||
credentials = ('%s:%s' % (username, subclient_id)).encode('utf-8')
# b64encode() takes and returns bytes; decode for use as an HTTP header value.
return 'basic ' + base64.b64encode(credentials).decode('ascii')
|
||||
|
||||
def post_internal(self, resource: str, payl=None, skip_validation=False):
|
||||
"""Workaround for Eve issue https://github.com/nicolaiarocci/eve/issues/810"""
|
||||
from eve.methods.post import post_internal
|
||||
|
||||
url = self.config['URLS'][resource]
|
||||
path = '%s/%s' % (self.api_prefix, url)
|
||||
with self.__fake_request_url_rule('POST', path):
|
||||
return post_internal(resource, payl=payl, skip_validation=skip_validation)[:4]
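# Usage sketch (hypothetical payload): create a document from server-side code,
# bypassing HTTP while still running Eve validation and hooks:
#   doc, _, _, status = current_app.post_internal('nodes', {'name': 'example'})
#   assert status == 201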
|
||||
|
||||
def put_internal(self, resource: str, payload=None, concurrency_check=False,
|
||||
skip_validation=False, **lookup):
|
||||
"""Workaround for Eve issue https://github.com/nicolaiarocci/eve/issues/810"""
|
||||
from eve.methods.put import put_internal
|
||||
|
||||
url = self.config['URLS'][resource]
|
||||
path = '%s/%s/%s' % (self.api_prefix, url, lookup['_id'])
|
||||
with self.__fake_request_url_rule('PUT', path):
|
||||
return put_internal(resource, payload=payload, concurrency_check=concurrency_check,
|
||||
skip_validation=skip_validation, **lookup)[:4]
|
||||
|
||||
def patch_internal(self, resource: str, payload=None, concurrency_check=False,
|
||||
skip_validation=False, **lookup):
|
||||
"""Workaround for Eve issue https://github.com/nicolaiarocci/eve/issues/810"""
|
||||
from eve.methods.patch import patch_internal
|
||||
|
||||
url = self.config['URLS'][resource]
|
||||
path = '%s/%s/%s' % (self.api_prefix, url, lookup['_id'])
|
||||
with self.__fake_request_url_rule('PATCH', path):
|
||||
return patch_internal(resource, payload=payload, concurrency_check=concurrency_check,
|
||||
skip_validation=skip_validation, **lookup)[:4]
|
||||
|
||||
def delete_internal(self, resource: str, concurrency_check=False,
|
||||
suppress_callbacks=False, **lookup):
|
||||
"""Workaround for Eve issue https://github.com/nicolaiarocci/eve/issues/810"""
|
||||
from eve.methods.delete import deleteitem_internal
|
||||
|
||||
url = self.config['URLS'][resource]
|
||||
path = '%s/%s/%s' % (self.api_prefix, url, lookup['_id'])
|
||||
with self.__fake_request_url_rule('DELETE', path):
|
||||
return deleteitem_internal(resource,
|
||||
concurrency_check=concurrency_check,
|
||||
suppress_callbacks=suppress_callbacks,
|
||||
**lookup)[:4]
|
||||
|
||||
def _list_routes(self):
|
||||
from pprint import pprint
|
||||
from flask import url_for
|
||||
|
||||
def has_no_empty_params(rule):
|
||||
defaults = rule.defaults if rule.defaults is not None else ()
|
||||
arguments = rule.arguments if rule.arguments is not None else ()
|
||||
return len(defaults) >= len(arguments)
|
||||
|
||||
links = []
|
||||
with self.test_request_context():
|
||||
for rule in self.url_map.iter_rules():
|
||||
# Filter out rules we can't navigate to in a browser
|
||||
# and rules that require parameters
|
||||
if "GET" in rule.methods and has_no_empty_params(rule):
|
||||
url = url_for(rule.endpoint, **(rule.defaults or {}))
|
||||
links.append((url, rule.endpoint, rule.methods))
|
||||
if "PATCH" in rule.methods:
|
||||
args = {arg: arg for arg in rule.arguments}
|
||||
url = url_for(rule.endpoint, **args)
|
||||
links.append((url, rule.endpoint, rule.methods))
|
||||
|
||||
links.sort(key=lambda t: (('/api/' in t[0]), len(t[0])))
|
||||
|
||||
pprint(links, width=300)
|
||||
|
||||
def db(self, collection_name: str = None) \
|
||||
-> typing.Union[pymongo.collection.Collection, pymongo.database.Database]:
|
||||
"""Returns the MongoDB database, or the collection (if given)"""
|
||||
|
||||
if collection_name:
|
||||
return self.data.driver.db[collection_name]
|
||||
return self.data.driver.db
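# Usage sketch: current_app.db('users') yields the pymongo 'users' collection,
# current_app.db() the whole Database, e.g.:
#   user = current_app.db('users').find_one({'email': 'someone@example.com'})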
|
||||
|
||||
def extension_sidebar_links(self, project):
|
||||
"""Returns the sidebar links for the given projects.
|
||||
|
||||
:returns: HTML as a string for the sidebar.
|
||||
"""
|
||||
|
||||
if not project:
|
||||
return ''
|
||||
|
||||
return jinja2.Markup(''.join(ext.sidebar_links(project)
|
||||
for ext in self.pillar_extensions.values()))
|
||||
|
||||
@contextlib.contextmanager
|
||||
def __fake_request_url_rule(self, method: str, url_path: str):
|
||||
"""Tries to force-set the request URL rule.
|
||||
|
||||
This is required by Eve (since 0.70) to be able to construct a
|
||||
Location HTTP header that points to the resource item.
|
||||
|
||||
See post_internal, put_internal and patch_internal.
|
||||
"""
|
||||
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
|
||||
with self.test_request_context(method=method, path=url_path) as ctx:
|
||||
try:
|
||||
rule, _ = ctx.url_adapter.match(url_path, method=method, return_rule=True)
|
||||
except (wz_exceptions.MethodNotAllowed, wz_exceptions.NotFound):
|
||||
# We're POSTing things that we haven't told Eve are POSTable. Try again using the
|
||||
# GET method.
|
||||
rule, _ = ctx.url_adapter.match(url_path, method='GET', return_rule=True)
|
||||
current_request = request._get_current_object()
|
||||
current_request.url_rule = rule
|
||||
|
||||
yield ctx
|
||||
|
||||
def validator_for_resource(self, resource_name: str) -> custom_field_validation.ValidateCustomFields:
|
||||
schema = self.config['DOMAIN'][resource_name]['schema']
|
||||
validator = self.validator(schema, resource_name)
|
||||
return validator
|
||||
|
||||
@property
|
||||
def user_roles(self) -> typing.FrozenSet[str]:
|
||||
return frozenset(self._user_roles)
|
||||
|
||||
@property
|
||||
def user_roles_indexable(self) -> typing.FrozenSet[str]:
|
||||
return frozenset(self._user_roles_indexable)
|
||||
|
||||
@property
|
||||
def user_caps(self) -> typing.Mapping[str, typing.FrozenSet[str]]:
|
||||
return self._user_caps
|
||||
|
||||
@property
|
||||
def real_app(self) -> 'PillarServer':
|
||||
"""The real application object.
|
||||
|
||||
Can be used to obtain the real app object from a LocalProxy.
|
||||
"""
|
||||
return self
|
||||
pillar/api/__init__.py (new file, 19 lines)
@@ -0,0 +1,19 @@
|
||||
def setup_app(app):
|
||||
from . import encoding, blender_id, projects, local_auth, file_storage
|
||||
from . import users, nodes, latest, blender_cloud, service, activities
|
||||
from . import organizations
|
||||
from . import search
|
||||
|
||||
encoding.setup_app(app, url_prefix='/encoding')
|
||||
blender_id.setup_app(app, url_prefix='/blender_id')
|
||||
search.setup_app(app, url_prefix='/newsearch')
|
||||
projects.setup_app(app, api_prefix='/p')
|
||||
local_auth.setup_app(app, url_prefix='/auth')
|
||||
file_storage.setup_app(app, url_prefix='/storage')
|
||||
latest.setup_app(app, url_prefix='/latest')
|
||||
blender_cloud.setup_app(app, url_prefix='/bcloud')
|
||||
users.setup_app(app, api_prefix='/users')
|
||||
service.setup_app(app, api_prefix='/service')
|
||||
nodes.setup_app(app, url_prefix='/nodes')
|
||||
activities.setup_app(app)
|
||||
organizations.setup_app(app)
|
||||
@@ -1,7 +1,10 @@
|
||||
from flask import g
|
||||
from flask import current_app
|
||||
from eve.methods.post import post_internal
|
||||
from application.modules.users import gravatar
|
||||
import logging
|
||||
|
||||
from flask import request, current_app
|
||||
from pillar.api.utils import gravatar
|
||||
from pillar.auth import current_user
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def notification_parse(notification):
|
||||
@@ -15,6 +18,11 @@ def notification_parse(notification):
|
||||
if activity is None or activity['object_type'] != 'node':
|
||||
return
|
||||
node = nodes_collection.find_one({'_id': activity['object']})
|
||||
if not node:
|
||||
# This can happen when a notification is generated and then the
|
||||
# node is deleted.
|
||||
return
|
||||
|
||||
# Initial support only for node_type comments
|
||||
if node['node_type'] != 'comment':
|
||||
return
|
||||
@@ -23,7 +31,7 @@ def notification_parse(notification):
|
||||
object_name = ''
|
||||
object_id = activity['object']
|
||||
|
||||
if node['parent']['user'] == g.current_user['user_id']:
|
||||
if node['parent']['user'] == current_user.user_id:
|
||||
owner = "your {0}".format(node['parent']['node_type'])
|
||||
else:
|
||||
parent_comment_user = users_collection.find_one(
|
||||
@@ -45,7 +53,7 @@ def notification_parse(notification):
|
||||
action = activity['verb']
|
||||
|
||||
lookup = {
|
||||
'user': g.current_user['user_id'],
|
||||
'user': current_user.user_id,
|
||||
'context_object_type': 'node',
|
||||
'context_object': context_object_id,
|
||||
}
|
||||
@@ -111,7 +119,7 @@ def activity_subscribe(user_id, context_object_type, context_object_id):
|
||||
|
||||
# If no subscription exists, we create one
|
||||
if not subscription:
|
||||
post_internal('activities-subscriptions', lookup)
|
||||
current_app.post_internal('activities-subscriptions', lookup)
|
||||
|
||||
|
||||
def activity_object_add(actor_user_id, verb, object_type, object_id,
|
||||
@@ -133,22 +141,82 @@ def activity_object_add(actor_user_id, verb, object_type, object_id,
|
||||
subscriptions = notification_get_subscriptions(
|
||||
context_object_type, context_object_id, actor_user_id)
|
||||
|
||||
if subscriptions.count() > 0:
|
||||
activity = dict(
|
||||
actor_user=actor_user_id,
|
||||
verb=verb,
|
||||
object_type=object_type,
|
||||
object=object_id,
|
||||
context_object_type=context_object_type,
|
||||
context_object=context_object_id
|
||||
)
|
||||
if subscriptions.count() == 0:
|
||||
return
|
||||
|
||||
activity = post_internal('activities', activity)
|
||||
if activity[3] != 201:
|
||||
# If creation failed for any reason, do not create any notification
|
||||
return
|
||||
for subscription in subscriptions:
|
||||
notification = dict(
|
||||
user=subscription['user'],
|
||||
activity=activity[0]['_id'])
|
||||
post_internal('notifications', notification)
|
||||
info, status = register_activity(actor_user_id, verb, object_type, object_id,
|
||||
context_object_type, context_object_id)
|
||||
if status != 201:
|
||||
# If creation failed for any reason, do not create any notification
|
||||
return
|
||||
|
||||
for subscription in subscriptions:
|
||||
notification = dict(
|
||||
user=subscription['user'],
|
||||
activity=info['_id'])
|
||||
current_app.post_internal('notifications', notification)
|
||||
|
||||
|
||||
def register_activity(actor_user_id, verb, object_type, object_id,
|
||||
context_object_type, context_object_id,
|
||||
project_id=None,
|
||||
node_type=None):
|
||||
"""Registers an activity.
|
||||
|
||||
This works using the following pattern:
|
||||
|
||||
ACTOR -> VERB -> OBJECT -> CONTEXT
|
||||
|
||||
:param actor_user_id: id of the user who is changing the object
|
||||
:param verb: the action on the object ('commented', 'replied')
|
||||
:param object_type: hardcoded name, see database schema
|
||||
:param object_id: object id, to be traced with object_type
|
||||
:param context_object_type: the type of the context object, like 'project' or 'node',
|
||||
see database schema
|
||||
:param context_object_id:
|
||||
:param project_id: optional project ID to make the activity easily queryable
|
||||
per project.
|
||||
:param node_type: optional, node type of the node receiving the activity.
|
||||
|
||||
:returns: tuple (info, status_code), where a successful operation should have
    status_code=201. If it is not 201, an error is logged.
|
||||
"""
|
||||
|
||||
activity = {
|
||||
'actor_user': actor_user_id,
|
||||
'verb': verb,
|
||||
'object_type': object_type,
|
||||
'object': object_id,
|
||||
'context_object_type': context_object_type,
|
||||
'context_object': context_object_id}
|
||||
if project_id:
|
||||
activity['project'] = project_id
|
||||
if node_type:
|
||||
activity['node_type'] = node_type
|
||||
|
||||
info, _, _, status_code = current_app.post_internal('activities', activity)
|
||||
|
||||
if status_code != 201:
|
||||
log.error('register_activity: code %i creating activity %s: %s',
|
||||
status_code, activity, info)
|
||||
else:
|
||||
log.info('register_activity: user %s "%s" on %s %s, context %s %s',
|
||||
actor_user_id, verb, object_type, object_id,
|
||||
context_object_type, context_object_id)
|
||||
return info, status_code
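# Example call (hypothetical ObjectIds): a user commented on a node; the parent
# node is the context, and the project is included for easy per-project queries:
#   register_activity(user_oid, 'commented', 'node', comment_oid,
#                     'node', parent_node_oid,
#                     project_id=project_oid, node_type='comment')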
|
||||
|
||||
|
||||
def before_returning_item_notifications(response):
|
||||
if request.args.get('parse'):
|
||||
notification_parse(response)
|
||||
|
||||
|
||||
def before_returning_resource_notifications(response):
|
||||
for item in response['_items']:
|
||||
if request.args.get('parse'):
|
||||
notification_parse(item)
|
||||
|
||||
|
||||
def setup_app(app):
|
||||
app.on_fetched_item_notifications += before_returning_item_notifications
|
||||
app.on_fetched_resource_notifications += before_returning_resource_notifications
|
||||
@@ -24,7 +24,8 @@ def blender_cloud_addon_version():
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
from . import texture_libs, home_project
|
||||
from . import texture_libs, home_project, subscription
|
||||
|
||||
texture_libs.setup_app(app, url_prefix=url_prefix)
|
||||
home_project.setup_app(app, url_prefix=url_prefix)
|
||||
subscription.setup_app(app, url_prefix=url_prefix)
|
||||
@@ -1,17 +1,14 @@
|
||||
import copy
|
||||
import logging
|
||||
import datetime
|
||||
|
||||
from bson import ObjectId, tz_util
|
||||
from eve.methods.post import post_internal
|
||||
from eve.methods.put import put_internal
|
||||
from bson import ObjectId
|
||||
from eve.methods.get import get
|
||||
from flask import Blueprint, g, current_app, request
|
||||
from flask import Blueprint, current_app, request
|
||||
from pillar.api import utils
|
||||
from pillar.api.utils import authentication, authorization, utcnow
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
|
||||
from application.modules import projects
|
||||
from application import utils
|
||||
from application.utils import authentication, authorization
|
||||
from pillar.api.projects import utils as proj_utils
|
||||
|
||||
blueprint = Blueprint('blender_cloud.home_project', __name__)
|
||||
log = logging.getLogger(__name__)
|
||||
@@ -20,7 +17,7 @@ log = logging.getLogger(__name__)
|
||||
HOME_PROJECT_USERS = set()
|
||||
|
||||
# Users with any of these roles will get full write access to their home project.
|
||||
HOME_PROJECT_WRITABLE_USERS = {u'subscriber', u'demo'}
|
||||
HOME_PROJECT_WRITABLE_USERS = {'subscriber', 'demo'}
|
||||
|
||||
HOME_PROJECT_DESCRIPTION = ('# Your home project\n\n'
|
||||
'This is your home project. It allows synchronisation '
|
||||
@@ -32,7 +29,7 @@ HOME_PROJECT_SUMMARY = 'This is your home project. Here you can sync your Blende
|
||||
# 'as a pastebin for text, images and other assets, and '
|
||||
# 'allows synchronisation of your Blender settings.')
|
||||
# HOME_PROJECT_SUMMARY = 'This is your home project. Pastebin and Blender settings sync in one!'
|
||||
SYNC_GROUP_NODE_NAME = u'Blender Sync'
|
||||
SYNC_GROUP_NODE_NAME = 'Blender Sync'
|
||||
SYNC_GROUP_NODE_DESC = ('The [Blender Cloud Addon](https://cloud.blender.org/services'
|
||||
'#blender-addon) will synchronize your Blender settings here.')
|
||||
|
||||
@@ -73,7 +70,7 @@ def create_blender_sync_node(project_id, admin_group_id, user_id):
|
||||
}
|
||||
}
|
||||
|
||||
r, _, _, status = post_internal('nodes', node)
|
||||
r, _, _, status = current_app.post_internal('nodes', node)
|
||||
if status != 201:
|
||||
log.warning('Unable to create Blender Sync node for home project %s: %s',
|
||||
project_id, r)
|
||||
@@ -109,13 +106,13 @@ def create_home_project(user_id, write_access):
|
||||
project = deleted_proj
|
||||
else:
|
||||
log.debug('User %s does not have a deleted project', user_id)
|
||||
project = projects.create_new_project(project_name='Home',
|
||||
user_id=ObjectId(user_id),
|
||||
overrides=overrides)
|
||||
project = proj_utils.create_new_project(project_name='Home',
|
||||
user_id=ObjectId(user_id),
|
||||
overrides=overrides)
|
||||
|
||||
# Re-validate the authentication token, so that the put_internal call sees the
|
||||
# new group created for the project.
|
||||
authentication.validate_token()
|
||||
authentication.validate_token(force=True)
|
||||
|
||||
# There are a few things in the on_insert_projects hook we need to adjust.
|
||||
|
||||
@@ -124,10 +121,10 @@ def create_home_project(user_id, write_access):
|
||||
|
||||
# Set up the correct node types. No need to set permissions for them,
|
||||
# as the inherited project permissions are fine.
|
||||
from manage_extra.node_types.group import node_type_group
|
||||
from manage_extra.node_types.asset import node_type_asset
|
||||
# from manage_extra.node_types.text import node_type_text
|
||||
from manage_extra.node_types.comment import node_type_comment
|
||||
from pillar.api.node_types.group import node_type_group
|
||||
from pillar.api.node_types.asset import node_type_asset
|
||||
# from pillar.api.node_types.text import node_type_text
|
||||
from pillar.api.node_types.comment import node_type_comment
|
||||
|
||||
# For non-subscribers: take away write access from the admin group,
|
||||
# and grant it to certain node types.
|
||||
@@ -137,8 +134,8 @@ def create_home_project(user_id, write_access):
|
||||
# This allows people to comment on shared images and see comments.
|
||||
node_type_comment = assign_permissions(
|
||||
node_type_comment,
|
||||
subscriber_methods=[u'GET', u'POST'],
|
||||
world_methods=[u'GET'])
|
||||
subscriber_methods=['GET', 'POST'],
|
||||
world_methods=['GET'])
|
||||
|
||||
project['node_types'] = [
|
||||
node_type_group,
|
||||
@@ -147,8 +144,8 @@ def create_home_project(user_id, write_access):
|
||||
node_type_comment,
|
||||
]
|
||||
|
||||
result, _, _, status = put_internal('projects', utils.remove_private_keys(project),
|
||||
_id=project['_id'])
|
||||
result, _, _, status = current_app.put_internal('projects', utils.remove_private_keys(project),
|
||||
_id=project['_id'])
|
||||
if status != 200:
|
||||
log.error('Unable to update home project %s for user %s: %s',
|
||||
project['_id'], user_id, result)
|
||||
@@ -166,7 +163,7 @@ def create_home_project(user_id, write_access):
|
||||
def assign_permissions(node_type, subscriber_methods, world_methods):
|
||||
"""Assigns permissions to the node type object.
|
||||
|
||||
:param node_type: a node type from manage_extra.node_types.
|
||||
:param node_type: a node type from pillar.api.node_types.
|
||||
:type node_type: dict
|
||||
:param subscriber_methods: allowed HTTP methods for users of role 'subscriber',
|
||||
'demo' and 'admin'.
|
||||
@@ -177,7 +174,7 @@ def assign_permissions(node_type, subscriber_methods, world_methods):
|
||||
:rtype: dict
|
||||
"""
|
||||
|
||||
from application.modules import service
|
||||
from pillar.api import service
|
||||
|
||||
nt_with_perms = copy.deepcopy(node_type)
|
||||
|
||||
@@ -203,8 +200,10 @@ def home_project():
|
||||
Eve projections are supported, but at least the following fields must be present:
|
||||
'permissions', 'category', 'user'
|
||||
"""
|
||||
user_id = g.current_user['user_id']
|
||||
roles = g.current_user.get('roles', ())
|
||||
from pillar.auth import current_user
|
||||
|
||||
user_id = current_user.user_id
|
||||
roles = current_user.roles
|
||||
|
||||
log.debug('Possibly creating home project for user %s with roles %s', user_id, roles)
|
||||
if HOME_PROJECT_USERS and not HOME_PROJECT_USERS.intersection(roles):
|
||||
@@ -217,7 +216,7 @@ def home_project():
|
||||
write_access = write_access_with_roles(roles)
|
||||
create_home_project(user_id, write_access)
|
||||
|
||||
resp, _, _, status, _ = get('projects', category=u'home', user=user_id)
|
||||
resp, _, _, status, _ = get('projects', category='home', user=user_id)
|
||||
if status != 200:
|
||||
return utils.jsonify(resp), status
|
||||
|
||||
@@ -250,8 +249,8 @@ def home_project_permissions(write_access):
|
||||
"""
|
||||
|
||||
if write_access:
|
||||
return [u'GET', u'PUT', u'POST', u'DELETE']
|
||||
return [u'GET']
|
||||
return ['GET', 'PUT', 'POST', 'DELETE']
|
||||
return ['GET']
|
||||
|
||||
|
||||
def has_home_project(user_id):
|
||||
@@ -282,7 +281,7 @@ def is_home_project(project_id, user_id):
|
||||
def mark_node_updated(node_id):
|
||||
"""Uses pymongo to set the node's _updated to "now"."""
|
||||
|
||||
now = datetime.datetime.now(tz=tz_util.utc)
|
||||
now = utcnow()
|
||||
nodes_coll = current_app.data.driver.db['nodes']
|
||||
|
||||
return nodes_coll.update_one({'_id': node_id},
|
||||
@@ -391,7 +390,7 @@ def user_changed_role(sender, user):
|
||||
|
||||
user_id = user['_id']
|
||||
if not has_home_project(user_id):
|
||||
log.debug('User %s does not have a home project', user_id)
|
||||
log.debug('User %s does not have a home project, not changing access permissions', user_id)
|
||||
return
|
||||
|
||||
proj_coll = current_app.data.driver.db['projects']
|
||||
@@ -414,12 +413,12 @@ def user_changed_role(sender, user):
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
||||
app.register_api_blueprint(blueprint, url_prefix=url_prefix)
|
||||
|
||||
app.on_insert_nodes += check_home_project_nodes_permissions
|
||||
app.on_inserted_nodes += mark_parents_as_updated
|
||||
app.on_updated_nodes += mark_parent_as_updated
|
||||
app.on_replaced_nodes += mark_parent_as_updated
|
||||
|
||||
from application.modules import service
|
||||
from pillar.api import service
|
||||
service.signal_user_changed_role.connect(user_changed_role)
|
||||
pillar/api/blender_cloud/subscription.py (new file, 180 lines)
@@ -0,0 +1,180 @@
|
||||
import logging
|
||||
import typing
|
||||
|
||||
import blinker
|
||||
from flask import Blueprint, Response
|
||||
import requests
|
||||
from requests.adapters import HTTPAdapter
|
||||
|
||||
from pillar import auth, current_app
|
||||
from pillar.api import blender_id
|
||||
from pillar.api.utils import authorization, jsonify
|
||||
from pillar.auth import current_user
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
blueprint = Blueprint('blender_cloud.subscription', __name__)
|
||||
|
||||
# Mapping from roles on Blender ID to roles here in Pillar.
|
||||
# Roles not mentioned here will not be synced from Blender ID.
|
||||
ROLES_BID_TO_PILLAR = {
|
||||
'cloud_subscriber': 'subscriber',
|
||||
'cloud_demo': 'demo',
|
||||
'cloud_has_subscription': 'has_subscription',
|
||||
}
|
||||
|
||||
user_subscription_updated = blinker.NamedSignal(
|
||||
'user_subscription_updated',
|
||||
'The sender is a UserClass instance, kwargs includes "revoke_roles" and "grant_roles".')
|
||||
|
||||
|
||||
@blueprint.route('/update-subscription')
|
||||
@authorization.require_login()
|
||||
def update_subscription() -> typing.Tuple[str, int]:
|
||||
"""Updates the subscription status of the current user.
|
||||
|
||||
Returns an empty HTTP response.
|
||||
"""
|
||||
|
||||
my_log: logging.Logger = log.getChild('update_subscription')
|
||||
real_current_user = auth.get_current_user() # multiple accesses, just get unproxied.
|
||||
|
||||
try:
|
||||
bid_user = blender_id.fetch_blenderid_user()
|
||||
except blender_id.LogoutUser:
|
||||
auth.logout_user()
|
||||
return '', 204
|
||||
|
||||
if not bid_user:
|
||||
my_log.warning('Logged in user %s has no BlenderID account! '
|
||||
'Unable to update subscription status.', real_current_user.user_id)
|
||||
return '', 204
|
||||
|
||||
do_update_subscription(real_current_user, bid_user)
|
||||
return '', 204
|
||||
|
||||
|
||||
@blueprint.route('/update-subscription-for/<user_id>', methods=['POST'])
|
||||
@authorization.require_login(require_cap='admin')
|
||||
def update_subscription_for(user_id: str):
|
||||
"""Updates the user based on their info at Blender ID."""
|
||||
|
||||
from urllib.parse import urljoin
|
||||
|
||||
from pillar.api.utils import str2id
|
||||
|
||||
my_log = log.getChild('update_subscription_for')
|
||||
|
||||
bid_session = requests.Session()
|
||||
bid_session.mount('https://', HTTPAdapter(max_retries=5))
|
||||
bid_session.mount('http://', HTTPAdapter(max_retries=5))
|
||||
|
||||
users_coll = current_app.db('users')
|
||||
db_user = users_coll.find_one({'_id': str2id(user_id)})
|
||||
if not db_user:
|
||||
my_log.warning('User %s not found in database', user_id)
|
||||
return Response(f'User {user_id} not found in our database', status=404)
|
||||
|
||||
log.info('Updating user %s from Blender ID on behalf of %s',
|
||||
db_user['email'], current_user.email)
|
||||
|
||||
bid_user_id = blender_id.get_user_blenderid(db_user)
|
||||
if not bid_user_id:
|
||||
my_log.info('User %s has no Blender ID', user_id)
|
||||
return Response('User has no Blender ID', status=404)
|
||||
|
||||
# Get the user info from Blender ID, and handle errors.
|
||||
api_url = current_app.config['BLENDER_ID_USER_INFO_API']
|
||||
api_token = current_app.config['BLENDER_ID_USER_INFO_TOKEN']
|
||||
url = urljoin(api_url, bid_user_id)
|
||||
resp = bid_session.get(url, headers={'Authorization': f'Bearer {api_token}'})
|
||||
if resp.status_code == 404:
|
||||
my_log.info('User %s has a Blender ID %s but Blender ID itself does not find it',
|
||||
user_id, bid_user_id)
|
||||
return Response(f'User {bid_user_id} does not exist at Blender ID', status=404)
|
||||
if resp.status_code != 200:
|
||||
my_log.info('Error code %s getting user %s from Blender ID (resp = %s)',
|
||||
resp.status_code, user_id, resp.text)
|
||||
return Response(f'Error code {resp.status_code} from Blender ID', status=resp.status_code)
|
||||
|
||||
# Update the user in our database.
|
||||
local_user = auth.UserClass.construct('', db_user)
|
||||
bid_user = resp.json()
|
||||
do_update_subscription(local_user, bid_user)
|
||||
|
||||
return '', 204
|
||||
|
||||
|
||||
def do_update_subscription(local_user: auth.UserClass, bid_user: dict):
|
||||
"""Updates the subscription status of the user given the Blender ID user info.
|
||||
|
||||
Uses the badger service to update the user's roles from Blender ID.
|
||||
|
||||
bid_user should be a dict like:
|
||||
{'id': 1234,
|
||||
'full_name': 'मूंगफली मक्खन प्रेमी',
|
||||
'email': 'here@example.com',
|
||||
'roles': {'cloud_demo': True}}
|
||||
|
||||
The 'roles' key can also be an iterable of role names instead of a dict.
|
||||
"""
|
||||
|
||||
from pillar.api import service
|
||||
|
||||
my_log: logging.Logger = log.getChild('do_update_subscription')
|
||||
|
||||
try:
|
||||
email = bid_user['email']
|
||||
except KeyError:
|
||||
email = '-missing email-'
|
||||
|
||||
# Transform the BID roles from a dict to a set.
|
||||
bidr = bid_user.get('roles', set())
|
||||
if isinstance(bidr, dict):
|
||||
bid_roles = {role
|
||||
for role, has_role in bid_user.get('roles', {}).items()
|
||||
if has_role}
|
||||
else:
|
||||
bid_roles = set(bidr)
|
||||
|
||||
# Handle the role changes via the badger service functionality.
|
||||
plr_roles = set(local_user.roles)
|
||||
|
||||
grant_roles = set()
|
||||
revoke_roles = set()
|
||||
for bid_role, plr_role in ROLES_BID_TO_PILLAR.items():
|
||||
if bid_role in bid_roles and plr_role not in plr_roles:
|
||||
grant_roles.add(plr_role)
|
||||
continue
|
||||
if bid_role not in bid_roles and plr_role in plr_roles:
|
||||
revoke_roles.add(plr_role)
|
||||
|
||||
user_id = local_user.user_id
|
||||
|
||||
if grant_roles:
|
||||
if my_log.isEnabledFor(logging.INFO):
|
||||
my_log.info('granting roles to user %s (Blender ID %s): %s',
|
||||
user_id, email, ', '.join(sorted(grant_roles)))
|
||||
service.do_badger('grant', roles=grant_roles, user_id=user_id)
|
||||
|
||||
if revoke_roles:
|
||||
if my_log.isEnabledFor(logging.INFO):
|
||||
my_log.info('revoking roles from user %s (Blender ID %s): %s',
|
||||
user_id, email, ', '.join(sorted(revoke_roles)))
|
||||
service.do_badger('revoke', roles=revoke_roles, user_id=user_id)
|
||||
|
||||
# Let the world know this user's subscription was updated.
|
||||
final_roles = (plr_roles - revoke_roles).union(grant_roles)
|
||||
local_user.roles = list(final_roles)
|
||||
local_user.collect_capabilities()
|
||||
user_subscription_updated.send(local_user,
|
||||
grant_roles=grant_roles,
|
||||
revoke_roles=revoke_roles)
|
||||
|
||||
# Re-index the user in the search database.
|
||||
from pillar.api.users import hooks
|
||||
hooks.push_updated_user_to_search({'_id': user_id}, {})
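# Illustration (made-up account): a bid_user payload such as
#   {'id': 1234, 'email': 'x@example.com', 'roles': {'cloud_subscriber': True}}
# grants the Pillar role 'subscriber' if the user lacks it; roles listed in
# ROLES_BID_TO_PILLAR that Blender ID no longer reports are revoked again.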
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
log.info('Registering blueprint at %s', url_prefix)
|
||||
app.register_api_blueprint(blueprint, url_prefix=url_prefix)
|
||||
@@ -1,15 +1,16 @@
|
||||
import functools
|
||||
import logging
|
||||
|
||||
from flask import Blueprint, request, current_app, g
|
||||
from eve.methods.get import get
|
||||
from eve.utils import config as eve_config
|
||||
from flask import Blueprint, request, current_app
|
||||
from werkzeug.datastructures import MultiDict
|
||||
from werkzeug.exceptions import InternalServerError
|
||||
|
||||
from application import utils
|
||||
from application.utils.authentication import current_user_id
|
||||
from application.utils.authorization import require_login
|
||||
from pillar.api import utils
|
||||
from pillar.api.utils.authentication import current_user_id
|
||||
from pillar.api.utils.authorization import require_login
|
||||
from pillar.auth import current_user
|
||||
|
||||
FIRST_ADDON_VERSION_WITH_HDRI = (1, 4, 0)
|
||||
TL_PROJECTION = utils.dumps({'name': 1, 'url': 1, 'permissions': 1,})
|
||||
@@ -26,8 +27,8 @@ log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def keep_fetching_texture_libraries(proj_filter):
|
||||
groups = g.current_user['groups']
|
||||
user_id = g.current_user['user_id']
|
||||
groups = current_user.group_ids
|
||||
user_id = current_user.user_id
|
||||
|
||||
page = 1
|
||||
max_page = float('inf')
|
||||
@@ -75,7 +76,7 @@ def texture_libraries():
|
||||
# of the Blender Cloud Addon. If the addon version is None, we're dealing
|
||||
# with a version of the BCA that's so old it doesn't send its version along.
|
||||
addon_version = blender_cloud_addon_version()
|
||||
return_hdri = addon_version >= FIRST_ADDON_VERSION_WITH_HDRI
|
||||
return_hdri = addon_version is not None and addon_version >= FIRST_ADDON_VERSION_WITH_HDRI
|
||||
log.debug('User %s has Blender Cloud Addon version %s; return_hdri=%s',
|
||||
current_user_id(), addon_version, return_hdri)
|
||||
|
||||
@@ -144,4 +145,4 @@ def setup_app(app, url_prefix):
|
||||
app.on_replace_nodes += sort_by_image_width
|
||||
app.on_insert_nodes += sort_nodes_by_image_width
|
||||
|
||||
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
||||
app.register_api_blueprint(blueprint, url_prefix=url_prefix)
|
||||
pillar/api/blender_id.py (new file, 269 lines)
@@ -0,0 +1,269 @@
|
||||
"""Blender ID subclient endpoint.
|
||||
|
||||
Also contains functionality for other parts of Pillar to perform communication
|
||||
with Blender ID.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
|
||||
import requests
|
||||
from bson import tz_util
|
||||
from rauth import OAuth2Session
|
||||
from flask import Blueprint, request, jsonify, session
|
||||
from requests.adapters import HTTPAdapter
|
||||
|
||||
from pillar import current_app
|
||||
from pillar.api.utils import authentication, utcnow
|
||||
from pillar.api.utils.authentication import find_user_in_db, upsert_user
|
||||
|
||||
blender_id = Blueprint('blender_id', __name__)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LogoutUser(Exception):
|
||||
"""Raised when Blender ID tells us the current user token is invalid.
|
||||
|
||||
This indicates the user should be immediately logged out.
|
||||
"""
|
||||
|
||||
|
||||
@blender_id.route('/store_scst', methods=['POST'])
|
||||
def store_subclient_token():
|
||||
"""Verifies & stores a user's subclient-specific token."""
|
||||
|
||||
user_id = request.form['user_id'] # User ID at BlenderID
|
||||
subclient_id = request.form['subclient_id']
|
||||
scst = request.form['token']
|
||||
|
||||
db_user, status = validate_create_user(user_id, scst, subclient_id)
|
||||
|
||||
if db_user is None:
|
||||
log.warning('Unable to verify subclient token with Blender ID.')
|
||||
return jsonify({'status': 'fail',
|
||||
'error': 'BLENDER ID ERROR'}), 403
|
||||
|
||||
return jsonify({'status': 'success',
|
||||
'subclient_user_id': str(db_user['_id'])}), status
|
||||
|
||||
|
||||
def validate_create_user(blender_id_user_id, token, oauth_subclient_id):
|
||||
"""Validates a user against Blender ID, creating the user in our database.
|
||||
|
||||
:param blender_id_user_id: the user ID at the BlenderID server.
|
||||
:param token: the OAuth access token.
|
||||
:param oauth_subclient_id: the subclient ID, or empty string if not a subclient.
|
||||
:returns: (user in MongoDB, HTTP status 200 or 201)
|
||||
"""
|
||||
|
||||
# Verify with Blender ID
|
||||
log.debug('Storing token for BlenderID user %s', blender_id_user_id)
|
||||
user_info, token_expiry = validate_token(blender_id_user_id, token, oauth_subclient_id)
|
||||
|
||||
if user_info is None:
|
||||
log.debug('Unable to verify token with Blender ID.')
|
||||
return None, None
|
||||
|
||||
# Blender ID can be queried without user ID, and will always include the
|
||||
# correct user ID in its response.
|
||||
log.debug('Obtained user info from Blender ID: %s', user_info)
|
||||
|
||||
# Store the user info in MongoDB.
|
||||
db_user = find_user_in_db(user_info)
|
||||
db_id, status = upsert_user(db_user)
|
||||
|
||||
# Store the token in MongoDB.
|
||||
ip_based_roles = current_app.org_manager.roles_for_request()
|
||||
authentication.store_token(db_id, token, token_expiry, oauth_subclient_id,
|
||||
org_roles=ip_based_roles)
|
||||
|
||||
if current_app.org_manager is not None:
|
||||
roles = current_app.org_manager.refresh_roles(db_id)
|
||||
db_user['roles'] = list(roles)
|
||||
|
||||
return db_user, status
|
||||
|
||||
|
||||
def validate_token(user_id, token, oauth_subclient_id):
|
||||
"""Verifies a subclient token with Blender ID.
|
||||
|
||||
:returns: (user info, token expiry) on success, or (None, None) on failure.
|
||||
The user information from Blender ID is returned as dict
|
||||
{'email': 'a@b', 'full_name': 'AB'}, token expiry as a datetime.datetime.
|
||||
:rtype: tuple
|
||||
"""
|
||||
|
||||
our_subclient_id = current_app.config['BLENDER_ID_SUBCLIENT_ID']
|
||||
|
||||
# Check that IF there is a subclient ID given, it is the correct one.
|
||||
if oauth_subclient_id and our_subclient_id != oauth_subclient_id:
|
||||
log.warning('validate_token(): BlenderID user %s is trying to use the wrong subclient '
|
||||
'ID %r; treating as invalid login.', user_id, oauth_subclient_id)
|
||||
return None, None
|
||||
|
||||
# Validate against BlenderID.
|
||||
log.debug('Validating subclient token for BlenderID user %r, subclient %r', user_id,
|
||||
oauth_subclient_id)
|
||||
payload = {'user_id': user_id,
|
||||
'token': token}
|
||||
if oauth_subclient_id:
|
||||
# If the subclient ID is set, the token belongs to another OAuth Client,
|
||||
# in which case we do not set the client_id field.
|
||||
payload['subclient_id'] = oauth_subclient_id
|
||||
else:
|
||||
# We only want to accept Blender Cloud tokens.
|
||||
payload['client_id'] = current_app.config['OAUTH_CREDENTIALS']['blender-id']['id']
|
||||
|
||||
url = '{0}/u/validate_token'.format(current_app.config['BLENDER_ID_ENDPOINT'])
|
||||
log.debug('POSTing to %r', url)
|
||||
|
||||
# Retry a few times when POSTing to BlenderID fails.
|
||||
# Source: http://stackoverflow.com/a/15431343/875379
|
||||
s = requests.Session()
|
||||
s.mount(current_app.config['BLENDER_ID_ENDPOINT'], HTTPAdapter(max_retries=5))
|
||||
|
||||
# POST to Blender ID, handling errors as negative verification results.
|
||||
try:
|
||||
r = s.post(url, data=payload, timeout=5,
|
||||
verify=current_app.config['TLS_CERT_FILE'])
|
||||
except requests.exceptions.ConnectionError:
|
||||
log.error('Connection error trying to POST to %s, handling as invalid token.', url)
|
||||
return None, None
|
||||
except requests.exceptions.ReadTimeout:
|
||||
log.error('Read timeout trying to POST to %s, handling as invalid token.', url)
|
||||
return None, None
|
||||
except requests.exceptions.RequestException as ex:
|
||||
log.error('Requests error "%s" trying to POST to %s, handling as invalid token.', ex, url)
|
||||
return None, None
|
||||
except IOError as ex:
|
||||
log.error('Unknown I/O error "%s" trying to POST to %s, handling as invalid token.',
|
||||
ex, url)
|
||||
return None, None
|
||||
|
||||
if r.status_code != 200:
|
||||
log.debug('Token %s invalid, HTTP status %i returned', token, r.status_code)
|
||||
return None, None
|
||||
|
||||
resp = r.json()
|
||||
if resp['status'] != 'success':
|
||||
log.warning('Failed response from %s: %s', url, resp)
|
||||
return None, None
|
||||
|
||||
expires = _compute_token_expiry(resp['token_expires'])
|
||||
|
||||
return resp['user'], expires
|
||||
|
||||
|
||||
def _compute_token_expiry(token_expires_string):
|
||||
"""Computes token expiry based on current time and BlenderID expiry.
|
||||
|
||||
Expires our side of the token when either the BlenderID token expires,
|
||||
or in one hour. The latter case is to ensure we periodically verify
|
||||
the token.
|
||||
"""
|
||||
|
||||
# requirement is called python-dateutil, so PyCharm doesn't find it.
|
||||
# noinspection PyPackageRequirements
|
||||
from dateutil import parser
|
||||
|
||||
blid_expiry = parser.parse(token_expires_string)
|
||||
blid_expiry = blid_expiry.astimezone(tz_util.utc)
|
||||
our_expiry = utcnow() + datetime.timedelta(hours=1)
|
||||
|
||||
return min(blid_expiry, our_expiry)
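# Worked example (assumed clock): if Blender ID reports an expiry 6 hours from
# now, we store utcnow() + 1 hour; if it reports 10 minutes from now, that
# earlier moment wins, so min() always yields the sooner expiry.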
|
||||
|
||||
|
||||
def get_user_blenderid(db_user: dict) -> str:
|
||||
"""Returns the Blender ID user ID for this Pillar user.
|
||||
|
||||
Takes the string from 'auth.*.user_id' for the '*' where 'provider'
|
||||
is 'blender-id'.
|
||||
|
||||
:returns the user ID, or the empty string when the user has none.
|
||||
"""
|
||||
|
||||
bid_user_ids = [auth['user_id']
|
||||
for auth in db_user['auth']
|
||||
if auth['provider'] == 'blender-id']
|
||||
try:
|
||||
return bid_user_ids[0]
|
||||
except IndexError:
|
||||
return ''
|
||||
|
||||
|
||||
def fetch_blenderid_user() -> dict:
|
||||
"""Returns the user info of the currently logged in user from BlenderID.
|
||||
|
||||
Returns an empty dict if communication fails.
|
||||
|
||||
Example dict:
|
||||
{
|
||||
"email": "some@email.example.com",
|
||||
"full_name": "dr. Sybren A. St\u00fcvel",
|
||||
"id": 5555,
|
||||
"roles": {
|
||||
"admin": true,
|
||||
"bfct_trainer": false,
|
||||
"cloud_has_subscription": true,
|
||||
"cloud_subscriber": true,
|
||||
"conference_speaker": true,
|
||||
"network_member": true
|
||||
}
|
||||
}
|
||||
|
||||
:raises LogoutUser: when Blender ID tells us the current token is
|
||||
invalid, and the user should be logged out.
|
||||
"""
|
||||
import httplib2 # used by the oauth2 package
|
||||
|
||||
my_log = log.getChild('fetch_blenderid_user')
|
||||
|
||||
bid_url = '%s/api/user' % current_app.config['BLENDER_ID_ENDPOINT']
|
||||
my_log.debug('Fetching user info from %s', bid_url)
|
||||
|
||||
credentials = current_app.config['OAUTH_CREDENTIALS']['blender-id']
|
||||
oauth_token = session.get('blender_id_oauth_token')
|
||||
if not oauth_token:
|
||||
my_log.warning('no Blender ID oauth token found in user session')
|
||||
return {}
|
||||
|
||||
assert isinstance(oauth_token, str), f'oauth token must be str, not {type(oauth_token)}'
|
||||
|
||||
oauth_session = OAuth2Session(
|
||||
credentials['id'], credentials['secret'],
|
||||
access_token=oauth_token)
|
||||
|
||||
try:
|
||||
bid_resp = oauth_session.get(bid_url)
|
||||
except httplib2.HttpLib2Error:
|
||||
my_log.exception('Error getting %s from BlenderID', bid_url)
|
||||
return {}
|
||||
|
||||
if bid_resp.status_code == 403:
|
||||
my_log.warning('Error %i from BlenderID %s, logging out user', bid_resp.status_code, bid_url)
|
||||
raise LogoutUser()
|
||||
|
||||
if bid_resp.status_code != 200:
|
||||
my_log.warning('Error %i from BlenderID %s: %s', bid_resp.status_code, bid_url, bid_resp.text)
|
||||
return {}
|
||||
|
||||
payload = bid_resp.json()
|
||||
if not payload:
|
||||
my_log.warning('Empty data returned from BlenderID %s', bid_url)
|
||||
return {}
|
||||
|
||||
my_log.debug('BlenderID returned %s', payload)
|
||||
return payload
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
app.register_api_blueprint(blender_id, url_prefix=url_prefix)
|
||||
|
||||
|
||||
def switch_user_url(next_url: str) -> str:
|
||||
from urllib.parse import quote
|
||||
|
||||
base_url = '%s/switch' % current_app.config['BLENDER_ID_ENDPOINT']
|
||||
if next_url:
|
||||
return '%s?next=%s' % (base_url, quote(next_url))
|
||||
return base_url
|
||||
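A note on the expiry logic above: _compute_token_expiry() simply takes the earlier of the Blender ID expiry and "one hour from now", so even a long-lived Blender ID token is re-verified every hour on our side. A minimal standalone sketch of the same idea (the timestamp is hypothetical, and dateutil.tz is used here instead of bson's tz_util):

import datetime
from dateutil import parser, tz

def compute_expiry_sketch(token_expires_string: str) -> datetime.datetime:
    # Earlier of: the expiry reported by Blender ID, or one hour from now.
    blid_expiry = parser.parse(token_expires_string).astimezone(tz.UTC)
    our_expiry = datetime.datetime.now(tz=tz.UTC) + datetime.timedelta(hours=1)
    return min(blid_expiry, our_expiry)

# A token that Blender ID says is valid for a week is still re-checked after an hour:
print(compute_expiry_sketch('2030-01-01T00:00:00+00:00'))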
pillar/api/custom_field_validation.py (new file, 209 lines)
@@ -0,0 +1,209 @@
import logging

from bson import ObjectId, tz_util
from datetime import datetime
import cerberus.errors
from eve.io.mongo import Validator
from flask import current_app

import pillar.markdown

log = logging.getLogger(__name__)


class ValidateCustomFields(Validator):
    # TODO: split this into a convert_property(property, schema) and call that from this function.
    def convert_properties(self, properties, node_schema):
        """Converts datetime strings and ObjectId strings to actual Python objects."""

        date_format = current_app.config['RFC1123_DATE_FORMAT']

        for prop in node_schema:
            if prop not in properties:
                continue
            schema_prop = node_schema[prop]
            prop_type = schema_prop['type']

            if prop_type == 'dict':
                try:
                    dict_valueschema = schema_prop['schema']
                    properties[prop] = self.convert_properties(properties[prop], dict_valueschema)
                except KeyError:
                    dict_valueschema = schema_prop['valueschema']
                    self.convert_dict_values(properties[prop], dict_valueschema)

            elif prop_type == 'list':
                if properties[prop] in ['', '[]']:
                    properties[prop] = []
                if 'schema' in schema_prop:
                    for k, val in enumerate(properties[prop]):
                        item_schema = {'item': schema_prop['schema']}
                        item_prop = {'item': properties[prop][k]}
                        properties[prop][k] = self.convert_properties(
                            item_prop, item_schema)['item']

            # Convert datetime string to RFC1123 datetime
            elif prop_type == 'datetime':
                prop_val = properties[prop]
                prop_naive = datetime.strptime(prop_val, date_format)
                prop_aware = prop_naive.replace(tzinfo=tz_util.utc)
                properties[prop] = prop_aware

            elif prop_type == 'objectid':
                prop_val = properties[prop]
                if prop_val:
                    properties[prop] = ObjectId(prop_val)
                else:
                    properties[prop] = None

        return properties

    def convert_dict_values(self, dict_property, dict_valueschema):
        """Calls convert_properties() for the values in the dict.

        Only validates the dict values, not the keys. Modifies the given dict in-place.
        """

        assert dict_valueschema['type'] == 'dict'
        assert isinstance(dict_property, dict)

        for key, val in dict_property.items():
            item_schema = {'item': dict_valueschema}
            item_prop = {'item': val}
            dict_property[key] = self.convert_properties(item_prop, item_schema)['item']

    def _validate_valid_properties(self, valid_properties, field, value):
        from pillar.api.utils import project_get_node_type

        projects_collection = current_app.data.driver.db['projects']
        lookup = {'_id': ObjectId(self.document['project'])}

        project = projects_collection.find_one(lookup, {
            'node_types.name': 1,
            'node_types.dyn_schema': 1,
        })
        if project is None:
            log.warning('Unknown project %s, declared by node %s',
                        lookup, self.document.get('_id'))
            self._error(field, 'Unknown project')
            return False

        node_type_name = self.document['node_type']
        node_type = project_get_node_type(project, node_type_name)
        if node_type is None:
            log.warning('Project %s has no node type %s, declared by node %s',
                        project, node_type_name, self.document.get('_id'))
            self._error(field, 'Unknown node type')
            return False

        try:
            value = self.convert_properties(value, node_type['dyn_schema'])
        except Exception as e:
            log.warning("Error converting form properties", exc_info=True)

        v = self.__class__(schema=node_type['dyn_schema'])
        val = v.validate(value)

        if val:
            # This ensures the modifications made by v's coercion rules are
            # visible to this validator's output.
            self.current[field] = v.current
            return True

        log.warning('Error validating properties for node %s: %s', self.document, v.errors)
        self._error(field, "Error validating properties")

    def _validate_required_after_creation(self, required_after_creation, field, value):
        """Makes a value required after creation only.

        Combine "required_after_creation=True" with "required=False" to allow
        pre-insert hooks to set default values.
        """

        if not required_after_creation:
            # Setting required_after_creation=False is the same as not mentioning this
            # validator at all.
            return

        if self._id is None:
            # This is a creation call, in which case this validator shouldn't run.
            return

        if not value:
            self._error(field, "Value is required once the document was created")

    def _validate_type_iprange(self, field_name: str, value: str):
        """Ensure the field contains a valid IP address.

        Supports both IPv6 and IPv4 ranges. Requires the IPy module.
        """

        from IPy import IP

        try:
            ip = IP(value, make_net=True)
        except ValueError as ex:
            self._error(field_name, str(ex))
            return

        if ip.prefixlen() == 0:
            self._error(field_name, 'Zero-length prefix is not allowed')

    def _validate_type_binary(self, field_name: str, value: bytes):
        """Add support for binary type.

        This type was actually introduced in Cerberus 1.0, so we can drop
        support for this once Eve starts using that version (or newer).
        """

        if not isinstance(value, (bytes, bytearray)):
            self._error(field_name, f'wrong value type {type(value)}, expected bytes or bytearray')

    def _validate_coerce(self, coerce, field: str, value):
        """Override Cerberus' _validate_coerce method for richer features.

        This now supports named coercion functions (available in Cerberus 1.0+)
        and passes the field name to coercion functions as well.
        """
        if isinstance(coerce, str):
            coerce = getattr(self, f'_normalize_coerce_{coerce}')

        try:
            return coerce(field, value)
        except (TypeError, ValueError):
            self._error(field, cerberus.errors.ERROR_COERCION_FAILED.format(field))

    def _normalize_coerce_markdown(self, field: str, value):
        """Render Markdown from this field into {field}_html.

        The field name MUST NOT end in `_html`. The Markdown is read from this
        field and the rendered HTML is written to the field `{field}_html`.
        """
        html = pillar.markdown.markdown(value)
        field_name = pillar.markdown.cache_field_name(field)
        self.current[field_name] = html
        return value


if __name__ == '__main__':
    from pprint import pprint

    v = ValidateCustomFields()
    v.schema = {
        'foo': {'type': 'string', 'coerce': 'markdown'},
        'foo_html': {'type': 'string'},
        'nested': {
            'type': 'dict',
            'schema': {
                'bar': {'type': 'string', 'coerce': 'markdown'},
                'bar_html': {'type': 'string'},
            }
        }
    }
    print('Valid :', v.validate({
        'foo': '# Title\n\nHeyyyy',
        'nested': {'bar': 'bhahaha'},
    }))
    print('Document:')
    pprint(v.document)
    print('Errors :', v.errors)
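As an illustration of how the custom rules above are meant to be combined (a hedged sketch; this fragment is not taken from the actual node type definitions): required_after_creation=True together with required=False lets a pre-insert hook fill in a default on creation while later updates may not clear the field, and coerce: 'markdown' writes the rendered HTML into the matching *_html field.

# Hypothetical dynamic-schema fragment using the custom rules defined above.
example_dyn_schema = {
    'status': {
        'type': 'string',
        'required': False,                # a pre-insert hook sets the default
        'required_after_creation': True,  # but edits may not clear it again
    },
    'content': {
        'type': 'string',
        'coerce': 'markdown',             # rendered HTML lands in 'content_html'
    },
    'content_html': {'type': 'string'},
}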
@@ -1,17 +1,16 @@
|
||||
import logging
|
||||
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
|
||||
from bson import ObjectId, tz_util
|
||||
from eve.methods.put import put_internal
|
||||
from bson import ObjectId
|
||||
from flask import Blueprint
|
||||
from flask import abort
|
||||
from flask import request
|
||||
from flask import current_app
|
||||
from application import utils
|
||||
from application.utils import skip_when_testing
|
||||
from application.utils.gcs import GoogleCloudStorageBucket
|
||||
from flask import request
|
||||
|
||||
from pillar.api import utils
|
||||
from pillar.api.file_storage_backends import Bucket
|
||||
|
||||
encoding = Blueprint('encoding', __name__)
|
||||
log = logging.getLogger(__name__)
|
||||
@@ -34,6 +33,7 @@ def size_descriptor(width, height):
|
||||
1280: '720p',
|
||||
1920: '1080p',
|
||||
2048: '2k',
|
||||
3840: 'UHD',
|
||||
4096: '4k',
|
||||
}
|
||||
|
||||
@@ -44,13 +44,6 @@ def size_descriptor(width, height):
|
||||
return '%ip' % height
|
||||
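For context, a sketch of how the width-to-label lookup in this hunk fits together. This is an assumption-based reconstruction (a dict keyed on output width, with the '%ip' % height fallback shown a few lines below), not the verbatim function:

def size_descriptor_sketch(width: int, height: int) -> str:
    # Map a known output width to a human-readable label; otherwise fall back to '<height>p'.
    widths = {
        1280: '720p',
        1920: '1080p',
        2048: '2k',
        3840: 'UHD',
        4096: '4k',
    }
    return widths.get(width, '%ip' % height)

# size_descriptor_sketch(1920, 1080) -> '1080p'; size_descriptor_sketch(1440, 900) -> '900p'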
|
||||
|
||||
@skip_when_testing
|
||||
def rename_on_gcs(bucket_name, from_path, to_path):
|
||||
gcs = GoogleCloudStorageBucket(str(bucket_name))
|
||||
blob = gcs.bucket.blob(from_path)
|
||||
gcs.bucket.rename_blob(blob, to_path)
|
||||
|
||||
|
||||
@encoding.route('/zencoder/notifications', methods=['POST'])
|
||||
def zencoder_notifications():
|
||||
"""
|
||||
@@ -104,25 +97,24 @@ def zencoder_notifications():
|
||||
file_doc['processing']['status'] = job_state
|
||||
|
||||
if job_state == 'failed':
|
||||
log.warning('Zencoder job %i for file %s failed.', zencoder_job_id, file_id)
|
||||
# Log what Zencoder told us went wrong.
|
||||
for output in data['outputs']:
|
||||
if not any('error' in key for key in output):
|
||||
continue
|
||||
log.warning('Errors for output %s:', output['url'])
|
||||
for key in output:
|
||||
if 'error' in key:
|
||||
log.info(' %s: %s', key, output[key])
|
||||
log.warning('Zencoder job %s for file %s failed: %s', zencoder_job_id, file_id,
|
||||
json.dumps(data, sort_keys=True, indent=4))
|
||||
|
||||
file_doc['status'] = 'failed'
|
||||
put_internal('files', file_doc, _id=file_id)
|
||||
current_app.put_internal('files', file_doc, _id=file_id)
|
||||
|
||||
# This is 'okay' because we handled the Zencoder notification properly.
|
||||
return "You failed, but that's okay.", 200
|
||||
|
||||
log.info('Zencoder job %s for file %s completed with status %s.', zencoder_job_id, file_id,
|
||||
job_state)
|
||||
|
||||
# For every variation encoded, try to update the file object
|
||||
root, _ = os.path.splitext(file_doc['file_path'])
|
||||
storage_name, _ = os.path.splitext(file_doc['file_path'])
|
||||
nice_name, _ = os.path.splitext(file_doc['filename'])
|
||||
|
||||
bucket_class = Bucket.for_backend(file_doc['backend'])
|
||||
bucket = bucket_class(str(file_doc['project']))
|
||||
|
||||
for output in data['outputs']:
|
||||
video_format = output['format']
|
||||
@@ -143,16 +135,16 @@ def zencoder_notifications():
|
||||
|
||||
# Rename the file to include the now-known size descriptor.
|
||||
size = size_descriptor(output['width'], output['height'])
|
||||
new_fname = '{}-{}.{}'.format(root, size, video_format)
|
||||
new_fname = f'{storage_name}-{size}.{video_format}'
|
||||
|
||||
# Rename on Google Cloud Storage
|
||||
# Rename the file on the storage.
|
||||
blob = bucket.blob(variation['file_path'])
|
||||
try:
|
||||
rename_on_gcs(file_doc['project'],
|
||||
'_/' + variation['file_path'],
|
||||
'_/' + new_fname)
|
||||
new_blob = bucket.rename_blob(blob, new_fname)
|
||||
new_blob.update_filename(f'{nice_name}-{size}.{video_format}')
|
||||
except Exception:
|
||||
log.warning('Unable to rename GCS blob %r to %r. Keeping old name.',
|
||||
variation['file_path'], new_fname, exc_info=True)
|
||||
log.warning('Unable to rename blob %r to %r. Keeping old name.',
|
||||
blob, new_fname, exc_info=True)
|
||||
else:
|
||||
variation['file_path'] = new_fname
|
||||
|
||||
@@ -169,8 +161,15 @@ def zencoder_notifications():
|
||||
file_doc['status'] = 'complete'
|
||||
|
||||
# Force an update of the links on the next load of the file.
|
||||
file_doc['link_expires'] = datetime.datetime.now(tz=tz_util.utc) - datetime.timedelta(days=1)
|
||||
file_doc['link_expires'] = utils.utcnow() - datetime.timedelta(days=1)
|
||||
|
||||
put_internal('files', file_doc, _id=file_id)
|
||||
r, _, _, status = current_app.put_internal('files', file_doc, _id=file_id)
|
||||
if status != 200:
|
||||
log.error('unable to save file %s after Zencoder notification: %s', file_id, r)
|
||||
return json.dumps(r), 500
|
||||
|
||||
return '', 204
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
app.register_api_blueprint(encoding, url_prefix=url_prefix)
|
||||
@@ -1,5 +1,7 @@
|
||||
import os
|
||||
|
||||
URL_PREFIX = 'api'
|
||||
|
||||
# Enable reads (GET), inserts (POST) and DELETE for resources/collections
|
||||
# (if you omit this line, the API will default to ['GET'] and provide
|
||||
# read-only access to the endpoint).
|
||||
@@ -86,8 +88,8 @@ users_schema = {
|
||||
}
|
||||
},
|
||||
'auth': {
|
||||
# Storage of authentication credentials (one will be able to auth with
|
||||
# multiple providers on the same account)
|
||||
# Storage of authentication credentials (one will be able to auth with multiple providers on
|
||||
# the same account)
|
||||
'type': 'list',
|
||||
'required': True,
|
||||
'schema': {
|
||||
@@ -95,13 +97,12 @@ users_schema = {
|
||||
'schema': {
|
||||
'provider': {
|
||||
'type': 'string',
|
||||
'allowed': ["blender-id", "local"],
|
||||
'allowed': ['local', 'blender-id', 'facebook', 'google'],
|
||||
},
|
||||
'user_id': {
|
||||
'type': 'string'
|
||||
},
|
||||
# A token is considered a "password" in case the provider is
|
||||
# "local".
|
||||
# A token is considered a "password" in case the provider is "local".
|
||||
'token': {
|
||||
'type': 'string'
|
||||
}
|
||||
@@ -119,13 +120,29 @@ users_schema = {
|
||||
},
|
||||
'service': {
|
||||
'type': 'dict',
|
||||
'allow_unknown': True,
|
||||
'schema': {
|
||||
'badger': {
|
||||
'type': 'list',
|
||||
'schema': {'type': 'string'}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
# Properties defined by extensions. Extensions should use their name (see the
|
||||
# PillarExtension.name property) as the key, and are free to use whatever they want as value,
|
||||
# but we suggest a dict for future extendability.
|
||||
# Properties can be of two types:
|
||||
# - public: they will be visible to the world (for example as part of the User.find() query)
|
||||
# - private: visible only to their user
|
||||
'extension_props_public': {
|
||||
'type': 'dict',
|
||||
'required': False,
|
||||
},
|
||||
'extension_props_private': {
|
||||
'type': 'dict',
|
||||
'required': False,
|
||||
},
|
||||
}
|
||||
|
||||
organizations_schema = {
|
||||
@@ -135,19 +152,12 @@ organizations_schema = {
|
||||
'maxlength': 128,
|
||||
'required': True
|
||||
},
|
||||
'email': {
|
||||
'type': 'string'
|
||||
},
|
||||
'url': {
|
||||
'type': 'string',
|
||||
'minlength': 1,
|
||||
'maxlength': 128,
|
||||
'required': True
|
||||
},
|
||||
'description': {
|
||||
'type': 'string',
|
||||
'maxlength': 256,
|
||||
'coerce': 'markdown',
|
||||
},
|
||||
'_description_html': {'type': 'string'},
|
||||
'website': {
|
||||
'type': 'string',
|
||||
'maxlength': 256,
|
||||
@@ -159,7 +169,15 @@ organizations_schema = {
|
||||
'picture': dict(
|
||||
nullable=True,
|
||||
**_file_embedded_schema),
|
||||
'users': {
|
||||
'admin_uid': {
|
||||
'type': 'objectid',
|
||||
'data_relation': {
|
||||
'resource': 'users',
|
||||
'field': '_id',
|
||||
},
|
||||
'required': True,
|
||||
},
|
||||
'members': {
|
||||
'type': 'list',
|
||||
'default': [],
|
||||
'schema': {
|
||||
@@ -167,51 +185,52 @@ organizations_schema = {
|
||||
'data_relation': {
|
||||
'resource': 'users',
|
||||
'field': '_id',
|
||||
'embeddable': True
|
||||
}
|
||||
}
|
||||
},
|
||||
'teams': {
|
||||
'unknown_members': {
|
||||
'type': 'list', # of email addresses of yet-to-register users.
|
||||
'default': [],
|
||||
'schema': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
|
||||
# Maximum size of the organization, i.e. len(members) + len(unknown_members) may
|
||||
# not exceed this.
|
||||
'seat_count': {
|
||||
'type': 'integer',
|
||||
'required': True,
|
||||
},
|
||||
|
||||
# Roles that the members of this organization automatically get.
|
||||
'org_roles': {
|
||||
'type': 'list',
|
||||
'default': [],
|
||||
'schema': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
|
||||
# Identification of the subscription that pays for this organisation
|
||||
# in an external subscription/payment management system.
|
||||
'payment_subscription_id': {
|
||||
'type': 'string',
|
||||
},
|
||||
|
||||
'ip_ranges': {
|
||||
'type': 'list',
|
||||
'schema': {
|
||||
'type': 'dict',
|
||||
'schema': {
|
||||
# Team name
|
||||
'name': {
|
||||
'type': 'string',
|
||||
'minlength': 1,
|
||||
'maxlength': 128,
|
||||
'required': True
|
||||
},
|
||||
# List of user ids for the team
|
||||
'users': {
|
||||
'type': 'list',
|
||||
'default': [],
|
||||
'schema': {
|
||||
'type': 'objectid',
|
||||
'data_relation': {
|
||||
'resource': 'users',
|
||||
'field': '_id',
|
||||
}
|
||||
}
|
||||
},
|
||||
# List of groups assigned to the team (this will automatically
|
||||
# update the groups property of each user in the team)
|
||||
'groups': {
|
||||
'type': 'list',
|
||||
'default': [],
|
||||
'schema': {
|
||||
'type': 'objectid',
|
||||
'data_relation': {
|
||||
'resource': 'groups',
|
||||
'field': '_id',
|
||||
}
|
||||
}
|
||||
}
|
||||
# see _validate_type_{typename} in ValidateCustomFields:
|
||||
'start': {'type': 'binary', 'required': True},
|
||||
'end': {'type': 'binary', 'required': True},
|
||||
'prefix': {'type': 'integer', 'required': True},
|
||||
'human': {'type': 'iprange', 'required': True},
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
permissions_embedded_schema = {
|
||||
@@ -273,7 +292,9 @@ nodes_schema = {
|
||||
},
|
||||
'description': {
|
||||
'type': 'string',
|
||||
'coerce': 'markdown',
|
||||
},
|
||||
'_description_html': {'type': 'string'},
|
||||
'picture': _file_embedded_schema,
|
||||
'order': {
|
||||
'type': 'integer',
|
||||
@@ -323,6 +344,10 @@ tokens_schema = {
|
||||
'required': True,
|
||||
},
|
||||
'token': {
|
||||
'type': 'string',
|
||||
'required': False,
|
||||
},
|
||||
'token_hashed': {
|
||||
'type': 'string',
|
||||
'required': True,
|
||||
},
|
||||
@@ -333,7 +358,16 @@ tokens_schema = {
|
||||
'is_subclient_token': {
|
||||
'type': 'boolean',
|
||||
'required': False,
|
||||
}
|
||||
},
|
||||
|
||||
# Roles this user gets while this token is valid.
|
||||
'org_roles': {
|
||||
'type': 'list',
|
||||
'default': [],
|
||||
'schema': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
files_schema = {
|
||||
@@ -375,14 +409,15 @@ files_schema = {
|
||||
},
|
||||
'length_aggregate_in_bytes': { # Size of file + all variations
|
||||
'type': 'integer',
|
||||
'required': False, # it's computed on the fly anyway, so clients don't need to provide it.
|
||||
'required': False,
|
||||
# it's computed on the fly anyway, so clients don't need to provide it.
|
||||
},
|
||||
'md5': {
|
||||
'type': 'string',
|
||||
'required': True,
|
||||
},
|
||||
|
||||
# Original filename as given by the user, possibly cleaned-up to make it safe.
|
||||
# Original filename as given by the user, cleaned-up to make it safe.
|
||||
'filename': {
|
||||
'type': 'string',
|
||||
'required': True,
|
||||
@@ -390,7 +425,7 @@ files_schema = {
|
||||
'backend': {
|
||||
'type': 'string',
|
||||
'required': True,
|
||||
'allowed': ["attract-web", "pillar", "cdnsun", "gcs", "unittest"]
|
||||
'allowed': ["local", "pillar", "cdnsun", "gcs", "unittest"]
|
||||
},
|
||||
|
||||
# Where the file is in the backend storage itself. In the case of GCS,
|
||||
@@ -504,7 +539,9 @@ projects_schema = {
|
||||
},
|
||||
'description': {
|
||||
'type': 'string',
|
||||
'coerce': 'markdown',
|
||||
},
|
||||
'_description_html': {'type': 'string'},
|
||||
# Short summary for the project
|
||||
'summary': {
|
||||
'type': 'string',
|
||||
@@ -530,8 +567,9 @@ projects_schema = {
|
||||
'category': {
|
||||
'type': 'string',
|
||||
'allowed': [
|
||||
'training',
|
||||
'course',
|
||||
'film',
|
||||
'workshop',
|
||||
'assets',
|
||||
'software',
|
||||
'game',
|
||||
@@ -620,7 +658,16 @@ projects_schema = {
|
||||
'permissions': {
|
||||
'type': 'dict',
|
||||
'schema': permissions_embedded_schema
|
||||
}
|
||||
},
|
||||
|
||||
# Properties defined by extensions. Extensions should use their name
|
||||
# (see the PillarExtension.name property) as the key, and are free to
|
||||
# use whatever they want as value (but we suggest a dict for future
|
||||
# extendability).
|
||||
'extension_props': {
|
||||
'type': 'dict',
|
||||
'required': False,
|
||||
},
|
||||
}
|
||||
|
||||
activities_subscriptions_schema = {
|
||||
@@ -664,6 +711,19 @@ activities_schema = {
|
||||
'type': 'objectid',
|
||||
'required': True
|
||||
},
|
||||
'project': {
|
||||
'type': 'objectid',
|
||||
'data_relation': {
|
||||
'resource': 'projects',
|
||||
'field': '_id',
|
||||
},
|
||||
'required': False,
|
||||
},
|
||||
# If the object type is 'node', the node type can be stored here.
|
||||
'node_type': {
|
||||
'type': 'string',
|
||||
'required': False,
|
||||
}
|
||||
}
|
||||
|
||||
notifications_schema = {
|
||||
@@ -695,10 +755,6 @@ users = {
|
||||
'item_methods': ['GET', 'PUT'],
|
||||
'public_item_methods': ['GET'],
|
||||
|
||||
# By default don't include the 'auth' field. It can still be obtained
|
||||
# using projections, though, so we block that in hooks.
|
||||
'datasource': {'projection': {u'auth': 0}},
|
||||
|
||||
'schema': users_schema
|
||||
}
|
||||
|
||||
@@ -712,10 +768,12 @@ tokens = {
|
||||
}
|
||||
|
||||
files = {
|
||||
'schema': files_schema,
|
||||
'resource_methods': ['GET', 'POST'],
|
||||
'item_methods': ['GET', 'PATCH'],
|
||||
'public_methods': ['GET'],
|
||||
'public_item_methods': ['GET'],
|
||||
'schema': files_schema
|
||||
'soft_delete': True,
|
||||
}
|
||||
|
||||
groups = {
|
||||
@@ -727,8 +785,11 @@ groups = {
|
||||
|
||||
organizations = {
|
||||
'schema': organizations_schema,
|
||||
'public_item_methods': ['GET'],
|
||||
'public_methods': ['GET']
|
||||
'resource_methods': ['GET', 'POST'],
|
||||
'item_methods': ['GET'],
|
||||
'public_item_methods': [],
|
||||
'public_methods': [],
|
||||
'soft_delete': True,
|
||||
}
|
||||
|
||||
projects = {
|
||||
@@ -763,9 +824,9 @@ DOMAIN = {
|
||||
'notifications': notifications
|
||||
}
|
||||
|
||||
MONGO_HOST = os.environ.get('MONGO_HOST', 'localhost')
|
||||
MONGO_PORT = os.environ.get('MONGO_PORT', 27017)
|
||||
MONGO_DBNAME = os.environ.get('MONGO_DBNAME', 'eve')
|
||||
MONGO_HOST = os.environ.get('PILLAR_MONGO_HOST', 'localhost')
|
||||
MONGO_PORT = int(os.environ.get('PILLAR_MONGO_PORT', 27017))
|
||||
MONGO_DBNAME = os.environ.get('PILLAR_MONGO_DBNAME', 'eve')
|
||||
CACHE_EXPIRES = 60
|
||||
HATEOAS = False
|
||||
UPSERT_ON_PUT = False  # do not create new document on PUT of non-existent URL.
|
||||
@@ -1,37 +1,37 @@
|
||||
import datetime
|
||||
import io
|
||||
import logging
|
||||
import mimetypes
|
||||
import os
|
||||
import pathlib
|
||||
import tempfile
|
||||
import typing
|
||||
import uuid
|
||||
import io
|
||||
from hashlib import md5
|
||||
|
||||
import bson.tz_util
|
||||
import eve.utils
|
||||
import pymongo
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
import werkzeug.datastructures
|
||||
|
||||
from bson import ObjectId
|
||||
from bson.errors import InvalidId
|
||||
from eve.methods.patch import patch_internal
|
||||
from eve.methods.post import post_internal
|
||||
from eve.methods.put import put_internal
|
||||
from flask import Blueprint
|
||||
from flask import current_app
|
||||
from flask import jsonify
|
||||
from flask import request
|
||||
from flask import send_from_directory
|
||||
from flask import url_for, helpers
|
||||
from flask import current_app
|
||||
from flask import g
|
||||
from flask import make_response
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
|
||||
from application import utils
|
||||
from application.utils import remove_private_keys, authentication
|
||||
from application.utils.authorization import require_login, user_has_role, user_matches_roles
|
||||
from application.utils.cdn import hash_file_path
|
||||
from application.utils.encoding import Encoder
|
||||
from application.utils.gcs import GoogleCloudStorageBucket
|
||||
from application.utils.imaging import generate_local_thumbnails
|
||||
from pillar.api import utils
|
||||
from pillar.api.file_storage_backends.gcs import GoogleCloudStorageBucket, \
|
||||
GoogleCloudStorageBlob
|
||||
from pillar.api.utils import remove_private_keys, imaging
|
||||
from pillar.api.utils.authorization import require_login, \
|
||||
user_matches_roles
|
||||
from pillar.api.utils.cdn import hash_file_path
|
||||
from pillar.api.utils.encoding import Encoder
|
||||
from pillar.api.file_storage_backends import default_storage_backend, Bucket
|
||||
from pillar.auth import current_user
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -50,31 +50,6 @@ mimetypes.add_type('application/x-radiance-hdr', '.hdr')
|
||||
mimetypes.add_type('application/x-exr', '.exr')
|
||||
|
||||
|
||||
@file_storage.route('/gcs/<bucket_name>/<subdir>/')
|
||||
@file_storage.route('/gcs/<bucket_name>/<subdir>/<path:file_path>')
|
||||
def browse_gcs(bucket_name, subdir, file_path=None):
|
||||
"""Browse the content of a Google Cloud Storage bucket"""
|
||||
|
||||
# Initialize storage client
|
||||
storage = GoogleCloudStorageBucket(bucket_name, subdir=subdir)
|
||||
if file_path:
|
||||
# If we provided a file_path, we try to fetch it
|
||||
file_object = storage.Get(file_path)
|
||||
if file_object:
|
||||
# If it exists, return file properties in a dictionary
|
||||
return jsonify(file_object)
|
||||
else:
|
||||
listing = storage.List(file_path)
|
||||
return jsonify(listing)
|
||||
# We always return an empty listing even if the directory does not
|
||||
# exist. This can be changed later.
|
||||
# return abort(404)
|
||||
|
||||
else:
|
||||
listing = storage.List('')
|
||||
return jsonify(listing)
|
||||
|
||||
|
||||
@file_storage.route('/file', methods=['POST'])
|
||||
@file_storage.route('/file/<path:file_name>', methods=['GET', 'POST'])
|
||||
def index(file_name=None):
|
||||
@@ -93,7 +68,8 @@ def index(file_name=None):
|
||||
|
||||
# Determine & create storage directory
|
||||
folder_name = file_name[:2]
|
||||
file_folder_path = helpers.safe_join(current_app.config['STORAGE_DIR'], folder_name)
|
||||
file_folder_path = helpers.safe_join(current_app.config['STORAGE_DIR'],
|
||||
folder_name)
|
||||
if not os.path.exists(file_folder_path):
|
||||
log.info('Creating folder path %r', file_folder_path)
|
||||
os.mkdir(file_folder_path)
|
||||
@@ -107,7 +83,10 @@ def index(file_name=None):
|
||||
return jsonify({'url': url_for('file_storage.index', file_name=file_name)})
|
||||
|
||||
|
||||
def _process_image(gcs, file_id, local_file, src_file):
|
||||
def _process_image(bucket: Bucket,
|
||||
file_id: ObjectId,
|
||||
local_file: tempfile._TemporaryFileWrapper,
|
||||
src_file: dict):
|
||||
from PIL import Image
|
||||
|
||||
im = Image.open(local_file)
|
||||
@@ -117,23 +96,24 @@ def _process_image(gcs, file_id, local_file, src_file):
|
||||
|
||||
# Generate previews
|
||||
log.info('Generating thumbnails for file %s', file_id)
|
||||
src_file['variations'] = generate_local_thumbnails(src_file['name'],
|
||||
local_file.name)
|
||||
local_path = pathlib.Path(local_file.name)
|
||||
name_base = pathlib.Path(src_file['name']).stem
|
||||
src_file['variations'] = imaging.generate_local_thumbnails(name_base, local_path)
|
||||
|
||||
# Send those previews to Google Cloud Storage.
|
||||
log.info('Uploading %i thumbnails for file %s to Google Cloud Storage (GCS)',
|
||||
len(src_file['variations']), file_id)
|
||||
log.info('Uploading %i thumbnails for file %s to Google Cloud Storage '
|
||||
'(GCS)', len(src_file['variations']), file_id)
|
||||
|
||||
# TODO: parallelize this at some point.
|
||||
for variation in src_file['variations']:
|
||||
fname = variation['file_path']
|
||||
if current_app.config['TESTING']:
|
||||
log.warning(' - NOT sending thumbnail %s to GCS', fname)
|
||||
log.warning(' - NOT sending thumbnail %s to %s', fname, bucket)
|
||||
else:
|
||||
log.debug(' - Sending thumbnail %s to GCS', fname)
|
||||
blob = gcs.bucket.blob('_/' + fname, chunk_size=256 * 1024 * 2)
|
||||
blob.upload_from_filename(variation['local_path'],
|
||||
content_type=variation['content_type'])
|
||||
blob = bucket.blob(fname)
|
||||
log.debug(' - Sending thumbnail %s to %s', fname, blob)
|
||||
blob.upload_from_path(pathlib.Path(variation['local_path']),
|
||||
content_type=variation['content_type'])
|
||||
|
||||
if variation.get('size') == 't':
|
||||
blob.make_public()
|
||||
@@ -141,8 +121,8 @@ def _process_image(gcs, file_id, local_file, src_file):
|
||||
try:
|
||||
os.unlink(variation['local_path'])
|
||||
except OSError:
|
||||
log.warning('Unable to unlink %s, ignoring this but it will need cleanup later.',
|
||||
variation['local_path'])
|
||||
log.warning('Unable to unlink %s, ignoring this but it will need '
|
||||
'cleanup later.', variation['local_path'])
|
||||
|
||||
del variation['local_path']
|
||||
|
||||
@@ -150,11 +130,99 @@ def _process_image(gcs, file_id, local_file, src_file):
|
||||
src_file['status'] = 'complete'
|
||||
|
||||
|
||||
def _process_video(gcs, file_id, local_file, src_file):
|
||||
"""Video is processed by Zencoder; the file isn't even stored locally."""
|
||||
def _video_size_pixels(filename: pathlib.Path) -> typing.Tuple[int, int]:
|
||||
"""Figures out the size (in pixels) of the video file.
|
||||
|
||||
Returns (0, 0) if there was any error detecting the size.
|
||||
"""
|
||||
|
||||
import json
|
||||
import subprocess
|
||||
|
||||
cli_args = [
|
||||
current_app.config['BIN_FFPROBE'],
|
||||
'-loglevel', 'error',
|
||||
'-hide_banner',
|
||||
'-print_format', 'json',
|
||||
'-select_streams', 'v:0', # we only care about the first video stream
|
||||
'-show_streams',
|
||||
str(filename),
|
||||
]
|
||||
|
||||
if log.isEnabledFor(logging.INFO):
|
||||
import shlex
|
||||
cmd = ' '.join(shlex.quote(s) for s in cli_args)
|
||||
log.info('Calling %s', cmd)
|
||||
|
||||
ffprobe = subprocess.run(
|
||||
cli_args,
|
||||
stdin=subprocess.DEVNULL,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
timeout=10, # seconds
|
||||
)
|
||||
|
||||
if ffprobe.returncode:
|
||||
import shlex
|
||||
cmd = ' '.join(shlex.quote(s) for s in cli_args)
|
||||
log.error('Error running %s: stopped with return code %i',
|
||||
cmd, ffprobe.returncode)
|
||||
log.error('Output was: %s', ffprobe.stdout)
|
||||
return 0, 0
|
||||
|
||||
try:
|
||||
ffprobe_info = json.loads(ffprobe.stdout)
|
||||
except json.JSONDecodeError:
|
||||
log.exception('ffprobe produced invalid JSON: %s', ffprobe.stdout)
|
||||
return 0, 0
|
||||
|
||||
try:
|
||||
stream_info = ffprobe_info['streams'][0]
|
||||
return stream_info['width'], stream_info['height']
|
||||
except (KeyError, IndexError):
|
||||
log.exception('ffprobe produced unexpected JSON: %s', ffprobe.stdout)
|
||||
return 0, 0
|
||||
|
||||
|
||||
def _video_cap_at_1080(width: int, height: int) -> typing.Tuple[int, int]:
|
||||
"""Returns an appropriate width/height for a video capped at 1920x1080.
|
||||
|
||||
Takes into account that h264 has limitations:
|
||||
- the width must be a multiple of 16
|
||||
- the height must be a multiple of 8
|
||||
"""
|
||||
|
||||
if width > 1920:
|
||||
# The height must be a multiple of 8
|
||||
new_height = height / width * 1920
|
||||
height = new_height - (new_height % 8)
|
||||
width = 1920
|
||||
|
||||
if height > 1080:
|
||||
# The width must be a multiple of 16
|
||||
new_width = width / height * 1080
|
||||
width = new_width - (new_width % 16)
|
||||
height = 1080
|
||||
|
||||
return int(width), int(height)
|
||||
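A quick worked example of the capping rule above, with the values computed by hand from the function as shown (h.264 wants the width to be a multiple of 16 and the height a multiple of 8):

# 4096x2160 source: width capped to 1920,
# new_height = 2160 / 4096 * 1920 = 1012.5, rounded down to a multiple of 8 -> 1008.
assert _video_cap_at_1080(4096, 2160) == (1920, 1008)
# Already within bounds: returned unchanged.
assert _video_cap_at_1080(1280, 720) == (1280, 720)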
|
||||
|
||||
def _process_video(gcs,
|
||||
file_id: ObjectId,
|
||||
local_file: tempfile._TemporaryFileWrapper,
|
||||
src_file: dict):
|
||||
"""Video is processed by Zencoder."""
|
||||
|
||||
log.info('Processing video for file %s', file_id)
|
||||
|
||||
# Use ffprobe to find the size (in pixels) of the video.
|
||||
# Even though Zencoder can do resizing to a maximum resolution without upscaling,
|
||||
# by determining the video size here we already have this information in the file
|
||||
# document before Zencoder calls our notification URL. It also opens up possibilities
|
||||
# for other encoding backends that don't support this functionality.
|
||||
video_width, video_height = _video_size_pixels(pathlib.Path(local_file.name))
|
||||
capped_video_width, capped_video_height = _video_cap_at_1080(video_width, video_height)
|
||||
|
||||
# Create variations
|
||||
root, _ = os.path.splitext(src_file['file_path'])
|
||||
src_file['variations'] = []
|
||||
@@ -167,8 +235,8 @@ def _process_video(gcs, file_id, local_file, src_file):
|
||||
file_path='{}-{}.{}'.format(root, v, v),
|
||||
size='',
|
||||
duration=0,
|
||||
width=0,
|
||||
height=0,
|
||||
width=capped_video_width,
|
||||
height=capped_video_height,
|
||||
length=0,
|
||||
md5='',
|
||||
)
|
||||
@@ -177,17 +245,19 @@ def _process_video(gcs, file_id, local_file, src_file):
|
||||
src_file['variations'].append(file_variation)
|
||||
|
||||
if current_app.config['TESTING']:
|
||||
log.warning('_process_video: NOT sending out encoding job due to TESTING=%r',
|
||||
current_app.config['TESTING'])
|
||||
j = type('EncoderJob', (), {'process_id': 'fake-process-id',
|
||||
'backend': 'fake'})
|
||||
log.warning('_process_video: NOT sending out encoding job due to '
|
||||
'TESTING=%r', current_app.config['TESTING'])
|
||||
j = {'process_id': 'fake-process-id',
|
||||
'backend': 'fake'}
|
||||
else:
|
||||
j = Encoder.job_create(src_file)
|
||||
if j is None:
|
||||
log.warning('_process_video: unable to create encoder job for file %s.', file_id)
|
||||
log.warning('_process_video: unable to create encoder job for file '
|
||||
'%s.', file_id)
|
||||
return
|
||||
|
||||
log.info('Created asynchronous Zencoder job %s for file %s', j['process_id'], file_id)
|
||||
log.info('Created asynchronous Zencoder job %s for file %s',
|
||||
j['process_id'], file_id)
|
||||
|
||||
# Add the processing status to the file object
|
||||
src_file['processing'] = {
|
||||
@@ -196,13 +266,14 @@ def _process_video(gcs, file_id, local_file, src_file):
|
||||
'backend': j['backend']}
|
||||
|
||||
|
||||
def process_file(gcs, file_id, local_file):
|
||||
def process_file(bucket: Bucket,
|
||||
file_id: typing.Union[str, ObjectId],
|
||||
local_file: tempfile._TemporaryFileWrapper):
|
||||
"""Process the file by creating thumbnails, sending to Zencoder, etc.
|
||||
|
||||
:param file_id: '_id' key of the file
|
||||
:type file_id: ObjectId or str
|
||||
:param local_file: locally stored file, or None if no local processing is needed.
|
||||
:type local_file: file
|
||||
:param local_file: locally stored file, or None if no local processing is
|
||||
needed.
|
||||
"""
|
||||
|
||||
file_id = ObjectId(file_id)
|
||||
@@ -219,8 +290,8 @@ def process_file(gcs, file_id, local_file):
|
||||
# TODO: overrule the content type based on file extension & magic numbers.
|
||||
mime_category, src_file['format'] = src_file['content_type'].split('/', 1)
|
||||
|
||||
# Prevent video handling for non-admins.
|
||||
if not user_has_role(u'admin') and mime_category == 'video':
|
||||
# Only allow video encoding when the user has the correct capability.
|
||||
if not current_user.has_cap('encode-video') and mime_category == 'video':
|
||||
if src_file['format'].startswith('x-'):
|
||||
xified = src_file['format']
|
||||
else:
|
||||
@@ -228,10 +299,10 @@ def process_file(gcs, file_id, local_file):
|
||||
|
||||
src_file['content_type'] = 'application/%s' % xified
|
||||
mime_category = 'application'
|
||||
log.info('Not processing video file %s for non-admin user', file_id)
|
||||
log.info('Not processing video file %s for non-video-encoding user', file_id)
|
||||
|
||||
# Run the required processor, based on the MIME category.
|
||||
processors = {
|
||||
processors: typing.Mapping[str, typing.Callable] = {
|
||||
'image': _process_image,
|
||||
'video': _process_video,
|
||||
}
|
||||
@@ -239,98 +310,84 @@ def process_file(gcs, file_id, local_file):
|
||||
try:
|
||||
processor = processors[mime_category]
|
||||
except KeyError:
|
||||
log.info("POSTed file %s was of type %r, which isn't thumbnailed/encoded.", file_id,
|
||||
log.info("POSTed file %s was of type %r, which isn't "
|
||||
"thumbnailed/encoded.", file_id,
|
||||
mime_category)
|
||||
src_file['status'] = 'complete'
|
||||
else:
|
||||
log.debug('process_file(%s): marking file status as "processing"', file_id)
|
||||
log.debug('process_file(%s): marking file status as "processing"',
|
||||
file_id)
|
||||
src_file['status'] = 'processing'
|
||||
update_file_doc(file_id, status='processing')
|
||||
|
||||
try:
|
||||
processor(gcs, file_id, local_file, src_file)
|
||||
processor(bucket, file_id, local_file, src_file)
|
||||
except Exception:
|
||||
log.warning('process_file(%s): error when processing file, resetting status to '
|
||||
log.warning('process_file(%s): error when processing file, '
|
||||
'resetting status to '
|
||||
'"queued_for_processing"', file_id, exc_info=True)
|
||||
update_file_doc(file_id, status='queued_for_processing')
|
||||
return
|
||||
|
||||
# Update the original file with additional info, e.g. image resolution
|
||||
r, _, _, status = put_internal('files', src_file, _id=file_id)
|
||||
r, _, _, status = current_app.put_internal('files', src_file, _id=file_id)
|
||||
if status not in (200, 201):
|
||||
log.warning('process_file(%s): status %i when saving processed file info to MongoDB: %s',
|
||||
log.warning('process_file(%s): status %i when saving processed file '
|
||||
'info to MongoDB: %s',
|
||||
file_id, status, r)
|
||||
|
||||
|
||||
def delete_file(file_item):
|
||||
def process_file_delete(file_item):
|
||||
"""Given a file item, delete the actual file from the storage backend.
|
||||
This function can probably be made self-calling."""
|
||||
if file_item['backend'] == 'gcs':
|
||||
storage = GoogleCloudStorageBucket(str(file_item['project']))
|
||||
storage.Delete(file_item['file_path'])
|
||||
# Delete any file variation found in the file_item document
|
||||
if 'variations' in file_item:
|
||||
for v in file_item['variations']:
|
||||
storage.Delete(v['file_path'])
|
||||
return True
|
||||
elif file_item['backend'] == 'pillar':
|
||||
pass
|
||||
elif file_item['backend'] == 'cdnsun':
|
||||
pass
|
||||
else:
|
||||
pass
|
||||
|
||||
files_collection = current_app.data.driver.db['files']
|
||||
# Collect children (variations) of the original file
|
||||
children = files_collection.find({'parent': file_item['_id']})
|
||||
for child in children:
|
||||
process_file_delete(child)
|
||||
# Finally remove the original file
|
||||
process_file_delete(file_item)
|
||||
|
||||
|
||||
def generate_link(backend, file_path, project_id=None, is_public=False):
|
||||
def generate_link(backend, file_path: str, project_id: str=None, is_public=False) -> str:
|
||||
"""Hook to check the backend of a file resource, to build an appropriate link
|
||||
that can be used by the client to retrieve the actual file.
|
||||
"""
|
||||
|
||||
if backend == 'gcs':
|
||||
storage = GoogleCloudStorageBucket(project_id)
|
||||
blob = storage.Get(file_path)
|
||||
# TODO: replace config['TESTING'] with mocking GCS.
|
||||
if backend == 'gcs' and current_app.config['TESTING']:
|
||||
log.info('Skipping GCS link generation, and returning a fake link '
|
||||
'instead.')
|
||||
return '/path/to/testing/gcs/%s' % file_path
|
||||
|
||||
if backend in {'gcs', 'local'}:
|
||||
from ..file_storage_backends import Bucket
|
||||
|
||||
bucket_cls = Bucket.for_backend(backend)
|
||||
storage = bucket_cls(project_id)
|
||||
blob = storage.get_blob(file_path)
|
||||
|
||||
if blob is None:
|
||||
log.warning('generate_link(%r, %r): unable to find blob for file'
|
||||
' path, returning empty link.', backend, file_path)
|
||||
return ''
|
||||
|
||||
if is_public:
|
||||
return blob['public_url']
|
||||
return blob['signed_url']
|
||||
return blob.get_url(is_public=is_public)
|
||||
|
||||
if backend == 'pillar':
|
||||
return url_for('file_storage.index', file_name=file_path, _external=True,
|
||||
_scheme=current_app.config['SCHEME'])
|
||||
if backend == 'pillar': # obsolete, replace with local.
|
||||
return url_for('file_storage.index', file_name=file_path,
|
||||
_external=True, _scheme=current_app.config['SCHEME'])
|
||||
if backend == 'cdnsun':
|
||||
return hash_file_path(file_path, None)
|
||||
if backend == 'unittest':
|
||||
return md5(file_path).hexdigest()
|
||||
return 'https://unit.test/%s' % md5(file_path.encode()).hexdigest()
|
||||
|
||||
log.warning('generate_link(): Unknown backend %r, returning empty string '
|
||||
'as new link.',
|
||||
backend)
|
||||
return ''
|
||||
|
||||
|
||||
def before_returning_file(response):
|
||||
ensure_valid_link(response)
|
||||
|
||||
# Enable this call later, when we have implemented the is_public field on files.
|
||||
# Enable this call later, when we have implemented the is_public field on
|
||||
# files.
|
||||
# strip_link_and_variations(response)
|
||||
|
||||
|
||||
def strip_link_and_variations(response):
|
||||
# Check the access level of the user.
|
||||
if g.current_user is None:
|
||||
has_full_access = False
|
||||
else:
|
||||
user_roles = g.current_user['roles']
|
||||
access_roles = current_app.config['FULL_FILE_ACCESS_ROLES']
|
||||
has_full_access = bool(user_roles.intersection(access_roles))
|
||||
capability = current_app.config['FULL_FILE_ACCESS_CAP']
|
||||
has_full_access = current_user.has_cap(capability)
|
||||
|
||||
# Strip all file variations (unless image) and link to the actual file.
|
||||
if not has_full_access:
|
||||
@@ -352,27 +409,28 @@ def ensure_valid_link(response):
|
||||
"""Ensures the file item has valid file links using generate_link(...)."""
|
||||
|
||||
# Log to function-specific logger, so we can easily turn it off.
|
||||
log = logging.getLogger('%s.ensure_valid_link' % __name__)
|
||||
log_link = logging.getLogger('%s.ensure_valid_link' % __name__)
|
||||
# log.debug('Inspecting link for file %s', response['_id'])
|
||||
|
||||
# Check link expiry.
|
||||
now = datetime.datetime.now(tz=bson.tz_util.utc)
|
||||
now = utils.utcnow()
|
||||
if 'link_expires' in response:
|
||||
link_expires = response['link_expires']
|
||||
if now < link_expires:
|
||||
# Not expired yet, so don't bother regenerating anything.
|
||||
log.debug('Link expires at %s, which is in the future, so not generating new link',
|
||||
link_expires)
|
||||
log_link.debug('Link expires at %s, which is in the future, so not '
|
||||
'generating new link', link_expires)
|
||||
return
|
||||
|
||||
log.debug('Link expired at %s, which is in the past; generating new link', link_expires)
|
||||
log_link.debug('Link expired at %s, which is in the past; generating '
|
||||
'new link', link_expires)
|
||||
else:
|
||||
log.debug('No expiry date for link; generating new link')
|
||||
log_link.debug('No expiry date for link; generating new link')
|
||||
|
||||
_generate_all_links(response, now)
|
||||
generate_all_links(response, now)
|
||||
|
||||
|
||||
def _generate_all_links(response, now):
|
||||
def generate_all_links(response, now):
|
||||
"""Generate a new link for the file and all its variations.
|
||||
|
||||
:param response: the file document that should be updated.
|
||||
@@ -380,64 +438,90 @@ def _generate_all_links(response, now):
|
||||
"""
|
||||
|
||||
project_id = str(
|
||||
response['project']) if 'project' in response else None # TODO: add project id to all files
|
||||
response['project']) if 'project' in response else None
|
||||
# TODO: add project id to all files
|
||||
backend = response['backend']
|
||||
response['link'] = generate_link(backend, response['file_path'], project_id)
|
||||
|
||||
if 'file_path' in response:
|
||||
response['link'] = generate_link(backend, response['file_path'], project_id)
|
||||
else:
|
||||
import pprint
|
||||
log.error('File without file_path property, unable to generate links: %s',
|
||||
pprint.pformat(response))
|
||||
return
|
||||
|
||||
variations = response.get('variations')
|
||||
if variations:
|
||||
for variation in variations:
|
||||
variation['link'] = generate_link(backend, variation['file_path'], project_id)
|
||||
variation['link'] = generate_link(backend, variation['file_path'],
|
||||
project_id)
|
||||
|
||||
# Construct the new expiry datetime.
|
||||
validity_secs = current_app.config['FILE_LINK_VALIDITY'][backend]
|
||||
response['link_expires'] = now + datetime.timedelta(seconds=validity_secs)
|
||||
|
||||
patch_info = remove_private_keys(response)
|
||||
|
||||
# The project could have been soft-deleted, in which case it's fine to
|
||||
# update the links to the file. However, Eve/Cerberus doesn't allow this;
|
||||
# removing the 'project' key from the PATCH works around this.
|
||||
patch_info.pop('project', None)
|
||||
|
||||
file_id = ObjectId(response['_id'])
|
||||
(patch_resp, _, _, _) = patch_internal('files', patch_info, _id=file_id)
|
||||
(patch_resp, _, _, _) = current_app.patch_internal('files', patch_info,
|
||||
_id=file_id)
|
||||
if patch_resp.get('_status') == 'ERR':
|
||||
log.warning('Unable to save new links for file %s: %r', response['_id'], patch_resp)
|
||||
log.warning('Unable to save new links for file %s: %r',
|
||||
response['_id'], patch_resp)
|
||||
# TODO: raise a snag.
|
||||
response['_updated'] = now
|
||||
else:
|
||||
response['_updated'] = patch_resp['_updated']
|
||||
|
||||
# Be silly and re-fetch the etag ourselves. TODO: handle this better.
|
||||
etag_doc = current_app.data.driver.db['files'].find_one({'_id': file_id}, {'_etag': 1})
|
||||
etag_doc = current_app.data.driver.db['files'].find_one({'_id': file_id},
|
||||
{'_etag': 1})
|
||||
response['_etag'] = etag_doc['_etag']
|
||||
|
||||
|
||||
def before_deleting_file(item):
|
||||
delete_file(item)
|
||||
|
||||
|
||||
def on_pre_get_files(_, lookup):
|
||||
# Override the HTTP header, we always want to fetch the document from MongoDB.
|
||||
# Override the HTTP header, we always want to fetch the document from
|
||||
# MongoDB.
|
||||
parsed_req = eve.utils.parse_request('files')
|
||||
parsed_req.if_modified_since = None
|
||||
|
||||
# If there is no lookup, we would refresh *all* file documents,
|
||||
# which is far too heavy to do in one client HTTP request.
|
||||
if not lookup:
|
||||
return
|
||||
|
||||
# Only fetch it if the date got expired.
|
||||
now = datetime.datetime.now(tz=bson.tz_util.utc)
|
||||
now = utils.utcnow()
|
||||
lookup_expired = lookup.copy()
|
||||
lookup_expired['link_expires'] = {'$lte': now}
|
||||
|
||||
cursor = current_app.data.find('files', parsed_req, lookup_expired)
|
||||
if cursor.count() == 0:
|
||||
return
|
||||
|
||||
log.debug('Updating expired links for %d files that matched lookup %s',
|
||||
cursor.count(), lookup_expired)
|
||||
for file_doc in cursor:
|
||||
# log.debug('Updating expired links for file %r.', file_doc['_id'])
|
||||
_generate_all_links(file_doc, now)
|
||||
generate_all_links(file_doc, now)
|
||||
|
||||
|
||||
def refresh_links_for_project(project_uuid, chunk_size, expiry_seconds):
|
||||
if chunk_size:
|
||||
log.info('Refreshing the first %i links for project %s', chunk_size, project_uuid)
|
||||
log.info('Refreshing the first %i links for project %s',
|
||||
chunk_size, project_uuid)
|
||||
else:
|
||||
log.info('Refreshing all links for project %s', project_uuid)
|
||||
|
||||
# Retrieve expired links.
|
||||
files_collection = current_app.data.driver.db['files']
|
||||
|
||||
now = datetime.datetime.now(tz=bson.tz_util.utc)
|
||||
now = utils.utcnow()
|
||||
expire_before = now + datetime.timedelta(seconds=expiry_seconds)
|
||||
log.info('Limiting to links that expire before %s', expire_before)
|
||||
|
||||
@@ -452,7 +536,7 @@ def refresh_links_for_project(project_uuid, chunk_size, expiry_seconds):
|
||||
|
||||
for file_doc in to_refresh:
|
||||
log.debug('Refreshing links for file %s', file_doc['_id'])
|
||||
_generate_all_links(file_doc, now)
|
||||
generate_all_links(file_doc, now)
|
||||
|
||||
log.info('Refreshed %i links', min(chunk_size, to_refresh.count()))
|
||||
|
||||
@@ -460,31 +544,43 @@ def refresh_links_for_project(project_uuid, chunk_size, expiry_seconds):
|
||||
def refresh_links_for_backend(backend_name, chunk_size, expiry_seconds):
|
||||
import gcloud.exceptions
|
||||
|
||||
my_log = log.getChild(f'refresh_links_for_backend.{backend_name}')
|
||||
|
||||
# Retrieve expired links.
|
||||
files_collection = current_app.data.driver.db['files']
|
||||
proj_coll = current_app.data.driver.db['projects']
|
||||
|
||||
now = datetime.datetime.now(tz=bson.tz_util.utc)
|
||||
now = utils.utcnow()
|
||||
expire_before = now + datetime.timedelta(seconds=expiry_seconds)
|
||||
log.info('Limiting to links that expire before %s', expire_before)
|
||||
my_log.info('Limiting to links that expire before %s', expire_before)
|
||||
|
||||
base_query = {'backend': backend_name, '_deleted': {'$ne': True}}
|
||||
to_refresh = files_collection.find(
|
||||
{'$or': [{'backend': backend_name, 'link_expires': None},
|
||||
{'backend': backend_name, 'link_expires': {'$lt': expire_before}},
|
||||
{'backend': backend_name, 'link': None}]
|
||||
}).sort([('link_expires', pymongo.ASCENDING)]).limit(chunk_size).batch_size(5)
|
||||
{'$or': [{'link_expires': None, **base_query},
|
||||
{'link_expires': {'$lt': expire_before}, **base_query},
|
||||
{'link': None, **base_query}]
|
||||
}).sort([('link_expires', pymongo.ASCENDING)]).limit(
|
||||
chunk_size).batch_size(5)
|
||||
|
||||
if to_refresh.count() == 0:
|
||||
log.info('No links to refresh.')
|
||||
document_count = to_refresh.count()
|
||||
if document_count == 0:
|
||||
my_log.info('No links to refresh.')
|
||||
return
|
||||
|
||||
if 0 < chunk_size == document_count:
|
||||
my_log.info('Found %d documents to refresh, probably limited by the chunk size.',
|
||||
document_count)
|
||||
else:
|
||||
my_log.info('Found %d documents to refresh.', document_count)
|
||||
|
||||
refreshed = 0
|
||||
report_chunks = min(max(5, document_count // 25), 100)
|
||||
for file_doc in to_refresh:
|
||||
try:
|
||||
file_id = file_doc['_id']
|
||||
project_id = file_doc.get('project')
|
||||
if project_id is None:
|
||||
log.debug('Skipping file %s, it has no project.', file_id)
|
||||
my_log.debug('Skipping file %s, it has no project.', file_id)
|
||||
continue
|
||||
|
||||
count = proj_coll.count({'_id': project_id, '$or': [
|
||||
@@ -493,44 +589,50 @@ def refresh_links_for_backend(backend_name, chunk_size, expiry_seconds):
|
||||
]})
|
||||
|
||||
if count == 0:
|
||||
log.debug('Skipping file %s, project %s does not exist.', file_id, project_id)
|
||||
my_log.debug('Skipping file %s, project %s does not exist.',
|
||||
file_id, project_id)
|
||||
continue
|
||||
|
||||
if 'file_path' not in file_doc:
|
||||
log.warning("Skipping file %s, missing 'file_path' property.", file_id)
|
||||
my_log.warning("Skipping file %s, missing 'file_path' property.",
|
||||
file_id)
|
||||
continue
|
||||
|
||||
log.debug('Refreshing links for file %s', file_id)
|
||||
my_log.debug('Refreshing links for file %s', file_id)
|
||||
|
||||
try:
|
||||
_generate_all_links(file_doc, now)
|
||||
generate_all_links(file_doc, now)
|
||||
except gcloud.exceptions.Forbidden:
|
||||
log.warning('Skipping file %s, GCS forbids us access to project %s bucket.',
|
||||
file_id, project_id)
|
||||
my_log.warning('Skipping file %s, GCS forbids us access to '
|
||||
'project %s bucket.', file_id, project_id)
|
||||
continue
|
||||
refreshed += 1
|
||||
|
||||
if refreshed % report_chunks == 0:
|
||||
my_log.info('Refreshed %i links', refreshed)
|
||||
except KeyboardInterrupt:
|
||||
log.warning('Aborting due to KeyboardInterrupt after refreshing %i links',
|
||||
refreshed)
|
||||
my_log.warning('Aborting due to KeyboardInterrupt after refreshing %i '
|
||||
'links', refreshed)
|
||||
return
|
||||
|
||||
log.info('Refreshed %i links', refreshed)
|
||||
my_log.info('Refreshed %i links', refreshed)
|
||||
|
||||
|
||||
@require_login()
|
||||
def create_file_doc(name, filename, content_type, length, project, backend='gcs',
|
||||
**extra_fields):
|
||||
def create_file_doc(name, filename, content_type, length, project,
|
||||
backend=None, **extra_fields):
|
||||
"""Creates a minimal File document for storage in MongoDB.
|
||||
|
||||
Doesn't save it to MongoDB yet.
|
||||
"""
|
||||
|
||||
current_user = g.get('current_user')
|
||||
if backend is None:
|
||||
backend = current_app.config['STORAGE_BACKEND']
|
||||
|
||||
file_doc = {'name': name,
|
||||
'filename': filename,
|
||||
'file_path': '',
|
||||
'user': current_user['user_id'],
|
||||
'user': current_user.user_id,
|
||||
'backend': backend,
|
||||
'md5': '',
|
||||
'content_type': content_type,
|
||||
@@ -571,14 +673,15 @@ def override_content_type(uploaded_file):
|
||||
# content_type property can't be set directly
|
||||
uploaded_file.headers['content-type'] = mimetype
|
||||
|
||||
# It has this, because we used uploaded_file.mimetype earlier this function.
|
||||
# It has this, because we used uploaded_file.mimetype earlier in this
|
||||
# function.
|
||||
del uploaded_file._parsed_content_type
|
||||
|
||||
|
||||
def assert_file_size_allowed(file_size):
|
||||
def assert_file_size_allowed(file_size: int):
|
||||
"""Asserts that the current user is allowed to upload a file of the given size.
|
||||
|
||||
:raises
|
||||
:raises wz_exceptions.RequestEntityTooLarge:
|
||||
"""
|
||||
|
||||
roles = current_app.config['ROLES_FOR_UNLIMITED_UPLOADS']
|
||||
@@ -590,15 +693,18 @@ def assert_file_size_allowed(file_size):
|
||||
return
|
||||
|
||||
filesize_limit_mb = filesize_limit / 2.0 ** 20
|
||||
log.info('User %s tried to upload a %.3f MiB file, but is only allowed %.3f MiB.',
|
||||
authentication.current_user_id(), file_size / 2.0 ** 20, filesize_limit_mb)
|
||||
log.info('User %s tried to upload a %.3f MiB file, but is only allowed '
|
||||
'%.3f MiB.',
|
||||
current_user.user_id, file_size / 2.0 ** 20,
|
||||
filesize_limit_mb)
|
||||
raise wz_exceptions.RequestEntityTooLarge(
|
||||
'To upload files larger than %i MiB, subscribe to Blender Cloud' % filesize_limit_mb)
|
||||
'To upload files larger than %i MiB, subscribe to Blender Cloud' %
|
||||
filesize_limit_mb)
|
||||
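For reference, the MiB figures logged above come from a straight division by 2**20; a quick worked sketch (the 32 MiB limit is invented for illustration and is not the actual config value):

filesize_limit = 32 * 2 ** 20                    # hypothetical limit in bytes
filesize_limit_mb = filesize_limit / 2.0 ** 20   # -> 32.0
file_size = 50 * 2 ** 20
print(file_size / 2.0 ** 20)                     # -> 50.0, as reported in the log message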
|
||||
|
||||
@file_storage.route('/stream/<string:project_id>', methods=['POST', 'OPTIONS'])
|
||||
@require_login()
|
||||
def stream_to_gcs(project_id):
|
||||
def stream_to_storage(project_id: str):
|
||||
project_oid = utils.str2id(project_id)
|
||||
|
||||
projects = current_app.data.driver.db['projects']
|
||||
@@ -608,95 +714,120 @@ def stream_to_gcs(project_id):
|
||||
raise wz_exceptions.NotFound('Project %s does not exist' % project_id)
|
||||
|
||||
log.info('Streaming file to bucket for project=%s user_id=%s', project_id,
|
||||
authentication.current_user_id())
|
||||
current_user.user_id)
|
||||
log.info('request.headers[Origin] = %r', request.headers.get('Origin'))
|
||||
log.info('request.content_length = %r', request.content_length)
|
||||
|
||||
# Try a check for the content length before we access request.files[].
|
||||
# This allows us to abort the upload early. The entire body content length
|
||||
# is always a bit larger than the actual file size, so if we accept here,
|
||||
# we're sure it'll be accepted in subsequent checks as well.
|
||||
if request.content_length:
|
||||
assert_file_size_allowed(request.content_length)
|
||||
|
||||
uploaded_file = request.files['file']
|
||||
|
||||
# Not every upload has a Content-Length header. If it was passed, we might as
|
||||
# well check for its value before we require the user to upload the entire file.
|
||||
# (At least I hope that this part of the code is processed before the body is
|
||||
# read in its entirety)
|
||||
# Not every upload has a Content-Length header. If it was passed, we might
|
||||
# as well check for its value before we require the user to upload the
|
||||
# entire file. (At least I hope that this part of the code is processed
|
||||
# before the body is read in its entirety)
|
||||
if uploaded_file.content_length:
|
||||
assert_file_size_allowed(uploaded_file.content_length)
|
||||
|
||||
override_content_type(uploaded_file)
|
||||
if not uploaded_file.content_type:
|
||||
log.warning('File uploaded to project %s without content type.', project_oid)
|
||||
log.warning('File uploaded to project %s without content type.',
|
||||
project_oid)
|
||||
raise wz_exceptions.BadRequest('Missing content type.')
|
||||
|
||||
if uploaded_file.content_type.startswith('image/'):
|
||||
# We need to do local thumbnailing, so we have to write the stream
|
||||
if uploaded_file.content_type.startswith('image/') or uploaded_file.content_type.startswith(
|
||||
'video/'):
|
||||
# We need to do local thumbnailing and ffprobe, so we have to write the stream
|
||||
# both to Google Cloud Storage and to local storage.
|
||||
local_file = tempfile.NamedTemporaryFile(dir=current_app.config['STORAGE_DIR'])
|
||||
local_file = tempfile.NamedTemporaryFile(
|
||||
dir=current_app.config['STORAGE_DIR'])
|
||||
uploaded_file.save(local_file)
|
||||
local_file.seek(0) # Make sure that a re-read starts from the beginning.
|
||||
stream_for_gcs = local_file
|
||||
local_file.seek(0) # Make sure that re-read starts from the beginning.
|
||||
else:
|
||||
local_file = None
|
||||
stream_for_gcs = uploaded_file.stream
|
||||
local_file = uploaded_file.stream
|
||||
|
||||
result = upload_and_process(local_file, uploaded_file, project_id)
|
||||
resp = jsonify(result)
|
||||
resp.status_code = result['status_code']
|
||||
add_access_control_headers(resp)
|
||||
return resp
|
||||
|
||||
|
||||
def upload_and_process(local_file: typing.Union[io.BytesIO, typing.BinaryIO],
|
||||
uploaded_file: werkzeug.datastructures.FileStorage,
|
||||
project_id: str):
|
||||
# Figure out the file size, as we need to pass this in explicitly to GCloud.
|
||||
# Otherwise it always uses os.fstat(file_obj.fileno()).st_size, which isn't
|
||||
# supported by a BytesIO object (even though it does have a fileno attribute).
|
||||
if isinstance(stream_for_gcs, io.BytesIO):
|
||||
file_size = len(stream_for_gcs.getvalue())
|
||||
# supported by a BytesIO object (even though it does have a fileno
|
||||
# attribute).
|
||||
if isinstance(local_file, io.BytesIO):
|
||||
file_size = len(local_file.getvalue())
|
||||
else:
|
||||
file_size = os.fstat(stream_for_gcs.fileno()).st_size
|
||||
file_size = os.fstat(local_file.fileno()).st_size
|
||||
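The size-detection branch above boils down to one small pattern; as a hedged sketch (the helper name file_size_of is not part of the codebase):

import io
import os

def file_size_of(fileobj) -> int:
    # io.BytesIO inherits a fileno() method, but calling it raises
    # io.UnsupportedOperation, so fall back to the in-memory buffer length.
    if isinstance(fileobj, io.BytesIO):
        return len(fileobj.getvalue())
    return os.fstat(fileobj.fileno()).st_size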
|
||||
# Check the file size again, now that we know its size for sure.
|
||||
assert_file_size_allowed(file_size)
|
||||
|
||||
# Create file document in MongoDB.
|
||||
file_id, internal_fname, status = create_file_doc_for_upload(project_oid, uploaded_file)
|
||||
file_id, internal_fname, status = create_file_doc_for_upload(project_id, uploaded_file)
|
||||
|
||||
if current_app.config['TESTING']:
|
||||
log.warning('NOT streaming to GCS because TESTING=%r', current_app.config['TESTING'])
|
||||
# Fake a Blob object.
|
||||
gcs = None
|
||||
blob = type('Blob', (), {'size': file_size})
|
||||
else:
|
||||
# Upload the file to GCS.
|
||||
from gcloud.streaming import transfer
|
||||
# Files larger than this many bytes will be streamed directly from disk, smaller
|
||||
# ones will be read into memory and then uploaded.
|
||||
transfer.RESUMABLE_UPLOAD_THRESHOLD = 102400
|
||||
try:
|
||||
gcs = GoogleCloudStorageBucket(project_id)
|
||||
blob = gcs.bucket.blob('_/' + internal_fname, chunk_size=256 * 1024 * 2)
|
||||
blob.upload_from_file(stream_for_gcs, size=file_size,
|
||||
content_type=uploaded_file.mimetype)
|
||||
except Exception:
|
||||
log.exception('Error uploading file to Google Cloud Storage (GCS),'
|
||||
' aborting handling of uploaded file (id=%s).', file_id)
|
||||
update_file_doc(file_id, status='failed')
|
||||
raise wz_exceptions.InternalServerError('Unable to stream file to Google Cloud Storage')
|
||||
# Copy the file into storage.
|
||||
bucket = default_storage_backend(project_id)
|
||||
blob = bucket.blob(internal_fname)
|
||||
blob.create_from_file(local_file,
|
||||
file_size=file_size,
|
||||
content_type=uploaded_file.mimetype)
|
||||
|
||||
if stream_for_gcs.closed:
|
||||
log.error('Eek, GCS closed its stream, Andy is not going to like this.')
|
||||
|
||||
# Reload the blob to get the file size according to Google.
|
||||
blob.reload()
|
||||
log.debug('Marking uploaded file id=%s, fname=%s, '
|
||||
'size=%i as "queued_for_processing"',
|
||||
file_id, internal_fname, file_size)
|
||||
update_file_doc(file_id,
|
||||
status='queued_for_processing',
|
||||
file_path=internal_fname,
|
||||
length=blob.size,
|
||||
content_type=uploaded_file.mimetype)
|
||||
|
||||
process_file(gcs, file_id, local_file)
|
||||
log.debug('Processing uploaded file id=%s, fname=%s, size=%i', file_id,
|
||||
internal_fname, blob.size)
|
||||
process_file(bucket, file_id, local_file)
|
||||
|
||||
# Local processing is done, we can close the local file so it is removed.
|
||||
if local_file is not None:
|
||||
local_file.close()
|
||||
|
||||
log.debug('Handled uploaded file id=%s, fname=%s, size=%i', file_id, internal_fname, blob.size)
|
||||
log.debug('Handled uploaded file id=%s, fname=%s, size=%i, status=%i',
|
||||
file_id, internal_fname, blob.size, status)
|
||||
|
||||
# Status is 200 if the file already existed, and 201 if it was newly created.
|
||||
# Status is 200 if the file already existed, and 201 if it was newly
|
||||
# created.
|
||||
# TODO: add a link to a thumbnail in the response.
|
||||
resp = jsonify(status='ok', file_id=str(file_id))
|
||||
resp.status_code = status
|
||||
add_access_control_headers(resp)
|
||||
return resp
|
||||
return dict(status='ok', file_id=str(file_id), status_code=status)
|
||||
|
||||
|
||||
from ..file_storage_backends.abstract import FileType
|
||||
|
||||
|
||||
def stream_to_gcs(file_id: ObjectId, file_size: int, internal_fname: str, project_id: ObjectId,
|
||||
stream_for_gcs: FileType, content_type: str) \
|
||||
-> typing.Tuple[GoogleCloudStorageBlob, GoogleCloudStorageBucket]:
|
||||
# Upload the file to GCS.
|
||||
try:
|
||||
bucket = GoogleCloudStorageBucket(str(project_id))
|
||||
blob = bucket.blob(internal_fname)
|
||||
blob.create_from_file(stream_for_gcs, file_size=file_size, content_type=content_type)
|
||||
except Exception:
|
||||
log.exception('Error uploading file to Google Cloud Storage (GCS),'
|
||||
' aborting handling of uploaded file (id=%s).', file_id)
|
||||
update_file_doc(file_id, status='failed')
|
||||
raise wz_exceptions.InternalServerError(
|
||||
'Unable to stream file to Google Cloud Storage')
|
||||
|
||||
return blob, bucket
|
||||
|
||||
|
||||
def add_access_control_headers(resp):
|
||||
@@ -710,15 +841,6 @@ def add_access_control_headers(resp):
|
||||
return resp
|
||||
|
||||
|
||||
def update_file_doc(file_id, **updates):
|
||||
files = current_app.data.driver.db['files']
|
||||
res = files.update_one({'_id': ObjectId(file_id)},
|
||||
{'$set': updates})
|
||||
log.debug('update_file_doc(%s, %s): %i matched, %i updated.',
|
||||
file_id, updates, res.matched_count, res.modified_count)
|
||||
return res
|
||||
|
||||
|
||||
def create_file_doc_for_upload(project_id, uploaded_file):
|
||||
"""Creates a secure filename and a document in MongoDB for the file.
|
||||
|
||||
@@ -756,16 +878,19 @@ def create_file_doc_for_upload(project_id, uploaded_file):
|
||||
if file_doc is None:
|
||||
# Create a file document on MongoDB for this file.
|
||||
file_doc = create_file_doc(name=internal_filename, **new_props)
|
||||
file_fields, _, _, status = post_internal('files', file_doc)
|
||||
file_fields, _, _, status = current_app.post_internal('files', file_doc)
|
||||
else:
|
||||
file_doc.update(new_props)
|
||||
file_fields, _, _, status = put_internal('files', remove_private_keys(file_doc))
|
||||
file_fields, _, _, status = current_app.put_internal('files', remove_private_keys(file_doc))
|
||||
|
||||
if status not in (200, 201):
|
||||
log.error('Unable to create new file document in MongoDB, status=%i: %s',
|
||||
status, file_fields)
|
||||
raise wz_exceptions.InternalServerError()
|
||||
|
||||
log.debug('Created file document %s for uploaded file %s; internal name %s',
|
||||
file_fields['_id'], uploaded_file.filename, internal_filename)
|
||||
|
||||
return file_fields['_id'], internal_filename, status
|
||||
|
||||
|
||||
@@ -793,10 +918,17 @@ def setup_app(app, url_prefix):
|
||||
app.on_fetched_item_files += before_returning_file
|
||||
app.on_fetched_resource_files += before_returning_files
|
||||
|
||||
app.on_delete_item_files += before_deleting_file
|
||||
|
||||
app.on_update_files += compute_aggregate_length
|
||||
app.on_replace_files += compute_aggregate_length
|
||||
app.on_insert_files += compute_aggregate_length_items
|
||||
|
||||
app.register_blueprint(file_storage, url_prefix=url_prefix)
|
||||
app.register_api_blueprint(file_storage, url_prefix=url_prefix)
|
||||
|
||||
|
||||
def update_file_doc(file_id, **updates):
|
||||
files = current_app.data.driver.db['files']
|
||||
res = files.update_one({'_id': ObjectId(file_id)},
|
||||
{'$set': updates})
|
||||
log.debug('update_file_doc(%s, %s): %i matched, %i updated.',
|
||||
file_id, updates, res.matched_count, res.modified_count)
|
||||
return res
|
||||
199
pillar/api/file_storage/moving.py
Normal file
@@ -0,0 +1,199 @@
|
||||
"""Code for moving files between backends."""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
import requests
|
||||
import requests.exceptions
|
||||
from bson import ObjectId
|
||||
from flask import current_app
|
||||
|
||||
from pillar.api import utils
|
||||
from . import stream_to_gcs, generate_all_links, ensure_valid_link
|
||||
|
||||
__all__ = ['PrerequisiteNotMetError', 'change_file_storage_backend', 'move_to_bucket']
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PrerequisiteNotMetError(RuntimeError):
|
||||
"""Raised when a file cannot be moved due to unmet prerequisites."""
|
||||
|
||||
|
||||
def change_file_storage_backend(file_id, dest_backend):
|
||||
"""Given a file document, move it to the specified backend (if not already
|
||||
there) and update the document to reflect that.
|
||||
Files on the original backend are not deleted automatically.
|
||||
"""
|
||||
|
||||
dest_backend = str(dest_backend)
|
||||
file_id = ObjectId(file_id)
|
||||
|
||||
# Fetch file document
|
||||
files_collection = current_app.data.driver.db['files']
|
||||
f = files_collection.find_one(file_id)
|
||||
if f is None:
|
||||
raise ValueError('File with _id: {} not found'.format(file_id))
|
||||
|
||||
# Check that new backend differs from current one
|
||||
if dest_backend == f['backend']:
|
||||
raise PrerequisiteNotMetError('Destination backend ({}) matches the current backend, we '
|
||||
'are not moving the file'.format(dest_backend))
|
||||
|
||||
# TODO Check that new backend is allowed (make conf var)
|
||||
|
||||
# Check that the file has a project; without project, we don't know
|
||||
# which bucket to store the file into.
|
||||
try:
|
||||
project_id = f['project']
|
||||
except KeyError:
|
||||
raise PrerequisiteNotMetError('File document does not have a project')
|
||||
|
||||
# Ensure that all links are up to date before we even attempt a download.
|
||||
ensure_valid_link(f)
|
||||
|
||||
# Upload file and variations to the new backend
|
||||
variations = f.get('variations', ())
|
||||
|
||||
try:
|
||||
copy_file_to_backend(file_id, project_id, f, f['backend'], dest_backend)
|
||||
except requests.exceptions.HTTPError as ex:
|
||||
# allow the main file to be removed from storage.
|
||||
if ex.response.status_code not in {404, 410}:
|
||||
raise
|
||||
if not variations:
|
||||
raise PrerequisiteNotMetError('Main file ({link}) does not exist on server, '
|
||||
'and no variations exist either'.format(**f))
|
||||
log.warning('Main file %s does not exist; skipping main and visiting variations', f['link'])
|
||||
|
||||
for var in variations:
|
||||
copy_file_to_backend(file_id, project_id, var, f['backend'], dest_backend)
|
||||
|
||||
# Generate new links for the file & all variations. This also saves
|
||||
# the new backend we set here.
|
||||
f['backend'] = dest_backend
|
||||
generate_all_links(f, utils.utcnow())
|
||||
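A minimal usage sketch, assuming this is called from a maintenance script with an application context pushed (the file ID is hypothetical):

from pillar.api.file_storage.moving import (
    PrerequisiteNotMetError, change_file_storage_backend)

try:
    change_file_storage_backend('5a1b2c3d4e5f6a7b8c9d0e1f', 'gcs')
except PrerequisiteNotMetError as ex:
    # File is already on that backend, has no project, or is missing on storage.
    print('Not moving file: %s' % ex)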
|
||||
|
||||
def copy_file_to_backend(file_id, project_id, file_or_var, src_backend, dest_backend):
|
||||
# Filenames on GCS do not contain paths, by our convention
|
||||
internal_fname = os.path.basename(file_or_var['file_path'])
|
||||
file_or_var['file_path'] = internal_fname
|
||||
|
||||
# If the file is not local already, fetch it
|
||||
if src_backend == 'pillar':
|
||||
local_finfo = fetch_file_from_local(file_or_var)
|
||||
else:
|
||||
local_finfo = fetch_file_from_link(file_or_var['link'])
|
||||
|
||||
try:
|
||||
# Upload to GCS
|
||||
if dest_backend != 'gcs':
|
||||
raise ValueError('Only dest_backend="gcs" is supported now.')
|
||||
|
||||
if current_app.config['TESTING']:
|
||||
log.warning('Skipping actual upload to GCS due to TESTING')
|
||||
else:
|
||||
# TODO check for name collisions
|
||||
stream_to_gcs(file_id, local_finfo['file_size'],
|
||||
internal_fname=internal_fname,
|
||||
project_id=project_id,
|
||||
stream_for_gcs=local_finfo['local_file'],
|
||||
content_type=local_finfo['content_type'])
|
||||
finally:
|
||||
# No longer needed, so it can be closed & disposed of.
|
||||
local_finfo['local_file'].close()
|
||||
|
||||
|
||||
def fetch_file_from_link(link):
|
||||
"""Utility to download a file from a remote location and return it with
|
||||
additional info (for upload to a different storage backend).
|
||||
"""
|
||||
|
||||
log.info('Downloading %s', link)
|
||||
r = requests.get(link, stream=True)
|
||||
r.raise_for_status()
|
||||
|
||||
local_file = tempfile.NamedTemporaryFile(dir=current_app.config['STORAGE_DIR'])
|
||||
log.info('Downloading to %s', local_file.name)
|
||||
|
||||
for chunk in r.iter_content(chunk_size=1024):
|
||||
if chunk:
|
||||
local_file.write(chunk)
|
||||
local_file.seek(0)
|
||||
|
||||
file_dict = {
|
||||
'file_size': os.fstat(local_file.fileno()).st_size,
|
||||
'content_type': r.headers.get('content-type', 'application/octet-stream'),
|
||||
'local_file': local_file
|
||||
}
|
||||
return file_dict
|
||||
|
||||
|
||||
def fetch_file_from_local(file_doc):
|
||||
"""Mimicks fetch_file_from_link(), but just returns the local file.
|
||||
|
||||
:param file_doc: dict with 'link' key pointing to a path in STORAGE_DIR, and
|
||||
'content_type' key.
|
||||
:type file_doc: dict
|
||||
:rtype: dict
|
||||
|
||||
"""
|
||||
|
||||
local_file = open(os.path.join(current_app.config['STORAGE_DIR'], file_doc['file_path']), 'rb')
|
||||
local_finfo = {
|
||||
'file_size': os.fstat(local_file.fileno()).st_size,
|
||||
'content_type': file_doc['content_type'],
|
||||
'local_file': local_file
|
||||
}
|
||||
return local_finfo
|
||||
|
||||
|
||||
def move_to_bucket(file_id: ObjectId, dest_project_id: ObjectId, *, skip_storage=False):
|
||||
"""Move a file + variations from its own bucket to the new project_id bucket.
|
||||
|
||||
:param file_id: ID of the file to move.
|
||||
:param dest_project_id: Project to move to.
|
||||
:param skip_storage: If True, the storage bucket will not be touched.
|
||||
Only use this when you know what you're doing.
|
||||
"""
|
||||
|
||||
files_coll = current_app.db('files')
|
||||
f = files_coll.find_one(file_id)
|
||||
if f is None:
|
||||
raise ValueError(f'File with _id: {file_id} not found')
|
||||
|
||||
# Move file and variations to the new bucket.
|
||||
if skip_storage:
|
||||
log.warning('NOT ACTUALLY MOVING file %s on storage, just updating MongoDB', file_id)
|
||||
else:
|
||||
from pillar.api.file_storage_backends import Bucket
|
||||
bucket_class = Bucket.for_backend(f['backend'])
|
||||
src_bucket = bucket_class(str(f['project']))
|
||||
dst_bucket = bucket_class(str(dest_project_id))
|
||||
|
||||
src_blob = src_bucket.get_blob(f['file_path'])
|
||||
src_bucket.copy_blob(src_blob, dst_bucket)
|
||||
|
||||
for var in f.get('variations', []):
|
||||
src_blob = src_bucket.get_blob(var['file_path'])
|
||||
src_bucket.copy_blob(src_blob, dst_bucket)
|
||||
|
||||
# Update the file document after moving was successful.
|
||||
# No need to update _etag or _updated, since that'll be done when
|
||||
# the links are regenerated at the end of this function.
|
||||
log.info('Switching file %s to project %s', file_id, dest_project_id)
|
||||
update_result = files_coll.update_one({'_id': file_id},
|
||||
{'$set': {'project': dest_project_id}})
|
||||
if update_result.matched_count != 1:
|
||||
raise RuntimeError(
|
||||
'Unable to update file %s in MongoDB: matched_count=%i; modified_count=%i' % (
|
||||
file_id, update_result.matched_count, update_result.modified_count))
|
||||
|
||||
log.info('Switching file %s: matched_count=%i; modified_count=%i',
|
||||
file_id, update_result.matched_count, update_result.modified_count)
|
||||
|
||||
# Regenerate the links for this file
|
||||
f['project'] = dest_project_id
|
||||
generate_all_links(f, now=utils.utcnow())
|
||||
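As a hedged sketch of how this could be invoked (the ObjectIds are placeholders, and a Flask app context is assumed):

from bson import ObjectId
from pillar.api.file_storage.moving import move_to_bucket

move_to_bucket(ObjectId('5a1b2c3d4e5f6a7b8c9d0e1f'),
               ObjectId('5a1b2c3d4e5f6a7b8c9d0e20'),
               skip_storage=False)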
29
pillar/api/file_storage_backends/__init__.py
Normal file
@@ -0,0 +1,29 @@
|
||||
"""Storage backends.
|
||||
|
||||
To obtain a storage backend, use either of the two forms:
|
||||
|
||||
>>> bucket = default_storage_backend('bucket_name')
|
||||
|
||||
>>> BucketClass = Bucket.for_backend('backend_name')
|
||||
>>> bucket = BucketClass('bucket_name')
|
||||
|
||||
"""
|
||||
|
||||
from .abstract import Bucket
|
||||
|
||||
# Import the other backends so that they register.
|
||||
from . import local
|
||||
from . import gcs
|
||||
|
||||
|
||||
def default_storage_backend(name: str) -> Bucket:
|
||||
"""Returns an instance of a Bucket, based on the default backend.
|
||||
|
||||
Depending on the backend this may actually create the bucket.
|
||||
"""
|
||||
from flask import current_app
|
||||
|
||||
backend_name = current_app.config['STORAGE_BACKEND']
|
||||
backend_cls = Bucket.for_backend(backend_name)
|
||||
|
||||
return backend_cls(name)
|
||||
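A small sketch of both lookup forms from the module docstring, assuming an app context is active and using a made-up project ID as the bucket name:

from pillar.api.file_storage_backends import Bucket, default_storage_backend

# Uses the STORAGE_BACKEND config value ('local' or 'gcs').
bucket = default_storage_backend('5a1b2c3d4e5f6a7b8c9d0e1f')

# Or pick a backend explicitly, bypassing the config.
LocalBucketClass = Bucket.for_backend('local')
local_bucket = LocalBucketClass('5a1b2c3d4e5f6a7b8c9d0e1f')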
161
pillar/api/file_storage_backends/abstract.py
Normal file
@@ -0,0 +1,161 @@
|
||||
import abc
|
||||
import io
|
||||
import logging
|
||||
import typing
|
||||
|
||||
import pathlib
|
||||
from bson import ObjectId
|
||||
|
||||
__all__ = ['Bucket', 'Blob', 'Path', 'FileType']
|
||||
|
||||
# Shorthand for the type of path we use.
|
||||
Path = pathlib.PurePosixPath
|
||||
|
||||
# This is a mess: typing.IO keeps mypy-0.501 happy, but not in all cases,
|
||||
# and io.FileIO + io.BytesIO keeps PyCharm-2017.1 happy.
|
||||
FileType = typing.Union[typing.IO, io.FileIO, io.BytesIO]
|
||||
|
||||
|
||||
class Bucket(metaclass=abc.ABCMeta):
|
||||
"""Can be a GCS bucket or simply a project folder in Pillar
|
||||
|
||||
:type name: string
|
||||
:param name: Name of the bucket. As a convention, we use the ID of
|
||||
the project to name the bucket.
|
||||
|
||||
"""
|
||||
|
||||
# Mapping from backend name to Bucket class
|
||||
backends: typing.Dict[str, typing.Type['Bucket']] = {}
|
||||
|
||||
backend_name: str = None # define in subclass.
|
||||
|
||||
def __init__(self, name: str) -> None:
|
||||
self.name = str(name)
|
||||
|
||||
def __init_subclass__(cls):
|
||||
assert cls.backend_name, '%s.backend_name must be non-empty string' % cls
|
||||
cls.backends[cls.backend_name] = cls
|
||||
|
||||
def __repr__(self):
|
||||
return f'<{self.__class__.__name__} name={self.name!r}>'
|
||||
|
||||
@classmethod
|
||||
def for_backend(cls, backend_name: str) -> typing.Type['Bucket']:
|
||||
"""Returns the Bucket subclass for the given backend."""
|
||||
return cls.backends[backend_name]
|
||||
|
||||
@abc.abstractmethod
|
||||
def blob(self, blob_name: str) -> 'Blob':
|
||||
"""Factory constructor for blob object.
|
||||
|
||||
:param blob_name: The path of the blob to be instantiated.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_blob(self, blob_name: str) -> typing.Optional['Blob']:
|
||||
"""Get a blob object by name.
|
||||
|
||||
If the blob exists return the object, otherwise None.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def copy_blob(self, blob: 'Blob', to_bucket: 'Bucket'):
|
||||
"""Copies a blob from the current bucket to the other bucket.
|
||||
|
||||
Implementations only need to support copying between buckets of the
|
||||
same storage backend.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def rename_blob(self, blob: 'Blob', new_name: str) -> 'Blob':
|
||||
"""Rename the blob, returning the new Blob."""
|
||||
|
||||
@classmethod
|
||||
def copy_to_bucket(cls, blob_name, src_project_id: ObjectId, dest_project_id: ObjectId):
|
||||
"""Copies a file from one bucket to the other."""
|
||||
|
||||
src_storage = cls(str(src_project_id))
|
||||
dest_storage = cls(str(dest_project_id))
|
||||
|
||||
blob = src_storage.get_blob(blob_name)
|
||||
src_storage.copy_blob(blob, dest_storage)
|
||||
|
||||
|
||||
Bu = typing.TypeVar('Bu', bound=Bucket)
|
||||
|
||||
|
||||
class Blob(metaclass=abc.ABCMeta):
|
||||
"""A wrapper for file or blob objects."""
|
||||
|
||||
def __init__(self, name: str, bucket: Bucket) -> None:
|
||||
self.name = name
|
||||
self.bucket = bucket
|
||||
self._size_in_bytes: typing.Optional[int] = None
|
||||
|
||||
self.filename: str = None
|
||||
"""Name of the file for the Content-Disposition header when downloading it."""
|
||||
|
||||
self._log = logging.getLogger(f'{__name__}.Blob')
|
||||
|
||||
def __repr__(self):
|
||||
return f'<{self.__class__.__name__} bucket={self.bucket.name!r} name={self.name!r}>'
|
||||
|
||||
@property
|
||||
def size(self) -> typing.Optional[int]:
|
||||
"""Size of the object, in bytes.
|
||||
|
||||
:returns: The size of the blob or ``None`` if the property
|
||||
is not set locally.
|
||||
"""
|
||||
|
||||
size = self._size_in_bytes
|
||||
if size is None:
|
||||
return None
|
||||
return int(size)
|
||||
|
||||
@abc.abstractmethod
|
||||
def create_from_file(self, file_obj: FileType, *,
|
||||
content_type: str,
|
||||
file_size: int = -1):
|
||||
"""Copies the file object to the storage.
|
||||
|
||||
:param file_obj: The file object to send to storage.
|
||||
:param content_type: The content type of the file.
|
||||
:param file_size: The size of the file in bytes, or -1 if unknown
|
||||
"""
|
||||
|
||||
def upload_from_path(self, path: pathlib.Path, content_type: str):
|
||||
file_size = path.stat().st_size
|
||||
|
||||
with path.open('rb') as infile:
|
||||
self.create_from_file(infile, content_type=content_type,
|
||||
file_size=file_size)
|
||||
|
||||
@abc.abstractmethod
|
||||
def update_filename(self, filename: str):
|
||||
"""Sets the filename which is used when downloading the file.
|
||||
|
||||
Not all storage backends support this, and will use the on-disk filename instead.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_url(self, *, is_public: bool) -> str:
|
||||
"""Returns the URL to access this blob.
|
||||
|
||||
Note that this may involve API calls to generate a signed URL.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def make_public(self):
|
||||
"""Makes the blob publicly available.
|
||||
|
||||
Only performs an actual action on backends that support temporary links.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def exists(self) -> bool:
|
||||
"""Returns True iff the file exists on the storage backend."""
|
||||
|
||||
|
||||
Bl = typing.TypeVar('Bl', bound=Blob)
|
||||
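Because __init_subclass__ registers every concrete subclass under its backend_name, adding a backend only requires subclassing Bucket and Blob. The in-memory backend below is purely illustrative and not part of Pillar:

import typing
from pillar.api.file_storage_backends.abstract import Bucket, Blob, FileType

class MemoryBucket(Bucket):
    backend_name = 'memory'   # key under which the class is registered

    def __init__(self, name: str) -> None:
        super().__init__(name)
        self._files: typing.Dict[str, bytes] = {}

    def blob(self, blob_name: str) -> 'MemoryBlob':
        return MemoryBlob(blob_name, self)

    def get_blob(self, blob_name: str) -> typing.Optional['MemoryBlob']:
        return self.blob(blob_name) if blob_name in self._files else None

    def copy_blob(self, blob: 'MemoryBlob', to_bucket: 'MemoryBucket'):
        to_bucket._files[blob.name] = self._files[blob.name]

    def rename_blob(self, blob: 'MemoryBlob', new_name: str) -> 'MemoryBlob':
        self._files[new_name] = self._files.pop(blob.name)
        return self.blob(new_name)

class MemoryBlob(Blob):
    def create_from_file(self, file_obj: FileType, *, content_type: str, file_size: int = -1):
        data = file_obj.read()
        self.bucket._files[self.name] = data
        self._size_in_bytes = len(data)

    def update_filename(self, filename: str):
        self.filename = filename

    def get_url(self, *, is_public: bool) -> str:
        return f'memory://{self.bucket.name}/{self.name}'

    def make_public(self):
        pass

    def exists(self) -> bool:
        return self.name in self.bucket._files

# Registration happens as a side effect of the class definitions above:
assert Bucket.for_backend('memory') is MemoryBucket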
263
pillar/api/file_storage_backends/gcs.py
Normal file
@@ -0,0 +1,263 @@
|
||||
import os
|
||||
import datetime
|
||||
import logging
|
||||
import typing
|
||||
|
||||
from bson import ObjectId
|
||||
from gcloud.storage.client import Client
|
||||
import gcloud.storage.blob
|
||||
import gcloud.exceptions as gcloud_exc
|
||||
from flask import current_app, g
|
||||
from werkzeug.local import LocalProxy
|
||||
|
||||
from pillar.api import utils
|
||||
from .abstract import Bucket, Blob, FileType
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_client() -> Client:
|
||||
"""Stores the GCS client on the global Flask object.
|
||||
|
||||
The GCS client is not user-specific anyway.
|
||||
"""
|
||||
|
||||
_gcs = getattr(g, '_gcs_client', None)
|
||||
if _gcs is None:
|
||||
_gcs = g._gcs_client = Client()
|
||||
return _gcs
|
||||
|
||||
|
||||
# This hides the specifics of how/where we store the GCS client,
|
||||
# and allows the rest of the code to use 'gcs' as a simple variable
|
||||
# that does the right thing.
|
||||
gcs: Client = LocalProxy(get_client)
|
||||
|
||||
|
||||
class GoogleCloudStorageBucket(Bucket):
|
||||
"""Cloud Storage bucket interface. We create a bucket for every project. In
|
||||
the bucket we create first level subdirs as follows:
|
||||
- '_' (will contain hashed assets, and stays on top of default listing)
|
||||
- 'svn' (svn checkout mirror)
|
||||
- 'shared' (any additional folder or static asset that is accessed via a
|
||||
node of 'storage' node_type)
|
||||
|
||||
:type bucket_name: string
|
||||
:param bucket_name: Name of the bucket.
|
||||
|
||||
:type subdir: string
|
||||
:param subdir: The local entry point to browse the bucket.
|
||||
|
||||
"""
|
||||
|
||||
backend_name = 'gcs'
|
||||
|
||||
def __init__(self, name: str, subdir='_') -> None:
|
||||
super().__init__(name=name)
|
||||
|
||||
self._log = logging.getLogger(f'{__name__}.GoogleCloudStorageBucket')
|
||||
|
||||
try:
|
||||
self._gcs_bucket = gcs.get_bucket(name)
|
||||
except gcloud_exc.NotFound:
|
||||
self._gcs_bucket = gcs.bucket(name)
|
||||
# Hardcode the bucket location to EU
|
||||
self._gcs_bucket.location = 'EU'
|
||||
# Optionally enable CORS from * (currently only used for vrview)
|
||||
# self.gcs_bucket.cors = [
|
||||
# {
|
||||
# "origin": ["*"],
|
||||
# "responseHeader": ["Content-Type"],
|
||||
# "method": ["GET", "HEAD", "DELETE"],
|
||||
# "maxAgeSeconds": 3600
|
||||
# }
|
||||
# ]
|
||||
self._gcs_bucket.create()
|
||||
log.info('Created GCS instance for project %s', name)
|
||||
|
||||
self.subdir = subdir
|
||||
|
||||
def blob(self, blob_name: str) -> 'GoogleCloudStorageBlob':
|
||||
return GoogleCloudStorageBlob(name=blob_name, bucket=self)
|
||||
|
||||
def get_blob(self, internal_fname: str) -> typing.Optional['GoogleCloudStorageBlob']:
|
||||
blob = self.blob(internal_fname)
|
||||
if not blob.gblob.exists():
|
||||
return None
|
||||
return blob
|
||||
|
||||
def _gcs_get(self, path: str, *, chunk_size=None) -> gcloud.storage.Blob:
|
||||
"""Get selected file info if the path matches.
|
||||
|
||||
:param path: The path to the file, relative to the bucket's subdir.
|
||||
"""
|
||||
path = os.path.join(self.subdir, path)
|
||||
blob = self._gcs_bucket.blob(path, chunk_size=chunk_size)
|
||||
return blob
|
||||
|
||||
def _gcs_post(self, full_path, *, path=None) -> typing.Optional[gcloud.storage.Blob]:
|
||||
"""Create new blob and upload data to it.
|
||||
"""
|
||||
path = path if path else os.path.join(self.subdir, os.path.basename(full_path))
|
||||
gblob = self._gcs_bucket.blob(path)
|
||||
if gblob.exists():
|
||||
self._log.error(f'Trying to upload to {path}, but that blob already exists. '
|
||||
f'Not uploading.')
|
||||
return None
|
||||
|
||||
gblob.upload_from_filename(full_path)
|
||||
return gblob
|
||||
# return self.blob_to_dict(blob) # Has issues with threading
|
||||
|
||||
def delete_blob(self, path: str) -> bool:
|
||||
"""Deletes the blob (when removing an asset or replacing a preview)"""
|
||||
|
||||
# We want to get the actual blob to delete
|
||||
gblob = self._gcs_get(path)
|
||||
try:
|
||||
gblob.delete()
|
||||
return True
|
||||
except gcloud_exc.NotFound:
|
||||
return False
|
||||
|
||||
def copy_blob(self, blob: Blob, to_bucket: Bucket):
|
||||
"""Copies the given blob from this bucket to the other bucket.
|
||||
|
||||
Returns the new blob.
|
||||
"""
|
||||
|
||||
assert isinstance(blob, GoogleCloudStorageBlob)
|
||||
assert isinstance(to_bucket, GoogleCloudStorageBucket)
|
||||
|
||||
self._log.info('Copying %s to bucket %s', blob, to_bucket)
|
||||
|
||||
return self._gcs_bucket.copy_blob(blob.gblob, to_bucket._gcs_bucket)
|
||||
|
||||
def rename_blob(self, blob: 'GoogleCloudStorageBlob', new_name: str) \
|
||||
-> 'GoogleCloudStorageBlob':
|
||||
"""Rename the blob, returning the new Blob."""
|
||||
|
||||
assert isinstance(blob, GoogleCloudStorageBlob)
|
||||
|
||||
new_name = os.path.join(self.subdir, new_name)
|
||||
|
||||
self._log.info('Renaming %s to %r', blob, new_name)
|
||||
new_gblob = self._gcs_bucket.rename_blob(blob.gblob, new_name)
|
||||
return GoogleCloudStorageBlob(new_gblob.name, self, gblob=new_gblob)
|
||||
|
||||
|
||||
class GoogleCloudStorageBlob(Blob):
|
||||
"""GCS blob interface."""
|
||||
|
||||
def __init__(self, name: str, bucket: GoogleCloudStorageBucket,
|
||||
*, gblob: gcloud.storage.blob.Blob=None) -> None:
|
||||
super().__init__(name, bucket)
|
||||
|
||||
self._log = logging.getLogger(f'{__name__}.GoogleCloudStorageBlob')
|
||||
self.gblob = gblob or bucket._gcs_get(name, chunk_size=256 * 1024 * 2)
|
||||
|
||||
def create_from_file(self, file_obj: FileType, *,
|
||||
content_type: str,
|
||||
file_size: int = -1) -> None:
|
||||
from gcloud.streaming import transfer
|
||||
|
||||
self._log.debug('Streaming file to GCS bucket %r, size=%i', self, file_size)
|
||||
|
||||
# Files larger than this many bytes will be streamed directly from disk,
|
||||
# smaller ones will be read into memory and then uploaded.
|
||||
transfer.RESUMABLE_UPLOAD_THRESHOLD = 102400
|
||||
self.gblob.upload_from_file(file_obj,
|
||||
size=file_size,
|
||||
content_type=content_type)
|
||||
|
||||
# Reload the blob to get the file size according to Google.
|
||||
self.gblob.reload()
|
||||
self._size_in_bytes = self.gblob.size
|
||||
|
||||
def update_filename(self, filename: str):
|
||||
"""Set the ContentDisposition metadata so that when a file is downloaded
|
||||
it has a human-readable name.
|
||||
"""
|
||||
|
||||
if '"' in filename:
|
||||
raise ValueError(f'Filename is not allowed to have double quote in it: {filename!r}')
|
||||
|
||||
self.gblob.content_disposition = f'attachment; filename="{filename}"'
|
||||
self.gblob.patch()
|
||||
|
||||
def get_url(self, *, is_public: bool) -> str:
|
||||
if is_public:
|
||||
return self.gblob.public_url
|
||||
|
||||
expiration = utils.utcnow() + datetime.timedelta(days=1)
|
||||
return self.gblob.generate_signed_url(expiration)
|
||||
|
||||
def make_public(self):
|
||||
self.gblob.make_public()
|
||||
|
||||
def exists(self) -> bool:
|
||||
# Reload to get the actual file properties from Google.
|
||||
try:
|
||||
self.gblob.reload()
|
||||
except gcloud_exc.NotFound:
|
||||
return False
|
||||
return self.gblob.exists()
|
||||
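A hedged usage sketch of the two classes above; the project ID, blob name and local path are placeholders, and running this would require valid GCS credentials plus an app context:

import os
from pillar.api.file_storage_backends.gcs import GoogleCloudStorageBucket

project_bucket = GoogleCloudStorageBucket('5a1b2c3d4e5f6a7b8c9d0e1f')
blob = project_bucket.blob('abcdef1234567890-texture.png')

with open('/tmp/texture.png', 'rb') as fileobj:
    blob.create_from_file(fileobj,
                          content_type='image/png',
                          file_size=os.path.getsize('/tmp/texture.png'))

blob.update_filename('texture.png')    # sets the Content-Disposition header
url = blob.get_url(is_public=False)    # signed URL, valid for one day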
|
||||
|
||||
def update_file_name(node):
|
||||
"""Assign to the CGS blob the same name of the asset node. This way when
|
||||
downloading an asset we get a human-readable name.
|
||||
"""
|
||||
|
||||
# Process only files that are not processing
|
||||
if node['properties'].get('status', '') == 'processing':
|
||||
return
|
||||
|
||||
def _format_name(name, override_ext, size=None, map_type=''):
|
||||
root, _ = os.path.splitext(name)
|
||||
size = '-{}'.format(size) if size else ''
|
||||
map_type = '-{}'.format(map_type) if map_type else ''
|
||||
return '{}{}{}{}'.format(root, size, map_type, override_ext)
|
||||
|
||||
def _update_name(file_id, file_props):
|
||||
files_collection = current_app.data.driver.db['files']
|
||||
file_doc = files_collection.find_one({'_id': ObjectId(file_id)})
|
||||
|
||||
if file_doc is None or file_doc.get('backend') != 'gcs':
|
||||
return
|
||||
|
||||
# For textures -- the map type should be part of the name.
|
||||
map_type = file_props.get('map_type', '')
|
||||
|
||||
storage = GoogleCloudStorageBucket(str(node['project']))
|
||||
blob = storage.get_blob(file_doc['file_path'])
|
||||
if blob is None:
|
||||
log.warning('Unable to find blob for file %s in project %s',
|
||||
file_doc['file_path'], file_doc['project'])
|
||||
return
|
||||
|
||||
# Pick file extension from original filename
|
||||
_, ext = os.path.splitext(file_doc['filename'])
|
||||
name = _format_name(node['name'], ext, map_type=map_type)
|
||||
blob.update_filename(name)
|
||||
|
||||
# Assign the same name to variations
|
||||
for v in file_doc.get('variations', []):
|
||||
_, override_ext = os.path.splitext(v['file_path'])
|
||||
name = _format_name(node['name'], override_ext, v['size'], map_type=map_type)
|
||||
blob = storage.get_blob(v['file_path'])
|
||||
if blob is None:
|
||||
log.info('Unable to find blob for file %s in project %s. This can happen if the '
|
||||
'video encoding is still processing.', v['file_path'], node['project'])
|
||||
continue
|
||||
blob.update_filename(name)
|
||||
|
||||
# Currently we search for 'file' and 'files' keys in the object properties.
|
||||
# This could become a bit more flexible and rely on a true reference of the
|
||||
# file object type from the schema.
|
||||
if 'file' in node['properties']:
|
||||
_update_name(node['properties']['file'], {})
|
||||
|
||||
if 'files' in node['properties']:
|
||||
for file_props in node['properties']['files']:
|
||||
_update_name(file_props['file'], file_props)
|
||||
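For clarity, _format_name splices the size and map type between the node name and the variation's extension. It is a closure inside update_file_name(), so the standalone restatement below (with made-up values) is illustrative only:

import os

def _format_name(name, override_ext, size=None, map_type=''):
    # Same logic as the closure inside update_file_name() above.
    root, _ = os.path.splitext(name)
    size = '-{}'.format(size) if size else ''
    map_type = '-{}'.format(map_type) if map_type else ''
    return '{}{}{}{}'.format(root, size, map_type, override_ext)

print(_format_name('rock.blend', '.jpg', size='1024', map_type='col'))  # rock-1024-col.jpg
print(_format_name('rock.blend', '.png'))                               # rock.png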
131
pillar/api/file_storage_backends/local.py
Normal file
@@ -0,0 +1,131 @@
|
||||
import logging
|
||||
import pathlib
|
||||
import typing
|
||||
|
||||
from flask import current_app
|
||||
|
||||
__all__ = ['LocalBucket', 'LocalBlob']
|
||||
|
||||
from .abstract import Bucket, Blob, FileType, Path
|
||||
|
||||
|
||||
class LocalBucket(Bucket):
|
||||
backend_name = 'local'
|
||||
|
||||
def __init__(self, name: str) -> None:
|
||||
super().__init__(name)
|
||||
|
||||
self._log = logging.getLogger(f'{__name__}.LocalBucket')
|
||||
|
||||
# For local storage, the name is actually a partial path, relative
|
||||
# to the local storage root.
|
||||
self.root = pathlib.Path(current_app.config['STORAGE_DIR'])
|
||||
self.bucket_path = pathlib.PurePosixPath(self.name[:2]) / self.name
|
||||
self.abspath = self.root / self.bucket_path
|
||||
|
||||
def blob(self, blob_name: str) -> 'LocalBlob':
|
||||
return LocalBlob(name=blob_name, bucket=self)
|
||||
|
||||
def get_blob(self, blob_name: str) -> typing.Optional['LocalBlob']:
|
||||
# TODO: Check if file exists, otherwise None
|
||||
return self.blob(blob_name)
|
||||
|
||||
def copy_blob(self, blob: Blob, to_bucket: Bucket):
|
||||
"""Copies a blob from the current bucket to the other bucket.
|
||||
|
||||
Implementations only need to support copying between buckets of the
|
||||
same storage backend.
|
||||
"""
|
||||
|
||||
assert isinstance(blob, LocalBlob)
|
||||
assert isinstance(to_bucket, LocalBucket)
|
||||
|
||||
self._log.info('Copying %s to bucket %s', blob, to_bucket)
|
||||
|
||||
dest_blob = to_bucket.blob(blob.name)
|
||||
|
||||
# TODO: implement content type handling for local storage.
|
||||
self._log.warning('Unable to set correct file content type for %s', dest_blob)
|
||||
|
||||
fpath = blob.abspath()
|
||||
if not fpath.exists():
|
||||
if not fpath.parent.exists():
|
||||
raise FileNotFoundError(f'File {fpath} does not exist, and neither does its parent,'
|
||||
f' unable to copy to {to_bucket}')
|
||||
raise FileNotFoundError(f'File {fpath} does not exist, unable to copy to {to_bucket}')
|
||||
|
||||
with open(fpath, 'rb') as src_file:
|
||||
dest_blob.create_from_file(src_file, content_type='application/x-octet-stream')
|
||||
|
||||
def rename_blob(self, blob: 'LocalBlob', new_name: str) -> 'LocalBlob':
|
||||
"""Rename the blob, returning the new Blob."""
|
||||
|
||||
assert isinstance(blob, LocalBlob)
|
||||
|
||||
self._log.info('Renaming %s to %r', blob, new_name)
|
||||
new_blob = LocalBlob(new_name, self)
|
||||
|
||||
old_path = blob.abspath()
|
||||
new_path = new_blob.abspath()
|
||||
new_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
old_path.rename(new_path)
|
||||
|
||||
return new_blob
|
||||
|
||||
|
||||
class LocalBlob(Blob):
|
||||
"""Blob representing a local file on the filesystem."""
|
||||
|
||||
bucket: LocalBucket
|
||||
|
||||
def __init__(self, name: str, bucket: LocalBucket) -> None:
|
||||
super().__init__(name, bucket)
|
||||
|
||||
self._log = logging.getLogger(f'{__name__}.LocalBlob')
|
||||
self.partial_path = Path(name[:2]) / name
|
||||
|
||||
def abspath(self) -> pathlib.Path:
|
||||
"""Returns a concrete, absolute path to the local file."""
|
||||
|
||||
return pathlib.Path(self.bucket.abspath / self.partial_path)
|
||||
|
||||
def get_url(self, *, is_public: bool) -> str:
|
||||
from flask import url_for
|
||||
|
||||
path = self.bucket.bucket_path / self.partial_path
|
||||
url = url_for('file_storage.index', file_name=str(path), _external=True,
|
||||
_scheme=current_app.config['SCHEME'])
|
||||
return url
|
||||
|
||||
def create_from_file(self, file_obj: FileType, *,
|
||||
content_type: str,
|
||||
file_size: int = -1):
|
||||
assert hasattr(file_obj, 'read')
|
||||
|
||||
import shutil
|
||||
|
||||
# Ensure path exists before saving
|
||||
my_path = self.abspath()
|
||||
my_path.parent.mkdir(exist_ok=True, parents=True)
|
||||
|
||||
with my_path.open('wb') as outfile:
|
||||
shutil.copyfileobj(typing.cast(typing.IO, file_obj), outfile)
|
||||
|
||||
self._size_in_bytes = file_size
|
||||
|
||||
def update_filename(self, filename: str):
|
||||
# TODO: implement this for local storage.
|
||||
self._log.info('update_filename(%r) not supported', filename)
|
||||
|
||||
def make_public(self):
|
||||
# No-op on this storage backend.
|
||||
pass
|
||||
|
||||
def exists(self) -> bool:
|
||||
return self.abspath().exists()
|
||||
|
||||
def touch(self):
|
||||
"""Touch the file, creating parent directories if needed."""
|
||||
path = self.abspath()
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
path.touch(exist_ok=True)
|
||||
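The local backend shards both bucket and blob names on their first two characters. A sketch of the resulting on-disk layout, using invented IDs and assuming STORAGE_DIR=/data/storage:

import pathlib

storage_dir = pathlib.Path('/data/storage')       # current_app.config['STORAGE_DIR']
bucket_name = '5a1b2c3d4e5f6a7b8c9d0e1f'          # project ObjectId as a string
blob_name = 'abcdef1234567890.png'

bucket_path = pathlib.PurePosixPath(bucket_name[:2]) / bucket_name   # 5a/5a1b2c...
partial_path = pathlib.PurePosixPath(blob_name[:2]) / blob_name      # ab/abcdef...
abspath = storage_dir / bucket_path / partial_path
# -> /data/storage/5a/5a1b2c3d4e5f6a7b8c9d0e1f/ab/abcdef1234567890.png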
112
pillar/api/latest.py
Normal file
@@ -0,0 +1,112 @@
|
||||
import typing
|
||||
|
||||
import bson
|
||||
import pymongo
|
||||
from flask import Blueprint, current_app
|
||||
|
||||
from pillar.api.utils import jsonify
|
||||
|
||||
blueprint = Blueprint('latest', __name__)
|
||||
|
||||
|
||||
def _public_project_ids() -> typing.List[bson.ObjectId]:
|
||||
"""Returns a list of ObjectIDs of public projects.
|
||||
|
||||
Memoized in setup_app().
|
||||
"""
|
||||
|
||||
proj_coll = current_app.db('projects')
|
||||
result = proj_coll.find({'is_private': False}, {'_id': 1})
|
||||
return [p['_id'] for p in result]
|
||||
|
||||
|
||||
def latest_nodes(db_filter, projection, limit):
|
||||
"""Returns the latest nodes, of a certain type, of public projects.
|
||||
|
||||
Also includes information about the project and the user of each node.
|
||||
"""
|
||||
|
||||
proj = {
|
||||
'_created': 1,
|
||||
'_updated': 1,
|
||||
'user.full_name': 1,
|
||||
'project._id': 1,
|
||||
'project.url': 1,
|
||||
'project.name': 1,
|
||||
'name': 1,
|
||||
'node_type': 1,
|
||||
'parent': 1,
|
||||
**projection,
|
||||
}
|
||||
|
||||
nodes_coll = current_app.db('nodes')
|
||||
pipeline = [
|
||||
{'$match': {'_deleted': {'$ne': True}}},
|
||||
{'$match': db_filter},
|
||||
{'$match': {'project': {'$in': _public_project_ids()}}},
|
||||
{'$sort': {'_created': pymongo.DESCENDING}},
|
||||
{'$limit': limit},
|
||||
{'$lookup': {"from": "users",
|
||||
"localField": "user",
|
||||
"foreignField": "_id",
|
||||
"as": "user"}},
|
||||
{'$unwind': {'path': "$user"}},
|
||||
{'$lookup': {"from": "projects",
|
||||
"localField": "project",
|
||||
"foreignField": "_id",
|
||||
"as": "project"}},
|
||||
{'$unwind': {'path': "$project"}},
|
||||
{'$project': proj},
|
||||
]
|
||||
|
||||
latest = nodes_coll.aggregate(pipeline)
|
||||
return list(latest)
|
||||
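A hedged sketch of a call and the rough shape of one returned document; all field values below are invented:

latest = latest_nodes({'node_type': 'asset', 'properties.status': 'published'},
                      {'picture': 1, 'properties.status': 1},
                      limit=3)
# Each item roughly looks like:
# {'_id': ObjectId(...), 'name': 'Example asset', 'node_type': 'asset',
#  'user': {'full_name': 'Jane Doe'},
#  'project': {'_id': ObjectId(...), 'url': 'example', 'name': 'Example Project'},
#  'picture': ObjectId(...), 'properties': {'status': 'published'}, ...}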
|
||||
|
||||
@blueprint.route('/assets')
|
||||
def latest_assets():
|
||||
latest = latest_nodes({'node_type': 'asset',
|
||||
'properties.status': 'published'},
|
||||
{'name': 1, 'node_type': 1,
|
||||
'parent': 1, 'picture': 1, 'properties.status': 1,
|
||||
'properties.content_type': 1,
|
||||
'permissions.world': 1},
|
||||
12)
|
||||
|
||||
return jsonify({'_items': latest})
|
||||
|
||||
|
||||
@blueprint.route('/comments')
|
||||
def latest_comments():
|
||||
latest = latest_nodes({'node_type': 'comment',
|
||||
'properties.status': 'published'},
|
||||
{'parent': 1,
|
||||
'properties.content': 1, 'node_type': 1,
|
||||
'properties.status': 1,
|
||||
'properties.is_reply': 1},
|
||||
10)
|
||||
|
||||
# Embed the comments' parents.
|
||||
# TODO: move to aggregation pipeline.
|
||||
nodes = current_app.data.driver.db['nodes']
|
||||
parents = {}
|
||||
for comment in latest:
|
||||
parent_id = comment['parent']
|
||||
|
||||
if parent_id in parents:
|
||||
comment['parent'] = parents[parent_id]
|
||||
continue
|
||||
|
||||
parent = nodes.find_one(parent_id)
|
||||
parents[parent_id] = parent
|
||||
comment['parent'] = parent
|
||||
|
||||
return jsonify({'_items': latest})
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
global _public_project_ids
|
||||
|
||||
app.register_api_blueprint(blueprint, url_prefix=url_prefix)
|
||||
cached = app.cache.cached(timeout=3600)
|
||||
_public_project_ids = cached(_public_project_ids)
|
||||
@@ -2,16 +2,15 @@ import base64
|
||||
import datetime
|
||||
import hashlib
|
||||
import logging
|
||||
import rsa.randnum
|
||||
import typing
|
||||
|
||||
import bcrypt
|
||||
from bson import tz_util
|
||||
from eve.methods.post import post_internal
|
||||
|
||||
from flask import abort, Blueprint, current_app, jsonify, request
|
||||
|
||||
from application.utils.authentication import store_token
|
||||
from application.utils.authentication import create_new_user_document
|
||||
from application.utils.authentication import make_unique_username
|
||||
from pillar.api.utils.authentication import create_new_user_document
|
||||
from pillar.api.utils.authentication import make_unique_username
|
||||
from pillar.api.utils.authentication import store_token
|
||||
from pillar.api.utils import utcnow
|
||||
|
||||
blueprint = Blueprint('authentication', __name__)
|
||||
log = logging.getLogger(__name__)
|
||||
@@ -31,7 +30,7 @@ def create_local_user(email, password):
|
||||
# Make username unique
|
||||
db_user['username'] = make_unique_username(email)
|
||||
# Create the user
|
||||
r, _, _, status = post_internal('users', db_user)
|
||||
r, _, _, status = current_app.post_internal('users', db_user)
|
||||
if status != 201:
|
||||
log.error('internal response: %r %r', status, r)
|
||||
return abort(500)
|
||||
@@ -39,17 +38,7 @@ def create_local_user(email, password):
|
||||
return r['_id']
|
||||
|
||||
|
||||
@blueprint.route('/make-token', methods=['POST'])
|
||||
def make_token():
|
||||
"""Direct login for a user, without OAuth, using local database. Generates
|
||||
a token that is passed back to Pillar Web and used in subsequent
|
||||
transactions.
|
||||
|
||||
:return: a token string
|
||||
"""
|
||||
username = request.form['username']
|
||||
password = request.form['password']
|
||||
|
||||
def get_local_user(username, password):
|
||||
# Look up user in db
|
||||
users_collection = current_app.data.driver.db['users']
|
||||
user = users_collection.find_one({'username': username})
|
||||
@@ -64,36 +53,71 @@ def make_token():
|
||||
hashed_password = hash_password(password, salt)
|
||||
if hashed_password != credentials['token']:
|
||||
return abort(403)
|
||||
return user
|
||||
|
||||
|
||||
@blueprint.route('/make-token', methods=['POST'])
|
||||
def make_token():
|
||||
"""Direct login for a user, without OAuth, using local database. Generates
|
||||
a token that is passed back to Pillar Web and used in subsequent
|
||||
transactions.
|
||||
|
||||
:return: a token string
|
||||
"""
|
||||
username = request.form['username']
|
||||
password = request.form['password']
|
||||
|
||||
user = get_local_user(username, password)
|
||||
|
||||
token = generate_and_store_token(user['_id'])
|
||||
return jsonify(token=token['token'])
|
||||
|
||||
|
||||
def generate_and_store_token(user_id, days=15, prefix=''):
|
||||
def generate_and_store_token(user_id, days=15, prefix=b'') -> dict:
|
||||
"""Generates token based on random bits.
|
||||
|
||||
NOTE: the returned document includes the plain-text token.
|
||||
DO NOT STORE OR LOG THIS unless there is a good reason to.
|
||||
|
||||
:param user_id: ObjectId of the owning user.
|
||||
:param days: token will expire in this many days.
|
||||
:param prefix: the token will be prefixed by this string, for easy identification.
|
||||
:return: the token document.
|
||||
:param prefix: the token will be prefixed by these bytes, for easy identification.
|
||||
:return: the token document with the token in plain text as well as hashed.
|
||||
"""
|
||||
|
||||
random_bits = rsa.randnum.read_random_bits(256)
|
||||
if not isinstance(prefix, bytes):
|
||||
raise TypeError('prefix must be bytes, not %s' % type(prefix))
|
||||
|
||||
import secrets
|
||||
|
||||
random_bits = secrets.token_bytes(32)
|
||||
|
||||
# Use 'xy' as altchars to prevent + and / characters from appearing.
|
||||
# We never have to b64decode the string anyway.
|
||||
token = prefix + base64.b64encode(random_bits, altchars='xy').strip('=')
|
||||
token_bytes = prefix + base64.b64encode(random_bits, altchars=b'xy').strip(b'=')
|
||||
token = token_bytes.decode('ascii')
|
||||
|
||||
token_expiry = datetime.datetime.now(tz=tz_util.utc) + datetime.timedelta(days=days)
|
||||
return store_token(user_id, token, token_expiry)
|
||||
token_expiry = utcnow() + datetime.timedelta(days=days)
|
||||
token_data = store_token(user_id, token, token_expiry)
|
||||
|
||||
# Include the token in the returned document so that it can be stored client-side,
|
||||
# in configuration, etc.
|
||||
token_data['token'] = token
|
||||
|
||||
return token_data
|
||||
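The token itself is just 32 random bytes, base64-encoded with 'x'/'y' replacing '+'/'/', and with padding stripped. A standalone sketch of that arithmetic (the b'SRV' prefix is an arbitrary example):

import base64
import secrets

random_bits = secrets.token_bytes(32)
token_bytes = b'SRV' + base64.b64encode(random_bits, altchars=b'xy').strip(b'=')
token = token_bytes.decode('ascii')
# 32 bytes of entropy encode to 43 characters, so len(token) == 46 here;
# the token never contains '+', '/' or '='.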
|
||||
|
||||
def hash_password(password, salt):
|
||||
if isinstance(salt, unicode):
|
||||
def hash_password(password: str, salt: typing.Union[str, bytes]) -> str:
|
||||
password = password.encode()
|
||||
|
||||
if isinstance(salt, str):
|
||||
salt = salt.encode('utf-8')
|
||||
encoded_password = base64.b64encode(hashlib.sha256(password).digest())
|
||||
return bcrypt.hashpw(encoded_password, salt)
|
||||
|
||||
hash = hashlib.sha256(password).digest()
|
||||
encoded_password = base64.b64encode(hash)
|
||||
hashed_password = bcrypt.hashpw(encoded_password, salt)
|
||||
return hashed_password.decode('ascii')
|
||||
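A small usage sketch of the bcrypt flow above; verification relies on the standard bcrypt idiom of passing the stored hash back in as the salt:

import bcrypt

stored = hash_password('correct horse battery staple', bcrypt.gensalt())

# To verify a login attempt, hash again with the stored value as the salt:
ok = hash_password('correct horse battery staple', stored) == stored   # True
bad = hash_password('wrong password', stored) == stored                # False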
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
||||
app.register_api_blueprint(blueprint, url_prefix=url_prefix)
|
||||
105
pillar/api/node_types/__init__.py
Normal file
@@ -0,0 +1,105 @@
|
||||
_file_embedded_schema = {
|
||||
'type': 'objectid',
|
||||
'data_relation': {
|
||||
'resource': 'files',
|
||||
'field': '_id',
|
||||
'embeddable': True
|
||||
}
|
||||
}
|
||||
|
||||
ATTACHMENT_SLUG_REGEX = r'[a-zA-Z0-9_\-]+'
|
||||
|
||||
attachments_embedded_schema = {
|
||||
'type': 'dict',
|
||||
# TODO: will be renamed to 'keyschema' in Cerberus 1.0
|
||||
'propertyschema': {
|
||||
'type': 'string',
|
||||
'regex': '^%s$' % ATTACHMENT_SLUG_REGEX,
|
||||
},
|
||||
'valueschema': {
|
||||
'type': 'dict',
|
||||
'schema': {
|
||||
'oid': {
|
||||
'type': 'objectid',
|
||||
'required': True,
|
||||
},
|
||||
'link': {
|
||||
'type': 'string',
|
||||
'allowed': ['self', 'none', 'custom'],
|
||||
'default': 'self',
|
||||
},
|
||||
'link_custom': {
|
||||
'type': 'string',
|
||||
},
|
||||
'collection': {
|
||||
'type': 'string',
|
||||
'allowed': ['files'],
|
||||
'default': 'files',
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
# TODO (fsiddi) reference this schema in all node_types that allow ratings
|
||||
ratings_embedded_schema = {
|
||||
'type': 'dict',
|
||||
# Total count of positive ratings (updated at every rating action)
|
||||
'schema': {
|
||||
'positive': {
|
||||
'type': 'integer',
|
||||
},
|
||||
# Total count of negative ratings (updated at every rating action)
|
||||
'negative': {
|
||||
'type': 'integer',
|
||||
},
|
||||
# Collection of ratings, keyed by user
|
||||
'ratings': {
|
||||
'type': 'list',
|
||||
'schema': {
|
||||
'type': 'dict',
|
||||
'schema': {
|
||||
'user': {
|
||||
'type': 'objectid',
|
||||
'data_relation': {
|
||||
'resource': 'users',
|
||||
'field': '_id',
|
||||
'embeddable': False
|
||||
}
|
||||
},
|
||||
'is_positive': {
|
||||
'type': 'boolean'
|
||||
},
|
||||
# Weight of the rating based on user rep and the context.
|
||||
# Currently we have the following weights:
|
||||
# - 1 auto null
|
||||
# - 2 manual null
|
||||
# - 3 auto valid
|
||||
# - 4 manual valid
|
||||
'weight': {
|
||||
'type': 'integer'
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
'hot': {'type': 'float'},
|
||||
},
|
||||
}
|
||||
|
||||
# Import after defining the common embedded schemas, to prevent dependency cycles.
|
||||
from pillar.api.node_types.asset import node_type_asset
|
||||
from pillar.api.node_types.blog import node_type_blog
|
||||
from pillar.api.node_types.comment import node_type_comment
|
||||
from pillar.api.node_types.group import node_type_group
|
||||
from pillar.api.node_types.group_hdri import node_type_group_hdri
|
||||
from pillar.api.node_types.group_texture import node_type_group_texture
|
||||
from pillar.api.node_types.hdri import node_type_hdri
|
||||
from pillar.api.node_types.page import node_type_page
|
||||
from pillar.api.node_types.post import node_type_post
|
||||
from pillar.api.node_types.storage import node_type_storage
|
||||
from pillar.api.node_types.text import node_type_text
|
||||
from pillar.api.node_types.texture import node_type_texture
|
||||
|
||||
PILLAR_NODE_TYPES = (node_type_asset, node_type_blog, node_type_comment, node_type_group,
|
||||
node_type_group_hdri, node_type_group_texture, node_type_hdri, node_type_page,
|
||||
node_type_post, node_type_storage, node_type_text, node_type_texture)
|
||||
PILLAR_NAMED_NODE_TYPES = {nt['name']: nt for nt in PILLAR_NODE_TYPES}
|
||||
@@ -1,4 +1,4 @@
|
||||
from manage_extra.node_types import _file_embedded_schema
|
||||
from pillar.api.node_types import _file_embedded_schema, attachments_embedded_schema
|
||||
|
||||
node_type_asset = {
|
||||
'name': 'asset',
|
||||
@@ -27,26 +27,7 @@ node_type_asset = {
|
||||
# We point to the original file (and use it to extract any relevant
|
||||
# variation useful for our scope).
|
||||
'file': _file_embedded_schema,
|
||||
'attachments': {
|
||||
'type': 'list',
|
||||
'schema': {
|
||||
'type': 'dict',
|
||||
'schema': {
|
||||
'field': {'type': 'string'},
|
||||
'files': {
|
||||
'type': 'list',
|
||||
'schema': {
|
||||
'type': 'dict',
|
||||
'schema': {
|
||||
'file': _file_embedded_schema,
|
||||
'slug': {'type': 'string', 'minlength': 1},
|
||||
'size': {'type': 'string'}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
'attachments': attachments_embedded_schema,
|
||||
# Tags for search
|
||||
'tags': {
|
||||
'type': 'list',
|
||||
@@ -58,17 +39,29 @@ node_type_asset = {
|
||||
# this schema: "Root > Nested Category > One More Nested Category"
|
||||
'categories': {
|
||||
'type': 'string'
|
||||
}
|
||||
},
|
||||
'license_type': {
|
||||
'default': 'cc-by',
|
||||
'type': 'string',
|
||||
'allowed': [
|
||||
'cc-by',
|
||||
'cc-0',
|
||||
'cc-by-sa',
|
||||
'cc-by-nd',
|
||||
'cc-by-nc',
|
||||
'copyright'
|
||||
]
|
||||
},
|
||||
'license_notes': {
|
||||
'type': 'string'
|
||||
},
|
||||
},
|
||||
'form_schema': {
|
||||
'status': {},
|
||||
'content_type': {'visible': False},
|
||||
'file': {},
|
||||
'attachments': {'visible': False},
|
||||
'order': {'visible': False},
|
||||
'tags': {'visible': False},
|
||||
'categories': {'visible': False}
|
||||
'categories': {'visible': False},
|
||||
'license_type': {'visible': False},
|
||||
'license_notes': {'visible': False},
|
||||
},
|
||||
'permissions': {
|
||||
}
|
||||
}
|
||||
17
pillar/api/node_types/blog.py
Normal file
@@ -0,0 +1,17 @@
|
||||
node_type_blog = {
|
||||
'name': 'blog',
|
||||
'description': 'Container for node_type post.',
|
||||
'dyn_schema': {
|
||||
'categories': {
|
||||
'type': 'list',
|
||||
'schema': {
|
||||
'type': 'string'
|
||||
}
|
||||
}
|
||||
},
|
||||
'form_schema': {
|
||||
'categories': {},
|
||||
'template': {},
|
||||
},
|
||||
'parent': ['project', ],
|
||||
}
|
||||
@@ -2,11 +2,14 @@ node_type_comment = {
|
||||
'name': 'comment',
|
||||
'description': 'Comments for asset nodes, pages, etc.',
|
||||
'dyn_schema': {
|
||||
# The actual comment content (initially Markdown format)
|
||||
# The actual comment content
|
||||
'content': {
|
||||
'type': 'string',
|
||||
'minlength': 5,
|
||||
'required': True,
|
||||
'coerce': 'markdown',
|
||||
},
|
||||
'_content_html': {'type': 'string'},
|
||||
'status': {
|
||||
'type': 'string',
|
||||
'allowed': [
|
||||
@@ -50,16 +53,6 @@ node_type_comment = {
|
||||
'confidence': {'type': 'float'},
|
||||
'is_reply': {'type': 'boolean'}
|
||||
},
|
||||
'form_schema': {
|
||||
'content': {},
|
||||
'status': {},
|
||||
'rating_positive': {},
|
||||
'rating_negative': {},
|
||||
'ratings': {},
|
||||
'confidence': {},
|
||||
'is_reply': {}
|
||||
},
|
||||
'form_schema': {},
|
||||
'parent': ['asset', 'comment'],
|
||||
'permissions': {
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
node_type_group = {
|
||||
'name': 'group',
|
||||
'description': 'Generic group node type edited',
|
||||
'description': 'Folder node type',
|
||||
'parent': ['group', 'project'],
|
||||
'dyn_schema': {
|
||||
# Used for sorting within the context of a group
|
||||
@@ -24,10 +24,7 @@ node_type_group = {
|
||||
},
|
||||
'form_schema': {
|
||||
'url': {'visible': False},
|
||||
'status': {},
|
||||
'notes': {'visible': False},
|
||||
'order': {'visible': False}
|
||||
},
|
||||
'permissions': {
|
||||
}
|
||||
}
|
||||
@@ -15,8 +15,5 @@ node_type_group_hdri = {
|
||||
],
|
||||
}
|
||||
},
|
||||
'form_schema': {
|
||||
'status': {},
|
||||
'order': {}
|
||||
}
|
||||
'form_schema': {},
|
||||
}
|
||||
@@ -15,8 +15,5 @@ node_type_group_texture = {
|
||||
],
|
||||
}
|
||||
},
|
||||
'form_schema': {
|
||||
'status': {},
|
||||
'order': {}
|
||||
}
|
||||
'form_schema': {},
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
from manage_extra.node_types import _file_embedded_schema
|
||||
from pillar.api.node_types import _file_embedded_schema
|
||||
|
||||
node_type_hdri = {
|
||||
# When adding this node type, make sure to enable CORS from * on the GCS
|
||||
@@ -7,6 +7,11 @@ node_type_hdri = {
|
||||
'description': 'HDR Image',
|
||||
'parent': ['group_hdri'],
|
||||
'dyn_schema': {
|
||||
# Default yaw angle in degrees.
|
||||
'default_yaw': {
|
||||
'type': 'float',
|
||||
'default': 0.0
|
||||
},
|
||||
'status': {
|
||||
'type': 'string',
|
||||
'allowed': [
|
||||
@@ -62,5 +67,5 @@ node_type_hdri = {
|
||||
'content_type': {'visible': False},
|
||||
'tags': {'visible': False},
|
||||
'categories': {'visible': False},
|
||||
}
|
||||
},
|
||||
}
|
||||
24
pillar/api/node_types/page.py
Normal file
@@ -0,0 +1,24 @@
|
||||
from pillar.api.node_types import attachments_embedded_schema
|
||||
|
||||
node_type_page = {
|
||||
'name': 'page',
|
||||
'description': 'A single page',
|
||||
'dyn_schema': {
|
||||
'status': {
|
||||
'type': 'string',
|
||||
'allowed': [
|
||||
'published',
|
||||
'pending'
|
||||
],
|
||||
'default': 'pending'
|
||||
},
|
||||
'url': {
|
||||
'type': 'string'
|
||||
},
|
||||
'attachments': attachments_embedded_schema,
|
||||
},
|
||||
'form_schema': {
|
||||
'attachments': {'visible': False},
|
||||
},
|
||||
'parent': ['project', ],
|
||||
}
|
||||
36
pillar/api/node_types/post.py
Normal file
@@ -0,0 +1,36 @@
|
||||
from pillar.api.node_types import attachments_embedded_schema
|
||||
|
||||
node_type_post = {
|
||||
'name': 'post',
|
||||
'description': 'A blog post, for any project',
|
||||
'dyn_schema': {
|
||||
'content': {
|
||||
'type': 'string',
|
||||
'minlength': 5,
|
||||
'maxlength': 90000,
|
||||
'required': True,
|
||||
'coerce': 'markdown',
|
||||
},
|
||||
'_content_html': {'type': 'string'},
|
||||
'status': {
|
||||
'type': 'string',
|
||||
'allowed': [
|
||||
'published',
|
||||
'pending'
|
||||
],
|
||||
'default': 'pending'
|
||||
},
|
||||
# Global categories, will be enforced to be 1 word
|
||||
'category': {
|
||||
'type': 'string',
|
||||
},
|
||||
'url': {
|
||||
'type': 'string'
|
||||
},
|
||||
'attachments': attachments_embedded_schema,
|
||||
},
|
||||
'form_schema': {
|
||||
'attachments': {'visible': False},
|
||||
},
|
||||
'parent': ['blog', ],
|
||||
}
|
||||
@@ -16,22 +16,11 @@ node_type_storage = {
|
||||
'subdir': {
|
||||
'type': 'string',
|
||||
},
|
||||
# Which backend is used to store the files (gcs, pillar, bam, cdnsun)
|
||||
# Which backend is used to store the files (gcs, local)
|
||||
'backend': {
|
||||
'type': 'string',
|
||||
},
|
||||
},
|
||||
'form_schema': {
|
||||
'subdir': {},
|
||||
'project': {},
|
||||
'backend': {}
|
||||
},
|
||||
'form_schema': {},
|
||||
'parent': ['group', 'project'],
|
||||
'permissions': {
|
||||
# 'groups': [{
|
||||
# 'group': app.config['ADMIN_USER_GROUP'],
|
||||
# 'methods': ['GET', 'PUT', 'POST']
|
||||
# }],
|
||||
# 'users': [],
|
||||
}
|
||||
}
|
||||
@@ -24,5 +24,5 @@ node_type_text = {
|
||||
},
|
||||
'form_schema': {
|
||||
'shared_slug': {'visible': False},
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
from manage_extra.node_types import _file_embedded_schema
|
||||
from pillar.api.node_types import _file_embedded_schema
|
||||
|
||||
node_type_texture = {
|
||||
'name': 'texture',
|
||||
@@ -27,13 +27,19 @@ node_type_texture = {
|
||||
'map_type': {
|
||||
'type': 'string',
|
||||
'allowed': [
|
||||
'color',
|
||||
'specular',
|
||||
'bump',
|
||||
'normal',
|
||||
'translucency',
|
||||
'emission',
|
||||
'alpha'
|
||||
"alpha",
|
||||
"ambient occlusion",
|
||||
"bump",
|
||||
"color",
|
||||
"displacement",
|
||||
"emission",
|
||||
"glossiness",
|
||||
"id",
|
||||
"mask",
|
||||
"normal",
|
||||
"roughness",
|
||||
"specular",
|
||||
"translucency",
|
||||
]}
|
||||
}
|
||||
}
|
||||
@@ -58,15 +64,8 @@ node_type_texture = {
|
||||
}
|
||||
},
|
||||
'form_schema': {
|
||||
'status': {},
|
||||
'content_type': {'visible': False},
|
||||
'files': {},
|
||||
'is_tileable': {},
|
||||
'is_landscape': {},
|
||||
'resolution': {},
|
||||
'aspect_ratio': {},
|
||||
'order': {},
|
||||
'tags': {'visible': False},
|
||||
'categories': {'visible': False},
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -1,24 +1,56 @@
|
||||
import base64
|
||||
import functools
|
||||
import logging
|
||||
import urlparse
|
||||
import urllib.parse
|
||||
|
||||
import pymongo.errors
|
||||
import rsa.randnum
|
||||
from bson import ObjectId
|
||||
from flask import current_app, g, Blueprint, request
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
from bson import ObjectId
|
||||
from flask import current_app, Blueprint, request
|
||||
|
||||
from application.modules import file_storage
|
||||
from application.utils import str2id, jsonify
|
||||
from application.utils.authorization import check_permissions, require_login
|
||||
from application.utils.gcs import update_file_name
|
||||
from application.utils.activities import activity_subscribe, activity_object_add
|
||||
from application.utils.algolia import algolia_index_node_delete
|
||||
from application.utils.algolia import algolia_index_node_save
|
||||
from pillar.api.activities import activity_subscribe, activity_object_add
|
||||
from pillar.api.node_types import PILLAR_NAMED_NODE_TYPES
|
||||
from pillar.api.file_storage_backends.gcs import update_file_name
|
||||
from pillar.api.utils import str2id, jsonify
|
||||
from pillar.api.utils.authorization import check_permissions, require_login
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
blueprint = Blueprint('nodes', __name__)
|
||||
ROLES_FOR_SHARING = {u'subscriber', u'demo'}
|
||||
blueprint = Blueprint('nodes_api', __name__)
|
||||
ROLES_FOR_SHARING = {'subscriber', 'demo'}
|
||||
|
||||
|
||||
def only_for_node_type_decorator(*required_node_type_names):
|
||||
"""Returns a decorator that checks its first argument's node type.
|
||||
|
||||
If the node type is not of the required node type, returns None,
|
||||
otherwise calls the wrapped function.
|
||||
|
||||
>>> deco = only_for_node_type_decorator('comment')
|
||||
>>> @deco
|
||||
... def handle_comment(node): pass
|
||||
|
||||
>>> deco = only_for_node_type_decorator('comment', 'post')
|
||||
>>> @deco
|
||||
... def handle_comment_or_post(node): pass
|
||||
|
||||
"""
|
||||
|
||||
# Convert to a set for efficient 'x in required_node_type_names' queries.
|
||||
required_node_type_names = set(required_node_type_names)
|
||||
|
||||
def only_for_node_type(wrapped):
|
||||
@functools.wraps(wrapped)
|
||||
def wrapper(node, *args, **kwargs):
|
||||
if node.get('node_type') not in required_node_type_names:
|
||||
return
|
||||
|
||||
return wrapped(node, *args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
only_for_node_type.__doc__ = "Decorator, immediately returns when " \
|
||||
"the first argument is not of type %s." % required_node_type_names
|
||||
return only_for_node_type
|
||||
|
||||
|
||||
@blueprint.route('/<node_id>/share', methods=['GET', 'POST'])
|
||||
@@ -35,6 +67,8 @@ def share_node(node_id):
|
||||
'node_type': 1,
|
||||
'short_code': 1
|
||||
})
|
||||
if not node:
|
||||
raise wz_exceptions.NotFound('Node %s does not exist.' % node_id)
|
||||
|
||||
check_permissions('nodes', node, request.method)
|
||||
|
||||
@@ -100,7 +134,7 @@ def make_world_gettable(node):
|
||||
log.debug('Ensuring the world can read node %s', node_id)
|
||||
|
||||
world_perms = set(node.get('permissions', {}).get('world', []))
|
||||
world_perms.add(u'GET')
|
||||
world_perms.add('GET')
|
||||
world_perms = list(world_perms)
|
||||
|
||||
result = nodes_coll.update_one({'_id': node_id},
|
||||
@@ -112,13 +146,19 @@ def make_world_gettable(node):
             node_id)


def create_short_code(node):
def create_short_code(node) -> str:
    """Generates a new 'short code' for the node."""

    import secrets

    length = current_app.config['SHORT_CODE_LENGTH']
    bits = rsa.randnum.read_random_bits(32)
    short_code = base64.b64encode(bits, altchars='xy').rstrip('=')
    short_code = short_code[:length]

    # Base64 encoding will expand it a bit, so we'll cut that off later.
    # It's a good idea to start with enough bytes, though.
    bits = secrets.token_bytes(length)

    short_code = base64.b64encode(bits, altchars=b'xy').rstrip(b'=')
    short_code = short_code[:length].decode('ascii')

    return short_code

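The rewritten `create_short_code()` swaps `rsa.randnum` for the standard-library `secrets` module and works on bytes until the final decode. A standalone sketch of the same logic, assuming a `SHORT_CODE_LENGTH` of 4:

```python
import base64
import secrets


def make_short_code(length: int = 4) -> str:
    """Standalone sketch of the new create_short_code() logic.

    Base64 expands its input, so `length` random bytes always yield at least
    `length` characters after encoding and stripping the padding.
    """
    bits = secrets.token_bytes(length)
    # altchars=b'xy' replaces '+' and '/' so the code stays URL-friendly.
    code = base64.b64encode(bits, altchars=b'xy').rstrip(b'=')
    return code[:length].decode('ascii')


if __name__ == '__main__':
    print(make_short_code())  # e.g. 'Qf3x' (random each run)
```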
@@ -126,7 +166,8 @@ def create_short_code(node):
|
||||
def short_link_info(short_code):
|
||||
"""Returns the short link info in a dict."""
|
||||
|
||||
short_link = urlparse.urljoin(current_app.config['SHORT_LINK_BASE_URL'], short_code)
|
||||
short_link = urllib.parse.urljoin(
|
||||
current_app.config['SHORT_LINK_BASE_URL'], short_code)
|
||||
|
||||
return {
|
||||
'short_code': short_code,
|
||||
@@ -134,62 +175,6 @@ def short_link_info(short_code):
|
||||
}
|
||||
|
||||
|
||||
def item_parse_attachments(response):
|
||||
"""Before returning a response, check if the 'attachments' property is
|
||||
defined. If yes, load the file (for the moment only images) in the required
|
||||
variation, get the link and build a Markdown representation. Search in the
|
||||
'field' specified in the attachment and replace the 'slug' tag with the
|
||||
generated link.
|
||||
"""
|
||||
|
||||
attachments = response.get('properties', {}).get('attachments', None)
|
||||
if not attachments:
|
||||
return
|
||||
|
||||
files_collection = current_app.data.driver.db['files']
|
||||
for attachment in attachments:
|
||||
# Make a list from the property path
|
||||
field_name_path = attachment['field'].split('.')
|
||||
# This currently allow to access only properties inside of
|
||||
# the properties property
|
||||
if len(field_name_path) > 1:
|
||||
field_content = response[field_name_path[0]][field_name_path[1]]
|
||||
# This is for the "normal" first level property
|
||||
else:
|
||||
field_content = response[field_name_path[0]]
|
||||
for af in attachment['files']:
|
||||
slug = af['slug']
|
||||
slug_tag = "[{0}]".format(slug)
|
||||
f = files_collection.find_one({'_id': ObjectId(af['file'])})
|
||||
if f is None:
|
||||
af['file'] = None
|
||||
continue
|
||||
size = f['size'] if 'size' in f else 'l'
|
||||
|
||||
# Get the correct variation from the file
|
||||
file_storage.ensure_valid_link(f)
|
||||
thumbnail = next((item for item in f['variations'] if
|
||||
item['size'] == size), None)
|
||||
|
||||
# Build Markdown img string
|
||||
l = '![{0}]({1} "{2}")'.format(slug, thumbnail['link'], f['name'])
|
||||
# Parse the content of the file and replace the attachment
|
||||
# tag with the actual image link
|
||||
field_content = field_content.replace(slug_tag, l)
|
||||
|
||||
# Apply the parsed value back to the property. See above for
|
||||
# clarifications on how this is done.
|
||||
if len(field_name_path) > 1:
|
||||
response[field_name_path[0]][field_name_path[1]] = field_content
|
||||
else:
|
||||
response[field_name_path[0]] = field_content
|
||||
|
||||
|
||||
def resource_parse_attachments(response):
|
||||
for item in response['_items']:
|
||||
item_parse_attachments(item)
|
||||
|
||||
|
||||
def before_replacing_node(item, original):
|
||||
check_permissions('nodes', original, 'PUT')
|
||||
update_file_name(item)
|
||||
@@ -200,33 +185,29 @@ def after_replacing_node(item, original):
|
||||
project is private, prevent public indexing.
|
||||
"""
|
||||
|
||||
from pillar.celery import search_index_tasks as index
|
||||
|
||||
projects_collection = current_app.data.driver.db['projects']
|
||||
project = projects_collection.find_one({'_id': item['project']})
|
||||
if project.get('is_private', False):
|
||||
# Skip index updating and return
|
||||
return
|
||||
|
||||
from algoliasearch.client import AlgoliaException
|
||||
status = item['properties'].get('status', 'unpublished')
|
||||
node_id = str(item['_id'])
|
||||
|
||||
if status == 'published':
|
||||
try:
|
||||
algolia_index_node_save(item)
|
||||
except AlgoliaException as ex:
|
||||
log.warning('Unable to push node info to Algolia for node %s; %s',
|
||||
item.get('_id'), ex)
|
||||
index.node_save.delay(node_id)
|
||||
else:
|
||||
try:
|
||||
algolia_index_node_delete(item)
|
||||
except AlgoliaException as ex:
|
||||
log.warning('Unable to delete node info to Algolia for node %s; %s',
|
||||
item.get('_id'), ex)
|
||||
index.node_delete.delay(node_id)
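`after_replacing_node()` no longer talks to Algolia directly; it only enqueues Celery tasks. The sketch below shows what the task side might look like; the real `pillar.celery.search_index_tasks` module is not included in this diff, so the decorator usage and task bodies are assumptions.

```python
# Hypothetical sketch of the task side of the search-index updates.
from celery import Celery

celery_app = Celery('pillar')  # broker/backend configuration omitted


@celery_app.task(ignore_result=True)
def node_save(node_id: str) -> None:
    # Re-fetch the node inside the worker and push it to the search index.
    ...


@celery_app.task(ignore_result=True)
def node_delete(node_id: str) -> None:
    # Remove the node from the search index.
    ...

# The web process only enqueues work, so a failing search backend no longer
# breaks the PUT request:
#   node_save.delay(str(item['_id']))
```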
|
||||
|
||||
|
||||
def before_inserting_nodes(items):
|
||||
"""Before inserting a node in the collection we check if the user is allowed
|
||||
and we append the project id to it.
|
||||
"""
|
||||
from pillar.auth import current_user
|
||||
|
||||
nodes_collection = current_app.data.driver.db['nodes']
|
||||
|
||||
def find_parent_project(node):
|
||||
@@ -248,7 +229,7 @@ def before_inserting_nodes(items):
|
||||
item['project'] = project['_id']
|
||||
|
||||
# Default the 'user' property to the current user.
|
||||
item.setdefault('user', g.current_user['user_id'])
|
||||
item.setdefault('user', current_user.user_id)
|
||||
|
||||
|
||||
def after_inserting_nodes(items):
|
||||
@@ -275,9 +256,13 @@ def after_inserting_nodes(items):
|
||||
else:
|
||||
activity_subscribe(item['user'], 'node', item['_id'])
|
||||
verb = 'commented'
|
||||
else:
|
||||
elif item['node_type'] in PILLAR_NAMED_NODE_TYPES:
|
||||
verb = 'posted'
|
||||
activity_subscribe(item['user'], 'node', item['_id'])
|
||||
else:
|
||||
# Don't automatically create activities for non-Pillar node types,
|
||||
# as we don't know what would be a suitable verb (among other things).
|
||||
continue
|
||||
|
||||
activity_object_add(
|
||||
item['user'],
|
||||
@@ -363,7 +348,7 @@ def node_set_default_picture(node, original=None):
|
||||
# Find the colour map, defaulting to the first image map available.
|
||||
image_file_id = None
|
||||
for image in props.get('files', []):
|
||||
if image_file_id is None or image.get('map_type') == u'color':
|
||||
if image_file_id is None or image.get('map_type') == 'color':
|
||||
image_file_id = image.get('file')
|
||||
else:
|
||||
log.debug('Not setting default picture on node type %s content type %s',
|
||||
@@ -383,27 +368,47 @@ def nodes_set_default_picture(nodes):
|
||||
node_set_default_picture(node)
|
||||
|
||||
|
||||
def before_deleting_node(node: dict):
|
||||
check_permissions('nodes', node, 'DELETE')
|
||||
|
||||
|
||||
def after_deleting_node(item):
|
||||
from algoliasearch.client import AlgoliaException
|
||||
from pillar.celery import search_index_tasks as index
|
||||
index.node_delete.delay(str(item['_id']))
|
||||
|
||||
|
||||
only_for_textures = only_for_node_type_decorator('texture')
|
||||
|
||||
|
||||
@only_for_textures
|
||||
def texture_sort_files(node, original=None):
|
||||
"""Sort files alphabetically by map type, with colour map first."""
|
||||
|
||||
try:
|
||||
algolia_index_node_delete(item)
|
||||
except AlgoliaException as ex:
|
||||
log.warning('Unable to delete node info to Algolia for node %s; %s',
|
||||
item.get('_id'), ex)
|
||||
files = node['properties']['files']
|
||||
except KeyError:
|
||||
return
|
||||
|
||||
# Sort the map types alphabetically, ensuring 'color' comes first.
|
||||
as_dict = {f['map_type']: f for f in files}
|
||||
types = sorted(as_dict.keys(), key=lambda k: '\0' if k == 'color' else k)
|
||||
node['properties']['files'] = [as_dict[map_type] for map_type in types]
|
||||
|
||||
|
||||
def textures_sort_files(nodes):
|
||||
for node in nodes:
|
||||
texture_sort_files(node)
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
|
||||
from . import patch
|
||||
patch.setup_app(app, url_prefix=url_prefix)
|
||||
|
||||
app.on_fetched_item_nodes += before_returning_node
|
||||
app.on_fetched_resource_nodes += before_returning_nodes
|
||||
|
||||
app.on_fetched_item_nodes += item_parse_attachments
|
||||
app.on_fetched_resource_nodes += resource_parse_attachments
|
||||
|
||||
app.on_replace_nodes += before_replacing_node
|
||||
app.on_replace_nodes += texture_sort_files
|
||||
app.on_replace_nodes += deduct_content_type
|
||||
app.on_replace_nodes += node_set_default_picture
|
||||
app.on_replaced_nodes += after_replacing_node
|
||||
@@ -411,8 +416,12 @@ def setup_app(app, url_prefix):
|
||||
app.on_insert_nodes += before_inserting_nodes
|
||||
app.on_insert_nodes += nodes_deduct_content_type
|
||||
app.on_insert_nodes += nodes_set_default_picture
|
||||
app.on_insert_nodes += textures_sort_files
|
||||
app.on_inserted_nodes += after_inserting_nodes
|
||||
|
||||
app.on_update_nodes += texture_sort_files
|
||||
|
||||
app.on_delete_item_nodes += before_deleting_node
|
||||
app.on_deleted_item_nodes += after_deleting_node
|
||||
|
||||
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
||||
app.register_api_blueprint(blueprint, url_prefix=url_prefix)
|
||||
@@ -1,33 +1,55 @@
|
||||
"""PATCH support for comment nodes."""
|
||||
|
||||
import logging
|
||||
|
||||
from flask import current_app
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
|
||||
from application.utils import authorization, authentication, jsonify
|
||||
from pillar.api.utils import authorization, authentication, jsonify
|
||||
|
||||
from . import register_patch_handler
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
ROLES_FOR_COMMENT_VOTING = {u'subscriber', u'demo'}
|
||||
VALID_COMMENT_OPERATIONS = {u'upvote', u'downvote', u'revoke'}
|
||||
COMMENT_VOTING_OPS = {'upvote', 'downvote', 'revoke'}
|
||||
VALID_COMMENT_OPERATIONS = COMMENT_VOTING_OPS.union({'edit'})
|
||||
|
||||
|
||||
@register_patch_handler(u'comment')
|
||||
@register_patch_handler('comment')
|
||||
def patch_comment(node_id, patch):
|
||||
assert_is_valid_patch(node_id, patch)
|
||||
user_id = authentication.current_user_id()
|
||||
|
||||
# Find the node
|
||||
if patch['op'] in COMMENT_VOTING_OPS:
|
||||
result, node = vote_comment(user_id, node_id, patch)
|
||||
else:
|
||||
assert patch['op'] == 'edit', 'Invalid patch operation %s' % patch['op']
|
||||
result, node = edit_comment(user_id, node_id, patch)
|
||||
|
||||
return jsonify({'_status': 'OK',
|
||||
'result': result,
|
||||
'properties': node['properties']
|
||||
})
|
||||
|
||||
|
||||
def vote_comment(user_id, node_id, patch):
|
||||
"""Performs a voting operation."""
|
||||
|
||||
# Find the node. Includes a query on the properties.ratings array so
|
||||
# that we only get the current user's rating.
|
||||
nodes_coll = current_app.data.driver.db['nodes']
|
||||
node_query = {'_id': node_id,
|
||||
'$or': [{'properties.ratings.$.user': {'$exists': False}},
|
||||
{'properties.ratings.$.user': user_id}]}
|
||||
node = nodes_coll.find_one(node_query,
|
||||
projection={'properties': 1})
|
||||
projection={'properties': 1, 'user': 1})
|
||||
if node is None:
|
||||
log.warning('How can the node not be found?')
|
||||
log.warning('User %s wanted to patch non-existing node %s' % (user_id, node_id))
|
||||
raise wz_exceptions.NotFound('Node %s not found' % node_id)
|
||||
|
||||
# We don't allow the user to down/upvote their own nodes.
|
||||
if user_id == node['user']:
|
||||
raise wz_exceptions.Forbidden('You cannot vote on your own node')
|
||||
|
||||
props = node['properties']
|
||||
|
||||
# Find the current rating (if any)
|
||||
@@ -75,13 +97,14 @@ def patch_comment(node_id, patch):
|
||||
return update
|
||||
|
||||
actions = {
|
||||
u'upvote': upvote,
|
||||
u'downvote': downvote,
|
||||
u'revoke': revoke,
|
||||
'upvote': upvote,
|
||||
'downvote': downvote,
|
||||
'revoke': revoke,
|
||||
}
|
||||
action = actions[patch['op']]
|
||||
mongo_update = action()
|
||||
|
||||
nodes_coll = current_app.data.driver.db['nodes']
|
||||
if mongo_update:
|
||||
log.info('Running %s', mongo_update)
|
||||
if rating:
|
||||
@@ -97,10 +120,53 @@ def patch_comment(node_id, patch):
|
||||
projection={'properties.rating_positive': 1,
|
||||
'properties.rating_negative': 1})
|
||||
|
||||
return jsonify({'_status': 'OK',
|
||||
'result': result,
|
||||
'properties': node['properties']
|
||||
})
|
||||
return result, node
|
||||
|
||||
|
||||
def edit_comment(user_id, node_id, patch):
|
||||
"""Edits a single comment.
|
||||
|
||||
Doesn't do permission checking; users are allowed to edit their own
|
||||
comment, and this is not something you want to revoke anyway. Admins
|
||||
can edit all comments.
|
||||
"""
|
||||
|
||||
# Find the node. We need to fetch some more info than we use here, so that
|
||||
# we can pass this stuff to Eve's patch_internal; that way the validation &
|
||||
# authorisation system has enough info to work.
|
||||
nodes_coll = current_app.data.driver.db['nodes']
|
||||
projection = {'user': 1,
|
||||
'project': 1,
|
||||
'node_type': 1}
|
||||
node = nodes_coll.find_one(node_id, projection=projection)
|
||||
if node is None:
|
||||
log.warning('User %s wanted to patch non-existing node %s' % (user_id, node_id))
|
||||
raise wz_exceptions.NotFound('Node %s not found' % node_id)
|
||||
|
||||
if node['user'] != user_id and not authorization.user_has_role('admin'):
|
||||
raise wz_exceptions.Forbidden('You can only edit your own comments.')
|
||||
|
||||
# Use Eve to PATCH this node, as that also updates the etag.
|
||||
r, _, _, status = current_app.patch_internal('nodes',
|
||||
{'properties.content': patch['content'],
|
||||
'project': node['project'],
|
||||
'user': node['user'],
|
||||
'node_type': node['node_type']},
|
||||
concurrency_check=False,
|
||||
_id=node_id)
|
||||
if status != 200:
|
||||
log.error('Error %i editing comment %s for user %s: %s',
|
||||
status, node_id, user_id, r)
|
||||
raise wz_exceptions.InternalServerError('Internal error %i from Eve' % status)
|
||||
else:
|
||||
log.info('User %s edited comment %s', user_id, node_id)
|
||||
|
||||
# Fetch the new content, so the client can show these without querying again.
|
||||
node = nodes_coll.find_one(node_id, projection={
|
||||
'properties.content': 1,
|
||||
'properties._content_html': 1,
|
||||
})
|
||||
return status, node
|
||||
|
||||
|
||||
def assert_is_valid_patch(node_id, patch):
|
||||
@@ -115,8 +181,12 @@ def assert_is_valid_patch(node_id, patch):
|
||||
raise wz_exceptions.BadRequest('Operation should be one of %s',
|
||||
', '.join(VALID_COMMENT_OPERATIONS))
|
||||
|
||||
if op not in COMMENT_VOTING_OPS:
|
||||
# We can't check here, we need the node owner for that.
|
||||
return
|
||||
|
||||
# See whether the user is allowed to patch
|
||||
if authorization.user_matches_roles(ROLES_FOR_COMMENT_VOTING):
|
||||
if authorization.user_matches_roles(current_app.config['ROLES_FOR_COMMENT_VOTING']):
|
||||
log.debug('User is allowed to upvote/downvote comment')
|
||||
return
|
||||
|
||||
110
pillar/api/nodes/moving.py
Normal file
@@ -0,0 +1,110 @@
|
||||
"""Code for moving around nodes."""
|
||||
|
||||
import attr
|
||||
import flask_pymongo.wrappers
|
||||
from bson import ObjectId
|
||||
|
||||
from pillar import attrs_extra
|
||||
import pillar.api.file_storage.moving
|
||||
|
||||
|
||||
@attr.s
|
||||
class NodeMover(object):
|
||||
db = attr.ib(validator=attr.validators.instance_of(flask_pymongo.wrappers.Database))
|
||||
skip_gcs = attr.ib(default=False, validator=attr.validators.instance_of(bool))
|
||||
_log = attrs_extra.log('%s.NodeMover' % __name__)
|
||||
|
||||
def change_project(self, node, dest_proj):
|
||||
"""Moves a node and children to a new project."""
|
||||
|
||||
assert isinstance(node, dict)
|
||||
assert isinstance(dest_proj, dict)
|
||||
|
||||
for move_node in self._children(node):
|
||||
self._change_project(move_node, dest_proj)
|
||||
|
||||
def _change_project(self, node, dest_proj):
|
||||
"""Changes the project of a single node, non-recursively."""
|
||||
|
||||
node_id = node['_id']
|
||||
proj_id = dest_proj['_id']
|
||||
self._log.info('Moving node %s to project %s', node_id, proj_id)
|
||||
|
||||
# Find all files in the node.
|
||||
moved_files = set()
|
||||
self._move_files(moved_files, dest_proj, self._files(node.get('picture', None)))
|
||||
self._move_files(moved_files, dest_proj, self._files(node['properties'], 'file'))
|
||||
self._move_files(moved_files, dest_proj, self._files(node['properties'], 'files', 'file'))
|
||||
self._move_files(moved_files, dest_proj,
|
||||
self._files(node['properties'], 'attachments', 'files', 'file'))
|
||||
|
||||
# Switch the node's project after its files have been moved.
|
||||
self._log.info('Switching node %s to project %s', node_id, proj_id)
|
||||
nodes_coll = self.db['nodes']
|
||||
update_result = nodes_coll.update_one({'_id': node_id},
|
||||
{'$set': {'project': proj_id}})
|
||||
if update_result.matched_count != 1:
|
||||
raise RuntimeError(
|
||||
'Unable to update node %s in MongoDB: matched_count=%i; modified_count=%i' % (
|
||||
node_id, update_result.matched_count, update_result.modified_count))
|
||||
|
||||
def _move_files(self, moved_files, dest_proj, file_generator):
|
||||
"""Tries to find all files from the given properties."""
|
||||
|
||||
for file_id in file_generator:
|
||||
if file_id in moved_files:
|
||||
continue
|
||||
moved_files.add(file_id)
|
||||
self.move_file(dest_proj, file_id)
|
||||
|
||||
def move_file(self, dest_proj, file_id):
|
||||
"""Moves a single file to another project"""
|
||||
|
||||
self._log.info('Moving file %s to project %s', file_id, dest_proj['_id'])
|
||||
pillar.api.file_storage.moving.move_to_bucket(file_id, dest_proj['_id'],
|
||||
skip_storage=self.skip_gcs)
|
||||
|
||||
def _files(self, file_ref, *properties):
|
||||
"""Yields file ObjectIDs."""
|
||||
|
||||
# Degenerate cases.
|
||||
if not file_ref:
|
||||
return
|
||||
|
||||
# Single ObjectID
|
||||
if isinstance(file_ref, ObjectId):
|
||||
assert not properties
|
||||
yield file_ref
|
||||
return
|
||||
|
||||
# List of ObjectIDs
|
||||
if isinstance(file_ref, list):
|
||||
for item in file_ref:
|
||||
for subitem in self._files(item, *properties):
|
||||
yield subitem
|
||||
return
|
||||
|
||||
# Dict, use properties[0] as key
|
||||
if isinstance(file_ref, dict):
|
||||
try:
|
||||
subref = file_ref[properties[0]]
|
||||
except KeyError:
|
||||
# Silently skip non-existing keys.
|
||||
return
|
||||
|
||||
for subitem in self._files(subref, *properties[1:]):
|
||||
yield subitem
|
||||
return
|
||||
|
||||
raise TypeError('File ref is of type %s, not implemented' % type(file_ref))
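`NodeMover._files()` walks an arbitrarily nested value (ObjectId, list, or dict) guided by a property path such as `('attachments', 'files', 'file')`. A standalone re-expression of that walk, using `yield from` for brevity and illustrative sample data:

```python
from bson import ObjectId


def walk_files(ref, *path):
    """Sketch of NodeMover._files(): yields the ObjectIds found along `path`."""
    if not ref:
        return
    if isinstance(ref, ObjectId):
        yield ref
        return
    if isinstance(ref, list):
        for item in ref:
            yield from walk_files(item, *path)
        return
    if isinstance(ref, dict):
        # Missing keys are skipped silently, like the KeyError handling above.
        yield from walk_files(ref.get(path[0]), *path[1:])
        return
    raise TypeError(f'unsupported reference type {type(ref)}')


props = {'attachments': [{'field': 'properties.content',
                          'files': [{'file': ObjectId(), 'slug': 'img'}]}]}
print(list(walk_files(props, 'attachments', 'files', 'file')))  # -> [ObjectId(...)]
```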
|
||||
|
||||
def _children(self, node):
|
||||
"""Generator, recursively yields the node and its children."""
|
||||
|
||||
yield node
|
||||
|
||||
nodes_coll = self.db['nodes']
|
||||
for child in nodes_coll.find({'parent': node['_id']}):
|
||||
# "yield from self.children(child)" was introduced in Python 3.3
|
||||
for grandchild in self._children(child):
|
||||
yield grandchild
|
||||
@@ -5,11 +5,11 @@ Depends on node_type-specific patch handlers in submodules.
|
||||
|
||||
import logging
|
||||
|
||||
from flask import Blueprint, request
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
|
||||
from application.utils import str2id
|
||||
from application.utils import authorization, mongo, authentication
|
||||
from flask import Blueprint, request
|
||||
from pillar.api.utils import mongo
|
||||
from pillar.api.utils import authorization, authentication
|
||||
from pillar.api.utils import str2id
|
||||
|
||||
from . import custom
|
||||
|
||||
@@ -48,4 +48,4 @@ def patch_node(node_id):
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
||||
app.register_api_blueprint(blueprint, url_prefix=url_prefix)
|
||||
444
pillar/api/organizations/__init__.py
Normal file
@@ -0,0 +1,444 @@
|
||||
"""Organization management.
|
||||
|
||||
Assumes role names that are given to users by organization membership
|
||||
start with the string "org-".
|
||||
"""
|
||||
|
||||
import logging
|
||||
import typing
|
||||
|
||||
import attr
|
||||
import bson
|
||||
import flask
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
|
||||
from pillar import attrs_extra, current_app
|
||||
from pillar.api.utils import remove_private_keys, utcnow
|
||||
|
||||
|
||||
class OrganizationError(Exception):
|
||||
"""Superclass for all Organization-related errors."""
|
||||
|
||||
|
||||
@attr.s
|
||||
class NotEnoughSeats(OrganizationError):
|
||||
"""Thrown when trying to add too many members to the organization."""
|
||||
|
||||
org_id = attr.ib(validator=attr.validators.instance_of(bson.ObjectId))
|
||||
seat_count = attr.ib(validator=attr.validators.instance_of(int))
|
||||
attempted_seat_count = attr.ib(validator=attr.validators.instance_of(int))
|
||||
|
||||
|
||||
@attr.s
|
||||
class OrgManager:
|
||||
"""Organization manager.
|
||||
|
||||
Performs actions on an Organization. Does *NOT* test user permissions -- the caller
|
||||
is responsible for that.
|
||||
"""
|
||||
|
||||
_log = attrs_extra.log('%s.OrgManager' % __name__)
|
||||
|
||||
def create_new_org(self,
|
||||
name: str,
|
||||
admin_uid: bson.ObjectId,
|
||||
seat_count: int,
|
||||
*,
|
||||
org_roles: typing.Iterable[str] = None) -> dict:
|
||||
"""Creates a new Organization.
|
||||
|
||||
Returns the new organization document.
|
||||
"""
|
||||
|
||||
assert isinstance(admin_uid, bson.ObjectId)
|
||||
|
||||
org_doc = {
|
||||
'name': name,
|
||||
'admin_uid': admin_uid,
|
||||
'seat_count': seat_count,
|
||||
}
|
||||
|
||||
if org_roles:
|
||||
org_doc['org_roles'] = list(org_roles)
|
||||
|
||||
r, _, _, status = current_app.post_internal('organizations', org_doc)
|
||||
if status != 201:
|
||||
self._log.error('Error creating organization; status should be 201, not %i: %s',
|
||||
status, r)
|
||||
raise ValueError(f'Unable to create organization, status code {status}')
|
||||
|
||||
org_doc.update(r)
|
||||
return org_doc
|
||||
|
||||
def assign_users(self,
|
||||
org_id: bson.ObjectId,
|
||||
emails: typing.List[str]) -> dict:
|
||||
"""Assigns users to the organization.
|
||||
|
||||
Checks the seat count and throws a NotEnoughSeats exception when the
|
||||
seat count is not sufficient to assign the requested users.
|
||||
|
||||
Users are looked up by email address, and known users are
|
||||
automatically mapped.
|
||||
|
||||
:returns: the new organization document.
|
||||
"""
|
||||
|
||||
self._log.info('Adding %i new members to organization %s', len(emails), org_id)
|
||||
|
||||
users_coll = current_app.db('users')
|
||||
existing_user_docs = list(users_coll.find({'email': {'$in': emails}},
|
||||
projection={'_id': 1, 'email': 1}))
|
||||
unknown_users = set(emails) - {user['email'] for user in existing_user_docs}
|
||||
existing_users = {user['_id'] for user in existing_user_docs}
|
||||
|
||||
return self._assign_users(org_id, unknown_users, existing_users)
|
||||
|
||||
def assign_single_user(self, org_id: bson.ObjectId, *, user_id: bson.ObjectId) -> dict:
|
||||
"""Assigns a single, known user to the organization.
|
||||
|
||||
:returns: the new organization document.
|
||||
"""
|
||||
|
||||
self._log.info('Adding new member %s to organization %s', user_id, org_id)
|
||||
return self._assign_users(org_id, set(), {user_id})
|
||||
|
||||
def _assign_users(self, org_id: bson.ObjectId,
|
||||
unknown_users: typing.Set[str],
|
||||
existing_users: typing.Set[bson.ObjectId]) -> dict:
|
||||
|
||||
if self._log.isEnabledFor(logging.INFO):
|
||||
self._log.info(' - found users: %s', ', '.join(str(uid) for uid in existing_users))
|
||||
self._log.info(' - unknown users: %s', ', '.join(unknown_users))
|
||||
|
||||
org_doc = self._get_org(org_id)
|
||||
|
||||
# Compute the new members.
|
||||
members = set(org_doc.get('members') or []) | existing_users
|
||||
unknown_members = set(org_doc.get('unknown_members') or []) | unknown_users
|
||||
|
||||
# Make sure we don't exceed the current seat count.
|
||||
new_seat_count = len(members) + len(unknown_members)
|
||||
if new_seat_count > org_doc['seat_count']:
|
||||
self._log.warning('assign_users(%s, ...): Trying to increase seats to %i, '
|
||||
'but org only has %i seats.',
|
||||
org_id, new_seat_count, org_doc['seat_count'])
|
||||
raise NotEnoughSeats(org_id, org_doc['seat_count'], new_seat_count)
|
||||
|
||||
# Update the organization.
|
||||
org_doc['members'] = list(members)
|
||||
org_doc['unknown_members'] = list(unknown_members)
|
||||
|
||||
r, _, _, status = current_app.put_internal('organizations',
|
||||
remove_private_keys(org_doc),
|
||||
_id=org_id)
|
||||
if status != 200:
|
||||
self._log.error('Error updating organization; status should be 200, not %i: %s',
|
||||
status, r)
|
||||
raise ValueError(f'Unable to update organization, status code {status}')
|
||||
org_doc.update(r)
|
||||
|
||||
# Update the roles for the affected members
|
||||
for uid in existing_users:
|
||||
self.refresh_roles(uid)
|
||||
|
||||
return org_doc
|
||||
|
||||
def assign_admin(self, org_id: bson.ObjectId, *, user_id: bson.ObjectId):
|
||||
"""Assigns a user as admin user for this organization."""
|
||||
|
||||
assert isinstance(org_id, bson.ObjectId)
|
||||
assert isinstance(user_id, bson.ObjectId)
|
||||
|
||||
org_coll = current_app.db('organizations')
|
||||
users_coll = current_app.db('users')
|
||||
|
||||
if users_coll.count({'_id': user_id}) == 0:
|
||||
raise ValueError('User not found')
|
||||
|
||||
self._log.info('Updating organization %s, setting admin user to %s', org_id, user_id)
|
||||
org_coll.update_one({'_id': org_id},
|
||||
{'$set': {'admin_uid': user_id}})
|
||||
|
||||
def remove_user(self,
|
||||
org_id: bson.ObjectId,
|
||||
*,
|
||||
user_id: bson.ObjectId = None,
|
||||
email: str = None) -> dict:
|
||||
"""Removes a user from the organization.
|
||||
|
||||
The user can be identified by either user ID or email.
|
||||
|
||||
Returns the new organization document.
|
||||
"""
|
||||
|
||||
users_coll = current_app.db('users')
|
||||
|
||||
assert user_id or email
|
||||
|
||||
# Collect the email address if not given. This ensures the removal
|
||||
# if the email was accidentally in the unknown_members list.
|
||||
if email is None:
|
||||
user_doc = users_coll.find_one(user_id, projection={'email': 1})
|
||||
if user_doc is not None:
|
||||
email = user_doc['email']
|
||||
|
||||
# See if we know this user.
|
||||
if user_id is None:
|
||||
user_doc = users_coll.find_one({'email': email}, projection={'_id': 1})
|
||||
if user_doc is not None:
|
||||
user_id = user_doc['_id']
|
||||
|
||||
if user_id and not users_coll.count({'_id': user_id}):
|
||||
raise wz_exceptions.UnprocessableEntity('User does not exist')
|
||||
|
||||
self._log.info('Removing user %s / %s from organization %s', user_id, email, org_id)
|
||||
|
||||
org_doc = self._get_org(org_id)
|
||||
|
||||
# Compute the new members.
|
||||
if user_id:
|
||||
members = set(org_doc.get('members') or []) - {user_id}
|
||||
org_doc['members'] = list(members)
|
||||
|
||||
if email:
|
||||
unknown_members = set(org_doc.get('unknown_members')) - {email}
|
||||
org_doc['unknown_members'] = list(unknown_members)
|
||||
|
||||
r, _, _, status = current_app.put_internal('organizations',
|
||||
remove_private_keys(org_doc),
|
||||
_id=org_id)
|
||||
if status != 200:
|
||||
self._log.error('Error updating organization; status should be 200, not %i: %s',
|
||||
status, r)
|
||||
raise ValueError(f'Unable to update organization, status code {status}')
|
||||
org_doc.update(r)
|
||||
|
||||
# Update the roles for the affected member.
|
||||
if user_id:
|
||||
self.refresh_roles(user_id)
|
||||
|
||||
return org_doc
|
||||
|
||||
def _get_org(self, org_id: bson.ObjectId, *, projection=None):
|
||||
"""Returns the organization, or raises a ValueError."""
|
||||
|
||||
assert isinstance(org_id, bson.ObjectId)
|
||||
|
||||
org_coll = current_app.db('organizations')
|
||||
org = org_coll.find_one(org_id, projection=projection)
|
||||
if org is None:
|
||||
raise ValueError(f'Organization {org_id} not found')
|
||||
return org
|
||||
|
||||
def refresh_all_user_roles(self, org_id: bson.ObjectId):
|
||||
"""Refreshes the roles of all members."""
|
||||
|
||||
assert isinstance(org_id, bson.ObjectId)
|
||||
|
||||
org = self._get_org(org_id, projection={'members': 1})
|
||||
members = org.get('members')
|
||||
if not members:
|
||||
self._log.info('Organization %s has no members, nothing to refresh.', org_id)
|
||||
return
|
||||
|
||||
for uid in members:
|
||||
self.refresh_roles(uid)
|
||||
|
||||
def refresh_roles(self, user_id: bson.ObjectId) -> typing.Set[str]:
|
||||
"""Refreshes the user's roles to own roles + organizations' roles.
|
||||
|
||||
:returns: the applied set of roles.
|
||||
"""
|
||||
|
||||
assert isinstance(user_id, bson.ObjectId)
|
||||
|
||||
from pillar.api.service import do_badger
|
||||
|
||||
self._log.info('Refreshing roles for user %s', user_id)
|
||||
|
||||
org_coll = current_app.db('organizations')
|
||||
tokens_coll = current_app.db('tokens')
|
||||
|
||||
def aggr_roles(coll, match: dict) -> typing.Set[str]:
|
||||
query = coll.aggregate([
|
||||
{'$match': match},
|
||||
{'$project': {'org_roles': 1}},
|
||||
{'$unwind': {'path': '$org_roles'}},
|
||||
{'$group': {
|
||||
'_id': None,
|
||||
'org_roles': {'$addToSet': '$org_roles'},
|
||||
}}])
|
||||
|
||||
# If the user has no organizations/tokens at all, the query will have no results.
|
||||
try:
|
||||
org_roles_doc = query.next()
|
||||
except StopIteration:
|
||||
return set()
|
||||
return set(org_roles_doc['org_roles'])
|
||||
|
||||
# Join all organization-given roles and roles from the tokens collection.
|
||||
org_roles = aggr_roles(org_coll, {'members': user_id})
|
||||
self._log.debug('Organization-given roles for user %s: %s', user_id, org_roles)
|
||||
token_roles = aggr_roles(tokens_coll, {
|
||||
'user': user_id,
|
||||
'expire_time': {"$gt": utcnow()},
|
||||
})
|
||||
self._log.debug('Token-given roles for user %s: %s', user_id, token_roles)
|
||||
org_roles.update(token_roles)
|
||||
|
||||
users_coll = current_app.db('users')
|
||||
user_doc = users_coll.find_one(user_id, projection={'roles': 1})
|
||||
if not user_doc:
|
||||
self._log.warning('Trying refresh roles of non-existing user %s, ignoring', user_id)
|
||||
return set()
|
||||
|
||||
all_user_roles = set(user_doc.get('roles') or [])
|
||||
existing_org_roles = {role for role in all_user_roles
|
||||
if role.startswith('org-')}
|
||||
|
||||
grant_roles = org_roles - all_user_roles
|
||||
revoke_roles = existing_org_roles - org_roles
|
||||
|
||||
if grant_roles:
|
||||
do_badger('grant', roles=grant_roles, user_id=user_id)
|
||||
if revoke_roles:
|
||||
do_badger('revoke', roles=revoke_roles, user_id=user_id)
|
||||
|
||||
return all_user_roles.union(grant_roles) - revoke_roles
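The grant/revoke decision in `refresh_roles()` is plain set arithmetic. A worked example with illustrative role names:

```python
# Illustrative role names only.
all_user_roles = {'subscriber', 'org-acme', 'org-old'}   # roles currently on the user
org_roles = {'org-acme', 'org-new'}                       # roles the orgs/tokens now grant

existing_org_roles = {r for r in all_user_roles if r.startswith('org-')}  # {'org-acme', 'org-old'}

grant_roles = org_roles - all_user_roles        # {'org-new'}  -> do_badger('grant', ...)
revoke_roles = existing_org_roles - org_roles   # {'org-old'}  -> do_badger('revoke', ...)

final = all_user_roles.union(grant_roles) - revoke_roles
assert final == {'subscriber', 'org-acme', 'org-new'}
```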
|
||||
|
||||
def user_is_admin(self, org_id: bson.ObjectId) -> bool:
|
||||
"""Returns whether the currently logged in user is the admin of the organization."""
|
||||
|
||||
from pillar.api.utils.authentication import current_user_id
|
||||
|
||||
uid = current_user_id()
|
||||
if uid is None:
|
||||
return False
|
||||
|
||||
org = self._get_org(org_id, projection={'admin_uid': 1})
|
||||
return org.get('admin_uid') == uid
|
||||
|
||||
def unknown_member_roles(self, member_email: str) -> typing.Set[str]:
|
||||
"""Returns the set of organization roles for this user.
|
||||
|
||||
Assumes the user is not yet known, i.e. part of the unknown_members lists.
|
||||
"""
|
||||
|
||||
org_coll = current_app.db('organizations')
|
||||
|
||||
# Aggregate all org-given roles for this user.
|
||||
query = org_coll.aggregate([
|
||||
{'$match': {'unknown_members': member_email}},
|
||||
{'$project': {'org_roles': 1}},
|
||||
{'$unwind': {'path': '$org_roles'}},
|
||||
{'$group': {
|
||||
'_id': None,
|
||||
'org_roles': {'$addToSet': '$org_roles'},
|
||||
}}])
|
||||
|
||||
# If the user has no organizations at all, the query will have no results.
|
||||
try:
|
||||
org_roles_doc = query.next()
|
||||
except StopIteration:
|
||||
return set()
|
||||
|
||||
return set(org_roles_doc['org_roles'])
|
||||
|
||||
def make_member_known(self, member_uid: bson.ObjectId, member_email: str):
|
||||
"""Moves the given member from the unknown_members to the members lists."""
|
||||
|
||||
# This uses a direct PyMongo query rather than using Eve's put_internal,
|
||||
# to prevent simultaneous updates from dropping users.
|
||||
|
||||
org_coll = current_app.db('organizations')
|
||||
for org in org_coll.find({'unknown_members': member_email}):
|
||||
self._log.info('Updating organization %s, marking member %s/%s as known',
|
||||
org['_id'], member_uid, member_email)
|
||||
org_coll.update_one({'_id': org['_id']},
|
||||
{'$addToSet': {'members': member_uid},
|
||||
'$pull': {'unknown_members': member_email}
|
||||
})
|
||||
|
||||
def org_members(self, member_sting_ids: typing.Iterable[str]) -> typing.List[dict]:
|
||||
"""Returns the user documents of the organization members.
|
||||
|
||||
This is a workaround to provide membership information for
|
||||
organizations without giving 'mortal' users access to /api/users.
|
||||
"""
|
||||
from pillar.api.utils import str2id
|
||||
|
||||
if not member_sting_ids:
|
||||
return []
|
||||
|
||||
member_ids = [str2id(uid) for uid in member_sting_ids]
|
||||
users_coll = current_app.db('users')
|
||||
users = users_coll.find({'_id': {'$in': member_ids}},
|
||||
projection={'_id': 1, 'full_name': 1, 'email': 1})
|
||||
return list(users)
|
||||
|
||||
def user_has_organizations(self, user_id: bson.ObjectId) -> bool:
|
||||
"""Returns True iff the user has anything to do with organizations.
|
||||
|
||||
That is, if the user is admin for and/or member of any organization.
|
||||
"""
|
||||
|
||||
org_coll = current_app.db('organizations')
|
||||
|
||||
org_count = org_coll.count({'$or': [
|
||||
{'admin_uid': user_id},
|
||||
{'members': user_id}
|
||||
]})
|
||||
|
||||
return bool(org_count)
|
||||
|
||||
def user_is_unknown_member(self, member_email: str) -> bool:
|
||||
"""Return True iff the email is an unknown member of some org."""
|
||||
|
||||
org_coll = current_app.db('organizations')
|
||||
org_count = org_coll.count({'unknown_members': member_email})
|
||||
return bool(org_count)
|
||||
|
||||
def roles_for_ip_address(self, remote_addr: str) -> typing.Set[str]:
|
||||
"""Find the roles given to the user via org IP range definitions."""
|
||||
|
||||
from . import ip_ranges
|
||||
|
||||
org_coll = current_app.db('organizations')
|
||||
try:
|
||||
q = ip_ranges.query(remote_addr)
|
||||
except ValueError as ex:
|
||||
self._log.warning('Invalid remote address %s, ignoring IP-based roles: %s',
|
||||
remote_addr, ex)
|
||||
return set()
|
||||
|
||||
orgs = org_coll.find(
|
||||
{'ip_ranges': q},
|
||||
projection={'org_roles': True},
|
||||
)
|
||||
return set(role
|
||||
for org in orgs
|
||||
for role in org.get('org_roles', []))
|
||||
|
||||
def roles_for_request(self) -> typing.Set[str]:
|
||||
"""Find roles for user via the request's remote IP address."""
|
||||
|
||||
try:
|
||||
remote_addr = flask.request.access_route[0]
|
||||
except IndexError:
|
||||
return set()
|
||||
|
||||
if not remote_addr:
|
||||
return set()
|
||||
|
||||
roles = self.roles_for_ip_address(remote_addr)
|
||||
self._log.debug('Roles for IP address %s: %s', remote_addr, roles)
|
||||
|
||||
return roles
|
||||
|
||||
|
||||
def setup_app(app):
|
||||
from . import patch, hooks
|
||||
|
||||
hooks.setup_app(app)
|
||||
patch.setup_app(app)
|
||||
48
pillar/api/organizations/hooks.py
Normal file
@@ -0,0 +1,48 @@
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
|
||||
from pillar.api.utils.authentication import current_user
|
||||
|
||||
|
||||
def pre_get_organizations(request, lookup):
|
||||
user = current_user()
|
||||
if user.is_anonymous:
|
||||
raise wz_exceptions.Forbidden()
|
||||
|
||||
if user.has_cap('admin'):
|
||||
# Allow all lookups to admins.
|
||||
return
|
||||
|
||||
# Only allow users to see their own organizations.
|
||||
lookup['$or'] = [{'admin_uid': user.user_id}, {'members': user.user_id}]
|
||||
|
||||
|
||||
def on_fetched_item_organizations(org_doc: dict):
|
||||
"""Filter out binary data.
|
||||
|
||||
Eve cannot return binary data, at least not until we upgrade to a version
|
||||
that depends on Cerberus >= 1.0.
|
||||
"""
|
||||
|
||||
for ipr in org_doc.get('ip_ranges') or []:
|
||||
ipr.pop('start', None)
|
||||
ipr.pop('end', None)
|
||||
ipr.pop('prefix', None) # not binary, but useless without the other fields.
|
||||
|
||||
|
||||
def on_fetched_resource_organizations(response: dict):
|
||||
for org_doc in response.get('_items', []):
|
||||
on_fetched_item_organizations(org_doc)
|
||||
|
||||
|
||||
def pre_post_organizations(request):
|
||||
user = current_user()
|
||||
if not user.has_cap('create-organization'):
|
||||
raise wz_exceptions.Forbidden()
|
||||
|
||||
|
||||
def setup_app(app):
|
||||
app.on_pre_GET_organizations += pre_get_organizations
|
||||
app.on_pre_POST_organizations += pre_post_organizations
|
||||
|
||||
app.on_fetched_item_organizations += on_fetched_item_organizations
|
||||
app.on_fetched_resource_organizations += on_fetched_resource_organizations
|
||||
75
pillar/api/organizations/ip_ranges.py
Normal file
@@ -0,0 +1,75 @@
|
||||
"""IP range support for Organizations."""
|
||||
|
||||
from IPy import IP
|
||||
|
||||
# 128 bits all set to 1
|
||||
ONES_128 = 2 ** 128 - 1
|
||||
|
||||
|
||||
def doc(iprange: str, min_prefixlen6: int=0, min_prefixlen4: int=0) -> dict:
|
||||
"""Convert a human-readable string like '1.2.3.4/24' to a Mongo document.
|
||||
|
||||
This converts the address to IPv6 and computes the start/end addresses
|
||||
of the range. The address, its prefix size, and start and end address,
|
||||
are returned as a dict.
|
||||
|
||||
Addresses are stored as big-endian binary data because MongoDB doesn't
|
||||
support 128 bits integers.
|
||||
|
||||
:param iprange: the IP address and mask size, can be IPv6 or IPv4.
|
||||
:param min_prefixlen6: if given, causes a ValueError when the mask size
|
||||
is too low. Note that the mask size is always
|
||||
evaluated only for IPv6 addresses.
|
||||
:param min_prefixlen4: if given, causes a ValueError when the mask size
|
||||
is too low. Note that the mask size is always
|
||||
evaluated only for IPv4 addresses.
|
||||
:returns: a dict like: {
|
||||
'start': b'xxxxx' with the lowest IP address in the range.
|
||||
'end': b'yyyyy' with the highest IP address in the range.
|
||||
'human': 'aaaa:bbbb::cc00/120' with the human-readable representation.
|
||||
'prefix': 120, the prefix length of the netmask in bits.
|
||||
}
|
||||
"""
|
||||
|
||||
ip = IP(iprange, make_net=True)
|
||||
prefixlen = ip.prefixlen()
|
||||
if ip.version() == 4:
|
||||
if prefixlen < min_prefixlen4:
|
||||
raise ValueError(f'Prefix length {prefixlen} smaller than allowed {min_prefixlen4}')
|
||||
ip = ip.v46map()
|
||||
else:
|
||||
if prefixlen < min_prefixlen6:
|
||||
raise ValueError(f'Prefix length {prefixlen} smaller than allowed {min_prefixlen6}')
|
||||
|
||||
addr = ip.int()
|
||||
|
||||
# Set all address bits to 1 where the mask is 0 to obtain the largest address.
|
||||
end = addr | (ONES_128 % ip.netmask().int())
|
||||
|
||||
# This ensures that even a single host is represented as /128 in the human-readable form.
|
||||
ip.NoPrefixForSingleIp = False
|
||||
|
||||
return {
|
||||
'start': addr.to_bytes(16, 'big'),
|
||||
'end': end.to_bytes(16, 'big'),
|
||||
'human': ip.strCompressed(),
|
||||
'prefix': ip.prefixlen(),
|
||||
}
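A usage sketch for `doc()` above and the `query()` helper defined further down; it needs the IPy package, and the printed values are illustrative rather than exact:

```python
# Usage sketch; output values are illustrative.
from pillar.api.organizations import ip_ranges

r = ip_ranges.doc('192.168.3.0/24', min_prefixlen4=8)
print(r['prefix'])                      # 120: the /24 becomes /120 once mapped to IPv6
print(r['human'])                       # compressed IPv6 form of the mapped range
print(len(r['start']), len(r['end']))   # 16 16: big-endian 128-bit range boundaries

# Matching an address against stored ranges:
q = ip_ranges.query('192.168.3.47')
# -> {'$elemMatch': {'start': {'$lte': b'...'}, 'end': {'$gte': b'...'}}}
# orgs = org_coll.find({'ip_ranges': q}, projection={'org_roles': True})
```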
|
||||
|
||||
|
||||
def query(address: str) -> dict:
|
||||
"""Return a dict usable for querying all organizations whose IP range matches the given one.
|
||||
|
||||
:returns: a dict like:
|
||||
{$elemMatch: {'start': {$lte: b'xxxxx'}, 'end': {$gte: b'xxxxx'}}}
|
||||
"""
|
||||
|
||||
ip = IP(address)
|
||||
if ip.version() == 4:
|
||||
ip = ip.v46map()
|
||||
for_mongo = ip.ip.to_bytes(16, 'big')
|
||||
|
||||
return {'$elemMatch': {
|
||||
'start': {'$lte': for_mongo},
|
||||
'end': {'$gte': for_mongo},
|
||||
}}
|
||||
228
pillar/api/organizations/patch.py
Normal file
@@ -0,0 +1,228 @@
|
||||
"""Organization patching support."""
|
||||
|
||||
import logging
|
||||
|
||||
import bson
|
||||
from flask import Blueprint, jsonify
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
|
||||
from pillar.api.utils.authentication import current_user
|
||||
from pillar.api.utils import authorization, str2id, jsonify
|
||||
from pillar.api import patch_handler
|
||||
from pillar import current_app
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
patch_api_blueprint = Blueprint('pillar.api.organizations.patch', __name__)
|
||||
|
||||
|
||||
class OrganizationPatchHandler(patch_handler.AbstractPatchHandler):
|
||||
item_name = 'organization'
|
||||
|
||||
@authorization.require_login()
|
||||
def patch_assign_users(self, org_id: bson.ObjectId, patch: dict):
|
||||
"""Assigns users to an organization.
|
||||
|
||||
The calling user must be admin of the organization.
|
||||
"""
|
||||
from . import NotEnoughSeats
|
||||
|
||||
self._assert_is_admin(org_id)
|
||||
|
||||
# Do some basic validation.
|
||||
try:
|
||||
emails = patch['emails']
|
||||
except KeyError:
|
||||
raise wz_exceptions.BadRequest('No key "email" in patch.')
|
||||
|
||||
# Skip empty emails.
|
||||
emails = [stripped
|
||||
for stripped in (email.strip() for email in emails)
|
||||
if stripped]
|
||||
|
||||
log.info('User %s uses PATCH to add users to organization %s',
|
||||
current_user().user_id, org_id)
|
||||
try:
|
||||
org_doc = current_app.org_manager.assign_users(org_id, emails)
|
||||
except NotEnoughSeats:
|
||||
resp = jsonify({'_message': f'Not enough seats to assign {len(emails)} users'})
|
||||
resp.status_code = 422
|
||||
return resp
|
||||
|
||||
return jsonify(org_doc)
|
||||
|
||||
@authorization.require_login()
|
||||
def patch_assign_user(self, org_id: bson.ObjectId, patch: dict):
|
||||
"""Assigns a single user by User ID to an organization.
|
||||
|
||||
The calling user must be admin of the organization.
|
||||
"""
|
||||
from . import NotEnoughSeats
|
||||
self._assert_is_admin(org_id)
|
||||
|
||||
# Do some basic validation.
|
||||
try:
|
||||
user_id = patch['user_id']
|
||||
except KeyError:
|
||||
raise wz_exceptions.BadRequest('No key "user_id" in patch.')
|
||||
|
||||
user_oid = str2id(user_id)
|
||||
log.info('User %s uses PATCH to add user %s to organization %s',
|
||||
current_user().user_id, user_oid, org_id)
|
||||
try:
|
||||
org_doc = current_app.org_manager.assign_single_user(org_id, user_id=user_oid)
|
||||
except NotEnoughSeats:
|
||||
resp = jsonify({'_message': f'Not enough seats to assign this user'})
|
||||
resp.status_code = 422
|
||||
return resp
|
||||
|
||||
return jsonify(org_doc)
|
||||
|
||||
@authorization.require_login()
|
||||
def patch_assign_admin(self, org_id: bson.ObjectId, patch: dict):
|
||||
"""Assigns a single user by User ID as admin of the organization.
|
||||
|
||||
The calling user must be admin of the organization.
|
||||
"""
|
||||
|
||||
self._assert_is_admin(org_id)
|
||||
|
||||
# Do some basic validation.
|
||||
try:
|
||||
user_id = patch['user_id']
|
||||
except KeyError:
|
||||
raise wz_exceptions.BadRequest('No key "user_id" in patch.')
|
||||
|
||||
user_oid = str2id(user_id)
|
||||
log.info('User %s uses PATCH to set user %s as admin for organization %s',
|
||||
current_user().user_id, user_oid, org_id)
|
||||
current_app.org_manager.assign_admin(org_id, user_id=user_oid)
|
||||
|
||||
@authorization.require_login()
|
||||
def patch_remove_user(self, org_id: bson.ObjectId, patch: dict):
|
||||
"""Removes a user from an organization.
|
||||
|
||||
The calling user must be admin of the organization.
|
||||
"""
|
||||
|
||||
# Do some basic validation.
|
||||
email = patch.get('email') or None
|
||||
user_id = patch.get('user_id')
|
||||
user_oid = str2id(user_id) if user_id else None
|
||||
|
||||
# Users require admin rights on the org, except when removing themselves.
|
||||
current_user_id = current_user().user_id
|
||||
if user_oid is None or user_oid != current_user_id:
|
||||
self._assert_is_admin(org_id)
|
||||
|
||||
log.info('User %s uses PATCH to remove user %s from organization %s',
|
||||
current_user_id, user_oid, org_id)
|
||||
|
||||
org_doc = current_app.org_manager.remove_user(org_id, user_id=user_oid, email=email)
|
||||
return jsonify(org_doc)
|
||||
|
||||
def _assert_is_admin(self, org_id):
|
||||
om = current_app.org_manager
|
||||
|
||||
if current_user().has_cap('admin'):
|
||||
# Always allow admins to edit every organization.
|
||||
return
|
||||
|
||||
if not om.user_is_admin(org_id):
|
||||
log.warning('User %s uses PATCH to edit organization %s, '
|
||||
'but is not admin of that Organization. Request denied.',
|
||||
current_user().user_id, org_id)
|
||||
raise wz_exceptions.Forbidden()
|
||||
|
||||
@authorization.require_login()
|
||||
def patch_edit_from_web(self, org_id: bson.ObjectId, patch: dict):
|
||||
"""Updates Organization fields from the web.
|
||||
|
||||
The PATCH command supports the following payload. The 'name' field must
|
||||
be set, all other fields are optional. When an optional field is
|
||||
omitted it will be handled as an instruction to clear that field.
|
||||
{'name': str,
|
||||
'description': str,
|
||||
'website': str,
|
||||
'location': str,
|
||||
'ip_ranges': list of human-readable IP ranges}
|
||||
"""
|
||||
|
||||
from pymongo.results import UpdateResult
|
||||
from . import ip_ranges
|
||||
|
||||
self._assert_is_admin(org_id)
|
||||
user = current_user()
|
||||
current_user_id = user.user_id
|
||||
|
||||
# Only take known fields from the patch, don't just copy everything.
|
||||
update = {
|
||||
'name': patch['name'].strip(),
|
||||
'description': patch.get('description', '').strip(),
|
||||
'website': patch.get('website', '').strip(),
|
||||
'location': patch.get('location', '').strip(),
|
||||
}
|
||||
unset = {}
|
||||
|
||||
# Special transformation for IP ranges
|
||||
iprs = patch.get('ip_ranges')
|
||||
if iprs:
|
||||
ipr_docs = []
|
||||
for r in iprs:
|
||||
try:
|
||||
doc = ip_ranges.doc(r, min_prefixlen6=48, min_prefixlen4=8)
|
||||
except ValueError as ex:
|
||||
raise wz_exceptions.UnprocessableEntity(f'Invalid IP range {r!r}: {ex}')
|
||||
ipr_docs.append(doc)
|
||||
update['ip_ranges'] = ipr_docs
|
||||
else:
|
||||
unset['ip_ranges'] = True
|
||||
|
||||
refresh_user_roles = False
|
||||
if user.has_cap('admin'):
|
||||
if 'seat_count' in patch:
|
||||
update['seat_count'] = int(patch['seat_count'])
|
||||
if 'org_roles' in patch:
|
||||
org_roles = [stripped for stripped in (role.strip() for role in patch['org_roles'])
|
||||
if stripped]
|
||||
if not all(role.startswith('org-') for role in org_roles):
|
||||
raise wz_exceptions.UnprocessableEntity(
|
||||
'Invalid role given, all roles must start with "org-"')
|
||||
|
||||
update['org_roles'] = org_roles
|
||||
refresh_user_roles = True
|
||||
|
||||
self.log.info('User %s edits Organization %s: %s', current_user_id, org_id, update)
|
||||
|
||||
validator = current_app.validator_for_resource('organizations')
|
||||
if not validator.validate_update(update, org_id):
|
||||
resp = jsonify({
|
||||
'_errors': validator.errors,
|
||||
'_message': ', '.join(f'{field}: {error}'
|
||||
for field, error in validator.errors.items()),
|
||||
})
|
||||
resp.status_code = 422
|
||||
return resp
|
||||
|
||||
# Figure out what to set and what to unset
|
||||
for_mongo = {'$set': update}
|
||||
if unset:
|
||||
for_mongo['$unset'] = unset
|
||||
|
||||
organizations_coll = current_app.db('organizations')
|
||||
result: UpdateResult = organizations_coll.update_one({'_id': org_id}, for_mongo)
|
||||
|
||||
if result.matched_count != 1:
|
||||
self.log.warning('User %s edits Organization %s but update matched %i items',
|
||||
current_user_id, org_id, result.matched_count)
|
||||
raise wz_exceptions.BadRequest()
|
||||
|
||||
if refresh_user_roles:
|
||||
self.log.info('Organization roles set for org %s, refreshing users', org_id)
|
||||
current_app.org_manager.refresh_all_user_roles(org_id)
|
||||
|
||||
return '', 204
|
||||
|
||||
|
||||
def setup_app(app):
|
||||
OrganizationPatchHandler(patch_api_blueprint)
|
||||
app.register_api_blueprint(patch_api_blueprint, url_prefix='/organizations')
|
||||
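For illustration only (not part of this diff), a web client would invoke the handler above by sending a PATCH request whose body names the operation and the organization fields described in the docstring. The URL prefix, the organization ID and the token handling below are assumptions; the operation name 'edit-from-web' follows the patch_xxx naming convention of the abstract PATCH handler.

# Minimal sketch of a client-side call to the 'edit-from-web' operation.
# URL prefix, org ID and auth handling are hypothetical.
import requests

ORG_URL = 'https://example.com/api/organizations/5e8e17d563aca02c379cf000'  # hypothetical
payload = {
    'op': 'edit-from-web',
    'name': 'Example Studio',           # required
    'description': 'A small studio',    # optional; omitting it clears the field
    'website': 'https://example.com',
    'location': 'Amsterdam',
    'ip_ranges': ['192.168.0.0/16'],    # human-readable ranges, parsed by ip_ranges.doc()
}
resp = requests.patch(ORG_URL, json=payload, auth=('auth-token', ''))
resp.raise_for_status()  # 204 No Content on success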
92
pillar/api/patch_handler.py
Normal file
@@ -0,0 +1,92 @@
|
||||
"""Handler for PATCH requests.
|
||||
|
||||
This supports PATCH request in the sense described by William Durand:
|
||||
http://williamdurand.fr/2014/02/14/please-do-not-patch-like-an-idiot/
|
||||
|
||||
Each PATCH should be a JSON dict with at least a key 'op' with the
|
||||
name of the operation to perform.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
import flask
|
||||
|
||||
from pillar.api.utils import authorization
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AbstractPatchHandler:
|
||||
"""Abstract PATCH handler supporting multiple operations.
|
||||
|
||||
Each operation, i.e. possible value of the 'op' key in the PATCH body,
|
||||
should be matched to a similarly named "patch_xxx" function in a subclass.
|
||||
For example, the operation "set-owner" is mapped to "patch_set_owner".
|
||||
|
||||
:cvar route: the Flask/Werkzeug route to attach this handler to.
|
||||
For most handlers, the default will be fine.
|
||||
:cvar item_name: the name of the things to patch, like "job", "task" etc.
|
||||
Only used for logging.
|
||||
"""
|
||||
|
||||
route: str = '/<object_id>'
|
||||
item_name: str = None
|
||||
|
||||
def __init_subclass__(cls, **kwargs):
|
||||
if not cls.route:
|
||||
raise ValueError('Subclass must set route')
|
||||
if not cls.item_name:
|
||||
raise ValueError('Subclass must set item_name')
|
||||
|
||||
def __init__(self, blueprint: flask.Blueprint):
|
||||
self.log: logging.Logger = log.getChild(self.__class__.__name__)
|
||||
self.patch_handlers = {
|
||||
name[6:].replace('_', '-'): getattr(self, name)
|
||||
for name in dir(self)
|
||||
if name.startswith('patch_') and callable(getattr(self, name))
|
||||
}
|
||||
|
||||
if self.log.isEnabledFor(logging.INFO):
|
||||
self.log.info('Creating PATCH handler %s.%s%s for operations: %s',
|
||||
blueprint.name, self.patch.__name__, self.route,
|
||||
sorted(self.patch_handlers.keys()))
|
||||
|
||||
blueprint.add_url_rule(self.route,
|
||||
self.patch.__name__,
|
||||
self.patch,
|
||||
methods=['PATCH'])
|
||||
|
||||
@authorization.require_login()
|
||||
def patch(self, object_id: str):
|
||||
from flask import request
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
from pillar.api.utils import str2id, authentication
|
||||
|
||||
# Parse the request
|
||||
real_object_id = str2id(object_id)
|
||||
patch = request.get_json()
|
||||
if not patch:
|
||||
self.log.info('Bad PATCH request, did not contain JSON')
|
||||
raise wz_exceptions.BadRequest('Patch must contain JSON')
|
||||
|
||||
try:
|
||||
patch_op = patch['op']
|
||||
except KeyError:
|
||||
self.log.info("Bad PATCH request, did not contain 'op' key")
|
||||
raise wz_exceptions.BadRequest("PATCH should contain 'op' key to denote operation.")
|
||||
|
||||
log.debug('User %s wants to PATCH "%s" %s %s',
|
||||
authentication.current_user_id(), patch_op, self.item_name, real_object_id)
|
||||
|
||||
# Find the PATCH handler for the operation.
|
||||
try:
|
||||
handler = self.patch_handlers[patch_op]
|
||||
except KeyError:
|
||||
log.warning('No %s PATCH handler for operation %r', self.item_name, patch_op)
|
||||
raise wz_exceptions.BadRequest('Operation %r not supported' % patch_op)
|
||||
|
||||
# Let the PATCH handler do its thing.
|
||||
response = handler(real_object_id, patch)
|
||||
if response is None:
|
||||
return '', 204
|
||||
return response
|
||||
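As a sketch of how the operation-to-method mapping described above is meant to be used (this subclass is hypothetical and not part of the diff), a handler only needs to set item_name and define patch_<operation> methods; the constructor maps patch_set_owner to the 'set-owner' operation automatically.

# Hypothetical subclass, assuming it lives next to AbstractPatchHandler above.
import bson
import flask

example_blueprint = flask.Blueprint('example_patch', __name__)


class JobPatchHandler(AbstractPatchHandler):
    item_name = 'job'  # route keeps the default '/<object_id>'

    def patch_set_owner(self, job_id: bson.ObjectId, patch: dict):
        # Exposed as {'op': 'set-owner', ...}; returning None yields a 204 response.
        self.log.info('Setting owner of job %s to %s', job_id, patch.get('owner'))

    def patch_requeue(self, job_id: bson.ObjectId, patch: dict):
        # Exposed as {'op': 'requeue'}.
        self.log.info('Requeueing job %s', job_id)


# Instantiating the handler registers the PATCH route on the blueprint.
JobPatchHandler(example_blueprint)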
29
pillar/api/projects/__init__.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from . import hooks
|
||||
from .routes import blueprint_api
|
||||
|
||||
|
||||
def setup_app(app, api_prefix):
|
||||
from . import patch
|
||||
patch.setup_app(app)
|
||||
|
||||
app.on_replace_projects += hooks.override_is_private_field
|
||||
app.on_replace_projects += hooks.before_edit_check_permissions
|
||||
app.on_replace_projects += hooks.protect_sensitive_fields
|
||||
|
||||
app.on_update_projects += hooks.override_is_private_field
|
||||
app.on_update_projects += hooks.before_edit_check_permissions
|
||||
app.on_update_projects += hooks.protect_sensitive_fields
|
||||
|
||||
app.on_delete_item_projects += hooks.before_delete_project
|
||||
app.on_deleted_item_projects += hooks.after_delete_project
|
||||
|
||||
app.on_insert_projects += hooks.before_inserting_override_is_private_field
|
||||
app.on_insert_projects += hooks.before_inserting_projects
|
||||
app.on_inserted_projects += hooks.after_inserting_projects
|
||||
|
||||
app.on_fetched_item_projects += hooks.before_returning_project_permissions
|
||||
app.on_fetched_resource_projects += hooks.before_returning_project_resource_permissions
|
||||
app.on_fetched_item_projects += hooks.project_node_type_has_method
|
||||
app.on_fetched_resource_projects += hooks.projects_node_type_has_method
|
||||
|
||||
app.register_api_blueprint(blueprint_api, url_prefix=api_prefix)
|
||||
243
pillar/api/projects/hooks.py
Normal file
@@ -0,0 +1,243 @@
|
||||
import copy
|
||||
import logging
|
||||
|
||||
from flask import request, abort
|
||||
|
||||
from pillar import current_app
|
||||
from pillar.api.node_types.asset import node_type_asset
|
||||
from pillar.api.node_types.comment import node_type_comment
|
||||
from pillar.api.node_types.group import node_type_group
|
||||
from pillar.api.node_types.group_texture import node_type_group_texture
|
||||
from pillar.api.node_types.texture import node_type_texture
|
||||
from pillar.api.file_storage_backends import default_storage_backend
|
||||
from pillar.api.utils import authorization, authentication
|
||||
from pillar.api.utils import remove_private_keys
|
||||
from pillar.api.utils.authorization import user_has_role, check_permissions
|
||||
from pillar.auth import current_user
|
||||
from .utils import abort_with_error
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Default project permissions for the admin group.
|
||||
DEFAULT_ADMIN_GROUP_PERMISSIONS = ['GET', 'PUT', 'POST', 'DELETE']
|
||||
|
||||
|
||||
def before_inserting_projects(items):
|
||||
"""Strip unwanted properties, that will be assigned after creation. Also,
|
||||
verify permission to create a project (check quota, check role).
|
||||
|
||||
:param items: List of project docs that have been inserted (normally one)
|
||||
"""
|
||||
|
||||
# Allow admin users to do whatever they want.
|
||||
if user_has_role('admin'):
|
||||
return
|
||||
|
||||
for item in items:
|
||||
item.pop('url', None)
|
||||
|
||||
|
||||
def override_is_private_field(project, original):
|
||||
"""Override the 'is_private' property from the world permissions.
|
||||
|
||||
:param project: the project, which will be updated
|
||||
"""
|
||||
|
||||
# No permissions, no access.
|
||||
if 'permissions' not in project:
|
||||
project['is_private'] = True
|
||||
return
|
||||
|
||||
world_perms = project['permissions'].get('world', [])
|
||||
is_private = 'GET' not in world_perms
|
||||
project['is_private'] = is_private
|
||||
|
||||
|
||||
def before_inserting_override_is_private_field(projects):
|
||||
for project in projects:
|
||||
override_is_private_field(project, None)
|
||||
|
||||
|
||||
def before_edit_check_permissions(document, original):
|
||||
check_permissions('projects', original, request.method)
|
||||
|
||||
|
||||
def before_delete_project(document):
|
||||
"""Checks permissions before we allow deletion"""
|
||||
|
||||
check_permissions('projects', document, request.method)
|
||||
log.info('Deleting project %s on behalf of user %s', document['_id'], current_user)
|
||||
|
||||
|
||||
def after_delete_project(project: dict):
|
||||
"""Perform delete on the project's files too."""
|
||||
|
||||
from eve.methods.delete import delete
|
||||
|
||||
pid = project['_id']
|
||||
log.info('Project %s was deleted, also deleting its files.', pid)
|
||||
|
||||
r, _, _, status = delete('files', {'project': pid})
|
||||
if status != 204:
|
||||
log.warning('Unable to delete files of project %s: %s', pid, r)
|
||||
|
||||
|
||||
def protect_sensitive_fields(document, original):
|
||||
"""When not logged in as admin, prevents update to certain fields."""
|
||||
|
||||
# Allow admin users to do whatever they want.
|
||||
if user_has_role('admin'):
|
||||
return
|
||||
|
||||
def revert(name):
|
||||
if name not in original:
|
||||
try:
|
||||
del document[name]
|
||||
except KeyError:
|
||||
pass
|
||||
return
|
||||
document[name] = original[name]
|
||||
|
||||
revert('status')
|
||||
revert('category')
|
||||
revert('user')
|
||||
|
||||
if 'url' in original:
|
||||
revert('url')
|
||||
|
||||
|
||||
def after_inserting_projects(projects):
|
||||
"""After inserting a project in the collection we do some processing such as:
|
||||
- apply the right permissions
|
||||
- define basic node types
|
||||
- optionally generate a url
|
||||
- initialize storage space
|
||||
|
||||
:param projects: List of project docs that have been inserted (normally one)
|
||||
"""
|
||||
|
||||
users_collection = current_app.data.driver.db['users']
|
||||
for project in projects:
|
||||
owner_id = project.get('user', None)
|
||||
owner = users_collection.find_one(owner_id)
|
||||
after_inserting_project(project, owner)
|
||||
|
||||
|
||||
def after_inserting_project(project, db_user):
|
||||
from pillar.auth import UserClass
|
||||
|
||||
project_id = project['_id']
|
||||
user_id = db_user['_id']
|
||||
|
||||
# Create a project-specific admin group (with name matching the project id)
|
||||
result, _, _, status = current_app.post_internal('groups', {'name': str(project_id)})
|
||||
if status != 201:
|
||||
log.error('Unable to create admin group for new project %s: %s',
|
||||
project_id, result)
|
||||
return abort_with_error(status)
|
||||
|
||||
admin_group_id = result['_id']
|
||||
log.debug('Created admin group %s for project %s', admin_group_id, project_id)
|
||||
|
||||
# Assign the current user to the group
|
||||
db_user.setdefault('groups', []).append(admin_group_id)
|
||||
|
||||
result, _, _, status = current_app.patch_internal('users', {'groups': db_user['groups']},
|
||||
_id=user_id)
|
||||
if status != 200:
|
||||
log.error('Unable to add user %s as member of admin group %s for new project %s: %s',
|
||||
user_id, admin_group_id, project_id, result)
|
||||
return abort_with_error(status)
|
||||
log.debug('Made user %s member of group %s', user_id, admin_group_id)
|
||||
|
||||
# Assign the group to the project with admin rights
|
||||
owner_user = UserClass.construct('', db_user)
|
||||
is_admin = authorization.is_admin(owner_user)
|
||||
world_permissions = ['GET'] if is_admin else []
|
||||
permissions = {
|
||||
'world': world_permissions,
|
||||
'users': [],
|
||||
'groups': [
|
||||
{'group': admin_group_id,
|
||||
'methods': DEFAULT_ADMIN_GROUP_PERMISSIONS[:]},
|
||||
]
|
||||
}
|
||||
|
||||
def with_permissions(node_type):
|
||||
copied = copy.deepcopy(node_type)
|
||||
copied['permissions'] = permissions
|
||||
return copied
|
||||
|
||||
# Assign permissions to the project itself, as well as to the node_types
|
||||
project['permissions'] = permissions
|
||||
project['node_types'] = [
|
||||
with_permissions(node_type_group),
|
||||
with_permissions(node_type_asset),
|
||||
with_permissions(node_type_comment),
|
||||
with_permissions(node_type_texture),
|
||||
with_permissions(node_type_group_texture),
|
||||
]
|
||||
|
||||
# Allow admin users to use whatever url they want.
|
||||
if not is_admin or not project.get('url'):
|
||||
if project.get('category', '') == 'home':
|
||||
project['url'] = 'home'
|
||||
else:
|
||||
project['url'] = "p-{!s}".format(project_id)
|
||||
|
||||
# Initialize storage using the default specified in STORAGE_BACKEND
|
||||
default_storage_backend(str(project_id))
|
||||
|
||||
# Commit the changes directly to the MongoDB; a PUT is not allowed yet,
|
||||
# as the project doesn't have a valid permission structure.
|
||||
projects_collection = current_app.data.driver.db['projects']
|
||||
result = projects_collection.update_one({'_id': project_id},
|
||||
{'$set': remove_private_keys(project)})
|
||||
if result.matched_count != 1:
|
||||
log.error('Unable to update project %s: %s', project_id, result.raw_result)
|
||||
abort_with_error(500)
|
||||
|
||||
|
||||
def before_returning_project_permissions(response):
|
||||
# Run validation process, since GET on nodes entry point is public
|
||||
check_permissions('projects', response, 'GET', append_allowed_methods=True)
|
||||
|
||||
|
||||
def before_returning_project_resource_permissions(response):
|
||||
# Return only those projects the user has access to.
|
||||
allow = []
|
||||
for project in response['_items']:
|
||||
if authorization.has_permissions('projects', project,
|
||||
'GET', append_allowed_methods=True):
|
||||
allow.append(project)
|
||||
else:
|
||||
log.debug('User %s requested project %s, but has no access to it; filtered out.',
|
||||
authentication.current_user_id(), project['_id'])
|
||||
|
||||
response['_items'] = allow
|
||||
|
||||
|
||||
def project_node_type_has_method(response):
|
||||
"""Check for a specific request arg, and check generate the allowed_methods
|
||||
list for the required node_type.
|
||||
"""
|
||||
|
||||
node_type_name = request.args.get('node_type', '')
|
||||
|
||||
# Proceed only if a node_type has been requested
|
||||
if not node_type_name:
|
||||
return
|
||||
|
||||
# Look up the node type in the project document
|
||||
if not any(node_type.get('name') == node_type_name
|
||||
for node_type in response['node_types']):
|
||||
return abort(404)
|
||||
|
||||
# Check permissions and append the allowed_methods to the node_type
|
||||
check_permissions('projects', response, 'GET', append_allowed_methods=True,
|
||||
check_node_type=node_type_name)
|
||||
|
||||
|
||||
def projects_node_type_has_method(response):
|
||||
for project in response['_items']:
|
||||
project_node_type_has_method(project)
|
||||
44
pillar/api/projects/merging.py
Normal file
@@ -0,0 +1,44 @@
|
||||
"""Code for merging projects."""
|
||||
import logging
|
||||
|
||||
from bson import ObjectId
|
||||
|
||||
from pillar import current_app
|
||||
from pillar.api.file_storage.moving import move_to_bucket
|
||||
from pillar.api.utils import random_etag, utcnow
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def merge_project(pid_from: ObjectId, pid_to: ObjectId):
|
||||
"""Move nodes and files from one project to another.
|
||||
|
||||
Note that this may invalidate the nodes, as their node type definition
|
||||
may differ between projects.
|
||||
"""
|
||||
log.info('Moving project contents from %s to %s', pid_from, pid_to)
|
||||
assert isinstance(pid_from, ObjectId)
|
||||
assert isinstance(pid_to, ObjectId)
|
||||
|
||||
files_coll = current_app.db('files')
|
||||
nodes_coll = current_app.db('nodes')
|
||||
|
||||
# Move the files first. Since this requires API calls to an external
|
||||
# service, this is more likely to go wrong than moving the nodes.
|
||||
to_move = files_coll.find({'project': pid_from}, projection={'_id': 1})
|
||||
log.info('Moving %d files to project %s', to_move.count(), pid_to)
|
||||
for file_doc in to_move:
|
||||
fid = file_doc['_id']
|
||||
log.debug('moving file %s to project %s', fid, pid_to)
|
||||
move_to_bucket(fid, pid_to)
|
||||
|
||||
# Mass-move the nodes.
|
||||
etag = random_etag()
|
||||
result = nodes_coll.update_many(
|
||||
{'project': pid_from},
|
||||
{'$set': {'project': pid_to,
|
||||
'_etag': etag,
|
||||
'_updated': utcnow(),
|
||||
}}
|
||||
)
|
||||
log.info('Moved %d nodes to project %s', result.modified_count, pid_to)
|
||||
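A hedged example of how merge_project might be invoked, for instance from a manage-script or an interactive shell; the project IDs below are made up, and an application context of the Pillar app is assumed.

# Sketch only: move everything from one project into another.
from bson import ObjectId
from pillar.api.projects.merging import merge_project

pid_from = ObjectId('5a0c3b02c379cf0005e8e17d')  # hypothetical source project
pid_to = ObjectId('5a0c3b02c379cf0005e8e17e')    # hypothetical target project
merge_project(pid_from, pid_to)                  # moves files first, then the nodes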
85
pillar/api/projects/patch.py
Normal file
@@ -0,0 +1,85 @@
|
||||
"""Project patching support."""
|
||||
|
||||
import logging
|
||||
|
||||
import flask
|
||||
from flask import Blueprint, request
|
||||
import werkzeug.exceptions as wz_exceptions
|
||||
|
||||
from pillar import current_app
|
||||
from pillar.auth import current_user
|
||||
from pillar.api.utils import random_etag, str2id, utcnow
|
||||
from pillar.api.utils import authorization
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
blueprint = Blueprint('projects.patch', __name__)
|
||||
|
||||
|
||||
@blueprint.route('/<project_id>', methods=['PATCH'])
|
||||
@authorization.require_login()
|
||||
def patch_project(project_id: str):
|
||||
"""Undelete a project.
|
||||
|
||||
This is done via a custom PATCH due to the lack of transactions in MongoDB;
|
||||
we cannot undelete both project-referenced files and file-referenced
|
||||
projects in one atomic operation.
|
||||
"""
|
||||
|
||||
# Parse the request
|
||||
pid = str2id(project_id)
|
||||
patch = request.get_json()
|
||||
if not patch:
|
||||
raise wz_exceptions.BadRequest('Expected JSON body')
|
||||
|
||||
log.debug('User %s wants to PATCH project %s: %s', current_user, pid, patch)
|
||||
|
||||
# 'undelete' is the only operation we support now, so no fancy handler registration.
|
||||
op = patch.get('op', '')
|
||||
if op != 'undelete':
|
||||
log.warning('User %s sent unsupported PATCH op %r to project %s: %s',
|
||||
current_user, op, pid, patch)
|
||||
raise wz_exceptions.BadRequest(f'unsupported operation {op!r}')
|
||||
|
||||
# Get the project to find the user's permissions.
|
||||
proj_coll = current_app.db('projects')
|
||||
proj = proj_coll.find_one({'_id': pid})
|
||||
if not proj:
|
||||
raise wz_exceptions.NotFound(f'project {pid} not found')
|
||||
allowed = authorization.compute_allowed_methods('projects', proj)
|
||||
if 'PUT' not in allowed:
|
||||
log.warning('User %s tried to undelete project %s but only has permissions %r',
|
||||
current_user, pid, allowed)
|
||||
raise wz_exceptions.Forbidden(f'no PUT access to project {pid}')
|
||||
|
||||
if not proj.get('_deleted', False):
|
||||
raise wz_exceptions.BadRequest(f'project {pid} was not deleted, unable to undelete')
|
||||
|
||||
# Undelete the files. We cannot do this via Eve, as it doesn't support
|
||||
# PATCHing collections, so direct MongoDB modification is used to set
|
||||
# _deleted=False and provide new _etag and _updated values.
|
||||
new_etag = random_etag()
|
||||
|
||||
log.debug('undeleting files before undeleting project %s', pid)
|
||||
files_coll = current_app.db('files')
|
||||
update_result = files_coll.update_many(
|
||||
{'project': pid},
|
||||
{'$set': {'_deleted': False,
|
||||
'_etag': new_etag,
|
||||
'_updated': utcnow()}})
|
||||
log.info('undeleted %d of %d file documents of project %s',
|
||||
update_result.modified_count, update_result.matched_count, pid)
|
||||
|
||||
log.info('undeleting project %s on behalf of user %s', pid, current_user)
|
||||
update_result = proj_coll.update_one({'_id': pid},
|
||||
{'$set': {'_deleted': False}})
|
||||
log.info('undeleted %d project document %s', update_result.modified_count, pid)
|
||||
|
||||
resp = flask.Response('', status=204)
|
||||
resp.location = flask.url_for('projects.view', project_url=proj['url'])
|
||||
return resp
|
||||
|
||||
|
||||
def setup_app(app):
|
||||
# This needs to be on the same URL prefix as Eve uses for the collection,
|
||||
# and not /p as used for the other Projects API calls.
|
||||
app.register_api_blueprint(blueprint, url_prefix='/projects')
|
||||
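For illustration (not part of the diff), undeleting a project then boils down to a single PATCH with op 'undelete'; the URL prefix, project ID and auth handling are assumptions.

# Minimal client-side sketch of the 'undelete' PATCH described above.
import requests

PROJECT_URL = 'https://example.com/api/projects/5a0c3b02c379cf0005e8e17d'  # hypothetical
resp = requests.patch(PROJECT_URL, json={'op': 'undelete'}, auth=('auth-token', ''))
assert resp.status_code == 204
print('Undeleted; project page at', resp.headers.get('Location'))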
145
pillar/api/projects/routes.py
Normal file
@@ -0,0 +1,145 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
from bson import ObjectId
|
||||
from flask import Blueprint, request, current_app, make_response, url_for
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
|
||||
from pillar.api.utils import authorization, jsonify, str2id
|
||||
from pillar.api.utils import mongo
|
||||
from pillar.api.utils.authorization import require_login, check_permissions
|
||||
from pillar.auth import current_user
|
||||
|
||||
from . import utils
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
blueprint_api = Blueprint('projects_api', __name__)
|
||||
|
||||
|
||||
@blueprint_api.route('/create', methods=['POST'])
|
||||
@authorization.require_login(require_cap='subscriber')
|
||||
def create_project(overrides=None):
|
||||
"""Creates a new project."""
|
||||
|
||||
if request.mimetype == 'application/json':
|
||||
project_name = request.json['name']
|
||||
else:
|
||||
project_name = request.form['project_name']
|
||||
user_id = current_user.user_id
|
||||
|
||||
project = utils.create_new_project(project_name, user_id, overrides)
|
||||
|
||||
# Return the project in the response.
|
||||
loc = url_for('projects|item_lookup', _id=project['_id'])
|
||||
return jsonify(project, status=201, headers={'Location': loc})
|
||||
|
||||
|
||||
@blueprint_api.route('/users', methods=['GET', 'POST'])
|
||||
@authorization.require_login()
|
||||
def project_manage_users():
|
||||
"""Manage users of a project. In this initial implementation, we handle
|
||||
addition and removal of a user to the admin group of a project.
|
||||
No changes are done on the project itself.
|
||||
"""
|
||||
|
||||
from pillar.api.utils import str2id
|
||||
|
||||
projects_collection = current_app.data.driver.db['projects']
|
||||
users_collection = current_app.data.driver.db['users']
|
||||
|
||||
# TODO: check if user is admin of the project before anything
|
||||
if request.method == 'GET':
|
||||
project_id = request.args['project_id']
|
||||
project = projects_collection.find_one({'_id': ObjectId(project_id)})
|
||||
admin_group_id = project['permissions']['groups'][0]['group']
|
||||
|
||||
users = users_collection.find(
|
||||
{'groups': {'$in': [admin_group_id]}},
|
||||
{'username': 1, 'email': 1, 'full_name': 1})
|
||||
return jsonify({'_status': 'OK', '_items': list(users)})
|
||||
|
||||
# The request is not a form, since it comes from the API sdk
|
||||
data = json.loads(request.data)
|
||||
project_id = str2id(data['project_id'])
|
||||
target_user_id = str2id(data['user_id'])
|
||||
action = data['action']
|
||||
current_user_id = current_user.user_id
|
||||
|
||||
project = projects_collection.find_one({'_id': project_id})
|
||||
|
||||
# Check if the current_user is owner of the project, or removing themselves.
|
||||
if not authorization.user_has_role('admin'):
|
||||
remove_self = target_user_id == current_user_id and action == 'remove'
|
||||
if project['user'] != current_user_id and not remove_self:
|
||||
log.warning('User %s tries to %s %s to/from project %s, but is not allowed',
|
||||
current_user_id, action, target_user_id, project_id)
|
||||
utils.abort_with_error(403)
|
||||
|
||||
admin_group = utils.get_admin_group(project)
|
||||
|
||||
# Get the user and add the admin group to it
|
||||
if action == 'add':
|
||||
operation = '$addToSet'
|
||||
log.info('project_manage_users: Adding user %s to admin group of project %s',
|
||||
target_user_id, project_id)
|
||||
elif action == 'remove':
|
||||
log.info('project_manage_users: Removing user %s from admin group of project %s',
|
||||
target_user_id, project_id)
|
||||
operation = '$pull'
|
||||
else:
|
||||
log.warning('project_manage_users: Unsupported action %r called by user %s',
|
||||
action, current_user_id)
|
||||
raise wz_exceptions.UnprocessableEntity()
|
||||
|
||||
users_collection.update({'_id': target_user_id},
|
||||
{operation: {'groups': admin_group['_id']}})
|
||||
|
||||
user = users_collection.find_one({'_id': target_user_id},
|
||||
{'username': 1, 'email': 1,
|
||||
'full_name': 1})
|
||||
|
||||
if not user:
|
||||
return jsonify({'_status': 'ERROR'}), 404
|
||||
|
||||
user['_status'] = 'OK'
|
||||
return jsonify(user)
|
||||
|
||||
|
||||
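As a sketch, the POST body that project_manage_users above expects looks like the following; the IDs are hypothetical and the endpoint URL depends on the api_prefix passed to setup_app.

# Hypothetical payload for adding a user to a project's admin group.
import json
import requests

MANAGE_USERS_URL = 'https://example.com/api/p/users'  # assumed prefix
payload = {
    'project_id': '5a0c3b02c379cf0005e8e17d',  # hypothetical
    'user_id': '563aca02c379cf0005e8e17d',     # hypothetical
    'action': 'add',                           # or 'remove'
}
resp = requests.post(MANAGE_USERS_URL, data=json.dumps(payload),
                     headers={'Content-Type': 'application/json'},
                     auth=('auth-token', ''))
print(resp.json())  # the updated user doc, with '_status': 'OK'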
@blueprint_api.route('/<string:project_id>/quotas')
|
||||
@require_login()
|
||||
def project_quotas(project_id):
|
||||
"""Returns information about the project's limits."""
|
||||
|
||||
# Check that the user has GET permissions on the project itself.
|
||||
project = mongo.find_one_or_404('projects', project_id)
|
||||
check_permissions('projects', project, 'GET')
|
||||
|
||||
file_size_used = utils.project_total_file_size(project_id)
|
||||
|
||||
info = {
|
||||
'file_size_quota': None, # TODO: implement this later.
|
||||
'file_size_used': file_size_used,
|
||||
}
|
||||
|
||||
return jsonify(info)
|
||||
|
||||
|
||||
@blueprint_api.route('/<project_id>/<node_type>', methods=['OPTIONS', 'GET'])
|
||||
def get_allowed_methods(project_id=None, node_type=None):
|
||||
"""Returns allowed methods to create a node of a certain type.
|
||||
|
||||
Either project_id or parent_node_id must be given. If the latter is given,
the former is deduced from it.
|
||||
"""
|
||||
|
||||
project = mongo.find_one_or_404('projects', str2id(project_id))
|
||||
proj_methods = authorization.compute_allowed_methods('projects', project, node_type)
|
||||
|
||||
resp = make_response()
|
||||
resp.headers['Allowed'] = ', '.join(sorted(proj_methods))
|
||||
resp.status_code = 204
|
||||
|
||||
return resp
|
||||
|
||||
|
||||
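A short sketch of how a client might read the Allowed header set by the route above; the URL prefix, project ID and node type are assumptions.

# Ask which methods are allowed for creating 'asset' nodes in a project.
import requests

URL = 'https://example.com/api/p/5a0c3b02c379cf0005e8e17d/asset'  # hypothetical
resp = requests.get(URL, auth=('auth-token', ''))
allowed = resp.headers.get('Allowed', '')  # e.g. 'DELETE, GET, POST, PUT'
print('Allowed methods:', allowed)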
189
pillar/api/projects/utils.py
Normal file
@@ -0,0 +1,189 @@
|
||||
import logging
|
||||
import typing
|
||||
|
||||
from bson import ObjectId
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
from werkzeug.exceptions import abort
|
||||
|
||||
from pillar import current_app
|
||||
from pillar.auth import current_user
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def project_total_file_size(project_id):
|
||||
"""Returns the total number of bytes used by files of this project."""
|
||||
|
||||
files = current_app.data.driver.db['files']
|
||||
file_size_used = files.aggregate([
|
||||
{'$match': {'project': ObjectId(project_id)}},
|
||||
{'$project': {'length_aggregate_in_bytes': 1}},
|
||||
{'$group': {'_id': None,
|
||||
'all_files': {'$sum': '$length_aggregate_in_bytes'}}}
|
||||
])
|
||||
|
||||
# The aggregate function returns a cursor, not a document.
|
||||
try:
|
||||
return next(file_size_used)['all_files']
|
||||
except StopIteration:
|
||||
# No files used at all.
|
||||
return 0
|
||||
|
||||
|
||||
def get_admin_group_id(project_id: ObjectId) -> ObjectId:
|
||||
assert isinstance(project_id, ObjectId)
|
||||
|
||||
project = current_app.db('projects').find_one({'_id': project_id},
|
||||
{'permissions': 1})
|
||||
if not project:
|
||||
raise ValueError(f'Project {project_id} does not exist.')
|
||||
|
||||
# TODO: search through all groups to find the one with the project ID as its name,
|
||||
# or identify "the admin group" in a different way (for example the group with DELETE rights).
|
||||
try:
|
||||
admin_group_id = ObjectId(project['permissions']['groups'][0]['group'])
|
||||
except KeyError:
|
||||
raise ValueError(f'Project {project_id} does not seem to have an admin group')
|
||||
|
||||
return admin_group_id
|
||||
|
||||
|
||||
def get_admin_group(project: dict) -> dict:
|
||||
"""Returns the admin group for the project."""
|
||||
|
||||
groups_collection = current_app.data.driver.db['groups']
|
||||
|
||||
# TODO: see get_admin_group_id
|
||||
admin_group_id = ObjectId(project['permissions']['groups'][0]['group'])
|
||||
group = groups_collection.find_one({'_id': admin_group_id})
|
||||
|
||||
if group is None:
|
||||
raise ValueError('Unable to handle project without admin group.')
|
||||
|
||||
if group['name'] != str(project['_id']):
|
||||
log.error('User %s tries to get admin group for project %s, '
|
||||
'but that does not have the project ID as group name: %s',
|
||||
current_user.user_id, project.get('_id', '-unknown-'), group)
|
||||
return abort_with_error(403)
|
||||
|
||||
return group
|
||||
|
||||
|
||||
def user_rights_in_project(project_id: ObjectId) -> frozenset:
|
||||
"""Returns the set of HTTP methods allowed on the given project for the current user."""
|
||||
|
||||
from pillar.api.utils import authorization
|
||||
|
||||
assert isinstance(project_id, ObjectId)
|
||||
|
||||
proj_coll = current_app.db().projects
|
||||
proj = proj_coll.find_one({'_id': project_id})
|
||||
|
||||
return frozenset(authorization.compute_allowed_methods('projects', proj))
|
||||
|
||||
|
||||
def abort_with_error(status):
|
||||
"""Aborts with the given status, or 500 if the status doesn't indicate an error.
|
||||
|
||||
If the status is < 400, status 500 is used instead.
|
||||
"""
|
||||
|
||||
abort(status if status // 100 >= 4 else 500)
|
||||
raise wz_exceptions.InternalServerError('abort() should have aborted!')
|
||||
|
||||
|
||||
def create_new_project(project_name, user_id, overrides):
|
||||
"""Creates a new project owned by the given user."""
|
||||
|
||||
log.info('Creating new project "%s" for user %s', project_name, user_id)
|
||||
|
||||
# Create the project itself, the rest will be done by the after-insert hook.
|
||||
project = {'description': '',
|
||||
'name': project_name,
|
||||
'node_types': [],
|
||||
'status': 'published',
|
||||
'user': user_id,
|
||||
'is_private': True,
|
||||
'permissions': {},
|
||||
'url': '',
|
||||
'summary': '',
|
||||
'category': 'assets', # TODO: allow the user to choose this.
|
||||
}
|
||||
if overrides is not None:
|
||||
project.update(overrides)
|
||||
|
||||
result, _, _, status = current_app.post_internal('projects', project)
|
||||
if status != 201:
|
||||
log.error('Unable to create project "%s": %s', project_name, result)
|
||||
return abort_with_error(status)
|
||||
project.update(result)
|
||||
|
||||
# Now re-fetch the project, as both the initial document and the returned
|
||||
# result do not contain the same etag as the database. This also updates
|
||||
# other fields set by hooks.
|
||||
document = current_app.data.driver.db['projects'].find_one(project['_id'])
|
||||
project.update(document)
|
||||
|
||||
log.info('Created project %s for user %s', project['_id'], user_id)
|
||||
|
||||
return project
|
||||
|
||||
|
||||
def get_node_type(project, node_type_name):
|
||||
"""Returns the named node type, or None if it doesn't exist."""
|
||||
|
||||
return next((nt for nt in project['node_types']
|
||||
if nt['name'] == node_type_name), None)
|
||||
|
||||
|
||||
def node_type_dict(project: dict) -> typing.Dict[str, dict]:
|
||||
"""Return the node types of the project as dictionary.
|
||||
|
||||
The returned dictionary will be keyed by the node type name.
|
||||
"""
|
||||
return {nt['name']: nt for nt in project['node_types']}
|
||||
|
||||
|
||||
def project_id(project_url: str) -> ObjectId:
|
||||
"""Returns the object ID, or raises a ValueError when not found."""
|
||||
|
||||
proj_coll = current_app.db('projects')
|
||||
proj = proj_coll.find_one({'url': project_url}, projection={'_id': True})
|
||||
|
||||
if not proj:
|
||||
raise ValueError(f'project with url={project_url!r} not found')
|
||||
return proj['_id']
|
||||
|
||||
|
||||
def get_project(project_url: str) -> dict:
|
||||
"""Find a project in the database, raises ValueError if not found.
|
||||
|
||||
:param project_url: URL of the project
|
||||
"""
|
||||
|
||||
proj_coll = current_app.db('projects')
|
||||
project = proj_coll.find_one({'url': project_url, '_deleted': {'$ne': True}})
|
||||
if not project:
|
||||
raise ValueError(f'project url={project_url!r} does not exist')
|
||||
|
||||
return project
|
||||
|
||||
|
||||
def put_project(project: dict):
|
||||
"""Puts a project into the database via Eve.
|
||||
|
||||
:param project: the project data, should be the entire project document
|
||||
:raises ValueError: if the project cannot be saved.
|
||||
"""
|
||||
|
||||
from pillar.api.utils import remove_private_keys
|
||||
from pillarsdk.utils import remove_none_attributes
|
||||
|
||||
pid = ObjectId(project['_id'])
|
||||
proj_no_priv = remove_private_keys(project)
|
||||
proj_no_none = remove_none_attributes(proj_no_priv)
|
||||
result, _, _, status_code = current_app.put_internal('projects', proj_no_none, _id=pid)
|
||||
|
||||
if status_code != 200:
|
||||
raise ValueError(f"Can't update project {pid}, "
|
||||
f"status {status_code} with issues: {result}")
|
||||
9
pillar/api/search/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
||||
from .routes import blueprint_search
|
||||
from . import queries
|
||||
|
||||
|
||||
def setup_app(app, url_prefix: str = None):
|
||||
app.register_api_blueprint(
|
||||
blueprint_search, url_prefix=url_prefix)
|
||||
|
||||
queries.setup_app(app)
|
||||
40
pillar/api/search/algolia_indexing.py
Normal file
@@ -0,0 +1,40 @@
|
||||
import logging
|
||||
|
||||
from algoliasearch.helpers import AlgoliaException
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def push_updated_user(user_to_index: dict):
|
||||
"""Push an update to the index when a user document is updated."""
|
||||
|
||||
from pillar.api.utils.algolia import index_user_save
|
||||
|
||||
try:
|
||||
index_user_save(user_to_index)
|
||||
except AlgoliaException as ex:
|
||||
log.warning(
|
||||
'Unable to push user info to Algolia for user "%s", id=%s; %s', # noqa
|
||||
user_to_index.get('username'),
|
||||
user_to_index.get('objectID'), ex)
|
||||
|
||||
|
||||
def index_node_save(node_to_index: dict):
|
||||
"""Save parsed node document to the index."""
|
||||
from pillar.api.utils import algolia
|
||||
|
||||
try:
|
||||
algolia.index_node_save(node_to_index)
|
||||
except AlgoliaException as ex:
|
||||
log.warning(
|
||||
'Unable to push node info to Algolia for node %s; %s', node_to_index, ex) # noqa
|
||||
|
||||
|
||||
def index_node_delete(delete_id: str):
|
||||
"""Delete node using id."""
|
||||
from pillar.api.utils import algolia
|
||||
|
||||
try:
|
||||
algolia.index_node_delete(delete_id)
|
||||
except AlgoliaException as ex:
|
||||
log.warning('Unable to delete node info from Algolia for node %s; %s', delete_id, ex)  # noqa
|
||||
189
pillar/api/search/documents.py
Normal file
@@ -0,0 +1,189 @@
|
||||
"""
|
||||
Define the Elasticsearch document mapping.

The Elasticsearch support consists of two parts:

- Part 1: Defining the documents, i.e. which fields will be indexed.
- Part 2: Building the Elasticsearch JSON queries.

Both of these parts are equally important to have a search API that returns
relevant results.
|
||||
"""
|
||||
import logging
|
||||
import typing
|
||||
|
||||
import elasticsearch_dsl as es
|
||||
from elasticsearch_dsl import analysis
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
edge_ngram_filter = analysis.token_filter(
|
||||
'edge_ngram_filter',
|
||||
type='edge_ngram',
|
||||
min_gram=1,
|
||||
max_gram=15
|
||||
)
|
||||
|
||||
autocomplete = es.analyzer(
|
||||
'autocomplete',
|
||||
tokenizer='standard',
|
||||
filter=['standard', 'asciifolding', 'lowercase', edge_ngram_filter]
|
||||
)
|
||||
|
||||
|
||||
class User(es.DocType):
|
||||
"""Elastic document describing user."""
|
||||
|
||||
objectID = es.Keyword()
|
||||
|
||||
username = es.Text(fielddata=True, analyzer=autocomplete)
|
||||
username_exact = es.Keyword()
|
||||
full_name = es.Text(fielddata=True, analyzer=autocomplete)
|
||||
|
||||
roles = es.Keyword(multi=True)
|
||||
groups = es.Keyword(multi=True)
|
||||
|
||||
email = es.Text(fielddata=True, analyzer=autocomplete)
|
||||
email_exact = es.Keyword()
|
||||
|
||||
class Meta:
|
||||
index = 'users'
|
||||
|
||||
|
||||
class Node(es.DocType):
|
||||
"""
|
||||
Elastic document describing user
|
||||
"""
|
||||
|
||||
node_type = es.Keyword()
|
||||
|
||||
objectID = es.Keyword()
|
||||
|
||||
name = es.Text(
|
||||
fielddata=True,
|
||||
analyzer=autocomplete
|
||||
)
|
||||
|
||||
user = es.Object(
|
||||
fields={
|
||||
'id': es.Keyword(),
|
||||
'name': es.Text(
|
||||
fielddata=True,
|
||||
analyzer=autocomplete)
|
||||
}
|
||||
)
|
||||
|
||||
description = es.Text()
|
||||
|
||||
is_free = es.Boolean()
|
||||
|
||||
project = es.Object(
|
||||
fields={
|
||||
'id': es.Keyword(),
|
||||
'name': es.Keyword(),
|
||||
}
|
||||
)
|
||||
|
||||
media = es.Keyword()
|
||||
|
||||
picture = es.Keyword()
|
||||
|
||||
tags = es.Keyword(multi=True)
|
||||
license_notes = es.Text()
|
||||
|
||||
created_at = es.Date()
|
||||
updated_at = es.Date()
|
||||
|
||||
class Meta:
|
||||
index = 'nodes'
|
||||
|
||||
|
||||
def create_doc_from_user_data(user_to_index: dict) -> typing.Optional[User]:
|
||||
"""
|
||||
Create the document to store in a search engine for this user.
|
||||
|
||||
See pillar.celery.search_index_task
|
||||
|
||||
:returns: an ElasticSearch document or None if user_to_index has no data.
|
||||
"""
|
||||
|
||||
if not user_to_index:
|
||||
return None
|
||||
|
||||
doc_id = str(user_to_index.get('objectID', ''))
|
||||
|
||||
if not doc_id:
|
||||
log.error('USER ID is missing %s', user_to_index)
|
||||
raise KeyError('Trying to create document without id')
|
||||
|
||||
doc = User(_id=doc_id)
|
||||
doc.objectID = str(user_to_index['objectID'])
|
||||
doc.username = user_to_index['username']
|
||||
doc.username_exact = user_to_index['username']
|
||||
doc.full_name = user_to_index['full_name']
|
||||
doc.roles = list(map(str, user_to_index['roles']))
|
||||
doc.groups = list(map(str, user_to_index['groups']))
|
||||
doc.email = user_to_index['email']
|
||||
doc.email_exact = user_to_index['email']
|
||||
|
||||
return doc
|
||||
|
||||
|
||||
def create_doc_from_node_data(node_to_index: dict) -> typing.Optional[Node]:
|
||||
"""
|
||||
Create the document to store in a search engine for this node.
|
||||
|
||||
See pillar.celery.search_index_task
|
||||
|
||||
:returns: an ElasticSearch document or None if node_to_index has no data.
|
||||
"""
|
||||
|
||||
if not node_to_index:
|
||||
return None
|
||||
|
||||
# node stuff
|
||||
doc_id = str(node_to_index.get('objectID', ''))
|
||||
|
||||
if not doc_id:
|
||||
log.error('ID missing %s', node_to_index)
|
||||
return None
|
||||
|
||||
doc = Node(_id=doc_id)
|
||||
|
||||
doc.objectID = str(node_to_index['objectID'])
|
||||
doc.node_type = node_to_index['node_type']
|
||||
doc.name = node_to_index['name']
|
||||
doc.user.id = str(node_to_index['user']['_id'])
|
||||
doc.user.name = node_to_index['user']['full_name']
|
||||
doc.project.id = str(node_to_index['project']['_id'])
|
||||
doc.project.name = node_to_index['project']['name']
|
||||
|
||||
if node_to_index['node_type'] == 'asset':
|
||||
doc.media = node_to_index['media']
|
||||
|
||||
doc.picture = node_to_index.get('picture')
|
||||
|
||||
doc.tags = node_to_index.get('tags')
|
||||
doc.license_notes = node_to_index.get('license_notes')
|
||||
|
||||
doc.created_at = node_to_index['created']
|
||||
doc.updated_at = node_to_index['updated']
|
||||
|
||||
return doc
|
||||
|
||||
|
||||
def create_doc_from_user(user_to_index: dict) -> User:
|
||||
"""
|
||||
Create a user document from user
|
||||
"""
|
||||
|
||||
doc_id = str(user_to_index['objectID'])
|
||||
doc = User(_id=doc_id)
|
||||
doc.objectID = str(user_to_index['objectID'])
|
||||
doc.full_name = user_to_index['full_name']
|
||||
doc.username = user_to_index['username']
|
||||
doc.roles = user_to_index['roles']
|
||||
doc.groups = user_to_index['groups']
|
||||
doc.email = user_to_index['email']
|
||||
|
||||
return doc
|
||||
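To illustrate the mapping above, this is a rough sketch of turning a minimal, made-up user dict into an indexable document; the field names follow create_doc_from_user_data, and the actual index name would come from the ELASTIC_INDICES config.

# Hypothetical minimal input for create_doc_from_user_data().
minimal_user = {
    'objectID': '563aca02c379cf0005e8e17d',
    'username': 'jdoe',
    'full_name': 'Jane Doe',
    'roles': ['subscriber'],
    'groups': [],
    'email': 'jdoe@example.com',
}
doc = create_doc_from_user_data(minimal_user)
# doc.save(index='users')  # real index name comes from ELASTIC_INDICES['USER']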
65
pillar/api/search/elastic_indexing.py
Normal file
@@ -0,0 +1,65 @@
|
||||
import logging
|
||||
|
||||
from elasticsearch_dsl.connections import connections
|
||||
from elasticsearch.exceptions import NotFoundError
|
||||
|
||||
from pillar import current_app
|
||||
from . import documents
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
elk_hosts = current_app.config['ELASTIC_SEARCH_HOSTS']
|
||||
|
||||
connections.create_connection(
|
||||
hosts=elk_hosts,
|
||||
sniff_on_start=False,
|
||||
timeout=20)
|
||||
|
||||
|
||||
def push_updated_user(user_to_index: dict):
|
||||
"""
|
||||
Push an update to the Elastic index when a user item is updated.
|
||||
"""
|
||||
if not user_to_index:
|
||||
return
|
||||
|
||||
doc = documents.create_doc_from_user_data(user_to_index)
|
||||
|
||||
if not doc:
|
||||
return
|
||||
|
||||
index = current_app.config['ELASTIC_INDICES']['USER']
|
||||
log.debug('Index %r update user doc %s in ElasticSearch.', index, doc._id)
|
||||
doc.save(index=index)
|
||||
|
||||
|
||||
def index_node_save(node_to_index: dict):
|
||||
"""
|
||||
Push an update to the Elastic index when a node item is saved.
|
||||
"""
|
||||
if not node_to_index:
|
||||
return
|
||||
|
||||
doc = documents.create_doc_from_node_data(node_to_index)
|
||||
|
||||
if not doc:
|
||||
return
|
||||
|
||||
index = current_app.config['ELASTIC_INDICES']['NODE']
|
||||
log.debug('Index %r update node doc %s in ElasticSearch.', index, doc._id)
|
||||
doc.save(index=index)
|
||||
|
||||
|
||||
def index_node_delete(delete_id: str):
|
||||
"""
|
||||
Delete node document from Elastic index useing a node id
|
||||
"""
|
||||
index = current_app.config['ELASTIC_INDICES']['NODE']
|
||||
log.debug('Index %r node doc delete %s', index, delete_id)
|
||||
|
||||
try:
|
||||
doc: documents.Node = documents.Node.get(id=delete_id)
|
||||
doc.delete(index=index)
|
||||
except NotFoundError:
|
||||
# seems to be gone already..
|
||||
pass
|
||||
64
pillar/api/search/index.py
Normal file
@@ -0,0 +1,64 @@
|
||||
import logging
|
||||
from typing import List
|
||||
|
||||
from elasticsearch.exceptions import NotFoundError
|
||||
from elasticsearch_dsl.connections import connections
|
||||
import elasticsearch_dsl as es
|
||||
|
||||
from pillar import current_app
|
||||
|
||||
from . import documents
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ResetIndexTask(object):
|
||||
""" Clear and build index / mapping """
|
||||
|
||||
# Key into the ELASTIC_INDICES dict in the app config.
|
||||
index_key: str = ''
|
||||
|
||||
# List of elastic document types
|
||||
doc_types: List = []
|
||||
name = 'remove index'
|
||||
|
||||
def __init__(self):
|
||||
if not self.index_key:
|
||||
raise ValueError("No index specified")
|
||||
|
||||
if not self.doc_types:
|
||||
raise ValueError("No doc_types specified")
|
||||
|
||||
connections.create_connection(
|
||||
hosts=current_app.config['ELASTIC_SEARCH_HOSTS'],
|
||||
# sniff_on_start=True,
|
||||
retry_on_timeout=True,
|
||||
)
|
||||
|
||||
def execute(self):
|
||||
index = current_app.config['ELASTIC_INDICES'][self.index_key]
|
||||
idx = es.Index(index)
|
||||
|
||||
try:
|
||||
idx.delete(ignore=404)
|
||||
except NotFoundError:
|
||||
log.warning("Could not delete index '%s', ignoring", index)
|
||||
else:
|
||||
log.info("Deleted index %s", index)
|
||||
|
||||
# create doc types
|
||||
for dt in self.doc_types:
|
||||
idx.doc_type(dt)
|
||||
|
||||
# create index
|
||||
idx.create()
|
||||
|
||||
|
||||
class ResetNodeIndex(ResetIndexTask):
|
||||
index_key = 'NODE'
|
||||
doc_types = [documents.Node]
|
||||
|
||||
|
||||
class ResetUserIndex(ResetIndexTask):
|
||||
index_key = 'USER'
|
||||
doc_types = [documents.User]
|
||||
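A sketch of how these reset tasks could be run, for example from a Celery task or a manage-script (how they are actually wired up is outside this diff); a Flask application context of the Pillar app is assumed.

# Rebuild the node and user indices from scratch.
for task_cls in (ResetNodeIndex, ResetUserIndex):
    task = task_cls()   # connects to ELASTIC_SEARCH_HOSTS
    task.execute()      # deletes the index, re-registers doc types, recreates it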
183
pillar/api/search/queries.py
Normal file
@@ -0,0 +1,183 @@
|
||||
import json
|
||||
import logging
|
||||
import typing
|
||||
|
||||
from elasticsearch import Elasticsearch
|
||||
from elasticsearch_dsl import Search, Q
|
||||
from elasticsearch_dsl.query import Query
|
||||
|
||||
from pillar import current_app
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
NODE_AGG_TERMS = ['node_type', 'media', 'tags', 'is_free']
|
||||
USER_AGG_TERMS = ['roles', ]
|
||||
ITEMS_PER_PAGE = 10
|
||||
|
||||
# Will be set in setup_app()
|
||||
client: Elasticsearch = None
|
||||
|
||||
|
||||
def add_aggs_to_search(search, agg_terms):
|
||||
"""
|
||||
Add facets / aggregations to the search result
|
||||
"""
|
||||
|
||||
for term in agg_terms:
|
||||
search.aggs.bucket(term, 'terms', field=term)
|
||||
|
||||
|
||||
def make_must(must: list, terms: dict) -> list:
|
||||
""" Given term parameters append must queries to the must list """
|
||||
|
||||
for field, value in terms.items():
|
||||
if value:
|
||||
must.append({'match': {field: value}})
|
||||
|
||||
return must
|
||||
|
||||
|
||||
def nested_bool(must: list, should: list, terms: dict, *, index_alias: str) -> Search:
|
||||
"""
|
||||
Create a nested bool, where the aggregation selection is a must.
|
||||
|
||||
:param index_alias: 'USER' or 'NODE', see ELASTIC_INDICES config.
|
||||
"""
|
||||
must = make_must(must, terms)
|
||||
bool_query = Q('bool', should=should)
|
||||
must.append(bool_query)
|
||||
bool_query = Q('bool', must=must)
|
||||
|
||||
index = current_app.config['ELASTIC_INDICES'][index_alias]
|
||||
search = Search(using=client, index=index)
|
||||
search.query = bool_query
|
||||
|
||||
return search
|
||||
|
||||
|
||||
def do_node_search(query: str, terms: dict, page: int, project_id: str='') -> dict:
|
||||
"""
|
||||
Given user query input and term refinements
|
||||
search for public published nodes
|
||||
"""
|
||||
|
||||
should = [
|
||||
Q('match', name=query),
|
||||
|
||||
{"match": {"project.name": query}},
|
||||
{"match": {"user.name": query}},
|
||||
|
||||
Q('match', description=query),
|
||||
Q('term', media=query),
|
||||
Q('term', tags=query),
|
||||
]
|
||||
|
||||
must = []
|
||||
if project_id:
|
||||
must.append({'term': {'project.id': project_id}})
|
||||
|
||||
if not query:
|
||||
should = []
|
||||
|
||||
search = nested_bool(must, should, terms, index_alias='NODE')
|
||||
if not query:
|
||||
search = search.sort('-created_at')
|
||||
add_aggs_to_search(search, NODE_AGG_TERMS)
|
||||
search = paginate(search, page)
|
||||
|
||||
if log.isEnabledFor(logging.DEBUG):
|
||||
log.debug(json.dumps(search.to_dict(), indent=4))
|
||||
|
||||
response = search.execute()
|
||||
|
||||
if log.isEnabledFor(logging.DEBUG):
|
||||
log.debug(json.dumps(response.to_dict(), indent=4))
|
||||
|
||||
return response.to_dict()
|
||||
|
||||
|
||||
def do_user_search(query: str, terms: dict, page: int) -> dict:
|
||||
""" return user objects represented in elasicsearch result dict"""
|
||||
|
||||
must, should = _common_user_search(query)
|
||||
search = nested_bool(must, should, terms, index_alias='USER')
|
||||
add_aggs_to_search(search, USER_AGG_TERMS)
|
||||
search = paginate(search, page)
|
||||
|
||||
if log.isEnabledFor(logging.DEBUG):
|
||||
log.debug(json.dumps(search.to_dict(), indent=4))
|
||||
|
||||
response = search.execute()
|
||||
|
||||
if log.isEnabledFor(logging.DEBUG):
|
||||
log.debug(json.dumps(response.to_dict(), indent=4))
|
||||
|
||||
return response.to_dict()
|
||||
|
||||
|
||||
def _common_user_search(query: str) -> (typing.List[Query], typing.List[Query]):
|
||||
"""Construct (must,shoud) for regular + admin user search."""
|
||||
if not query:
|
||||
return [], []
|
||||
|
||||
should = []
|
||||
|
||||
if '@' in query:
|
||||
should.append({'term': {'email_exact': {'value': query, 'boost': 50}}})
|
||||
email_boost = 25
|
||||
else:
|
||||
email_boost = 1
|
||||
|
||||
should.extend([
|
||||
Q('match', username=query),
|
||||
Q('match', full_name=query),
|
||||
{'match': {'email': {'query': query, 'boost': email_boost}}},
|
||||
{'term': {'username_exact': {'value': query, 'boost': 50}}},
|
||||
])
|
||||
|
||||
return [], should
|
||||
|
||||
|
||||
def do_user_search_admin(query: str, terms: dict, page: int) -> dict:
|
||||
"""
|
||||
return users seach result dict object
|
||||
search all user fields and provide aggregation information
|
||||
"""
|
||||
|
||||
must, should = _common_user_search(query)
|
||||
|
||||
if query:
|
||||
# We most likely got an ID; try to match it directly.
|
||||
if len(query) == len('563aca02c379cf0005e8e17d'):
|
||||
should.append({'term': {
|
||||
'objectID': {
|
||||
'value': query, # the thing we're looking for
|
||||
'boost': 100, # how much more it counts for the score
|
||||
}
|
||||
}})
|
||||
|
||||
search = nested_bool(must, should, terms, index_alias='USER')
|
||||
add_aggs_to_search(search, USER_AGG_TERMS)
|
||||
search = paginate(search, page)
|
||||
|
||||
if log.isEnabledFor(logging.DEBUG):
|
||||
log.debug(json.dumps(search.to_dict(), indent=4))
|
||||
|
||||
response = search.execute()
|
||||
|
||||
if log.isEnabledFor(logging.DEBUG):
|
||||
log.debug(json.dumps(response.to_dict(), indent=4))
|
||||
|
||||
return response.to_dict()
|
||||
|
||||
|
||||
def paginate(search: Search, page_idx: int) -> Search:
|
||||
return search[page_idx * ITEMS_PER_PAGE:(page_idx + 1) * ITEMS_PER_PAGE]
|
||||
|
||||
|
||||
def setup_app(app):
|
||||
global client
|
||||
|
||||
hosts = app.config['ELASTIC_SEARCH_HOSTS']
|
||||
log.getChild('setup_app').info('Creating ElasticSearch client for %s', hosts)
|
||||
client = Elasticsearch(hosts)
|
||||
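As a usage sketch of the query helpers above (values are made up), a node search combines the free-text query, optional term filters and a page index; setup_app(app) must have created the Elasticsearch client first.

# Hypothetical call into the node search.
from pillar.api.search.queries import do_node_search

terms = {'node_type': 'asset', 'media': 'video', 'tags': '', 'is_free': ''}
result = do_node_search('spring', terms, page=0,
                        project_id='5a0c3b02c379cf0005e8e17d')  # hypothetical project
for hit in result['hits']['hits']:
    print(hit['_source']['name'])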
110
pillar/api/search/routes.py
Normal file
@@ -0,0 +1,110 @@
|
||||
import logging
|
||||
|
||||
from flask import Blueprint, request
|
||||
import elasticsearch.exceptions as elk_ex
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
from pillar.api.utils import authorization, jsonify
|
||||
|
||||
from . import queries
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
blueprint_search = Blueprint('elksearch', __name__)
|
||||
|
||||
TERMS = [
|
||||
'node_type', 'media',
|
||||
'tags', 'is_free', 'projectname',
|
||||
'roles',
|
||||
]
|
||||
|
||||
|
||||
def _term_filters() -> dict:
|
||||
"""
|
||||
Check if frontent wants to filter stuff
|
||||
on specific fields AKA facets
|
||||
|
||||
return mapping with term field name
|
||||
and provided user term value
|
||||
"""
|
||||
return {term: request.args.get(term, '') for term in TERMS}
|
||||
|
||||
|
||||
def _page_index() -> int:
|
||||
"""Return the page index from the query string."""
|
||||
try:
|
||||
page_idx = int(request.args.get('page') or '0')
|
||||
except (TypeError, ValueError):
|
||||
log.info('invalid page number %r received', request.args.get('page'))
|
||||
raise wz_exceptions.BadRequest()
|
||||
return page_idx
|
||||
|
||||
|
||||
@blueprint_search.route('/')
|
||||
def search_nodes():
|
||||
searchword = request.args.get('q', '')
|
||||
project_id = request.args.get('project', '')
|
||||
terms = _term_filters()
|
||||
page_idx = _page_index()
|
||||
|
||||
result = queries.do_node_search(searchword, terms, page_idx, project_id)
|
||||
return jsonify(result)
|
||||
|
||||
|
||||
@blueprint_search.route('/user')
|
||||
def search_user():
|
||||
searchword = request.args.get('q', '')
|
||||
terms = _term_filters()
|
||||
page_idx = _page_index()
|
||||
# result is the raw Elasticsearch output;
# we need to filter fields in the case of user objects.
|
||||
|
||||
try:
|
||||
result = queries.do_user_search(searchword, terms, page_idx)
|
||||
except elk_ex.ElasticsearchException as ex:
|
||||
resp = jsonify({'_message': str(ex)})
|
||||
resp.status_code = 500
|
||||
return resp
|
||||
|
||||
# Filter out sensitive fields;
# we only need objectID, full_name and username.
|
||||
hits = result.get('hits', {})
|
||||
|
||||
new_hits = []
|
||||
|
||||
for hit in hits.get('hits'):
|
||||
source = hit['_source']
|
||||
single_hit = {
|
||||
'_source': {
|
||||
'objectID': source.get('objectID'),
|
||||
'username': source.get('username'),
|
||||
'full_name': source.get('full_name'),
|
||||
}
|
||||
}
|
||||
|
||||
new_hits.append(single_hit)
|
||||
|
||||
# replace search result with safe subset
|
||||
result['hits']['hits'] = new_hits
|
||||
|
||||
return jsonify(result)
|
||||
|
||||
|
||||
@blueprint_search.route('/admin/user')
|
||||
@authorization.require_login(require_cap='admin')
|
||||
def search_user_admin():
|
||||
"""
|
||||
User search over all fields.
|
||||
"""
|
||||
|
||||
searchword = request.args.get('q', '')
|
||||
terms = _term_filters()
|
||||
page_idx = _page_index()
|
||||
|
||||
try:
|
||||
result = queries.do_user_search_admin(searchword, terms, page_idx)
|
||||
except elk_ex.ElasticsearchException as ex:
|
||||
resp = jsonify({'_message': str(ex)})
|
||||
resp.status_code = 500
|
||||
return resp
|
||||
|
||||
return jsonify(result)
|
||||
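Finally, a hedged example of the HTTP side of these search routes; the URL prefix below is an assumption, since the actual url_prefix is passed in by the application in setup_app.

# Hypothetical search requests against the routes defined above.
import requests

BASE = 'https://example.com/api/newsearch'  # assumed url_prefix
# Node search with a facet filter and pagination:
r = requests.get(BASE + '/', params={'q': 'spring', 'node_type': 'asset', 'page': 0})
print(r.json()['hits']['hits'])
# User search (only objectID, username and full_name are returned per hit):
r = requests.get(BASE + '/user', params={'q': 'jane'})
print(r.json()['hits']['hits'])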
@@ -1,24 +1,31 @@
|
||||
"""Service accounts."""
|
||||
|
||||
import logging
|
||||
import typing
|
||||
|
||||
import blinker
|
||||
from flask import Blueprint, current_app, g, request
|
||||
import bson
|
||||
|
||||
from flask import Blueprint, current_app, request
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
|
||||
from application.utils import authorization, authentication, str2id, mongo, jsonify
|
||||
from application.modules import local_auth
|
||||
from pillar.api import local_auth
|
||||
from pillar.api.utils import authorization, authentication
|
||||
|
||||
blueprint = Blueprint('service', __name__)
|
||||
log = logging.getLogger(__name__)
|
||||
signal_user_changed_role = blinker.NamedSignal('badger:user_changed_role')
|
||||
|
||||
ROLES_WITH_GROUPS = {u'admin', u'demo', u'subscriber'}
|
||||
ROLES_WITH_GROUPS = {'admin', 'demo', 'subscriber'}
|
||||
|
||||
# Map of role name to group ID, for the above groups.
|
||||
role_to_group_id = {}
|
||||
|
||||
|
||||
class ServiceAccountCreationError(Exception):
|
||||
"""Raised when a service account cannot be created."""
|
||||
|
||||
|
||||
@blueprint.before_app_first_request
|
||||
def fetch_role_to_group_id_map():
|
||||
"""Fills the _role_to_group_id mapping upon application startup."""
|
||||
@@ -38,7 +45,7 @@ def fetch_role_to_group_id_map():
|
||||
|
||||
|
||||
@blueprint.route('/badger', methods=['POST'])
|
||||
@authorization.require_login(require_roles={u'service', u'badger'}, require_all=True)
|
||||
@authorization.require_login(require_roles={'service', 'badger'}, require_all=True)
|
||||
def badger():
|
||||
if request.mimetype != 'application/json':
|
||||
log.debug('Received %s instead of application/json', request.mimetype)
|
||||
@@ -70,42 +77,76 @@ def badger():
|
||||
action, user_email, role, action, role)
|
||||
return 'Role not allowed', 403
|
||||
|
||||
return do_badger(action, user_email, role)
|
||||
return do_badger(action, role=role, user_email=user_email)
|
||||
|
||||
|
||||
def do_badger(action, user_email, role):
|
||||
"""Performs a badger action, returning a HTTP response."""
|
||||
def do_badger(action: str, *,
|
||||
role: str=None, roles: typing.Iterable[str]=None,
|
||||
user_email: str = '', user_id: bson.ObjectId = None):
|
||||
"""Performs a badger action, returning a HTTP response.
|
||||
|
||||
Either role or roles must be given.
|
||||
Either user_email or user_id must be given.
|
||||
"""
|
||||
|
||||
if action not in {'grant', 'revoke'}:
|
||||
log.error('do_badger(%r, %r, %r, %r): action %r not supported.',
|
||||
action, role, user_email, user_id, action)
|
||||
raise wz_exceptions.BadRequest('Action %r not supported' % action)
|
||||
|
||||
if not user_email:
|
||||
if not user_email and user_id is None:
|
||||
log.error('do_badger(%r, %r, %r, %r): neither email nor user_id given.',
|
||||
action, role, user_email, user_id)
|
||||
raise wz_exceptions.BadRequest('User email not given')
|
||||
|
||||
if not role:
|
||||
raise wz_exceptions.BadRequest('Role not given')
|
||||
if bool(role) == bool(roles):
|
||||
log.error('do_badger(%r, role=%r, roles=%r, %r, %r): '
|
||||
'either "role" or "roles" must be given.',
|
||||
action, role, roles, user_email, user_id)
|
||||
raise wz_exceptions.BadRequest('Invalid role(s) given')
|
||||
|
||||
# If only a single role was given, handle it as a set of one role.
|
||||
if not roles:
|
||||
roles = {role}
|
||||
del role
|
||||
|
||||
users_coll = current_app.data.driver.db['users']
|
||||
|
||||
# Fetch the user
|
||||
db_user = users_coll.find_one({'email': user_email}, projection={'roles': 1, 'groups': 1})
|
||||
if user_email:
|
||||
query = {'email': user_email}
|
||||
else:
|
||||
query = user_id
|
||||
db_user = users_coll.find_one(query, projection={'roles': 1, 'groups': 1})
|
||||
if db_user is None:
|
||||
log.warning('badger(%s, %s, %s): user not found', action, user_email, role)
|
||||
log.warning('badger(%s, roles=%s, user_email=%s, user_id=%s): user not found',
|
||||
action, roles, user_email, user_id)
|
||||
return 'User not found', 404
|
||||
|
||||
# Apply the action
|
||||
roles = set(db_user.get('roles') or [])
|
||||
user_roles = set(db_user.get('roles') or [])
|
||||
if action == 'grant':
|
||||
roles.add(role)
|
||||
user_roles |= roles
|
||||
else:
|
||||
roles.discard(role)
|
||||
user_roles -= roles
|
||||
|
||||
groups = manage_user_group_membership(db_user, role, action)
|
||||
groups = None
|
||||
for role in roles:
|
||||
groups = manage_user_group_membership(db_user, role, action)
|
||||
|
||||
updates = {'roles': list(roles)}
|
||||
if groups is None:
|
||||
# No change for this role
|
||||
continue
|
||||
|
||||
# Also update db_user for the next iteration.
|
||||
db_user['groups'] = groups
|
||||
|
||||
updates = {'roles': list(user_roles)}
|
||||
if groups is not None:
|
||||
updates['groups'] = list(groups)
|
||||
|
||||
log.debug('badger(%s, %s, user_email=%s, user_id=%s): applying updates %r',
|
||||
action, role, user_email, user_id, updates)
|
||||
users_coll.update_one({'_id': db_user['_id']},
|
||||
{'$set': updates})
|
||||
|
||||
@@ -116,19 +157,6 @@ def do_badger(action, user_email, role):
|
||||
return '', 204
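For review purposes, a standalone sketch of the role-set arithmetic the reworked do_badger() applies per action; the helper name and values below are made up and are not part of this change.

def apply_badger_action(action: str, current_roles: set, badge_roles: set) -> set:
    # 'grant' adds all badge roles, 'revoke' removes them; mirrors the |= / -= above.
    if action == 'grant':
        return current_roles | badge_roles
    if action == 'revoke':
        return current_roles - badge_roles
    raise ValueError(f'action {action!r} not supported')

assert apply_badger_action('grant', {'subscriber'}, {'demo'}) == {'subscriber', 'demo'}
assert apply_badger_action('revoke', {'subscriber', 'demo'}, {'demo'}) == {'subscriber'}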
@blueprint.route('/urler/<project_id>', methods=['GET'])
|
||||
@authorization.require_login(require_roles={u'service', u'urler'}, require_all=True)
|
||||
def urler(project_id):
|
||||
"""Returns the URL of any project."""
|
||||
|
||||
project_id = str2id(project_id)
|
||||
project = mongo.find_one_or_404('projects', project_id,
|
||||
projection={'url': 1})
|
||||
return jsonify({
|
||||
'_id': project_id,
|
||||
'url': project['url']})
|
||||
|
||||
|
||||
def manage_user_group_membership(db_user, role, action):
|
||||
"""Some roles have associated groups; this function maintains group & role membership.
|
||||
|
||||
@@ -162,38 +190,52 @@ def manage_user_group_membership(db_user, role, action):
|
||||
return user_groups
|
||||
|
||||
|
||||
def create_service_account(email, roles, service):
|
||||
def create_service_account(email: str, roles: typing.Iterable, service: dict,
|
||||
*, full_name: str=None):
|
||||
"""Creates a service account with the given roles + the role 'service'.
|
||||
|
||||
:param email: email address associated with the account
|
||||
:type email: str
|
||||
:param email: optional email address associated with the account.
|
||||
:param roles: iterable of role names
|
||||
:param service: dict of the 'service' key in the user.
|
||||
:type service: dict
|
||||
:param full_name: Full name of the service account. If None, will be set to
|
||||
something reasonable.
|
||||
|
||||
:return: tuple (user doc, token doc)
|
||||
"""
|
||||
from eve.methods.post import post_internal
|
||||
|
||||
# Create a user with the correct roles.
|
||||
roles = list(set(roles).union({u'service'}))
|
||||
user = {'username': email,
|
||||
roles = sorted(set(roles).union({'service'}))
|
||||
user_id = bson.ObjectId()
|
||||
|
||||
log.info('Creating service account %s with roles %s', user_id, roles)
|
||||
user = {'_id': user_id,
|
||||
'username': f'SRV-{user_id}',
|
||||
'groups': [],
|
||||
'roles': roles,
|
||||
'settings': {'email_communications': 0},
|
||||
'auth': [],
|
||||
'full_name': email,
|
||||
'email': email,
|
||||
'full_name': full_name or f'SRV-{user_id}',
|
||||
'service': service}
|
||||
result, _, _, status = post_internal('users', user)
|
||||
if email:
|
||||
user['email'] = email
|
||||
result, _, _, status = current_app.post_internal('users', user)
|
||||
|
||||
if status != 201:
|
||||
raise SystemExit('Error creating user {}: {}'.format(email, result))
|
||||
raise ServiceAccountCreationError('Error creating user {}: {}'.format(user_id, result))
|
||||
user.update(result)
|
||||
|
||||
# Create an authentication token that won't expire for a long time.
|
||||
token = local_auth.generate_and_store_token(user['_id'], days=36500, prefix='SRV')
|
||||
token = generate_auth_token(user['_id'])
|
||||
|
||||
return user, token
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
||||
def generate_auth_token(service_account_id) -> dict:
|
||||
"""Generates an authentication token for a service account."""
|
||||
|
||||
token_info = local_auth.generate_and_store_token(service_account_id, days=36500, prefix=b'SRV')
|
||||
return token_info
|
||||
|
||||
|
||||
def setup_app(app, api_prefix):
|
||||
app.register_api_blueprint(blueprint, url_prefix=api_prefix)
|
||||
pillar/api/users/__init__.py (new file, 79 lines added)
@@ -0,0 +1,79 @@
import logging
|
||||
|
||||
import bson
|
||||
from flask import current_app
|
||||
|
||||
from . import hooks
|
||||
from .routes import blueprint_api
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def remove_user_from_group(user_id: bson.ObjectId, group_id: bson.ObjectId):
|
||||
"""Removes the user from the given group.
|
||||
|
||||
Directly uses MongoDB, so that it doesn't require any special permissions.
|
||||
"""
|
||||
|
||||
log.info('Removing user %s from group %s', user_id, group_id)
|
||||
user_group_action(user_id, group_id, '$pull')
|
||||
|
||||
|
||||
def add_user_to_group(user_id: bson.ObjectId, group_id: bson.ObjectId):
|
||||
"""Makes the user member of the given group.
|
||||
|
||||
Directly uses MongoDB, so that it doesn't require any special permissions.
|
||||
"""
|
||||
|
||||
log.info('Adding user %s to group %s', user_id, group_id)
|
||||
user_group_action(user_id, group_id, '$addToSet')
|
||||
|
||||
|
||||
def user_group_action(user_id: bson.ObjectId, group_id: bson.ObjectId, action: str):
|
||||
"""Performs a group action (add/remove).
|
||||
|
||||
:param user_id: the user's ObjectID.
|
||||
:param group_id: the group's ObjectID.
|
||||
:param action: either '$pull' to remove from a group, or '$addToSet' to add to a group.
|
||||
"""
|
||||
|
||||
from pymongo.results import UpdateResult
|
||||
|
||||
assert isinstance(user_id, bson.ObjectId)
|
||||
assert isinstance(group_id, bson.ObjectId)
|
||||
assert action in {'$pull', '$addToSet'}
|
||||
|
||||
users_coll = current_app.db('users')
|
||||
result: UpdateResult = users_coll.update_one(
|
||||
{'_id': user_id},
|
||||
{action: {'groups': group_id}},
|
||||
)
|
||||
|
||||
if result.matched_count == 0:
|
||||
raise ValueError(f'Unable to {action} user {user_id} membership of group {group_id}; '
|
||||
f'user not found.')
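A standalone illustration of what the two MongoDB update operators used above do to a user's groups list; the helper and values are hypothetical and only mirror the semantics.

def apply_group_action(groups: list, group_id, action: str) -> list:
    # '$addToSet' adds the ID only when absent; '$pull' removes every occurrence.
    if action == '$addToSet':
        return groups if group_id in groups else groups + [group_id]
    if action == '$pull':
        return [g for g in groups if g != group_id]
    raise ValueError(f'unsupported action {action!r}')

assert apply_group_action(['g1'], 'g2', '$addToSet') == ['g1', 'g2']
assert apply_group_action(['g1', 'g2'], 'g2', '$addToSet') == ['g1', 'g2']
assert apply_group_action(['g1', 'g2'], 'g1', '$pull') == ['g2']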
def _update_search_user_changed_role(sender, user: dict):
|
||||
log.debug('Sending updated user %s to Algolia due to role change', user['_id'])
|
||||
hooks.push_updated_user_to_search(user, original=None)
|
||||
|
||||
|
||||
def setup_app(app, api_prefix):
|
||||
from pillar.api import service
|
||||
|
||||
app.on_pre_GET_users += hooks.check_user_access
|
||||
app.on_post_GET_users += hooks.post_GET_user
|
||||
app.on_pre_PUT_users += hooks.check_put_access
|
||||
app.on_pre_PUT_users += hooks.before_replacing_user
|
||||
app.on_replaced_users += hooks.push_updated_user_to_search
|
||||
app.on_replaced_users += hooks.send_blinker_signal_roles_changed
|
||||
app.on_fetched_item_users += hooks.after_fetching_user
|
||||
app.on_fetched_resource_users += hooks.after_fetching_user_resource
|
||||
|
||||
app.on_insert_users += hooks.before_inserting_users
|
||||
app.on_inserted_users += hooks.after_inserting_users
|
||||
|
||||
app.register_api_blueprint(blueprint_api, url_prefix=api_prefix)
|
||||
|
||||
service.signal_user_changed_role.connect(_update_search_user_changed_role)
|
||||
pillar/api/users/hooks.py (new file, 206 lines added)
@@ -0,0 +1,206 @@
import copy
|
||||
import json
|
||||
|
||||
import bson
|
||||
from eve.utils import parse_request
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
|
||||
from pillar import current_app
|
||||
from pillar.api.users.routes import log
|
||||
from pillar.api.utils.authorization import user_has_role
|
||||
import pillar.auth
|
||||
|
||||
USER_EDITABLE_FIELDS = {'full_name', 'username', 'email', 'settings'}
|
||||
|
||||
# These fields nobody is allowed to touch directly, not even admins.
|
||||
USER_ALWAYS_RESTORE_FIELDS = {'auth'}
|
||||
|
||||
|
||||
def before_replacing_user(request, lookup):
|
||||
"""Prevents changes to any field of the user doc, except USER_EDITABLE_FIELDS."""
|
||||
|
||||
# Find the user that is being replaced
|
||||
req = parse_request('users')
|
||||
req.projection = json.dumps({key: 0 for key in USER_EDITABLE_FIELDS})
|
||||
original = current_app.data.find_one('users', req, **lookup)
|
||||
|
||||
# Make sure that the replacement has a valid auth field.
|
||||
put_data = request.get_json()
|
||||
if put_data is None:
|
||||
raise wz_exceptions.BadRequest('No JSON data received')
|
||||
|
||||
# We should get a ref to the cached JSON, and not a copy. This will allow us to
|
||||
# modify the cached JSON so that Eve sees our modifications.
|
||||
assert put_data is request.get_json()
|
||||
|
||||
# Reset fields that shouldn't be edited to their original values. This is only
|
||||
# needed when users are editing themselves; admins are allowed to edit much more.
|
||||
if not pillar.auth.current_user.has_cap('admin'):
|
||||
for db_key, db_value in original.items():
|
||||
if db_key[0] == '_' or db_key in USER_EDITABLE_FIELDS:
|
||||
continue
|
||||
|
||||
if db_key in original:
|
||||
put_data[db_key] = copy.deepcopy(original[db_key])
|
||||
|
||||
# Remove fields added by this PUT request, except when they are user-editable.
|
||||
for put_key in list(put_data.keys()):
|
||||
if put_key[0] == '_' or put_key in USER_EDITABLE_FIELDS:
|
||||
continue
|
||||
|
||||
if put_key not in original:
|
||||
del put_data[put_key]
|
||||
|
||||
# Always restore those fields
|
||||
for db_key in USER_ALWAYS_RESTORE_FIELDS:
|
||||
if db_key in original:
|
||||
put_data[db_key] = copy.deepcopy(original[db_key])
|
||||
else:
|
||||
del put_data[db_key]
|
||||
|
||||
# Regular users should always have an email address
|
||||
if 'service' not in put_data.get('roles', ()):
|
||||
if not put_data.get('email'):
|
||||
raise wz_exceptions.UnprocessableEntity(
|
||||
'email field must be given')
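A minimal standalone sketch of the merge rules this hook enforces for non-admin users; the function below is an illustration only, with made-up field values, and is not the hook itself.

USER_EDITABLE_FIELDS = {'full_name', 'username', 'email', 'settings'}

def sanitize_put(put_data: dict, original: dict) -> dict:
    result = dict(put_data)
    # Non-editable fields are forced back to their stored values.
    for key, value in original.items():
        if key.startswith('_') or key in USER_EDITABLE_FIELDS:
            continue
        result[key] = value
    # Fields invented by the PUT request are dropped again.
    for key in list(result.keys()):
        if key.startswith('_') or key in USER_EDITABLE_FIELDS:
            continue
        if key not in original:
            del result[key]
    return result

cleaned = sanitize_put({'full_name': 'New Name', 'roles': ['admin']},
                       {'full_name': 'Old Name', 'roles': ['subscriber']})
assert cleaned == {'full_name': 'New Name', 'roles': ['subscriber']}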
def push_updated_user_to_search(user, original):
|
||||
"""
|
||||
Push an update to the Search index when a user
|
||||
item is updated
|
||||
"""
|
||||
|
||||
from pillar.celery import search_index_tasks as searchindex
|
||||
|
||||
searchindex.updated_user.delay(str(user['_id']))
|
||||
|
||||
|
||||
def send_blinker_signal_roles_changed(user, original):
|
||||
"""
|
||||
Sends a Blinker signal that the user roles were
|
||||
changed, so others can respond.
|
||||
"""
|
||||
|
||||
current_roles = set(user.get('roles', []))
|
||||
original_roles = set(original.get('roles', []))
|
||||
|
||||
if current_roles == original_roles:
|
||||
return
|
||||
|
||||
from pillar.api.service import signal_user_changed_role
|
||||
|
||||
log.info('User %s changed roles to %s, sending Blinker signal',
|
||||
user.get('_id'), current_roles)
|
||||
signal_user_changed_role.send(current_app, user=user)
|
||||
|
||||
|
||||
def check_user_access(request, lookup):
|
||||
"""Modifies the lookup dict to limit returned user info."""
|
||||
|
||||
user = pillar.auth.get_current_user()
|
||||
|
||||
# Admins can do anything and get everything, except the 'auth' block.
|
||||
if user.has_cap('admin'):
|
||||
return
|
||||
|
||||
if not lookup and user.is_anonymous:
|
||||
raise wz_exceptions.Forbidden()
|
||||
|
||||
# Add a filter to only return the current user.
|
||||
if '_id' not in lookup:
|
||||
lookup['_id'] = user.user_id
|
||||
|
||||
|
||||
def check_put_access(request, lookup):
|
||||
"""Only allow PUT to the current user, or all users if admin."""
|
||||
|
||||
user = pillar.auth.get_current_user()
|
||||
if user.has_cap('admin'):
|
||||
return
|
||||
|
||||
if user.is_anonymous:
|
||||
raise wz_exceptions.Forbidden()
|
||||
|
||||
if str(lookup['_id']) != str(user.user_id):
|
||||
raise wz_exceptions.Forbidden()
|
||||
|
||||
|
||||
def after_fetching_user(user):
|
||||
# Deny access to auth block; authentication stuff is managed by
|
||||
# custom end-points.
|
||||
user.pop('auth', None)
|
||||
|
||||
current_user = pillar.auth.get_current_user()
|
||||
|
||||
# Admins can do anything and get everything, except the 'auth' block.
|
||||
if current_user.has_cap('admin'):
|
||||
return
|
||||
|
||||
# Only allow full access to the current user.
|
||||
if current_user.is_authenticated and str(user['_id']) == str(current_user.user_id):
|
||||
return
|
||||
|
||||
# Remove all fields except public ones.
|
||||
public_fields = {'full_name', 'username', 'email', 'extension_props_public'}
|
||||
for field in list(user.keys()):
|
||||
if field not in public_fields:
|
||||
del user[field]
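As a standalone illustration (made-up document), this is effectively what remains of another user's document after the filtering above:

public_fields = {'full_name', 'username', 'email', 'extension_props_public'}
other_user = {'_id': '5973', 'full_name': 'A. User', 'email': 'a@example.com',
              'roles': ['subscriber'], 'settings': {'email_communications': 1}}
visible = {k: v for k, v in other_user.items() if k in public_fields}
assert visible == {'full_name': 'A. User', 'email': 'a@example.com'}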
def after_fetching_user_resource(response):
|
||||
for user in response['_items']:
|
||||
after_fetching_user(user)
|
||||
|
||||
|
||||
def post_GET_user(request, payload):
|
||||
json_data = json.loads(payload.data)
|
||||
# Check if we are querying the users endpoint (instead of the single user)
|
||||
if json_data.get('_id') is None:
|
||||
return
|
||||
# json_data['computed_permissions'] = \
|
||||
# compute_permissions(json_data['_id'], app.data.driver)
|
||||
payload.data = json.dumps(json_data)
|
||||
|
||||
|
||||
def grant_org_roles(user_doc):
|
||||
"""Handle any organization this user may be part of."""
|
||||
|
||||
email = user_doc.get('email')
|
||||
if not email:
|
||||
log.info('Unable to check new user for organization membership, no email address: %r',
|
||||
user_doc)
|
||||
return
|
||||
|
||||
org_roles = current_app.org_manager.unknown_member_roles(email)
|
||||
if not org_roles:
|
||||
log.debug('No organization roles for user %r', email)
|
||||
return
|
||||
|
||||
log.info('Granting organization roles %r to user %r', org_roles, email)
|
||||
new_roles = set(user_doc.get('roles') or []) | org_roles
|
||||
user_doc['roles'] = list(new_roles)
|
||||
|
||||
|
||||
def before_inserting_users(user_docs):
|
||||
"""Grants organization roles to the created users."""
|
||||
|
||||
for user_doc in user_docs:
|
||||
grant_org_roles(user_doc)
|
||||
|
||||
|
||||
def after_inserting_users(user_docs):
|
||||
"""Moves the users from the unknown_members to the members list of their organizations."""
|
||||
|
||||
om = current_app.org_manager
|
||||
for user_doc in user_docs:
|
||||
user_id = user_doc.get('_id')
|
||||
user_email = user_doc.get('email')
|
||||
|
||||
if not user_id or not user_email:
|
||||
# Missing emails can happen when creating a service account; that's fine.
|
||||
log.info('User created with _id=%r and email=%r, unable to check organizations',
|
||||
user_id, user_email)
|
||||
continue
|
||||
|
||||
om.make_member_known(user_id, user_email)
|
||||
pillar/api/users/routes.py (new file, 21 lines added)
@@ -0,0 +1,21 @@
import logging
|
||||
|
||||
from eve.methods.get import get
|
||||
from flask import Blueprint
|
||||
|
||||
from pillar.api.utils import jsonify
|
||||
from pillar.api.utils.authorization import require_login
|
||||
from pillar.auth import current_user
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
blueprint_api = Blueprint('users_api', __name__)
|
||||
|
||||
|
||||
@blueprint_api.route('/me')
|
||||
@require_login()
|
||||
def my_info():
|
||||
eve_resp, _, _, status, _ = get('users', {'_id': current_user.user_id})
|
||||
resp = jsonify(eve_resp['_items'][0], status=status)
|
||||
return resp
|
||||
|
||||
|
||||
pillar/api/utils/__init__.py (new file, 248 lines added)
@@ -0,0 +1,248 @@
import base64
|
||||
import copy
|
||||
import datetime
|
||||
import functools
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import random
|
||||
import typing
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
|
||||
import bson.objectid
|
||||
import bson.tz_util
|
||||
from eve import RFC1123_DATE_FORMAT
|
||||
from flask import current_app
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
import pymongo.results
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def node_setattr(node, key, value):
|
||||
"""Sets a node property by dotted key.
|
||||
|
||||
Modifies the node in-place. Deletes None values.
|
||||
|
||||
:type node: dict
|
||||
:type key: str
|
||||
:param value: the value to set, or None to delete the key.
|
||||
"""
|
||||
|
||||
set_on = node
|
||||
while key and '.' in key:
|
||||
head, key = key.split('.', 1)
|
||||
set_on = set_on[head]
|
||||
|
||||
if value is None:
|
||||
set_on.pop(key, None)
|
||||
else:
|
||||
set_on[key] = value
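A small usage example of the dotted-key behaviour described in the docstring, exercising the node_setattr() defined above with made-up values:

node = {'properties': {'status': 'todo', 'order': 1}}
node_setattr(node, 'properties.status', 'done')   # set a nested key
node_setattr(node, 'properties.order', None)      # None deletes the key
assert node == {'properties': {'status': 'done'}}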
def remove_private_keys(document):
|
||||
"""Removes any key that starts with an underscore, returns result as new
|
||||
dictionary.
|
||||
"""
|
||||
doc_copy = copy.deepcopy(document)
|
||||
for key in list(doc_copy.keys()):
|
||||
if key.startswith('_'):
|
||||
del doc_copy[key]
|
||||
|
||||
try:
|
||||
del doc_copy['allowed_methods']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
return doc_copy
|
||||
|
||||
|
||||
class PillarJSONEncoder(json.JSONEncoder):
|
||||
"""JSON encoder with support for Pillar resources."""
|
||||
|
||||
def default(self, obj):
|
||||
if isinstance(obj, datetime.datetime):
|
||||
return obj.strftime(RFC1123_DATE_FORMAT)
|
||||
|
||||
if isinstance(obj, bson.ObjectId):
|
||||
return str(obj)
|
||||
|
||||
if isinstance(obj, pymongo.results.UpdateResult):
|
||||
return obj.raw_result
|
||||
|
||||
# Let the base class default method raise the TypeError
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
def dumps(mongo_doc, **kwargs):
|
||||
"""json.dumps() for MongoDB documents."""
|
||||
return json.dumps(mongo_doc, cls=PillarJSONEncoder, **kwargs)
|
||||
|
||||
|
||||
def jsonify(mongo_doc, status=200, headers=None):
|
||||
"""JSonifies a Mongo document into a Flask response object."""
|
||||
|
||||
return current_app.response_class(dumps(mongo_doc),
|
||||
mimetype='application/json',
|
||||
status=status,
|
||||
headers=headers)
|
||||
|
||||
|
||||
def bsonify(mongo_doc, status=200, headers=None):
|
||||
"""BSonifies a Mongo document into a Flask response object."""
|
||||
|
||||
import bson
|
||||
|
||||
data = bson.BSON.encode(mongo_doc)
|
||||
return current_app.response_class(data,
|
||||
mimetype='application/bson',
|
||||
status=status,
|
||||
headers=headers)
|
||||
|
||||
|
||||
def skip_when_testing(func):
|
||||
"""Decorator, skips the decorated function when app.config['TESTING']"""
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
if current_app.config['TESTING']:
|
||||
log.debug('Skipping call to %s(...) due to TESTING', func.__name__)
|
||||
return None
|
||||
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
def project_get_node_type(project_document, node_type_node_name):
|
||||
"""Return a node_type subdocument for a project. If none is found, return
|
||||
None.
|
||||
"""
|
||||
|
||||
if project_document is None:
|
||||
return None
|
||||
|
||||
return next((node_type for node_type in project_document['node_types']
|
||||
if node_type['name'] == node_type_node_name), None)
|
||||
|
||||
|
||||
def str2id(document_id: str) -> bson.ObjectId:
|
||||
"""Returns the document ID as ObjectID, or raises a BadRequest exception.
|
||||
|
||||
:raises: wz_exceptions.BadRequest
|
||||
"""
|
||||
|
||||
if not document_id:
|
||||
log.debug('str2id(%r): Invalid Object ID', document_id)
|
||||
raise wz_exceptions.BadRequest('Invalid object ID %r' % document_id)
|
||||
|
||||
try:
|
||||
return bson.ObjectId(document_id)
|
||||
except (bson.objectid.InvalidId, TypeError):
|
||||
log.debug('str2id(%r): Invalid Object ID', document_id)
|
||||
raise wz_exceptions.BadRequest('Invalid object ID %r' % document_id)
|
||||
|
||||
|
||||
def gravatar(email: str, size=64) -> typing.Optional[str]:
|
||||
if email is None:
|
||||
return None
|
||||
|
||||
parameters = {'s': str(size), 'd': 'mm'}
|
||||
return "https://www.gravatar.com/avatar/" + \
|
||||
hashlib.md5(email.encode()).hexdigest() + \
|
||||
"?" + urllib.parse.urlencode(parameters)
|
||||
|
||||
|
||||
class MetaFalsey(type):
|
||||
def __bool__(cls):
|
||||
return False
|
||||
|
||||
|
||||
class DoesNotExistMeta(MetaFalsey):
|
||||
def __repr__(cls) -> str:
|
||||
return 'DoesNotExist'
|
||||
|
||||
|
||||
class DoesNotExist(object, metaclass=DoesNotExistMeta):
|
||||
"""Returned as value by doc_diff if a value does not exist."""
|
||||
|
||||
|
||||
def doc_diff(doc1, doc2, *, falsey_is_equal=True, superkey: str = None):
|
||||
"""Generator, yields differences between documents.
|
||||
|
||||
Yields changes as (key, value in doc1, value in doc2) tuples, where
|
||||
the value can also be the DoesNotExist class. Does not report changed
|
||||
private keys (i.e. the standard Eve keys starting with underscores).
|
||||
|
||||
Sub-documents (i.e. dicts) are recursed, and dot notation is used
|
||||
for the keys if changes are found.
|
||||
|
||||
If falsey_is_equal=True, all Falsey values compare as equal, i.e. this
|
||||
function won't report differences between DoesNotExist, False, '', and 0.
|
||||
"""
|
||||
|
||||
private_keys = {'_id', '_etag', '_deleted', '_updated', '_created'}
|
||||
|
||||
def combine_key(some_key):
|
||||
"""Combine this key with the superkey.
|
||||
|
||||
Keep the key type the same, unless we have to combine with a superkey.
|
||||
"""
|
||||
if not superkey:
|
||||
return some_key
|
||||
if isinstance(some_key, str) and some_key[0] == '[':
|
||||
return f'{superkey}{some_key}'
|
||||
return f'{superkey}.{some_key}'
|
||||
|
||||
if doc1 is doc2:
|
||||
return
|
||||
|
||||
if falsey_is_equal and not bool(doc1) and not bool(doc2):
|
||||
return
|
||||
|
||||
if isinstance(doc1, dict) and isinstance(doc2, dict):
|
||||
for key in set(doc1.keys()).union(set(doc2.keys())):
|
||||
if key in private_keys:
|
||||
continue
|
||||
|
||||
val1 = doc1.get(key, DoesNotExist)
|
||||
val2 = doc2.get(key, DoesNotExist)
|
||||
|
||||
yield from doc_diff(val1, val2,
|
||||
falsey_is_equal=falsey_is_equal,
|
||||
superkey=combine_key(key))
|
||||
return
|
||||
|
||||
if isinstance(doc1, list) and isinstance(doc2, list):
|
||||
for idx in range(max(len(doc1), len(doc2))):
|
||||
try:
|
||||
item1 = doc1[idx]
|
||||
except IndexError:
|
||||
item1 = DoesNotExist
|
||||
try:
|
||||
item2 = doc2[idx]
|
||||
except IndexError:
|
||||
item2 = DoesNotExist
|
||||
|
||||
subkey = f'[{idx}]'
|
||||
if item1 is DoesNotExist or item2 is DoesNotExist:
|
||||
yield combine_key(subkey), item1, item2
|
||||
else:
|
||||
yield from doc_diff(item1, item2,
|
||||
falsey_is_equal=falsey_is_equal,
|
||||
superkey=combine_key(subkey))
|
||||
return
|
||||
|
||||
if doc1 != doc2:
|
||||
yield superkey, doc1, doc2
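An example, with made-up documents, of what the generator yields; the order of the yielded tuples may vary because dict keys are iterated from a set.

doc_before = {'name': 'Suzanne', 'props': {'status': 'todo'}, 'tags': ['a']}
doc_after = {'name': 'Suzanne', 'props': {'status': 'done'}, 'tags': ['a', 'b']}
changes = set(doc_diff(doc_before, doc_after))
assert changes == {('props.status', 'todo', 'done'), ('tags[1]', DoesNotExist, 'b')}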
def random_etag() -> str:
|
||||
"""Random string usable as etag."""
|
||||
|
||||
randbytes = random.getrandbits(256).to_bytes(32, 'big')
|
||||
return base64.b64encode(randbytes)[:-1].decode()
|
||||
|
||||
|
||||
def utcnow() -> datetime.datetime:
|
||||
return datetime.datetime.now(tz=bson.tz_util.utc)
|
||||
pillar/api/utils/algolia.py (new file, 33 lines added)
@@ -0,0 +1,33 @@
import logging
|
||||
|
||||
from bson import ObjectId
|
||||
|
||||
from pillar import current_app
|
||||
from . import skip_when_testing
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@skip_when_testing
|
||||
def index_user_save(to_index_user: dict):
|
||||
index_users = current_app.algolia_index_users
|
||||
if not index_users:
|
||||
log.debug('No Algolia index defined, so nothing to do.')
|
||||
return
|
||||
|
||||
# Create or update Algolia index for the user
|
||||
index_users.save_object(to_index_user)
|
||||
|
||||
|
||||
@skip_when_testing
|
||||
def index_node_save(node_to_index):
|
||||
if not current_app.algolia_index_nodes:
|
||||
return
|
||||
current_app.algolia_index_nodes.save_object(node_to_index)
|
||||
|
||||
|
||||
@skip_when_testing
|
||||
def index_node_delete(delete_id):
|
||||
if current_app.algolia_index_nodes is None:
|
||||
return
|
||||
current_app.algolia_index_nodes.delete_object(delete_id)
|
||||
pillar/api/utils/authentication.py (new file, 431 lines added)
@@ -0,0 +1,431 @@
"""Generic authentication.
|
||||
|
||||
Contains functionality to validate tokens, create users and tokens, and make
|
||||
unique usernames from emails. Calls out to the pillar_server.modules.blender_id
|
||||
module for Blender ID communication.
|
||||
"""
|
||||
|
||||
import base64
|
||||
import datetime
|
||||
import hmac
|
||||
import hashlib
|
||||
import logging
|
||||
import typing
|
||||
|
||||
import bson
|
||||
from flask import g, current_app
|
||||
from flask import request
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
|
||||
from pillar.api.utils import remove_private_keys, utcnow
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Construction is done when requested, since constructing a UserClass instance
|
||||
# requires an application context to look up capabilities. We set the initial
|
||||
# value to a not-None singleton to be able to differentiate between
|
||||
# g.current_user set to "not logged in" or "uninitialised CLI_USER".
|
||||
CLI_USER = ...
|
||||
|
||||
|
||||
def force_cli_user():
|
||||
"""Sets g.current_user to the CLI_USER object.
|
||||
|
||||
This is used as a marker to avoid authorization checks and just allow everything.
|
||||
"""
|
||||
|
||||
global CLI_USER
|
||||
|
||||
from pillar.auth import UserClass
|
||||
|
||||
if CLI_USER is ...:
|
||||
CLI_USER = UserClass.construct('CLI', {
|
||||
'_id': 'CLI',
|
||||
'groups': [],
|
||||
'roles': {'admin'},
|
||||
'email': 'local@nowhere',
|
||||
'username': 'CLI',
|
||||
})
|
||||
log.info('CONSTRUCTED CLI USER %s of type %s', id(CLI_USER), id(type(CLI_USER)))
|
||||
|
||||
log.info('Logging in as CLI_USER (%s) of type %s, circumventing authentication.',
|
||||
id(CLI_USER), id(type(CLI_USER)))
|
||||
g.current_user = CLI_USER
|
||||
|
||||
|
||||
def find_user_in_db(user_info: dict, provider='blender-id') -> dict:
|
||||
"""Find the user in our database, creating/updating the returned document where needed.
|
||||
|
||||
First, search for the user using its id from the provider, then try to look the user up via the
|
||||
email address.
|
||||
|
||||
Does NOT update the user in the database.
|
||||
|
||||
:param user_info: Information (id, email and full_name) from the auth provider
|
||||
:param provider: One of the supported providers
|
||||
"""
|
||||
|
||||
users = current_app.data.driver.db['users']
|
||||
|
||||
user_id = user_info['id']
|
||||
query = {'$or': [
|
||||
{'auth': {'$elemMatch': {
|
||||
'user_id': str(user_id),
|
||||
'provider': provider}}},
|
||||
{'email': user_info['email']},
|
||||
]}
|
||||
log.debug('Querying: %s', query)
|
||||
db_user = users.find_one(query)
|
||||
|
||||
if db_user:
|
||||
log.debug('User with %s id %s already in our database, updating with info from %s',
|
||||
provider, user_id, provider)
|
||||
db_user['email'] = user_info['email']
|
||||
|
||||
# Find out if an auth entry for the current provider already exists
|
||||
provider_entry = [element for element in db_user['auth'] if element['provider'] == provider]
|
||||
if not provider_entry:
|
||||
db_user['auth'].append({
|
||||
'provider': provider,
|
||||
'user_id': str(user_id),
|
||||
'token': ''})
|
||||
else:
|
||||
log.debug('User %r not yet in our database, creating a new one.', user_id)
|
||||
db_user = create_new_user_document(
|
||||
email=user_info['email'],
|
||||
user_id=user_id,
|
||||
username=user_info['full_name'],
|
||||
provider=provider)
|
||||
db_user['username'] = make_unique_username(user_info['email'])
|
||||
if not db_user['full_name']:
|
||||
db_user['full_name'] = db_user['username']
|
||||
|
||||
return db_user
|
||||
|
||||
|
||||
def validate_token(*, force=False):
|
||||
"""Validate the token provided in the request and populate the current_user
|
||||
flask.g object, so that permissions and access to a resource can be defined
|
||||
from it.
|
||||
|
||||
When the token is successfully validated, sets `g.current_user` to contain
|
||||
the user information, otherwise it is set to None.
|
||||
|
||||
:param force: don't trust g.current_user and force a re-check.
|
||||
:returns: True iff the user is logged in with a valid Blender ID token.
|
||||
"""
|
||||
|
||||
from pillar.auth import AnonymousUser
|
||||
|
||||
# Trust a pre-existing g.current_user
|
||||
if not force:
|
||||
cur = getattr(g, 'current_user', None)
|
||||
if cur is not None and cur.is_authenticated:
|
||||
log.debug('skipping token check because current user is already set to %s', cur)
|
||||
return True
|
||||
|
||||
auth_header = request.headers.get('Authorization') or ''
|
||||
if request.authorization:
|
||||
token = request.authorization.username
|
||||
oauth_subclient = request.authorization.password
|
||||
elif auth_header.startswith('Bearer '):
|
||||
token = auth_header[7:].strip()
|
||||
oauth_subclient = ''
|
||||
else:
|
||||
# Check the session, the user might be logged in through Flask-Login.
|
||||
from pillar import auth
|
||||
|
||||
token = auth.get_blender_id_oauth_token()
|
||||
oauth_subclient = None
|
||||
|
||||
if not token:
|
||||
# If no authorization headers are provided, we are getting a request
|
||||
# from a non logged in user. Proceed accordingly.
|
||||
log.debug('No authentication headers, so not logged in.')
|
||||
g.current_user = AnonymousUser()
|
||||
return False
|
||||
|
||||
return validate_this_token(token, oauth_subclient) is not None
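A standalone sketch of the three ways a token can reach validate_token(), as described above; the helper below is illustrative only (with HTTP basic auth the token travels as the username and the subclient ID as the password).

def extract_token(auth_header: str = '', basic_auth: tuple = None, session_token: str = None):
    # Returns (token, oauth_subclient) following the precedence used above.
    if basic_auth is not None:
        return basic_auth[0], basic_auth[1]
    if auth_header.startswith('Bearer '):
        return auth_header[7:].strip(), ''
    return session_token, None

assert extract_token(basic_auth=('SRVsometoken', 'subclient-id')) == ('SRVsometoken', 'subclient-id')
assert extract_token(auth_header='Bearer  abc123 ') == ('abc123', '')
assert extract_token(session_token='sess-token') == ('sess-token', None)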
def validate_this_token(token, oauth_subclient=None):
|
||||
"""Validates a given token, and sets g.current_user.
|
||||
|
||||
:returns: the user in MongoDB, or None if not a valid token.
|
||||
:rtype: dict
|
||||
"""
|
||||
|
||||
from pillar.auth import UserClass, AnonymousUser, user_authenticated
|
||||
|
||||
g.current_user = None
|
||||
_delete_expired_tokens()
|
||||
|
||||
# Check the users to see if there is one with this Blender ID token.
|
||||
db_token = find_token(token, oauth_subclient)
|
||||
if not db_token:
|
||||
log.debug('Token %r not found in our local database.', token)
|
||||
|
||||
# If no valid token is found in our local database, we issue a new
|
||||
# request to the Blender ID server to verify the validity of the token
|
||||
# passed via the HTTP header. We will get basic user info if the user
|
||||
# is authorized, and we will store the token in our local database.
|
||||
from pillar.api import blender_id
|
||||
|
||||
db_user, status = blender_id.validate_create_user('', token, oauth_subclient)
|
||||
else:
|
||||
# log.debug("User is already in our database and token hasn't expired yet.")
|
||||
users = current_app.data.driver.db['users']
|
||||
db_user = users.find_one(db_token['user'])
|
||||
|
||||
if db_user is None:
|
||||
log.debug('Validation failed, user not logged in')
|
||||
g.current_user = AnonymousUser()
|
||||
return None
|
||||
|
||||
g.current_user = UserClass.construct(token, db_user)
|
||||
user_authenticated.send(None)
|
||||
|
||||
return db_user
|
||||
|
||||
|
||||
def remove_token(token: str):
|
||||
"""Removes the token from the database."""
|
||||
|
||||
tokens_coll = current_app.db('tokens')
|
||||
token_hashed = hash_auth_token(token)
|
||||
|
||||
# TODO: remove matching on unhashed tokens once all tokens have been hashed.
|
||||
lookup = {'$or': [{'token': token}, {'token_hashed': token_hashed}]}
|
||||
del_res = tokens_coll.delete_many(lookup)
|
||||
log.debug('Removed token %r, matched %d documents', token, del_res.deleted_count)
|
||||
|
||||
|
||||
def find_token(token, is_subclient_token=False, **extra_filters):
|
||||
"""Returns the token document, or None if it doesn't exist (or is expired)."""
|
||||
|
||||
tokens_coll = current_app.db('tokens')
|
||||
token_hashed = hash_auth_token(token)
|
||||
|
||||
# TODO: remove matching on unhashed tokens once all tokens have been hashed.
|
||||
lookup = {'$or': [{'token': token}, {'token_hashed': token_hashed}],
|
||||
'is_subclient_token': True if is_subclient_token else {'$in': [False, None]},
|
||||
'expire_time': {"$gt": utcnow()}}
|
||||
lookup.update(extra_filters)
|
||||
|
||||
db_token = tokens_coll.find_one(lookup)
|
||||
return db_token
|
||||
|
||||
|
||||
def hash_auth_token(token: str) -> str:
|
||||
"""Returns the hashed authentication token.
|
||||
|
||||
The token is hashed using HMAC and then base64-encoded.
|
||||
"""
|
||||
|
||||
hmac_key = current_app.config['AUTH_TOKEN_HMAC_KEY']
|
||||
token_hmac = hmac.new(hmac_key, msg=token.encode('utf8'), digestmod=hashlib.sha256)
|
||||
digest = token_hmac.digest()
|
||||
|
||||
return base64.b64encode(digest).decode('ascii')
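A standalone illustration of the hashing scheme above, with a made-up key and token (in the application the key comes from the AUTH_TOKEN_HMAC_KEY setting):

import base64
import hashlib
import hmac

hmac_key = b'not-a-real-key'
token = 'SRVexampletoken'
digest = hmac.new(hmac_key, msg=token.encode('utf8'), digestmod=hashlib.sha256).digest()
token_hashed = base64.b64encode(digest).decode('ascii')
assert len(token_hashed) == 44   # 32-byte SHA-256 digest, base64-encoded (with padding)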
def store_token(user_id, token: str, token_expiry, oauth_subclient_id=False,
|
||||
org_roles: typing.Set[str] = frozenset()):
|
||||
"""Stores an authentication token.
|
||||
|
||||
:returns: the token document from MongoDB
|
||||
"""
|
||||
|
||||
assert isinstance(token, str), 'token must be string type, not %r' % type(token)
|
||||
|
||||
token_data = {
|
||||
'user': user_id,
|
||||
'token_hashed': hash_auth_token(token),
|
||||
'expire_time': token_expiry,
|
||||
}
|
||||
if oauth_subclient_id:
|
||||
token_data['is_subclient_token'] = True
|
||||
if org_roles:
|
||||
token_data['org_roles'] = sorted(org_roles)
|
||||
|
||||
r, _, _, status = current_app.post_internal('tokens', token_data)
|
||||
|
||||
if status not in {200, 201}:
|
||||
log.error('Unable to store authentication token: %s', r)
|
||||
raise RuntimeError('Unable to store authentication token.')
|
||||
|
||||
token_data.update(r)
|
||||
return token_data
|
||||
|
||||
|
||||
def create_new_user(email, username, user_id):
|
||||
"""Creates a new user in our local database.
|
||||
|
||||
@param email: the user's email
|
||||
@param username: the username, which is also used as full name.
|
||||
@param user_id: the user ID from the Blender ID server.
|
||||
@returns: the user ID from our local database.
|
||||
"""
|
||||
|
||||
user_data = create_new_user_document(email, user_id, username)
|
||||
r = current_app.post_internal('users', user_data)
|
||||
user_id = r[0]['_id']
|
||||
return user_id
|
||||
|
||||
|
||||
def create_new_user_document(email, user_id, username, provider='blender-id',
|
||||
token='', *, full_name=''):
|
||||
"""Creates a new user document, without storing it in MongoDB. The token
|
||||
parameter is a password in case provider is "local".
|
||||
"""
|
||||
|
||||
user_data = {
|
||||
'full_name': full_name or username,
|
||||
'username': username,
|
||||
'email': email,
|
||||
'auth': [{
|
||||
'provider': provider,
|
||||
'user_id': str(user_id),
|
||||
'token': token}],
|
||||
'settings': {
|
||||
'email_communications': 1
|
||||
},
|
||||
'groups': [],
|
||||
}
|
||||
return user_data
|
||||
|
||||
|
||||
def make_unique_username(email):
|
||||
"""Creates a unique username from the email address.
|
||||
|
||||
@param email: the email address
|
||||
@returns: the new username
|
||||
@rtype: str
|
||||
"""
|
||||
|
||||
username = email.split('@')[0]
|
||||
# Check for min length of username (otherwise validation fails)
|
||||
username = "___{0}".format(username) if len(username) < 3 else username
|
||||
|
||||
users = current_app.data.driver.db['users']
|
||||
user_from_username = users.find_one({'username': username})
|
||||
|
||||
if not user_from_username:
|
||||
return username
|
||||
|
||||
# Username exists, make it unique by adding some number after it.
|
||||
suffix = 1
|
||||
while True:
|
||||
unique_name = '%s%i' % (username, suffix)
|
||||
user_from_username = users.find_one({'username': unique_name})
|
||||
if user_from_username is None:
|
||||
return unique_name
|
||||
suffix += 1
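A standalone sketch of the uniquifying loop with an in-memory stand-in for the users collection; the names below are invented.

existing = {'neo', 'neo1', 'neo2'}

def unique_username(email: str) -> str:
    username = email.split('@')[0]
    username = f'___{username}' if len(username) < 3 else username   # min-length padding
    if username not in existing:
        return username
    suffix = 1
    while f'{username}{suffix}' in existing:
        suffix += 1
    return f'{username}{suffix}'

assert unique_username('ed@example.com') == '___ed'
assert unique_username('neo@example.com') == 'neo3'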
def _delete_expired_tokens():
|
||||
"""Deletes tokens that have expired.
|
||||
|
||||
For debugging, we keep expired tokens around for a few days, so that we
|
||||
can determine that a token was expired rather than not created in the
|
||||
first place. It also grants some leeway in clock synchronisation.
|
||||
"""
|
||||
|
||||
token_coll = current_app.data.driver.db['tokens']
|
||||
|
||||
expiry_date = utcnow() - datetime.timedelta(days=7)
|
||||
result = token_coll.delete_many({'expire_time': {"$lt": expiry_date}})
|
||||
# log.debug('Deleted %i expired authentication tokens', result.deleted_count)
|
||||
|
||||
|
||||
def current_user_id() -> typing.Optional[bson.ObjectId]:
|
||||
"""None-safe fetching of user ID. Can return None itself, though."""
|
||||
|
||||
user = current_user()
|
||||
return user.user_id
|
||||
|
||||
|
||||
def current_user():
|
||||
"""Returns the current user, or an AnonymousUser if not logged in.
|
||||
|
||||
:rtype: pillar.auth.UserClass
|
||||
"""
|
||||
|
||||
import pillar.auth
|
||||
|
||||
user: pillar.auth.UserClass = g.get('current_user')
|
||||
if user is None:
|
||||
return pillar.auth.AnonymousUser()
|
||||
|
||||
return user
|
||||
|
||||
|
||||
def setup_app(app):
|
||||
@app.before_request
|
||||
def validate_token_at_each_request():
|
||||
validate_token()
|
||||
|
||||
|
||||
def upsert_user(db_user):
|
||||
"""Inserts/updates the user in MongoDB.
|
||||
|
||||
Retries a few times when there are uniqueness issues in the username.
|
||||
|
||||
:returns: the user's database ID and the status of the PUT/POST.
|
||||
The status is 201 on insert, and 200 on update.
|
||||
:type: (ObjectId, int)
|
||||
"""
|
||||
|
||||
if 'subscriber' in db_user.get('groups', []):
|
||||
log.error('Non-ObjectID string found in user.groups: %s', db_user)
|
||||
raise wz_exceptions.InternalServerError(
|
||||
'Non-ObjectID string found in user.groups: %s' % db_user)
|
||||
|
||||
if not db_user['full_name']:
|
||||
# Blender ID doesn't need a full name, but we do.
|
||||
db_user['full_name'] = db_user['username']
|
||||
|
||||
r = {}
|
||||
for retry in range(5):
|
||||
if '_id' in db_user:
|
||||
# Update the existing user
|
||||
attempted_eve_method = 'PUT'
|
||||
db_id = db_user['_id']
|
||||
r, _, _, status = current_app.put_internal('users', remove_private_keys(db_user),
|
||||
_id=db_id)
|
||||
if status == 422:
|
||||
log.error('Status %i trying to PUT user %s with values %s, should not happen! %s',
|
||||
status, db_id, remove_private_keys(db_user), r)
|
||||
else:
|
||||
# Create a new user, retry for non-unique usernames.
|
||||
attempted_eve_method = 'POST'
|
||||
r, _, _, status = current_app.post_internal('users', db_user)
|
||||
|
||||
if status not in {200, 201}:
|
||||
log.error('Status %i trying to create user with values %s: %s',
|
||||
status, db_user, r)
|
||||
raise wz_exceptions.InternalServerError()
|
||||
|
||||
db_id = r['_id']
|
||||
db_user.update(r) # update with database/eve-generated fields.
|
||||
|
||||
if status == 422:
|
||||
# Probably non-unique username, so retry a few times with different usernames.
|
||||
log.info('Error creating new user: %s', r)
|
||||
username_issue = r.get('_issues', {}).get('username', '')
|
||||
if 'not unique' in username_issue:
|
||||
# Retry
|
||||
db_user['username'] = make_unique_username(db_user['email'])
|
||||
continue
|
||||
|
||||
# Saving was successful, or at least didn't break on a non-unique username.
|
||||
break
|
||||
else:
|
||||
log.error('Unable to create new user %s: %s', db_user, r)
|
||||
raise wz_exceptions.InternalServerError()
|
||||
|
||||
if status not in (200, 201):
|
||||
log.error('internal response from %s to Eve: %r %r', attempted_eve_method, status, r)
|
||||
raise wz_exceptions.InternalServerError()
|
||||
|
||||
return db_id, status
|
||||
@@ -7,7 +7,7 @@ from flask import abort
|
||||
from flask import current_app
|
||||
from werkzeug.exceptions import Forbidden
|
||||
|
||||
CHECK_PERMISSIONS_IMPLEMENTED_FOR = {'projects', 'nodes'}
|
||||
CHECK_PERMISSIONS_IMPLEMENTED_FOR = {'projects', 'nodes', 'flamenco_jobs'}
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
@@ -27,6 +27,12 @@ def check_permissions(collection_name, resource, method, append_allowed_methods=
|
||||
:param check_node_type: node type to check. Only valid when collection_name='projects'.
|
||||
:type check_node_type: str
|
||||
"""
|
||||
from pillar.auth import get_current_user
|
||||
from .authentication import CLI_USER
|
||||
|
||||
if get_current_user() is CLI_USER:
|
||||
log.debug('Short-circuiting check_permissions() for CLI user')
|
||||
return
|
||||
|
||||
if not has_permissions(collection_name, resource, method, append_allowed_methods,
|
||||
check_node_type):
|
||||
@@ -45,6 +51,8 @@ def compute_allowed_methods(collection_name, resource, check_node_type=None):
|
||||
:rtype: set
|
||||
"""
|
||||
|
||||
import pillar.auth
|
||||
|
||||
# Check some input values.
|
||||
if collection_name not in CHECK_PERMISSIONS_IMPLEMENTED_FOR:
|
||||
raise ValueError('compute_allowed_methods only implemented for %s, not for %s',
|
||||
@@ -62,15 +70,18 @@ def compute_allowed_methods(collection_name, resource, check_node_type=None):
|
||||
|
||||
# Accumulate allowed methods from the user, group and world level.
|
||||
allowed_methods = set()
|
||||
current_user = g.current_user
|
||||
if current_user:
|
||||
user = pillar.auth.get_current_user()
|
||||
|
||||
if user.is_authenticated:
|
||||
user_is_admin = is_admin(user)
|
||||
|
||||
# If the user is authenticated, proceed to compare the group permissions
|
||||
for permission in computed_permissions.get('groups', ()):
|
||||
if permission['group'] in current_user['groups']:
|
||||
if user_is_admin or permission['group'] in user.group_ids:
|
||||
allowed_methods.update(permission['methods'])
|
||||
|
||||
for permission in computed_permissions.get('users', ()):
|
||||
if current_user['user_id'] == permission['user']:
|
||||
if user_is_admin or user.user_id == permission['user']:
|
||||
allowed_methods.update(permission['methods'])
|
||||
|
||||
# Check if the node is public or private. This must be set for non logged
|
||||
@@ -132,6 +143,14 @@ def compute_aggr_permissions(collection_name, resource, check_node_type=None):
|
||||
if check_node_type is None:
|
||||
return project['permissions']
|
||||
node_type_name = check_node_type
|
||||
elif 'node_type' not in resource:
|
||||
# Neither a project, nor a node, therefore is another collection
|
||||
projects_collection = current_app.data.driver.db['projects']
|
||||
project = projects_collection.find_one(
|
||||
ObjectId(resource['project']),
|
||||
{'permissions': 1})
|
||||
return project['permissions']
|
||||
|
||||
else:
|
||||
# Not a project, so it's a node.
|
||||
assert 'project' in resource
|
||||
@@ -155,7 +174,7 @@ def compute_aggr_permissions(collection_name, resource, check_node_type=None):
|
||||
project_permissions = project['permissions']
|
||||
|
||||
# Find the node type from the project.
|
||||
node_type = next((node_type for node_type in project['node_types']
|
||||
node_type = next((node_type for node_type in project.get('node_types', ())
|
||||
if node_type['name'] == node_type_name), None)
|
||||
if node_type is None: # This node type is not known, so doesn't give permissions.
|
||||
node_type_permissions = {}
|
||||
@@ -203,6 +222,8 @@ def merge_permissions(*args):
|
||||
:returns: combined list of permissions.
|
||||
"""
|
||||
|
||||
from pillar.auth import current_user
|
||||
|
||||
if not args:
|
||||
return {}
|
||||
|
||||
@@ -224,25 +245,35 @@ def merge_permissions(*args):
|
||||
from0 = args[0].get(plural_name, [])
|
||||
from1 = args[1].get(plural_name, [])
|
||||
|
||||
asdict0 = {permission[field_name]: permission['methods'] for permission in from0}
|
||||
asdict1 = {permission[field_name]: permission['methods'] for permission in from1}
|
||||
try:
|
||||
asdict0 = {permission[field_name]: permission['methods'] for permission in from0}
|
||||
except KeyError:
|
||||
log.exception('KeyError creating asdict0 for %r permissions; user=%s; args[0]=%r',
|
||||
field_name, current_user.user_id, args[0])
|
||||
asdict0 = {}
|
||||
try:
|
||||
asdict1 = {permission[field_name]: permission['methods'] for permission in from1}
|
||||
except KeyError:
|
||||
log.exception('KeyError creating asdict1 for %r permissions; user=%s; args[1]=%r',
|
||||
field_name, current_user.user_id, args[1])
|
||||
asdict1 = {}
|
||||
|
||||
keys = set(asdict0.keys() + asdict1.keys())
|
||||
keys = set(asdict0.keys()).union(set(asdict1.keys()))
|
||||
for key in maybe_sorted(keys):
|
||||
methods0 = asdict0.get(key, [])
|
||||
methods1 = asdict1.get(key, [])
|
||||
methods = maybe_sorted(set(methods0).union(set(methods1)))
|
||||
effective.setdefault(plural_name, []).append({field_name: key, u'methods': methods})
|
||||
effective.setdefault(plural_name, []).append({field_name: key, 'methods': methods})
|
||||
|
||||
merge(u'user')
|
||||
merge(u'group')
|
||||
merge('user')
|
||||
merge('group')
|
||||
|
||||
# Gather permissions for world
|
||||
world0 = args[0].get('world', [])
|
||||
world1 = args[1].get('world', [])
|
||||
world_methods = set(world0).union(set(world1))
|
||||
if world_methods:
|
||||
effective[u'world'] = maybe_sorted(world_methods)
|
||||
effective['world'] = maybe_sorted(world_methods)
|
||||
|
||||
# Recurse for longer merges
|
||||
if len(args) > 2:
|
||||
@@ -251,39 +282,83 @@ def merge_permissions(*args):
|
||||
return effective
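For context, a made-up example of the merge semantics implemented above; whether the method lists come back sorted depends on maybe_sorted().

perms_a = {'groups': [{'group': 'g1', 'methods': ['GET']}], 'world': ['GET']}
perms_b = {'groups': [{'group': 'g1', 'methods': ['PUT']}],
           'users': [{'user': 'u1', 'methods': ['DELETE']}]}
# merge_permissions(perms_a, perms_b) would then yield roughly:
# {'users': [{'user': 'u1', 'methods': ['DELETE']}],
#  'groups': [{'group': 'g1', 'methods': ['GET', 'PUT']}],
#  'world': ['GET']}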
def require_login(require_roles=set(),
|
||||
require_all=False):
|
||||
def require_login(*, require_roles=set(),
|
||||
require_cap='',
|
||||
require_all=False,
|
||||
redirect_to_login=False,
|
||||
error_view=None):
|
||||
"""Decorator that enforces users to authenticate.
|
||||
|
||||
Optionally only allows access to users with a certain role.
|
||||
Optionally only allows access to users with a certain role and/or capability.
|
||||
|
||||
Either check on roles or on a capability, but never on both. There is no
|
||||
require_all check for capabilities; if you need to check for multiple
|
||||
capabilities at once, it's a sign that you need to add another capability
|
||||
and give it to everybody that needs it.
|
||||
|
||||
:param require_roles: set of roles.
|
||||
:param require_cap: a capability.
|
||||
:param require_all:
|
||||
When False (the default): if the user's roles have a
|
||||
non-empty intersection with the given roles, access is granted.
|
||||
When True: require the user to have all given roles before access is
|
||||
granted.
|
||||
:param redirect_to_login: Determines the behaviour when the user is not
|
||||
logged in. When False (the default), a 403 Forbidden response is
|
||||
returned; this is suitable for API calls. When True, the user is
|
||||
redirected to the login page; this is suitable for user-facing web
|
||||
requests, and mimics the flask_login behaviour.
|
||||
:param error_view: Callable that returns a Flask response object. This is
|
||||
sent back to the client instead of the default 403 Forbidden.
|
||||
"""
|
||||
|
||||
from flask import request, redirect, url_for, Response
|
||||
|
||||
if not isinstance(require_roles, set):
|
||||
raise TypeError('require_roles param should be a set, but is a %r' % type(require_roles))
|
||||
raise TypeError(f'require_roles param should be a set, but is {type(require_roles)!r}')
|
||||
|
||||
if not isinstance(require_cap, str):
|
||||
raise TypeError(f'require_caps param should be a str, but is {type(require_cap)!r}')
|
||||
|
||||
if require_roles and require_cap:
|
||||
raise ValueError('either use require_roles or require_cap, but not both')
|
||||
|
||||
if require_all and not require_roles:
|
||||
raise ValueError('require_login(require_all=True) cannot be used with empty require_roles.')
|
||||
|
||||
def render_error() -> Response:
|
||||
if error_view is None:
|
||||
abort(403)
|
||||
resp: Response = error_view()
|
||||
resp.status_code = 403
|
||||
return resp
|
||||
|
||||
def decorator(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
if not user_matches_roles(require_roles, require_all):
|
||||
if g.current_user is None:
|
||||
# We don't need to log at a higher level, as this is very common.
|
||||
# Many browsers first try to see whether authentication is needed
|
||||
# at all, before sending the password.
|
||||
log.debug('Unauthenticated acces to %s attempted.', func)
|
||||
else:
|
||||
log.warning('User %s is authenticated, but does not have required roles %s to '
|
||||
'access %s', g.current_user['user_id'], require_roles, func)
|
||||
abort(403)
|
||||
import pillar.auth
|
||||
|
||||
current_user = pillar.auth.get_current_user()
|
||||
if current_user.is_anonymous:
|
||||
# We don't need to log at a higher level, as this is very common.
|
||||
# Many browsers first try to see whether authentication is needed
|
||||
# at all, before sending the password.
|
||||
log.debug('Unauthenticated access to %s attempted.', func)
|
||||
if redirect_to_login:
|
||||
# Redirect using a 303 See Other, since even a POST
|
||||
# request should cause a GET on the login page.
|
||||
return redirect(url_for('users.login', next=request.url), 303)
|
||||
return render_error()
|
||||
|
||||
if require_roles and not current_user.matches_roles(require_roles, require_all):
|
||||
log.info('User %s is authenticated, but does not have required roles %s to '
|
||||
'access %s', current_user.user_id, require_roles, func)
|
||||
return render_error()
|
||||
|
||||
if require_cap and not current_user.has_cap(require_cap):
|
||||
log.info('User %s is authenticated, but does not have required capability %s to '
|
||||
'access %s', current_user.user_id, require_cap, func)
|
||||
return render_error()
|
||||
|
||||
return func(*args, **kwargs)
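A hypothetical endpoint (not part of this change) showing how the new keyword-only arguments would be used; checking a capability instead of a role set is the direction this change pushes towards.

@blueprint.route('/subscription-report')
@authorization.require_login(require_cap='admin', redirect_to_login=True)
def subscription_report():
    ...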
@@ -326,14 +401,36 @@ def ab_testing(require_roles=set(),
|
||||
def user_has_role(role, user=None):
|
||||
"""Returns True iff the user is logged in and has the given role."""
|
||||
|
||||
if user is None:
|
||||
user = g.get('current_user')
|
||||
import pillar.auth
|
||||
|
||||
if user is None:
|
||||
user = pillar.auth.get_current_user()
|
||||
if user is not None and not isinstance(user, pillar.auth.UserClass):
|
||||
raise TypeError(f'pillar.auth.current_user should be instance of UserClass, '
|
||||
f'not {type(user)}')
|
||||
elif not isinstance(user, pillar.auth.UserClass):
|
||||
raise TypeError(f'user should be instance of UserClass, not {type(user)}')
|
||||
|
||||
if user.is_anonymous:
|
||||
return False
|
||||
|
||||
roles = user.get('roles') or ()
|
||||
return role in roles
|
||||
return user.has_role(role)
|
||||
|
||||
|
||||
def user_has_cap(capability: str, user=None) -> bool:
|
||||
"""Returns True iff the user is logged in and has the given capability."""
|
||||
|
||||
import pillar.auth
|
||||
|
||||
assert capability
|
||||
|
||||
if user is None:
|
||||
user = pillar.auth.get_current_user()
|
||||
|
||||
if not isinstance(user, pillar.auth.UserClass):
|
||||
raise TypeError(f'user should be instance of UserClass, not {type(user)}')
|
||||
|
||||
return user.has_cap(capability)
|
||||
|
||||
|
||||
def user_matches_roles(require_roles=set(),
|
||||
@@ -348,25 +445,16 @@ def user_matches_roles(require_roles=set(),
|
||||
returning True.
|
||||
"""
|
||||
|
||||
if not isinstance(require_roles, set):
|
||||
raise TypeError('require_roles param should be a set, but is a %r' % type(require_roles))
|
||||
import pillar.auth
|
||||
|
||||
if require_all and not require_roles:
|
||||
raise ValueError('require_login(require_all=True) cannot be used with empty require_roles.')
|
||||
user = pillar.auth.get_current_user()
|
||||
if not isinstance(user, pillar.auth.UserClass):
|
||||
raise TypeError(f'user should be instance of UserClass, not {type(user)}')
|
||||
|
||||
current_user = g.get('current_user')
|
||||
|
||||
if current_user is None:
|
||||
return False
|
||||
|
||||
intersection = require_roles.intersection(current_user['roles'])
|
||||
if require_all:
|
||||
return len(intersection) == len(require_roles)
|
||||
|
||||
return not bool(require_roles) or bool(intersection)
|
||||
return user.matches_roles(require_roles, require_all)
|
||||
|
||||
|
||||
def is_admin(user):
|
||||
"""Returns True iff the given user has the admin role."""
|
||||
"""Returns True iff the given user has the admin capability."""
|
||||
|
||||
return user_has_role(u'admin', user)
|
||||
return user_has_cap('admin', user)
|
||||
@@ -1,5 +1,7 @@
|
||||
import datetime
|
||||
from hashlib import md5
|
||||
import base64
|
||||
|
||||
from flask import current_app
|
||||
|
||||
|
||||
@@ -17,19 +19,20 @@ def hash_file_path(file_path, expiry_timestamp=None):
|
||||
if current_app.config['CDN_USE_URL_SIGNING']:
|
||||
|
||||
url_signing_key = current_app.config['CDN_URL_SIGNING_KEY']
|
||||
hash_string = domain_subfolder + file_path + url_signing_key
|
||||
to_hash = domain_subfolder + file_path + url_signing_key
|
||||
|
||||
if not expiry_timestamp:
|
||||
expiry_timestamp = datetime.datetime.now() + datetime.timedelta(hours=24)
|
||||
expiry_timestamp = expiry_timestamp.strftime('%s')
|
||||
|
||||
hash_string = expiry_timestamp + hash_string
|
||||
to_hash = expiry_timestamp + to_hash
|
||||
if isinstance(to_hash, str):
|
||||
to_hash = to_hash.encode()
|
||||
|
||||
expiry_timestamp = "," + str(expiry_timestamp)
|
||||
|
||||
hashed_file_path = md5(hash_string).digest().encode('base64')[:-1]
|
||||
hashed_file_path = hashed_file_path.replace('+', '-')
|
||||
hashed_file_path = hashed_file_path.replace('/', '_')
|
||||
hashed_file_path = base64.b64encode(md5(to_hash).digest())[:-1].decode()
|
||||
hashed_file_path = hashed_file_path.replace('+', '-').replace('/', '_')
|
||||
|
||||
asset_url = asset_url + \
|
||||
'?secure=' + \
|
||||
@@ -3,8 +3,6 @@ import os
|
||||
|
||||
from flask import current_app
|
||||
|
||||
from application import encoding_service_client
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -18,7 +16,7 @@ class Encoder:
|
||||
"""Create an encoding job. Return the backend used as well as an id.
|
||||
"""
|
||||
if current_app.config['ENCODING_BACKEND'] != 'zencoder' or \
|
||||
encoding_service_client is None:
|
||||
current_app.encoding_service_client is None:
|
||||
log.error('I can only work with Zencoder, check the config file.')
|
||||
return None
|
||||
|
||||
@@ -33,11 +31,14 @@ class Encoder:
|
||||
options = dict(notifications=current_app.config['ZENCODER_NOTIFICATIONS_URL'])
|
||||
|
||||
outputs = [{'format': v['format'],
|
||||
'url': os.path.join(storage_base, v['file_path'])}
|
||||
'url': os.path.join(storage_base, v['file_path']),
|
||||
'upscale': False,
|
||||
'size': '{width}x{height}'.format(**v),
|
||||
}
|
||||
for v in src_file['variations']]
|
||||
r = encoding_service_client.job.create(file_input,
|
||||
outputs=outputs,
|
||||
options=options)
|
||||
r = current_app.encoding_service_client.job.create(file_input,
|
||||
outputs=outputs,
|
||||
options=options)
|
||||
if r.code != 201:
|
||||
log.error('Error %i creating Zencoder job: %s', r.code, r.body)
|
||||
return None
|
||||
@@ -47,8 +48,10 @@ class Encoder:
|
||||
|
||||
@staticmethod
|
||||
def job_progress(job_id):
|
||||
if isinstance(encoding_service_client, Zencoder):
|
||||
r = encoding_service_client.job.progress(int(job_id))
|
||||
from zencoder import Zencoder
|
||||
|
||||
if isinstance(current_app.encoding_service_client, Zencoder):
|
||||
r = current_app.encoding_service_client.job.progress(int(job_id))
|
||||
return r.body
|
||||
else:
|
||||
return None
|
||||
@@ -1,47 +1,61 @@
|
||||
import os
|
||||
import json
|
||||
import typing
|
||||
|
||||
import os
|
||||
import pathlib
|
||||
import subprocess
|
||||
|
||||
from PIL import Image
|
||||
from flask import current_app
|
||||
|
||||
# Images with these modes will be thumbed to PNG, others to JPEG.
|
||||
MODES_FOR_PNG = {'RGBA', 'LA'}
|
||||
|
||||
def generate_local_thumbnails(name_base, src):
|
||||
|
||||
def generate_local_thumbnails(fp_base: str, src: pathlib.Path):
|
||||
"""Given a source image, use Pillow to generate thumbnails according to the
|
||||
application settings.
|
||||
|
||||
:param name_base: the thumbnail will get a field 'name': '{basename}-{thumbsize}.jpg'
|
||||
:type name_base: str
|
||||
:param fp_base: the thumbnail will get a field
|
||||
'file_path': '{fp_base}-{thumbsize}.{ext}'
|
||||
:param src: the path of the image to be thumbnailed
|
||||
:type src: str
|
||||
"""
|
||||
|
||||
thumbnail_settings = current_app.config['UPLOADS_LOCAL_STORAGE_THUMBNAILS']
|
||||
thumbnails = []
|
||||
|
||||
save_to_base, _ = os.path.splitext(src)
|
||||
name_base, _ = os.path.splitext(name_base)
|
||||
for size, settings in thumbnail_settings.items():
|
||||
im = Image.open(src)
|
||||
extra_args = {}
|
||||
|
||||
for size, settings in thumbnail_settings.iteritems():
|
||||
dst = '{0}-{1}{2}'.format(save_to_base, size, '.jpg')
|
||||
name = '{0}-{1}{2}'.format(name_base, size, '.jpg')
|
||||
# If the source image has transparency, save as PNG
|
||||
if im.mode in MODES_FOR_PNG:
|
||||
suffix = '.png'
|
||||
imformat = 'PNG'
|
||||
else:
|
||||
suffix = '.jpg'
|
||||
imformat = 'JPEG'
|
||||
extra_args = {'quality': 95}
|
||||
dst = src.with_name(f'{src.stem}-{size}{suffix}')
|
||||
|
||||
if settings['crop']:
|
||||
resize_and_crop(src, dst, settings['size'])
|
||||
width, height = settings['size']
|
||||
im = resize_and_crop(im, settings['size'])
|
||||
else:
|
||||
im = Image.open(src).convert('RGB')
|
||||
im.thumbnail(settings['size'])
|
||||
im.save(dst, "JPEG")
|
||||
width, height = im.size
|
||||
im.thumbnail(settings['size'], resample=Image.LANCZOS)
|
||||
width, height = im.size
|
||||
|
||||
if imformat == 'JPEG':
|
||||
im = im.convert('RGB')
|
||||
im.save(dst, format=imformat, optimize=True, **extra_args)
|
||||
|
||||
thumb_info = {'size': size,
|
||||
'file_path': name,
|
||||
'local_path': dst,
|
||||
'length': os.stat(dst).st_size,
|
||||
'file_path': f'{fp_base}-{size}{suffix}',
|
||||
'local_path': str(dst),
|
||||
'length': dst.stat().st_size,
|
||||
'width': width,
|
||||
'height': height,
|
||||
'md5': '',
|
||||
'content_type': 'image/jpeg'}
|
||||
'content_type': f'image/{imformat.lower()}'}
|
||||
|
||||
if size == 't':
|
||||
thumb_info['is_public'] = True
|
||||
@@ -51,63 +65,40 @@ def generate_local_thumbnails(name_base, src):
|
||||
return thumbnails
|
||||
|
||||
|
||||
def resize_and_crop(img_path, modified_path, size, crop_type='middle'):
|
||||
"""
|
||||
Resize and crop an image to fit the specified size. Thanks to:
|
||||
https://gist.github.com/sigilioso/2957026
|
||||
def resize_and_crop(img: Image, size: typing.Tuple[int, int]) -> Image:
|
||||
"""Resize and crop an image to fit the specified size.
|
||||
|
||||
args:
|
||||
img_path: path for the image to resize.
|
||||
modified_path: path to store the modified image.
|
||||
size: `(width, height)` tuple.
|
||||
crop_type: can be 'top', 'middle' or 'bottom', depending on this
|
||||
value, the image will be cropped getting the 'top/left', 'middle' or
|
||||
'bottom/right' of the image to fit the size.
|
||||
raises:
|
||||
Exception: if the file in img_path cannot be opened or there are problems
|
||||
to save the image.
|
||||
ValueError: if an invalid `crop_type` is provided.
|
||||
Thanks to: https://gist.github.com/sigilioso/2957026
|
||||
|
||||
:param img: opened PIL.Image to work on
|
||||
:param size: `(width, height)` tuple.
|
||||
"""
|
||||
# If height is higher we resize vertically, if not we resize horizontally
|
||||
img = Image.open(img_path).convert('RGB')
|
||||
# Get current and desired ratio for the images
|
||||
img_ratio = img.size[0] / float(img.size[1])
|
||||
ratio = size[0] / float(size[1])
|
||||
cur_w, cur_h = img.size # current
|
||||
img_ratio = cur_w / cur_h
|
||||
|
||||
w, h = size # desired
|
||||
ratio = w / h
|
||||
|
||||
# The image is scaled/cropped vertically or horizontally depending on the ratio
|
||||
if ratio > img_ratio:
|
||||
img = img.resize((size[0], int(round(size[0] * img.size[1] / img.size[0]))),
|
||||
Image.ANTIALIAS)
|
||||
# Crop in the top, middle or bottom
|
||||
if crop_type == 'top':
|
||||
box = (0, 0, img.size[0], size[1])
|
||||
elif crop_type == 'middle':
|
||||
box = (0, int(round((img.size[1] - size[1]) / 2)), img.size[0],
|
||||
int(round((img.size[1] + size[1]) / 2)))
|
||||
elif crop_type == 'bottom':
|
||||
box = (0, img.size[1] - size[1], img.size[0], img.size[1])
|
||||
else:
|
||||
raise ValueError('ERROR: invalid value for crop_type')
|
||||
uncropped_h = (w * cur_h) // cur_w
|
||||
img = img.resize((w, uncropped_h), Image.ANTIALIAS)
|
||||
box = (0, (uncropped_h - h) // 2,
|
||||
w, (uncropped_h + h) // 2)
|
||||
img = img.crop(box)
|
||||
elif ratio < img_ratio:
|
||||
img = img.resize((int(round(size[1] * img.size[0] / img.size[1])), size[1]),
|
||||
Image.ANTIALIAS)
|
||||
# Crop in the top, middle or bottom
|
||||
if crop_type == 'top':
|
||||
box = (0, 0, size[0], img.size[1])
|
||||
elif crop_type == 'middle':
|
||||
box = (int(round((img.size[0] - size[0]) / 2)), 0,
|
||||
int(round((img.size[0] + size[0]) / 2)), img.size[1])
|
||||
elif crop_type == 'bottom':
|
||||
box = (img.size[0] - size[0], 0, img.size[0], img.size[1])
|
||||
else:
|
||||
raise ValueError('ERROR: invalid value for crop_type')
|
||||
uncropped_w = (h * cur_w) // cur_h
|
||||
img = img.resize((uncropped_w, h), Image.ANTIALIAS)
|
||||
box = ((uncropped_w - w) // 2, 0,
|
||||
(uncropped_w + w) // 2, h)
|
||||
img = img.crop(box)
|
||||
else:
|
||||
img = img.resize((size[0], size[1]),
|
||||
Image.ANTIALIAS)
|
||||
img = img.resize((w, h), Image.ANTIALIAS)
|
||||
|
||||
# If the scale is the same, we do not need to crop
|
||||
img.save(modified_path, "JPEG")
|
||||
return img
|
||||
|
||||
|
||||
def get_video_data(filepath):
|
||||
@@ -143,7 +134,7 @@ def get_video_data(filepath):
|
||||
res_y=video_stream['height'],
|
||||
)
|
||||
if video_stream['sample_aspect_ratio'] != '1:1':
|
||||
print '[warning] Pixel aspect ratio is not square!'
|
||||
print('[warning] Pixel aspect ratio is not square!')
|
||||
|
||||
return outdata
|
||||
|
||||
@@ -190,14 +181,14 @@ def ffmpeg_encode(src, format, res_y=720):
|
||||
dst = os.path.splitext(src)
|
||||
dst = "{0}-{1}p.{2}".format(dst[0], res_y, format)
|
||||
args.append(dst)
|
||||
print "Encoding {0} to {1}".format(src, format)
|
||||
print("Encoding {0} to {1}".format(src, format))
|
||||
returncode = subprocess.call([current_app.config['BIN_FFMPEG']] + args)
|
||||
if returncode == 0:
|
||||
print "Successfully encoded {0}".format(dst)
|
||||
print("Successfully encoded {0}".format(dst))
|
||||
else:
|
||||
print "Error during encode"
|
||||
print "Code: {0}".format(returncode)
|
||||
print "Command: {0}".format(current_app.config['BIN_FFMPEG'] + " " + " ".join(args))
|
||||
print("Error during encode")
|
||||
print("Code: {0}".format(returncode))
|
||||
print("Command: {0}".format(current_app.config['BIN_FFMPEG'] + " " + " ".join(args)))
|
||||
dst = None
|
||||
# return path of the encoded video
|
||||
return dst
|
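Note: a short usage sketch of the reworked resize_and_crop() above, which now takes an opened PIL image and returns the scaled/cropped copy instead of writing a file. The input path and target size are made up, and the function is assumed to be importable from this module.

import pathlib

from PIL import Image

src = pathlib.Path('/tmp/render.png')        # hypothetical source image
im = Image.open(src)
thumb = resize_and_crop(im, (160, 90))       # scale, then centre-crop to 160x90
thumb.convert('RGB').save(src.with_name('render-160x90.jpg'), 'JPEG')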
||||
86
pillar/api/utils/node_type_utils.py
Normal file
@@ -0,0 +1,86 @@
|
||||
import copy
|
||||
import logging
|
||||
import types
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def assign_permissions(project, node_types, permission_callback):
|
||||
"""Generator, yields the node types with certain permissions set.
|
||||
|
||||
The permission_callback is called for each node type, and each user
|
||||
and group permission in the project, and should return the appropriate
|
||||
extra permissions for that node type.
|
||||
|
||||
Yields copies of the given node types with new permissions.
|
||||
|
||||
permission_callback(node_type, ugw, ident, proj_methods) is called, where
|
||||
- 'node_type' is the node type dict
|
||||
- 'ugw' is either 'user', 'group', or 'world',
|
||||
- 'ident' is the group or user ID, or None when ugw is 'world',
|
||||
- 'proj_methods' is the list of already-allowed project methods.
|
||||
"""
|
||||
|
||||
proj_perms = project['permissions']
|
||||
|
||||
for nt in node_types:
|
||||
permissions = {}
|
||||
|
||||
for key in ('users', 'groups'):
|
||||
perms = proj_perms.get(key)
|
||||
if not perms:
|
||||
continue
|
||||
|
||||
singular = key.rstrip('s')
|
||||
for perm in perms:
|
||||
assert isinstance(perm, dict), 'perm should be dict, but is %r' % perm
|
||||
ident = perm[singular] # group or user ID.
|
||||
|
||||
methods_to_allow = permission_callback(nt, singular, ident, perm['methods'])
|
||||
if not methods_to_allow:
|
||||
continue
|
||||
|
||||
permissions.setdefault(key, []).append(
|
||||
{singular: ident,
|
||||
'methods': methods_to_allow}
|
||||
)
|
||||
|
||||
# World permissions are simpler.
|
||||
world_methods_to_allow = permission_callback(nt, 'world', None,
|
||||
permissions.get('world', []))
|
||||
if world_methods_to_allow:
|
||||
permissions.setdefault('world', []).extend(world_methods_to_allow)
|
||||
|
||||
node_type = copy.deepcopy(nt)
|
||||
if permissions:
|
||||
node_type['permissions'] = permissions
|
||||
yield node_type
|
||||
|
||||
|
||||
def add_to_project(project, node_types, replace_existing):
|
||||
"""Adds the given node types to the project.
|
||||
|
||||
Overwrites any existing by the same name when replace_existing=True.
|
||||
"""
|
||||
|
||||
assert isinstance(project, dict)
|
||||
assert isinstance(node_types, (list, set, frozenset, tuple, types.GeneratorType)), \
|
||||
'node_types is of wrong type %s' % type(node_types)
|
||||
|
||||
project_id = project['_id']
|
||||
|
||||
for node_type in node_types:
|
||||
found = [nt for nt in project['node_types']
|
||||
if nt['name'] == node_type['name']]
|
||||
if found:
|
||||
assert len(found) == 1, 'node type name should be unique (found %ix)' % len(found)
|
||||
|
||||
# TODO: validate that the node type contains all the properties Attract needs.
|
||||
if replace_existing:
|
||||
log.info('Replacing existing node type %s on project %s',
|
||||
node_type['name'], project_id)
|
||||
project['node_types'].remove(found[0])
|
||||
else:
|
||||
continue
|
||||
|
||||
project['node_types'].append(node_type)
|
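Note: a minimal sketch of a permission_callback for assign_permissions() above. The rule it encodes (mirror the project's PUT right onto every node type, never widen world access) is invented for illustration, and 'project' and 'node_type_asset' are assumed to be in scope.

def mirror_put_permission(node_type, ugw, ident, proj_methods):
    # Called once per user/group/world entry in the project's permissions.
    if ugw == 'world':
        return []                        # never widen world access
    return ['PUT'] if 'PUT' in proj_methods else []


node_types = list(assign_permissions(project, [node_type_asset],
                                     mirror_put_permission))
add_to_project(project, node_types, replace_existing=True)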
||||
87
pillar/api/utils/rating.py
Normal file
@@ -0,0 +1,87 @@
|
||||
# These functions come from Reddit
|
||||
# https://github.com/reddit/reddit/blob/master/r2/r2/lib/db/_sorts.pyx
|
||||
|
||||
# Additional resources
|
||||
# http://www.redditblog.com/2009/10/reddits-new-comment-sorting-system.html
|
||||
# http://www.evanmiller.org/how-not-to-sort-by-average-rating.html
|
||||
# http://amix.dk/blog/post/19588
|
||||
|
||||
from datetime import datetime, timezone
|
||||
from math import log
|
||||
from math import sqrt
|
||||
|
||||
epoch = datetime(1970, 1, 1, 0, 0, 0, 0, timezone.utc)
|
||||
|
||||
|
||||
def epoch_seconds(date):
|
||||
"""Returns the number of seconds from the epoch to date."""
|
||||
td = date - epoch
|
||||
return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)
|
||||
|
||||
|
||||
def score(ups, downs):
|
||||
return ups - downs
|
||||
|
||||
|
||||
def hot(ups, downs, date):
|
||||
"""The hot formula. Reddit's hot ranking uses the logarithm function to
|
||||
weight the first votes higher than the rest.
|
||||
The first 10 upvotes have the same weight as the next 100 upvotes which
|
||||
have the same weight as the next 1000, etc.
|
||||
|
||||
Dillo authors: we modified the formula to give more weight to negative
|
||||
votes when an entry is controversial.
|
||||
|
||||
TODO: make this function more dynamic so that different defaults can be
|
||||
specified depending on the item that is being rated.
|
||||
"""
|
||||
|
||||
s = score(ups, downs)
|
||||
order = log(max(abs(s), 1), 10)
|
||||
sign = 1 if s > 0 else -1 if s < 0 else 0
|
||||
seconds = epoch_seconds(date) - 1134028003
|
||||
base_hot = round(sign * order + seconds / 45000, 7)
|
||||
|
||||
if downs > 1:
|
||||
rating_delta = 100 * (downs - ups) / downs
|
||||
if rating_delta < 25:
|
||||
# The post is controversial
|
||||
return base_hot
|
||||
base_hot = base_hot - (downs * 6)
|
||||
|
||||
return base_hot
|
||||
|
||||
|
||||
def _confidence(ups, downs):
|
||||
n = ups + downs
|
||||
|
||||
if n == 0:
|
||||
return 0
|
||||
|
||||
z = 1.0  # 1.0 = 85%, 1.6 = 95%
|
||||
phat = float(ups) / n
|
||||
return sqrt(phat+z*z/(2*n)-z*((phat*(1-phat)+z*z/(4*n))/n))/(1+z*z/n)
|
||||
|
||||
|
||||
def confidence(ups, downs):
|
||||
if ups + downs == 0:
|
||||
return 0
|
||||
else:
|
||||
return _confidence(ups, downs)
|
||||
|
||||
|
||||
def update_hot(document):
|
||||
"""Update the hotness of a document given its current ratings.
|
||||
|
||||
We expect the document to implement the ratings_embedded_schema in
|
||||
a 'ratings' property.
|
||||
"""
|
||||
|
||||
dt = document['_created']
|
||||
dt = dt.replace(tzinfo=timezone.utc)
|
||||
|
||||
document['properties']['ratings']['hot'] = hot(
|
||||
document['properties']['ratings']['positive'],
|
||||
document['properties']['ratings']['negative'],
|
||||
dt,
|
||||
)
|
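Note: a small worked example of the ranking helpers above; vote counts and timestamp are invented, and the post time must be timezone-aware because epoch_seconds() subtracts a UTC epoch.

from datetime import datetime, timezone

posted = datetime(2016, 5, 1, 12, 0, 0, tzinfo=timezone.utc)

print(hot(10, 2, posted))             # mildly positive entry
print(hot(10, 40, posted))            # heavily downvoted: penalised by downs * 6
print(round(confidence(10, 2), 3))    # vote-confidence score, higher is better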
||||
1
pillar/api/utils/storage.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Utility for managing storage backends and files."""
|
||||
@@ -1,268 +0,0 @@
|
||||
import logging.config
|
||||
import os
|
||||
import subprocess
|
||||
import tempfile
|
||||
from bson import ObjectId
|
||||
from datetime import datetime
|
||||
from flask import g
|
||||
from flask import request
|
||||
from flask import abort
|
||||
from eve import Eve
|
||||
|
||||
from eve.auth import TokenAuth
|
||||
from eve.io.mongo import Validator
|
||||
|
||||
from application.utils import project_get_node_type
|
||||
|
||||
RFC1123_DATE_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
|
||||
|
||||
|
||||
class ValidateCustomFields(Validator):
|
||||
def convert_properties(self, properties, node_schema):
|
||||
for prop in node_schema:
|
||||
if not prop in properties:
|
||||
continue
|
||||
schema_prop = node_schema[prop]
|
||||
prop_type = schema_prop['type']
|
||||
if prop_type == 'dict':
|
||||
properties[prop] = self.convert_properties(
|
||||
properties[prop], schema_prop['schema'])
|
||||
if prop_type == 'list':
|
||||
if properties[prop] in ['', '[]']:
|
||||
properties[prop] = []
|
||||
for k, val in enumerate(properties[prop]):
|
||||
if not 'schema' in schema_prop:
|
||||
continue
|
||||
item_schema = {'item': schema_prop['schema']}
|
||||
item_prop = {'item': properties[prop][k]}
|
||||
properties[prop][k] = self.convert_properties(
|
||||
item_prop, item_schema)['item']
|
||||
# Convert datetime string to RFC1123 datetime
|
||||
elif prop_type == 'datetime':
|
||||
prop_val = properties[prop]
|
||||
properties[prop] = datetime.strptime(prop_val, RFC1123_DATE_FORMAT)
|
||||
elif prop_type == 'objectid':
|
||||
prop_val = properties[prop]
|
||||
if prop_val:
|
||||
properties[prop] = ObjectId(prop_val)
|
||||
else:
|
||||
properties[prop] = None
|
||||
|
||||
return properties
|
||||
|
||||
def _validate_valid_properties(self, valid_properties, field, value):
|
||||
projects_collection = app.data.driver.db['projects']
|
||||
lookup = {'_id': ObjectId(self.document['project'])}
|
||||
|
||||
project = projects_collection.find_one(lookup, {
|
||||
'node_types.name': 1,
|
||||
'node_types.dyn_schema': 1,
|
||||
})
|
||||
if project is None:
|
||||
log.warning('Unknown project %s, declared by node %s',
|
||||
lookup, self.document.get('_id'))
|
||||
self._error(field, 'Unknown project')
|
||||
return False
|
||||
|
||||
node_type_name = self.document['node_type']
|
||||
node_type = project_get_node_type(project, node_type_name)
|
||||
if node_type is None:
|
||||
log.warning('Project %s has no node type %s, declared by node %s',
|
||||
project, node_type_name, self.document.get('_id'))
|
||||
self._error(field, 'Unknown node type')
|
||||
return False
|
||||
|
||||
try:
|
||||
value = self.convert_properties(value, node_type['dyn_schema'])
|
||||
except Exception as e:
|
||||
log.warning("Error converting form properties", exc_info=True)
|
||||
|
||||
v = Validator(node_type['dyn_schema'])
|
||||
val = v.validate(value)
|
||||
|
||||
if val:
|
||||
return True
|
||||
|
||||
log.warning('Error validating properties for node %s: %s', self.document, v.errors)
|
||||
self._error(field, "Error validating properties")
|
||||
|
||||
|
||||
# We specify a settings.py file because when running on wsgi we can't detect it
|
||||
# automatically. The default path (which works in Docker) can be overridden with
|
||||
# an env variable.
|
||||
settings_path = os.environ.get(
|
||||
'EVE_SETTINGS', '/data/git/pillar/pillar/settings.py')
|
||||
app = Eve(settings=settings_path, validator=ValidateCustomFields)
|
||||
|
||||
# Load configuration from three different sources, to make it easy to override
|
||||
# settings with secrets, as well as for development & testing.
|
||||
app_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
|
||||
app.config.from_pyfile(os.path.join(app_root, 'config.py'), silent=False)
|
||||
app.config.from_pyfile(os.path.join(app_root, 'config_local.py'), silent=True)
|
||||
from_envvar = os.environ.get('PILLAR_CONFIG')
|
||||
if from_envvar:
|
||||
# Don't use from_envvar, as we want different behaviour. If the envvar
|
||||
# is not set, it's fine (i.e. silent=True), but if it is set and the
|
||||
# configfile doesn't exist, it should error out (i.e. silent=False).
|
||||
app.config.from_pyfile(from_envvar, silent=False)
|
||||
|
||||
# Set the TMP environment variable to manage where uploads are stored.
|
||||
# These are all used by tempfile.mkstemp(), but we don't know in which
|
||||
# order. As such, we remove all used variables but the one we set.
|
||||
tempfile.tempdir = app.config['STORAGE_DIR']
|
||||
os.environ['TMP'] = app.config['STORAGE_DIR']
|
||||
os.environ.pop('TEMP', None)
|
||||
os.environ.pop('TMPDIR', None)
|
||||
|
||||
|
||||
# Configure logging
|
||||
logging.config.dictConfig(app.config['LOGGING'])
|
||||
log = logging.getLogger(__name__)
|
||||
if app.config['DEBUG']:
|
||||
log.info('Pillar starting, debug=%s', app.config['DEBUG'])
|
||||
|
||||
# Get the Git hash
|
||||
try:
|
||||
git_cmd = ['git', '-C', app_root, 'describe', '--always']
|
||||
description = subprocess.check_output(git_cmd)
|
||||
app.config['GIT_REVISION'] = description.strip()
|
||||
except (subprocess.CalledProcessError, OSError) as ex:
|
||||
log.warning('Unable to run "git describe" to get git revision: %s', ex)
|
||||
app.config['GIT_REVISION'] = 'unknown'
|
||||
log.info('Git revision %r', app.config['GIT_REVISION'])
|
||||
|
||||
# Configure Bugsnag
|
||||
if not app.config.get('TESTING') and app.config.get('BUGSNAG_API_KEY'):
|
||||
import bugsnag
|
||||
import bugsnag.flask
|
||||
import bugsnag.handlers
|
||||
|
||||
bugsnag.configure(
|
||||
api_key=app.config['BUGSNAG_API_KEY'],
|
||||
project_root="/data/git/pillar/pillar",
|
||||
)
|
||||
bugsnag.flask.handle_exceptions(app)
|
||||
|
||||
bs_handler = bugsnag.handlers.BugsnagHandler()
|
||||
bs_handler.setLevel(logging.ERROR)
|
||||
log.addHandler(bs_handler)
|
||||
else:
|
||||
log.info('Bugsnag NOT configured.')
|
||||
|
||||
# Google Cloud project
|
||||
try:
|
||||
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = \
|
||||
app.config['GCLOUD_APP_CREDENTIALS']
|
||||
except KeyError:
|
||||
raise SystemExit('GCLOUD_APP_CREDENTIALS configuration is missing')
|
||||
|
||||
# Storage backend (GCS)
|
||||
try:
|
||||
os.environ['GCLOUD_PROJECT'] = app.config['GCLOUD_PROJECT']
|
||||
except KeyError:
|
||||
raise SystemExit('GCLOUD_PROJECT configuration value is missing')
|
||||
|
||||
# Algolia search
|
||||
if app.config['SEARCH_BACKEND'] == 'algolia':
|
||||
from algoliasearch import algoliasearch
|
||||
|
||||
client = algoliasearch.Client(
|
||||
app.config['ALGOLIA_USER'],
|
||||
app.config['ALGOLIA_API_KEY'])
|
||||
algolia_index_users = client.init_index(app.config['ALGOLIA_INDEX_USERS'])
|
||||
algolia_index_nodes = client.init_index(app.config['ALGOLIA_INDEX_NODES'])
|
||||
else:
|
||||
algolia_index_users = None
|
||||
algolia_index_nodes = None
|
||||
|
||||
# Encoding backend
|
||||
if app.config['ENCODING_BACKEND'] == 'zencoder':
|
||||
from zencoder import Zencoder
|
||||
encoding_service_client = Zencoder(app.config['ZENCODER_API_KEY'])
|
||||
else:
|
||||
encoding_service_client = None
|
||||
|
||||
from utils.authentication import validate_token
|
||||
from utils.authorization import check_permissions
|
||||
from utils.activities import notification_parse
|
||||
from modules.projects import before_inserting_projects
|
||||
from modules.projects import after_inserting_projects
|
||||
|
||||
|
||||
@app.before_request
|
||||
def validate_token_at_every_request():
|
||||
validate_token()
|
||||
|
||||
|
||||
def before_returning_item_notifications(response):
|
||||
if request.args.get('parse'):
|
||||
notification_parse(response)
|
||||
|
||||
|
||||
def before_returning_resource_notifications(response):
|
||||
for item in response['_items']:
|
||||
if request.args.get('parse'):
|
||||
notification_parse(item)
|
||||
|
||||
|
||||
app.on_fetched_item_notifications += before_returning_item_notifications
|
||||
app.on_fetched_resource_notifications += before_returning_resource_notifications
|
||||
|
||||
|
||||
@app.before_first_request
|
||||
def setup_db_indices():
|
||||
"""Adds missing database indices.
|
||||
|
||||
This does NOT drop and recreate existing indices,
|
||||
nor does it reconfigure existing indices.
|
||||
If you want that, drop them manually first.
|
||||
"""
|
||||
|
||||
log.debug('Adding missing database indices.')
|
||||
|
||||
import pymongo
|
||||
|
||||
db = app.data.driver.db
|
||||
|
||||
coll = db['tokens']
|
||||
coll.create_index([('user', pymongo.ASCENDING)])
|
||||
coll.create_index([('token', pymongo.ASCENDING)])
|
||||
|
||||
coll = db['notifications']
|
||||
coll.create_index([('user', pymongo.ASCENDING)])
|
||||
|
||||
coll = db['activities-subscriptions']
|
||||
coll.create_index([('context_object', pymongo.ASCENDING)])
|
||||
|
||||
coll = db['nodes']
|
||||
# This index is used for queries on project, and for queries on
|
||||
# the combination (project, node type).
|
||||
coll.create_index([('project', pymongo.ASCENDING),
|
||||
('node_type', pymongo.ASCENDING)])
|
||||
coll.create_index([('parent', pymongo.ASCENDING)])
|
||||
coll.create_index([('short_code', pymongo.ASCENDING)],
|
||||
sparse=True, unique=True)
|
||||
|
||||
|
||||
# The encoding module (receive notification and report progress)
|
||||
from modules.encoding import encoding
|
||||
from modules.blender_id import blender_id
|
||||
from modules import projects
|
||||
from modules import local_auth
|
||||
from modules import file_storage
|
||||
from modules import users
|
||||
from modules import nodes
|
||||
from modules import latest
|
||||
from modules import blender_cloud
|
||||
from modules import service
|
||||
|
||||
app.register_blueprint(encoding, url_prefix='/encoding')
|
||||
app.register_blueprint(blender_id, url_prefix='/blender_id')
|
||||
projects.setup_app(app, url_prefix='/p')
|
||||
local_auth.setup_app(app, url_prefix='/auth')
|
||||
file_storage.setup_app(app, url_prefix='/storage')
|
||||
latest.setup_app(app, url_prefix='/latest')
|
||||
blender_cloud.setup_app(app, url_prefix='/bcloud')
|
||||
users.setup_app(app, url_prefix='/users')
|
||||
service.setup_app(app, url_prefix='/service')
|
||||
nodes.setup_app(app, url_prefix='/nodes')
|
||||
@@ -1,240 +0,0 @@
|
||||
"""Blender ID subclient endpoint.
|
||||
|
||||
Also contains functionality for other parts of Pillar to perform communication
|
||||
with Blender ID.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import datetime
|
||||
|
||||
from bson import tz_util
|
||||
import requests
|
||||
from requests.adapters import HTTPAdapter
|
||||
from flask import Blueprint, request, current_app, abort, jsonify
|
||||
from eve.methods.post import post_internal
|
||||
from eve.methods.put import put_internal
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
|
||||
from application.utils import authentication, remove_private_keys
|
||||
|
||||
blender_id = Blueprint('blender_id', __name__)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@blender_id.route('/store_scst', methods=['POST'])
|
||||
def store_subclient_token():
|
||||
"""Verifies & stores a user's subclient-specific token."""
|
||||
|
||||
user_id = request.form['user_id'] # User ID at BlenderID
|
||||
subclient_id = request.form['subclient_id']
|
||||
scst = request.form['token']
|
||||
|
||||
db_user, status = validate_create_user(user_id, scst, subclient_id)
|
||||
|
||||
if db_user is None:
|
||||
log.warning('Unable to verify subclient token with Blender ID.')
|
||||
return jsonify({'status': 'fail',
|
||||
'error': 'BLENDER ID ERROR'}), 403
|
||||
|
||||
return jsonify({'status': 'success',
|
||||
'subclient_user_id': str(db_user['_id'])}), status
|
||||
|
||||
|
||||
def blender_id_endpoint():
|
||||
"""Gets the endpoint for the authentication API. If the env variable
|
||||
is defined, it's possible to override the (default) production address.
|
||||
"""
|
||||
return current_app.config['BLENDER_ID_ENDPOINT'].rstrip('/')
|
||||
|
||||
|
||||
def validate_create_user(blender_id_user_id, token, oauth_subclient_id):
|
||||
"""Validates a user against Blender ID, creating the user in our database.
|
||||
|
||||
:param blender_id_user_id: the user ID at the BlenderID server.
|
||||
:param token: the OAuth access token.
|
||||
:param oauth_subclient_id: the subclient ID, or empty string if not a subclient.
|
||||
:returns: (user in MongoDB, HTTP status 200 or 201)
|
||||
"""
|
||||
|
||||
# Verify with Blender ID
|
||||
log.debug('Storing token for BlenderID user %s', blender_id_user_id)
|
||||
user_info, token_expiry = validate_token(blender_id_user_id, token, oauth_subclient_id)
|
||||
|
||||
if user_info is None:
|
||||
log.debug('Unable to verify token with Blender ID.')
|
||||
return None, None
|
||||
|
||||
# Blender ID can be queried without user ID, and will always include the
|
||||
# correct user ID in its response.
|
||||
log.debug('Obtained user info from Blender ID: %s', user_info)
|
||||
blender_id_user_id = user_info['id']
|
||||
|
||||
# Store the user info in MongoDB.
|
||||
db_user = find_user_in_db(blender_id_user_id, user_info)
|
||||
db_id, status = upsert_user(db_user, blender_id_user_id)
|
||||
|
||||
# Store the token in MongoDB.
|
||||
authentication.store_token(db_id, token, token_expiry, oauth_subclient_id)
|
||||
|
||||
return db_user, status
|
||||
|
||||
|
||||
def upsert_user(db_user, blender_id_user_id):
|
||||
"""Inserts/updates the user in MongoDB.
|
||||
|
||||
Retries a few times when there are uniqueness issues in the username.
|
||||
|
||||
:returns: the user's database ID and the status of the PUT/POST.
|
||||
The status is 201 on insert, and 200 on update.
|
||||
:type: (ObjectId, int)
|
||||
"""
|
||||
|
||||
if u'subscriber' in db_user.get('groups', []):
|
||||
log.error('Non-ObjectID string found in user.groups: %s', db_user)
|
||||
raise wz_exceptions.InternalServerError('Non-ObjectID string found in user.groups: %s' % db_user)
|
||||
|
||||
r = {}
|
||||
for retry in range(5):
|
||||
if '_id' in db_user:
|
||||
# Update the existing user
|
||||
attempted_eve_method = 'PUT'
|
||||
db_id = db_user['_id']
|
||||
r, _, _, status = put_internal('users', remove_private_keys(db_user),
|
||||
_id=db_id)
|
||||
if status == 422:
|
||||
log.error('Status %i trying to PUT user %s with values %s, should not happen! %s',
|
||||
status, db_id, remove_private_keys(db_user), r)
|
||||
else:
|
||||
# Create a new user, retry for non-unique usernames.
|
||||
attempted_eve_method = 'POST'
|
||||
r, _, _, status = post_internal('users', db_user)
|
||||
|
||||
if status not in {200, 201}:
|
||||
log.error('Status %i trying to create user for BlenderID %s with values %s: %s',
|
||||
status, blender_id_user_id, db_user, r)
|
||||
raise wz_exceptions.InternalServerError()
|
||||
|
||||
db_id = r['_id']
|
||||
db_user.update(r) # update with database/eve-generated fields.
|
||||
|
||||
if status == 422:
|
||||
# Probably non-unique username, so retry a few times with different usernames.
|
||||
log.info('Error creating new user: %s', r)
|
||||
username_issue = r.get('_issues', {}).get(u'username', '')
|
||||
if u'not unique' in username_issue:
|
||||
# Retry
|
||||
db_user['username'] = authentication.make_unique_username(db_user['email'])
|
||||
continue
|
||||
|
||||
# Saving was successful, or at least didn't break on a non-unique username.
|
||||
break
|
||||
else:
|
||||
log.error('Unable to create new user %s: %s', db_user, r)
|
||||
raise wz_exceptions.InternalServerError()
|
||||
|
||||
if status not in (200, 201):
|
||||
log.error('internal response from %s to Eve: %r %r', attempted_eve_method, status, r)
|
||||
raise wz_exceptions.InternalServerError()
|
||||
|
||||
return db_id, status
|
||||
|
||||
|
||||
def validate_token(user_id, token, oauth_subclient_id):
|
||||
"""Verifies a subclient token with Blender ID.
|
||||
|
||||
:returns: (user info, token expiry) on success, or (None, None) on failure.
|
||||
The user information from Blender ID is returned as dict
|
||||
{'email': 'a@b', 'full_name': 'AB'}, token expiry as a datetime.datetime.
|
||||
:rtype: (dict, datetime.datetime)
|
||||
"""
|
||||
|
||||
our_subclient_id = current_app.config['BLENDER_ID_SUBCLIENT_ID']
|
||||
|
||||
# Check that IF there is a subclient ID given, it is the correct one.
|
||||
if oauth_subclient_id and our_subclient_id != oauth_subclient_id:
|
||||
log.warning('validate_token(): BlenderID user %s is trying to use the wrong subclient '
|
||||
'ID %r; treating as invalid login.', user_id, oauth_subclient_id)
|
||||
return None, None
|
||||
|
||||
# Validate against BlenderID.
|
||||
log.debug('Validating subclient token for BlenderID user %r, subclient %r', user_id,
|
||||
oauth_subclient_id)
|
||||
payload = {'user_id': user_id,
|
||||
'token': token}
|
||||
if oauth_subclient_id:
|
||||
payload['subclient_id'] = oauth_subclient_id
|
||||
|
||||
url = '{0}/u/validate_token'.format(blender_id_endpoint())
|
||||
log.debug('POSTing to %r', url)
|
||||
|
||||
# Retry a few times when POSTing to BlenderID fails.
|
||||
# Source: http://stackoverflow.com/a/15431343/875379
|
||||
s = requests.Session()
|
||||
s.mount(blender_id_endpoint(), HTTPAdapter(max_retries=5))
|
||||
|
||||
# POST to Blender ID, handling errors as negative verification results.
|
||||
try:
|
||||
r = s.post(url, data=payload, timeout=5,
|
||||
verify=current_app.config['TLS_CERT_FILE'])
|
||||
except requests.exceptions.ConnectionError as e:
|
||||
log.error('Connection error trying to POST to %s, handling as invalid token.', url)
|
||||
return None, None
|
||||
|
||||
if r.status_code != 200:
|
||||
log.debug('Token %s invalid, HTTP status %i returned', token, r.status_code)
|
||||
return None, None
|
||||
|
||||
resp = r.json()
|
||||
if resp['status'] != 'success':
|
||||
log.warning('Failed response from %s: %s', url, resp)
|
||||
return None, None
|
||||
|
||||
expires = _compute_token_expiry(resp['token_expires'])
|
||||
|
||||
return resp['user'], expires
|
||||
|
||||
|
||||
def _compute_token_expiry(token_expires_string):
|
||||
"""Computes token expiry based on current time and BlenderID expiry.
|
||||
|
||||
Expires our side of the token when either the BlenderID token expires,
|
||||
or in one hour. The latter case is to ensure we periodically verify
|
||||
the token.
|
||||
"""
|
||||
|
||||
date_format = current_app.config['RFC1123_DATE_FORMAT']
|
||||
blid_expiry = datetime.datetime.strptime(token_expires_string, date_format)
|
||||
blid_expiry = blid_expiry.replace(tzinfo=tz_util.utc)
|
||||
our_expiry = datetime.datetime.now(tz=tz_util.utc) + datetime.timedelta(hours=1)
|
||||
|
||||
return min(blid_expiry, our_expiry)
|
||||
|
||||
|
||||
def find_user_in_db(blender_id_user_id, user_info):
|
||||
"""Find the user in our database, creating/updating the returned document where needed.
|
||||
|
||||
Does NOT update the user in the database.
|
||||
"""
|
||||
|
||||
users = current_app.data.driver.db['users']
|
||||
|
||||
query = {'auth': {'$elemMatch': {'user_id': str(blender_id_user_id),
|
||||
'provider': 'blender-id'}}}
|
||||
log.debug('Querying: %s', query)
|
||||
db_user = users.find_one(query)
|
||||
|
||||
if db_user:
|
||||
log.debug('User blender_id_user_id=%r already in our database, '
|
||||
'updating with info from Blender ID.', blender_id_user_id)
|
||||
db_user['email'] = user_info['email']
|
||||
else:
|
||||
log.debug('User %r not yet in our database, create a new one.', blender_id_user_id)
|
||||
db_user = authentication.create_new_user_document(
|
||||
email=user_info['email'],
|
||||
user_id=blender_id_user_id,
|
||||
username=user_info['full_name'])
|
||||
db_user['username'] = authentication.make_unique_username(user_info['email'])
|
||||
if not db_user['full_name']:
|
||||
db_user['full_name'] = db_user['username']
|
||||
|
||||
return db_user
|
||||
@@ -1,123 +0,0 @@
|
||||
import itertools
|
||||
|
||||
import pymongo
|
||||
from flask import Blueprint, current_app
|
||||
|
||||
from application.utils import jsonify
|
||||
|
||||
blueprint = Blueprint('latest', __name__)
|
||||
|
||||
|
||||
def keep_fetching(collection, db_filter, projection, sort, py_filter, batch_size=12):
|
||||
"""Yields results for which py_filter returns True"""
|
||||
|
||||
projection['_deleted'] = 1
|
||||
curs = collection.find(db_filter, projection).sort(sort)
|
||||
curs.batch_size(batch_size)
|
||||
|
||||
for doc in curs:
|
||||
if doc.get('_deleted'):
|
||||
continue
|
||||
doc.pop('_deleted', None)
|
||||
if py_filter(doc):
|
||||
yield doc
|
||||
|
||||
|
||||
def latest_nodes(db_filter, projection, py_filter, limit):
|
||||
nodes = current_app.data.driver.db['nodes']
|
||||
|
||||
proj = {
|
||||
'_created': 1,
|
||||
'_updated': 1,
|
||||
}
|
||||
proj.update(projection)
|
||||
|
||||
latest = keep_fetching(nodes, db_filter, proj,
|
||||
[('_created', pymongo.DESCENDING)],
|
||||
py_filter, limit)
|
||||
|
||||
result = list(itertools.islice(latest, limit))
|
||||
return result
|
||||
|
||||
|
||||
def has_public_project(node_doc):
|
||||
"""Returns True iff the project the node belongs to is public."""
|
||||
|
||||
project_id = node_doc.get('project')
|
||||
return is_project_public(project_id)
|
||||
|
||||
|
||||
# TODO: cache result, at least for a limited amt. of time, or for this HTTP request.
|
||||
def is_project_public(project_id):
|
||||
"""Returns True iff the project is public."""
|
||||
|
||||
project = current_app.data.driver.db['projects'].find_one(project_id)
|
||||
if not project:
|
||||
return False
|
||||
|
||||
return not project.get('is_private')
|
||||
|
||||
|
||||
@blueprint.route('/assets')
|
||||
def latest_assets():
|
||||
latest = latest_nodes({'node_type': 'asset', 'properties.status': 'published'},
|
||||
{'name': 1, 'project': 1, 'user': 1, 'node_type': 1,
|
||||
'parent': 1, 'picture': 1, 'properties.status': 1,
|
||||
'properties.content_type': 1,
|
||||
'permissions.world': 1},
|
||||
has_public_project, 12)
|
||||
|
||||
embed_user(latest)
|
||||
embed_project(latest)
|
||||
|
||||
return jsonify({'_items': latest})
|
||||
|
||||
|
||||
def embed_user(latest):
|
||||
users = current_app.data.driver.db['users']
|
||||
|
||||
for comment in latest:
|
||||
user_id = comment['user']
|
||||
comment['user'] = users.find_one(user_id, {'auth': 0, 'groups': 0, 'roles': 0,
|
||||
'settings': 0, 'email': 0,
|
||||
'_created': 0, '_updated': 0, '_etag': 0})
|
||||
|
||||
|
||||
def embed_project(latest):
|
||||
projects = current_app.data.driver.db['projects']
|
||||
|
||||
for comment in latest:
|
||||
project_id = comment['project']
|
||||
comment['project'] = projects.find_one(project_id, {'_id': 1, 'name': 1, 'url': 1})
|
||||
|
||||
|
||||
@blueprint.route('/comments')
|
||||
def latest_comments():
|
||||
latest = latest_nodes({'node_type': 'comment', 'properties.status': 'published'},
|
||||
{'project': 1, 'parent': 1, 'user': 1,
|
||||
'properties.content': 1, 'node_type': 1, 'properties.status': 1,
|
||||
'properties.is_reply': 1},
|
||||
has_public_project, 6)
|
||||
|
||||
# Embed the comments' parents.
|
||||
nodes = current_app.data.driver.db['nodes']
|
||||
parents = {}
|
||||
for comment in latest:
|
||||
parent_id = comment['parent']
|
||||
|
||||
if parent_id in parents:
|
||||
comment['parent'] = parents[parent_id]
|
||||
continue
|
||||
|
||||
parent = nodes.find_one(parent_id)
|
||||
parents[parent_id] = parent
|
||||
comment['parent'] = parent
|
||||
|
||||
embed_project(latest)
|
||||
embed_user(latest)
|
||||
|
||||
return jsonify({'_items': latest})
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
||||
@@ -1,472 +0,0 @@
|
||||
import copy
|
||||
import logging
|
||||
import json
|
||||
|
||||
from bson import ObjectId
|
||||
from eve.methods.post import post_internal
|
||||
from eve.methods.patch import patch_internal
|
||||
from flask import g, Blueprint, request, abort, current_app, make_response
|
||||
from gcloud import exceptions as gcs_exceptions
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
|
||||
from application.utils import remove_private_keys, jsonify, mongo, str2id
|
||||
from application.utils import authorization, authentication
|
||||
from application.utils.gcs import GoogleCloudStorageBucket
|
||||
from application.utils.authorization import user_has_role, check_permissions, require_login
|
||||
from manage_extra.node_types.asset import node_type_asset
|
||||
from manage_extra.node_types.comment import node_type_comment
|
||||
from manage_extra.node_types.group import node_type_group
|
||||
from manage_extra.node_types.texture import node_type_texture
|
||||
from manage_extra.node_types.group_texture import node_type_group_texture
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
blueprint = Blueprint('projects', __name__)
|
||||
|
||||
# Default project permissions for the admin group.
|
||||
DEFAULT_ADMIN_GROUP_PERMISSIONS = ['GET', 'PUT', 'POST', 'DELETE']
|
||||
|
||||
|
||||
def before_inserting_projects(items):
|
||||
"""Strip unwanted properties, that will be assigned after creation. Also,
|
||||
verify permission to create a project (check quota, check role).
|
||||
|
||||
:param items: List of project docs about to be inserted (normally one)
|
||||
"""
|
||||
|
||||
# Allow admin users to do whatever they want.
|
||||
if user_has_role(u'admin'):
|
||||
return
|
||||
|
||||
for item in items:
|
||||
item.pop('url', None)
|
||||
|
||||
|
||||
def override_is_private_field(project, original):
|
||||
"""Override the 'is_private' property from the world permissions.
|
||||
|
||||
:param project: the project, which will be updated
|
||||
"""
|
||||
|
||||
# No permissions, no access.
|
||||
if 'permissions' not in project:
|
||||
project['is_private'] = True
|
||||
return
|
||||
|
||||
world_perms = project['permissions'].get('world', [])
|
||||
is_private = 'GET' not in world_perms
|
||||
project['is_private'] = is_private
|
||||
|
||||
|
||||
def before_inserting_override_is_private_field(projects):
|
||||
for project in projects:
|
||||
override_is_private_field(project, None)
|
||||
|
||||
|
||||
def before_edit_check_permissions(document, original):
|
||||
# Allow admin users to do whatever they want.
|
||||
# TODO: possibly move this into the check_permissions function.
|
||||
if user_has_role(u'admin'):
|
||||
return
|
||||
|
||||
check_permissions('projects', original, request.method)
|
||||
|
||||
|
||||
def before_delete_project(document):
|
||||
"""Checks permissions before we allow deletion"""
|
||||
|
||||
# Allow admin users to do whatever they want.
|
||||
# TODO: possibly move this into the check_permissions function.
|
||||
if user_has_role(u'admin'):
|
||||
return
|
||||
|
||||
check_permissions('projects', document, request.method)
|
||||
|
||||
|
||||
def protect_sensitive_fields(document, original):
|
||||
"""When not logged in as admin, prevents update to certain fields."""
|
||||
|
||||
# Allow admin users to do whatever they want.
|
||||
if user_has_role(u'admin'):
|
||||
return
|
||||
|
||||
def revert(name):
|
||||
if name not in original:
|
||||
try:
|
||||
del document[name]
|
||||
except KeyError:
|
||||
pass
|
||||
return
|
||||
document[name] = original[name]
|
||||
|
||||
revert('status')
|
||||
revert('category')
|
||||
revert('user')
|
||||
|
||||
if 'url' in original:
|
||||
revert('url')
|
||||
|
||||
|
||||
def after_inserting_projects(projects):
|
||||
"""After inserting a project in the collection we do some processing such as:
|
||||
- apply the right permissions
|
||||
- define basic node types
|
||||
- optionally generate a url
|
||||
- initialize storage space
|
||||
|
||||
:param projects: List of project docs that have been inserted (normally one)
|
||||
"""
|
||||
|
||||
users_collection = current_app.data.driver.db['users']
|
||||
for project in projects:
|
||||
owner_id = project.get('user', None)
|
||||
owner = users_collection.find_one(owner_id)
|
||||
after_inserting_project(project, owner)
|
||||
|
||||
|
||||
def after_inserting_project(project, db_user):
|
||||
project_id = project['_id']
|
||||
user_id = db_user['_id']
|
||||
|
||||
# Create a project-specific admin group (with name matching the project id)
|
||||
result, _, _, status = post_internal('groups', {'name': str(project_id)})
|
||||
if status != 201:
|
||||
log.error('Unable to create admin group for new project %s: %s',
|
||||
project_id, result)
|
||||
return abort_with_error(status)
|
||||
|
||||
admin_group_id = result['_id']
|
||||
log.debug('Created admin group %s for project %s', admin_group_id, project_id)
|
||||
|
||||
# Assign the current user to the group
|
||||
db_user.setdefault('groups', []).append(admin_group_id)
|
||||
|
||||
result, _, _, status = patch_internal('users', {'groups': db_user['groups']}, _id=user_id)
|
||||
if status != 200:
|
||||
log.error('Unable to add user %s as member of admin group %s for new project %s: %s',
|
||||
user_id, admin_group_id, project_id, result)
|
||||
return abort_with_error(status)
|
||||
log.debug('Made user %s member of group %s', user_id, admin_group_id)
|
||||
|
||||
# Assign the group to the project with admin rights
|
||||
is_admin = authorization.is_admin(db_user)
|
||||
world_permissions = ['GET'] if is_admin else []
|
||||
permissions = {
|
||||
'world': world_permissions,
|
||||
'users': [],
|
||||
'groups': [
|
||||
{'group': admin_group_id,
|
||||
'methods': DEFAULT_ADMIN_GROUP_PERMISSIONS[:]},
|
||||
]
|
||||
}
|
||||
|
||||
def with_permissions(node_type):
|
||||
copied = copy.deepcopy(node_type)
|
||||
copied['permissions'] = permissions
|
||||
return copied
|
||||
|
||||
# Assign permissions to the project itself, as well as to the node_types
|
||||
project['permissions'] = permissions
|
||||
project['node_types'] = [
|
||||
with_permissions(node_type_group),
|
||||
with_permissions(node_type_asset),
|
||||
with_permissions(node_type_comment),
|
||||
with_permissions(node_type_texture),
|
||||
with_permissions(node_type_group_texture),
|
||||
]
|
||||
|
||||
# Allow admin users to use whatever url they want.
|
||||
if not is_admin or not project.get('url'):
|
||||
if project.get('category', '') == 'home':
|
||||
project['url'] = 'home'
|
||||
else:
|
||||
project['url'] = "p-{!s}".format(project_id)
|
||||
|
||||
# Initialize storage space (defaults to GCS)
|
||||
if current_app.config.get('TESTING'):
|
||||
log.warning('Not creating Google Cloud Storage bucket while running unit tests!')
|
||||
else:
|
||||
try:
|
||||
gcs_storage = GoogleCloudStorageBucket(str(project_id))
|
||||
if gcs_storage.bucket.exists():
|
||||
log.info('Created GCS instance for project %s', project_id)
|
||||
else:
|
||||
log.warning('Unable to create GCS instance for project %s', project_id)
|
||||
except gcs_exceptions.Forbidden as ex:
|
||||
log.warning('GCS forbids me to create GCS instance for project %s: %s', project_id, ex)
|
||||
|
||||
# Commit the changes directly to the MongoDB; a PUT is not allowed yet,
|
||||
# as the project doesn't have a valid permission structure.
|
||||
projects_collection = current_app.data.driver.db['projects']
|
||||
result = projects_collection.update_one({'_id': project_id},
|
||||
{'$set': remove_private_keys(project)})
|
||||
if result.matched_count != 1:
|
||||
log.warning('Unable to update project %s: %s', project_id, result.raw_result)
|
||||
abort_with_error(500)
|
||||
|
||||
|
||||
def create_new_project(project_name, user_id, overrides):
|
||||
"""Creates a new project owned by the given user."""
|
||||
|
||||
log.info('Creating new project "%s" for user %s', project_name, user_id)
|
||||
|
||||
# Create the project itself, the rest will be done by the after-insert hook.
|
||||
project = {'description': '',
|
||||
'name': project_name,
|
||||
'node_types': [],
|
||||
'status': 'published',
|
||||
'user': user_id,
|
||||
'is_private': True,
|
||||
'permissions': {},
|
||||
'url': '',
|
||||
'summary': '',
|
||||
'category': 'assets', # TODO: allow the user to choose this.
|
||||
}
|
||||
if overrides is not None:
|
||||
project.update(overrides)
|
||||
|
||||
result, _, _, status = post_internal('projects', project)
|
||||
if status != 201:
|
||||
log.error('Unable to create project "%s": %s', project_name, result)
|
||||
return abort_with_error(status)
|
||||
project.update(result)
|
||||
|
||||
# Now re-fetch the project, as both the initial document and the returned
|
||||
# result do not contain the same etag as the database. This also updates
|
||||
# other fields set by hooks.
|
||||
document = current_app.data.driver.db['projects'].find_one(project['_id'])
|
||||
project.update(document)
|
||||
|
||||
log.info('Created project %s for user %s', project['_id'], user_id)
|
||||
|
||||
return project
|
||||
|
||||
|
||||
@blueprint.route('/create', methods=['POST'])
|
||||
@authorization.require_login(require_roles={u'admin', u'subscriber', u'demo'})
|
||||
def create_project(overrides=None):
|
||||
"""Creates a new project."""
|
||||
|
||||
if request.mimetype == 'application/json':
|
||||
project_name = request.json['name']
|
||||
else:
|
||||
project_name = request.form['project_name']
|
||||
user_id = g.current_user['user_id']
|
||||
|
||||
project = create_new_project(project_name, user_id, overrides)
|
||||
|
||||
# Return the project in the response.
|
||||
return jsonify(project, status=201, headers={'Location': '/projects/%s' % project['_id']})
|
||||
|
||||
|
||||
@blueprint.route('/users', methods=['GET', 'POST'])
|
||||
@authorization.require_login()
|
||||
def project_manage_users():
|
||||
"""Manage users of a project. In this initial implementation, we handle
|
||||
addition and removal of a user to the admin group of a project.
|
||||
No changes are done on the project itself.
|
||||
"""
|
||||
|
||||
projects_collection = current_app.data.driver.db['projects']
|
||||
users_collection = current_app.data.driver.db['users']
|
||||
|
||||
# TODO: check if user is admin of the project before anything
|
||||
if request.method == 'GET':
|
||||
project_id = request.args['project_id']
|
||||
project = projects_collection.find_one({'_id': ObjectId(project_id)})
|
||||
admin_group_id = project['permissions']['groups'][0]['group']
|
||||
|
||||
users = users_collection.find(
|
||||
{'groups': {'$in': [admin_group_id]}},
|
||||
{'username': 1, 'email': 1, 'full_name': 1})
|
||||
return jsonify({'_status': 'OK', '_items': list(users)})
|
||||
|
||||
# The request is not a form, since it comes from the API sdk
|
||||
data = json.loads(request.data)
|
||||
project_id = ObjectId(data['project_id'])
|
||||
target_user_id = ObjectId(data['user_id'])
|
||||
action = data['action']
|
||||
current_user_id = g.current_user['user_id']
|
||||
|
||||
project = projects_collection.find_one({'_id': project_id})
|
||||
|
||||
# Check if the current_user is owner of the project, or removing themselves.
|
||||
remove_self = target_user_id == current_user_id and action == 'remove'
|
||||
if project['user'] != current_user_id and not remove_self:
|
||||
return abort_with_error(403)
|
||||
|
||||
admin_group = get_admin_group(project)
|
||||
|
||||
# Get the user and add the admin group to it
|
||||
if action == 'add':
|
||||
operation = '$addToSet'
|
||||
log.info('project_manage_users: Adding user %s to admin group of project %s',
|
||||
target_user_id, project_id)
|
||||
elif action == 'remove':
|
||||
log.info('project_manage_users: Removing user %s from admin group of project %s',
|
||||
target_user_id, project_id)
|
||||
operation = '$pull'
|
||||
else:
|
||||
log.warning('project_manage_users: Unsupported action %r called by user %s',
|
||||
action, current_user_id)
|
||||
raise wz_exceptions.UnprocessableEntity()
|
||||
|
||||
users_collection.update({'_id': target_user_id},
|
||||
{operation: {'groups': admin_group['_id']}})
|
||||
|
||||
user = users_collection.find_one({'_id': target_user_id},
|
||||
{'username': 1, 'email': 1,
|
||||
'full_name': 1})
|
||||
|
||||
if not user:
|
||||
return jsonify({'_status': 'ERROR'}), 404
|
||||
|
||||
user['_status'] = 'OK'
|
||||
return jsonify(user)
|
||||
|
||||
|
||||
def get_admin_group(project):
|
||||
"""Returns the admin group for the project."""
|
||||
|
||||
groups_collection = current_app.data.driver.db['groups']
|
||||
|
||||
# TODO: search through all groups to find the one with the project ID as its name.
|
||||
admin_group_id = ObjectId(project['permissions']['groups'][0]['group'])
|
||||
group = groups_collection.find_one({'_id': admin_group_id})
|
||||
|
||||
if group is None:
|
||||
raise ValueError('Unable to handle project without admin group.')
|
||||
|
||||
if group['name'] != str(project['_id']):
|
||||
return abort_with_error(403)
|
||||
|
||||
return group
|
||||
|
||||
|
||||
def abort_with_error(status):
|
||||
"""Aborts with the given status, or 500 if the status doesn't indicate an error.
|
||||
|
||||
If the status is < 400, status 500 is used instead.
|
||||
"""
|
||||
|
||||
abort(status if status // 100 >= 4 else 500)
|
||||
|
||||
|
||||
@blueprint.route('/<string:project_id>/quotas')
|
||||
@require_login()
|
||||
def project_quotas(project_id):
|
||||
"""Returns information about the project's limits."""
|
||||
|
||||
# Check that the user has GET permissions on the project itself.
|
||||
project = mongo.find_one_or_404('projects', project_id)
|
||||
check_permissions('projects', project, 'GET')
|
||||
|
||||
file_size_used = project_total_file_size(project_id)
|
||||
|
||||
info = {
|
||||
'file_size_quota': None, # TODO: implement this later.
|
||||
'file_size_used': file_size_used,
|
||||
}
|
||||
|
||||
return jsonify(info)
|
||||
|
||||
|
||||
def project_total_file_size(project_id):
|
||||
"""Returns the total number of bytes used by files of this project."""
|
||||
|
||||
files = current_app.data.driver.db['files']
|
||||
file_size_used = files.aggregate([
|
||||
{'$match': {'project': ObjectId(project_id)}},
|
||||
{'$project': {'length_aggregate_in_bytes': 1}},
|
||||
{'$group': {'_id': None,
|
||||
'all_files': {'$sum': '$length_aggregate_in_bytes'}}}
|
||||
])
|
||||
|
||||
# The aggregate function returns a cursor, not a document.
|
||||
try:
|
||||
return next(file_size_used)['all_files']
|
||||
except StopIteration:
|
||||
# No files used at all.
|
||||
return 0
|
||||
|
||||
|
||||
def before_returning_project_permissions(response):
|
||||
# Run validation process, since GET on the projects entry point is public
|
||||
check_permissions('projects', response, 'GET', append_allowed_methods=True)
|
||||
|
||||
|
||||
def before_returning_project_resource_permissions(response):
|
||||
# Return only those projects the user has access to.
|
||||
allow = []
|
||||
for project in response['_items']:
|
||||
if authorization.has_permissions('projects', project,
|
||||
'GET', append_allowed_methods=True):
|
||||
allow.append(project)
|
||||
else:
|
||||
log.debug('User %s requested project %s, but has no access to it; filtered out.',
|
||||
authentication.current_user_id(), project['_id'])
|
||||
|
||||
response['_items'] = allow
|
||||
|
||||
|
||||
def project_node_type_has_method(response):
|
||||
"""Check for a specific request arg, and check generate the allowed_methods
|
||||
list for the required node_type.
|
||||
"""
|
||||
|
||||
node_type_name = request.args.get('node_type', '')
|
||||
|
||||
# Proceed only if a node_type has been requested
|
||||
if not node_type_name:
|
||||
return
|
||||
|
||||
# Look up the node type in the project document
|
||||
if not any(node_type.get('name') == node_type_name
|
||||
for node_type in response['node_types']):
|
||||
return abort(404)
|
||||
|
||||
# Check permissions and append the allowed_methods to the node_type
|
||||
check_permissions('projects', response, 'GET', append_allowed_methods=True,
|
||||
check_node_type=node_type_name)
|
||||
|
||||
|
||||
def projects_node_type_has_method(response):
|
||||
for project in response['_items']:
|
||||
project_node_type_has_method(project)
|
||||
|
||||
|
||||
@blueprint.route('/<project_id>/<node_type>', methods=['OPTIONS', 'GET'])
|
||||
def get_allowed_methods(project_id=None, node_type=None):
|
||||
"""Returns allowed methods to create a node of a certain type.
|
||||
|
||||
Either project_id or parent_node_id must be given. If the latter is given,
|
||||
the former is derived from it.
|
||||
"""
|
||||
|
||||
project = mongo.find_one_or_404('projects', str2id(project_id))
|
||||
proj_methods = authorization.compute_allowed_methods('projects', project, node_type)
|
||||
|
||||
resp = make_response()
|
||||
resp.headers['Allowed'] = ', '.join(sorted(proj_methods))
|
||||
resp.status_code = 204
|
||||
|
||||
return resp
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
app.on_replace_projects += override_is_private_field
|
||||
app.on_replace_projects += before_edit_check_permissions
|
||||
app.on_replace_projects += protect_sensitive_fields
|
||||
app.on_update_projects += override_is_private_field
|
||||
app.on_update_projects += before_edit_check_permissions
|
||||
app.on_update_projects += protect_sensitive_fields
|
||||
app.on_delete_item_projects += before_delete_project
|
||||
app.on_insert_projects += before_inserting_override_is_private_field
|
||||
app.on_insert_projects += before_inserting_projects
|
||||
app.on_inserted_projects += after_inserting_projects
|
||||
|
||||
app.on_fetched_item_projects += before_returning_project_permissions
|
||||
app.on_fetched_resource_projects += before_returning_project_resource_permissions
|
||||
app.on_fetched_item_projects += project_node_type_has_method
|
||||
app.on_fetched_resource_projects += projects_node_type_has_method
|
||||
|
||||
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
||||
@@ -1,160 +0,0 @@
|
||||
import copy
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import urllib
|
||||
|
||||
from flask import g, current_app, Blueprint
|
||||
|
||||
from werkzeug.exceptions import Forbidden
|
||||
from eve.utils import parse_request
|
||||
from eve.methods.get import get
|
||||
|
||||
from application.utils.authorization import user_has_role, require_login
|
||||
from application.utils import jsonify
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
blueprint = Blueprint('users', __name__)
|
||||
|
||||
|
||||
@blueprint.route('/me')
|
||||
@require_login()
|
||||
def my_info():
|
||||
eve_resp, _, _, status, _ = get('users', {'_id': g.current_user['user_id']})
|
||||
resp = jsonify(eve_resp['_items'][0], status=status)
|
||||
return resp
|
||||
|
||||
|
||||
def gravatar(email, size=64):
|
||||
parameters = {'s': str(size), 'd': 'mm'}
|
||||
return "https://www.gravatar.com/avatar/" + \
|
||||
hashlib.md5(str(email)).hexdigest() + \
|
||||
"?" + urllib.urlencode(parameters)
|
||||
|
||||
|
||||
def post_GET_user(request, payload):
|
||||
json_data = json.loads(payload.data)
|
||||
# Check if we are querying the users endpoint (instead of the single user)
|
||||
if json_data.get('_id') is None:
|
||||
return
|
||||
# json_data['computed_permissions'] = \
|
||||
# compute_permissions(json_data['_id'], app.data.driver)
|
||||
payload.data = json.dumps(json_data)
|
||||
|
||||
|
||||
def before_replacing_user(request, lookup):
|
||||
"""Loads the auth field from the database, preventing any changes."""
|
||||
|
||||
# Find the user that is being replaced
|
||||
req = parse_request('users')
|
||||
req.projection = json.dumps({'auth': 1})
|
||||
original = current_app.data.find_one('users', req, **lookup)
|
||||
|
||||
# Make sure that the replacement has a valid auth field.
|
||||
updates = request.get_json()
|
||||
assert updates is request.get_json() # We should get a ref to the cached JSON, and not a copy.
|
||||
|
||||
if 'auth' in original:
|
||||
updates['auth'] = copy.deepcopy(original['auth'])
|
||||
else:
|
||||
updates.pop('auth', None)
|
||||
|
||||
|
||||
def push_updated_user_to_algolia(user, original):
|
||||
"""Push an update to the Algolia index when a user item is updated"""
|
||||
|
||||
from algoliasearch.client import AlgoliaException
|
||||
from application.utils.algolia import algolia_index_user_save
|
||||
|
||||
try:
|
||||
algolia_index_user_save(user)
|
||||
except AlgoliaException as ex:
|
||||
log.warning('Unable to push user info to Algolia for user "%s", id=%s; %s',
|
||||
user.get('username'), user.get('_id'), ex)
|
||||
|
||||
|
||||
def send_blinker_signal_roles_changed(user, original):
|
||||
"""Sends a Blinker signal that the user roles were changed, so others can respond."""
|
||||
|
||||
if user.get('roles') == original.get('roles'):
|
||||
return
|
||||
|
||||
from application.modules.service import signal_user_changed_role
|
||||
|
||||
log.info('User %s changed roles to %s, sending Blinker signal',
|
||||
user.get('_id'), user.get('roles'))
|
||||
signal_user_changed_role.send(current_app, user=user)
|
||||
|
||||
|
||||
def check_user_access(request, lookup):
|
||||
"""Modifies the lookup dict to limit returned user info."""
|
||||
|
||||
# No access when not logged in.
|
||||
current_user = g.get('current_user')
|
||||
current_user_id = current_user['user_id'] if current_user else None
|
||||
|
||||
# Admins can do anything and get everything, except the 'auth' block.
|
||||
if user_has_role(u'admin'):
|
||||
return
|
||||
|
||||
if not lookup and not current_user:
|
||||
raise Forbidden()
|
||||
|
||||
# Add a filter to only return the current user.
|
||||
if '_id' not in lookup:
|
||||
lookup['_id'] = current_user['user_id']
|
||||
|
||||
|
||||
def check_put_access(request, lookup):
|
||||
"""Only allow PUT to the current user, or all users if admin."""
|
||||
|
||||
if user_has_role(u'admin'):
|
||||
return
|
||||
|
||||
current_user = g.get('current_user')
|
||||
if not current_user:
|
||||
raise Forbidden()
|
||||
|
||||
if str(lookup['_id']) != str(current_user['user_id']):
|
||||
raise Forbidden()
|
||||
|
||||
|
||||
def after_fetching_user(user):
|
||||
# Deny access to auth block; authentication stuff is managed by
|
||||
# custom end-points.
|
||||
user.pop('auth', None)
|
||||
|
||||
current_user = g.get('current_user')
|
||||
current_user_id = current_user['user_id'] if current_user else None
|
||||
|
||||
# Admins can do anything and get everything, except the 'auth' block.
|
||||
if user_has_role(u'admin'):
|
||||
return
|
||||
|
||||
# Only allow full access to the current user.
|
||||
if str(user['_id']) == str(current_user_id):
|
||||
return
|
||||
|
||||
# Remove all fields except public ones.
|
||||
public_fields = {'full_name', 'email'}
|
||||
for field in list(user.keys()):
|
||||
if field not in public_fields:
|
||||
del user[field]
|
||||
|
||||
|
||||
def after_fetching_user_resource(response):
|
||||
for user in response['_items']:
|
||||
after_fetching_user(user)
|
||||
|
||||
|
||||
def setup_app(app, url_prefix):
|
||||
app.on_pre_GET_users += check_user_access
|
||||
app.on_post_GET_users += post_GET_user
|
||||
app.on_pre_PUT_users += check_put_access
|
||||
app.on_pre_PUT_users += before_replacing_user
|
||||
app.on_replaced_users += push_updated_user_to_algolia
|
||||
app.on_replaced_users += send_blinker_signal_roles_changed
|
||||
app.on_fetched_item_users += after_fetching_user
|
||||
app.on_fetched_resource_users += after_fetching_user_resource
|
||||
|
||||
app.register_blueprint(blueprint, url_prefix=url_prefix)
|
||||
@@ -1,3 +0,0 @@
# Ignore everything but self
*
!.gitignore
@@ -1,106 +0,0 @@
|
||||
import copy
|
||||
import json
|
||||
import datetime
|
||||
import functools
|
||||
import logging
|
||||
|
||||
import bson.objectid
|
||||
from eve import RFC1123_DATE_FORMAT
|
||||
from flask import current_app
|
||||
from werkzeug import exceptions as wz_exceptions
|
||||
import pymongo.results
|
||||
|
||||
__all__ = ('remove_private_keys', 'PillarJSONEncoder')
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def remove_private_keys(document):
|
||||
"""Removes any key that starts with an underscore, returns result as new
|
||||
dictionary.
|
||||
"""
|
||||
doc_copy = copy.deepcopy(document)
|
||||
for key in list(doc_copy.keys()):
|
||||
if key.startswith('_'):
|
||||
del doc_copy[key]
|
||||
|
||||
try:
|
||||
del doc_copy['allowed_methods']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
return doc_copy
|
||||
|
||||
|
||||
class PillarJSONEncoder(json.JSONEncoder):
|
||||
"""JSON encoder with support for Pillar resources."""
|
||||
|
||||
def default(self, obj):
|
||||
if isinstance(obj, datetime.datetime):
|
||||
return obj.strftime(RFC1123_DATE_FORMAT)
|
||||
|
||||
if isinstance(obj, bson.ObjectId):
|
||||
return str(obj)
|
||||
|
||||
if isinstance(obj, pymongo.results.UpdateResult):
|
||||
return obj.raw_result
|
||||
|
||||
# Let the base class default method raise the TypeError
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
def dumps(mongo_doc, **kwargs):
|
||||
"""json.dumps() for MongoDB documents."""
|
||||
return json.dumps(mongo_doc, cls=PillarJSONEncoder, **kwargs)
|
||||
|
||||
|
||||
def jsonify(mongo_doc, status=200, headers=None):
|
||||
"""JSonifies a Mongo document into a Flask response object."""
|
||||
|
||||
return current_app.response_class(dumps(mongo_doc),
|
||||
mimetype='application/json',
|
||||
status=status,
|
||||
headers=headers)
|
||||
|
||||
|
||||
def skip_when_testing(func):
|
||||
"""Decorator, skips the decorated function when app.config['TESTING']"""
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
if current_app.config['TESTING']:
|
||||
log.debug('Skipping call to %s(...) due to TESTING', func.func_name)
|
||||
return None
|
||||
|
||||
return func(*args, **kwargs)
|
||||
return wrapper
|
||||
|
||||
|
||||
def project_get_node_type(project_document, node_type_node_name):
|
||||
"""Return a node_type subdocument for a project. If none is found, return
|
||||
None.
|
||||
"""
|
||||
|
||||
if project_document is None:
|
||||
return None
|
||||
|
||||
return next((node_type for node_type in project_document['node_types']
|
||||
if node_type['name'] == node_type_node_name), None)
|
||||
|
||||
|
||||
def str2id(document_id):
|
||||
"""Returns the document ID as ObjectID, or raises a BadRequest exception.
|
||||
|
||||
:type document_id: str
|
||||
:rtype: bson.ObjectId
|
||||
:raises: wz_exceptions.BadRequest
|
||||
"""
|
||||
|
||||
if not document_id:
|
||||
log.debug('str2id(%r): Invalid Object ID', document_id)
|
||||
raise wz_exceptions.BadRequest('Invalid object ID %r' % document_id)
|
||||
|
||||
try:
|
||||
return bson.ObjectId(document_id)
|
||||
except bson.objectid.InvalidId:
|
||||
log.debug('str2id(%r): Invalid Object ID', document_id)
|
||||
raise wz_exceptions.BadRequest('Invalid object ID %r' % document_id)
|
||||
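A small usage sketch of the helpers above (assuming they live in application.utils, as the imports elsewhere in this diff suggest; the document values are made up): remove_private_keys() drops Eve's underscore-prefixed meta fields plus 'allowed_methods' before a document is re-submitted, and dumps() serialises ObjectId and datetime values through PillarJSONEncoder.

import datetime

import bson

from application.utils import remove_private_keys, dumps

doc = {'_id': bson.ObjectId(),
       '_updated': datetime.datetime.utcnow(),
       '_etag': 'abc123',
       'allowed_methods': ['GET'],
       'name': 'Suzanne'}

payload = remove_private_keys(doc)
assert list(payload.keys()) == ['name']  # only non-meta fields survive

print(dumps(doc, indent=4))  # ObjectId and datetime handled by PillarJSONEncoder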
@@ -1,98 +0,0 @@
|
||||
import logging
|
||||
|
||||
from bson import ObjectId
|
||||
from flask import current_app
|
||||
|
||||
from application import algolia_index_users
|
||||
from application import algolia_index_nodes
|
||||
from application.modules.file_storage import generate_link
|
||||
from . import skip_when_testing
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
INDEX_ALLOWED_USER_ROLES = {'admin', 'subscriber', 'demo'}
|
||||
INDEX_ALLOWED_NODE_TYPES = {'asset', 'texture', 'group', 'hdri'}
|
||||
|
||||
|
||||
@skip_when_testing
|
||||
def algolia_index_user_save(user):
|
||||
if algolia_index_users is None:
|
||||
return
|
||||
# Strip unneeded roles
|
||||
if 'roles' in user:
|
||||
roles = set(user['roles']).intersection(INDEX_ALLOWED_USER_ROLES)
|
||||
else:
|
||||
roles = set()
|
||||
if algolia_index_users:
|
||||
# Create or update Algolia index for the user
|
||||
algolia_index_users.save_object({
|
||||
'objectID': user['_id'],
|
||||
'full_name': user['full_name'],
|
||||
'username': user['username'],
|
||||
'roles': list(roles),
|
||||
'groups': user['groups'],
|
||||
'email': user['email']
|
||||
})
|
||||
|
||||
|
||||
@skip_when_testing
|
||||
def algolia_index_node_save(node):
|
||||
if node['node_type'] in INDEX_ALLOWED_NODE_TYPES and algolia_index_nodes:
|
||||
# If a node does not have status published, do not index
|
||||
if 'status' in node['properties'] \
|
||||
and node['properties']['status'] != 'published':
|
||||
return
|
||||
|
||||
projects_collection = current_app.data.driver.db['projects']
|
||||
project = projects_collection.find_one({'_id': ObjectId(node['project'])})
|
||||
|
||||
users_collection = current_app.data.driver.db['users']
|
||||
user = users_collection.find_one({'_id': ObjectId(node['user'])})
|
||||
|
||||
node_ob = {
|
||||
'objectID': node['_id'],
|
||||
'name': node['name'],
|
||||
'project': {
|
||||
'_id': project['_id'],
|
||||
'name': project['name']
|
||||
},
|
||||
'created': node['_created'],
|
||||
'updated': node['_updated'],
|
||||
'node_type': node['node_type'],
|
||||
'user': {
|
||||
'_id': user['_id'],
|
||||
'full_name': user['full_name']
|
||||
},
|
||||
}
|
||||
if 'description' in node and node['description']:
|
||||
node_ob['description'] = node['description']
|
||||
if 'picture' in node and node['picture']:
|
||||
files_collection = current_app.data.driver.db['files']
|
||||
lookup = {'_id': ObjectId(node['picture'])}
|
||||
picture = files_collection.find_one(lookup)
|
||||
if picture['backend'] == 'gcs':
|
||||
variation_t = next((item for item in picture['variations'] \
|
||||
if item['size'] == 't'), None)
|
||||
if variation_t:
|
||||
node_ob['picture'] = generate_link(picture['backend'],
|
||||
variation_t['file_path'], project_id=str(picture['project']),
|
||||
is_public=True)
|
||||
# If the node has world permissions, compute the Free permission
|
||||
if 'permissions' in node and 'world' in node['permissions']:
|
||||
if 'GET' in node['permissions']['world']:
|
||||
node_ob['is_free'] = True
|
||||
# Append the media key if the node is of node_type 'asset'
|
||||
if node['node_type'] == 'asset':
|
||||
node_ob['media'] = node['properties']['content_type']
|
||||
# Add tags
|
||||
if 'tags' in node['properties']:
|
||||
node_ob['tags'] = node['properties']['tags']
|
||||
|
||||
algolia_index_nodes.save_object(node_ob)
|
||||
|
||||
|
||||
@skip_when_testing
|
||||
def algolia_index_node_delete(node):
|
||||
if algolia_index_nodes is None:
|
||||
return
|
||||
algolia_index_nodes.delete_object(node['_id'])
|
||||
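The save function above only forwards a whitelisted subset of roles to the search index; a standalone illustration of that intersection (the extra role names are made up):

INDEX_ALLOWED_USER_ROLES = {'admin', 'subscriber', 'demo'}

user_roles = ['subscriber', 'cloud_demo', 'org-member']
indexed_roles = set(user_roles).intersection(INDEX_ALLOWED_USER_ROLES)
print(sorted(indexed_roles))  # ['subscriber']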
@@ -1,198 +0,0 @@
|
||||
"""Generic authentication.
|
||||
|
||||
Contains functionality to validate tokens, create users and tokens, and make
|
||||
unique usernames from emails. Calls out to the application.modules.blender_id
|
||||
module for Blender ID communication.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import datetime
|
||||
|
||||
from bson import tz_util
|
||||
from flask import g
|
||||
from flask import request
|
||||
from flask import current_app
|
||||
from eve.methods.post import post_internal
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def validate_token():
|
||||
"""Validate the token provided in the request and populate the current_user
|
||||
flask.g object, so that permissions and access to a resource can be defined
|
||||
from it.
|
||||
|
||||
When the token is successfully validated, sets `g.current_user` to contain
|
||||
the user information, otherwise it is set to None.
|
||||
|
||||
@returns True iff the user is logged in with a valid Blender ID token.
|
||||
"""
|
||||
|
||||
# Default to no user at all.
|
||||
g.current_user = None
|
||||
|
||||
_delete_expired_tokens()
|
||||
|
||||
if not request.authorization:
|
||||
# If no authorization headers are provided, we are getting a request
|
||||
# from a non-logged-in user. Proceed accordingly.
|
||||
log.debug('No authentication headers, so not logged in.')
|
||||
return False
|
||||
|
||||
# Check the users to see if there is one with this Blender ID token.
|
||||
token = request.authorization.username
|
||||
oauth_subclient = request.authorization.password
|
||||
|
||||
db_token = find_token(token, oauth_subclient)
|
||||
if not db_token:
|
||||
log.debug('Token %s not found in our local database.', token)
|
||||
|
||||
# If no valid token is found in our local database, we issue a new
|
||||
# request to the Blender ID server to verify the validity of the token
|
||||
# passed via the HTTP header. We will get basic user info if the user
|
||||
# is authorized, and we will store the token in our local database.
|
||||
from application.modules import blender_id
|
||||
|
||||
db_user, status = blender_id.validate_create_user('', token, oauth_subclient)
|
||||
else:
|
||||
# log.debug("User is already in our database and token hasn't expired yet.")
|
||||
users = current_app.data.driver.db['users']
|
||||
db_user = users.find_one(db_token['user'])
|
||||
|
||||
if db_user is None:
|
||||
log.debug('Validation failed, user not logged in')
|
||||
return False
|
||||
|
||||
g.current_user = {'user_id': db_user['_id'],
|
||||
'groups': db_user['groups'],
|
||||
'roles': set(db_user.get('roles', []))}
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def find_token(token, is_subclient_token=False, **extra_filters):
|
||||
"""Returns the token document, or None if it doesn't exist (or is expired)."""
|
||||
|
||||
tokens_collection = current_app.data.driver.db['tokens']
|
||||
|
||||
# TODO: remove expired tokens from collection.
|
||||
lookup = {'token': token,
|
||||
'is_subclient_token': True if is_subclient_token else {'$in': [False, None]},
|
||||
'expire_time': {"$gt": datetime.datetime.now(tz=tz_util.utc)}}
|
||||
lookup.update(extra_filters)
|
||||
|
||||
db_token = tokens_collection.find_one(lookup)
|
||||
return db_token
|
||||
|
||||
|
||||
def store_token(user_id, token, token_expiry, oauth_subclient_id=False):
|
||||
"""Stores an authentication token.
|
||||
|
||||
:returns: the token document from MongoDB
|
||||
"""
|
||||
|
||||
token_data = {
|
||||
'user': user_id,
|
||||
'token': token,
|
||||
'expire_time': token_expiry,
|
||||
}
|
||||
if oauth_subclient_id:
|
||||
token_data['is_subclient_token'] = True
|
||||
|
||||
r, _, _, status = post_internal('tokens', token_data)
|
||||
|
||||
if status not in {200, 201}:
|
||||
log.error('Unable to store authentication token: %s', r)
|
||||
raise RuntimeError('Unable to store authentication token.')
|
||||
|
||||
token_data.update(r)
|
||||
return token_data
|
||||
|
||||
|
||||
def create_new_user(email, username, user_id):
|
||||
"""Creates a new user in our local database.
|
||||
|
||||
@param email: the user's email
|
||||
@param username: the username, which is also used as full name.
|
||||
@param user_id: the user ID from the Blender ID server.
|
||||
@returns: the user ID from our local database.
|
||||
"""
|
||||
|
||||
user_data = create_new_user_document(email, user_id, username)
|
||||
r = post_internal('users', user_data)
|
||||
user_id = r[0]['_id']
|
||||
return user_id
|
||||
|
||||
|
||||
def create_new_user_document(email, user_id, username, provider='blender-id',
|
||||
token=''):
|
||||
"""Creates a new user document, without storing it in MongoDB. The token
|
||||
parameter is a password in case provider is "local".
|
||||
"""
|
||||
|
||||
user_data = {
|
||||
'full_name': username,
|
||||
'username': username,
|
||||
'email': email,
|
||||
'auth': [{
|
||||
'provider': provider,
|
||||
'user_id': str(user_id),
|
||||
'token': token}],
|
||||
'settings': {
|
||||
'email_communications': 1
|
||||
},
|
||||
'groups': [],
|
||||
}
|
||||
return user_data
|
||||
|
||||
|
||||
def make_unique_username(email):
|
||||
"""Creates a unique username from the email address.
|
||||
|
||||
@param email: the email address
|
||||
@returns: the new username
|
||||
@rtype: str
|
||||
"""
|
||||
|
||||
username = email.split('@')[0]
|
||||
# Check for min length of username (otherwise validation fails)
|
||||
username = "___{0}".format(username) if len(username) < 3 else username
|
||||
|
||||
users = current_app.data.driver.db['users']
|
||||
user_from_username = users.find_one({'username': username})
|
||||
|
||||
if not user_from_username:
|
||||
return username
|
||||
|
||||
# Username exists, make it unique by adding some number after it.
|
||||
suffix = 1
|
||||
while True:
|
||||
unique_name = '%s%i' % (username, suffix)
|
||||
user_from_username = users.find_one({'username': unique_name})
|
||||
if user_from_username is None:
|
||||
return unique_name
|
||||
suffix += 1
|
||||
|
||||
|
||||
def _delete_expired_tokens():
|
||||
"""Deletes tokens that have expired.
|
||||
|
||||
For debugging, we keep expired tokens around for a few days, so that we
|
||||
can determine that a token was expired rather than not created in the
|
||||
first place. It also grants some leeway in clock synchronisation.
|
||||
"""
|
||||
|
||||
token_coll = current_app.data.driver.db['tokens']
|
||||
|
||||
now = datetime.datetime.now(tz_util.utc)
|
||||
expiry_date = now - datetime.timedelta(days=7)
|
||||
|
||||
result = token_coll.delete_many({'expire_time': {"$lt": expiry_date}})
|
||||
# log.debug('Deleted %i expired authentication tokens', result.deleted_count)
|
||||
|
||||
|
||||
def current_user_id():
|
||||
"""None-safe fetching of user ID. Can return None itself, though."""
|
||||
|
||||
current_user = g.get('current_user') or {}
|
||||
return current_user.get('user_id')
|
||||
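The uniqueness loop in make_unique_username() queries MongoDB for every candidate name; a database-free sketch of the same suffixing idea (the set of taken names is made up):

def make_unique(username, taken):
    """Returns username, or username1, username2, ... once the plain name is taken."""
    if username not in taken:
        return username
    suffix = 1
    while '%s%i' % (username, suffix) in taken:
        suffix += 1
    return '%s%i' % (username, suffix)

print(make_unique('suzanne', {'suzanne', 'suzanne1'}))  # suzanne2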
@@ -1,224 +0,0 @@
|
||||
import os
|
||||
import time
|
||||
import datetime
|
||||
import logging
|
||||
|
||||
from bson import ObjectId
|
||||
from gcloud.storage.client import Client
|
||||
from gcloud.exceptions import NotFound
|
||||
from flask import current_app, g
|
||||
from werkzeug.local import LocalProxy
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_client():
|
||||
"""Stores the GCS client on the global Flask object.
|
||||
|
||||
The GCS client is not user-specific anyway.
|
||||
|
||||
:rtype: Client
|
||||
"""
|
||||
|
||||
_gcs = getattr(g, '_gcs_client', None)
|
||||
if _gcs is None:
|
||||
_gcs = g._gcs_client = Client()
|
||||
return _gcs
|
||||
|
||||
|
||||
# This hides the specifics of how/where we store the GCS client,
|
||||
# and allows the rest of the code to use 'gcs' as a simple variable
|
||||
# that does the right thing.
|
||||
gcs = LocalProxy(get_client)
|
||||
|
||||
|
||||
class GoogleCloudStorageBucket(object):
|
||||
"""Cloud Storage bucket interface. We create a bucket for every project. In
|
||||
the bucket we create first level subdirs as follows:
|
||||
- '_' (will contain hashed assets, and stays on top of default listing)
|
||||
- 'svn' (svn checkout mirror)
|
||||
- 'shared' (any additional folder of static assets that is accessed via a
|
||||
node of 'storage' node_type)
|
||||
|
||||
:type bucket_name: string
|
||||
:param bucket_name: Name of the bucket.
|
||||
|
||||
:type subdir: string
|
||||
:param subdir: The local entry point to browse the bucket.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, bucket_name, subdir='_/'):
|
||||
try:
|
||||
self.bucket = gcs.get_bucket(bucket_name)
|
||||
except NotFound:
|
||||
self.bucket = gcs.bucket(bucket_name)
|
||||
# Hardcode the bucket location to EU
|
||||
self.bucket.location = 'EU'
|
||||
# Optionally enable CORS from * (currently only used for vrview)
|
||||
# self.bucket.cors = [
|
||||
# {
|
||||
# "origin": ["*"],
|
||||
# "responseHeader": ["Content-Type"],
|
||||
# "method": ["GET", "HEAD", "DELETE"],
|
||||
# "maxAgeSeconds": 3600
|
||||
# }
|
||||
# ]
|
||||
self.bucket.create()
|
||||
|
||||
self.subdir = subdir
|
||||
|
||||
def List(self, path=None):
|
||||
"""Display the content of a subdir in the project bucket. If the path
|
||||
points to a file the listing is simply empty.
|
||||
|
||||
:type path: string
|
||||
:param path: The relative path to the directory or asset.
|
||||
"""
|
||||
if path and not path.endswith('/'):
|
||||
path += '/'
|
||||
prefix = os.path.join(self.subdir, path)
|
||||
|
||||
fields_to_return = 'nextPageToken,items(name,size,contentType),prefixes'
|
||||
req = self.bucket.list_blobs(fields=fields_to_return, prefix=prefix,
|
||||
delimiter='/')
|
||||
|
||||
files = []
|
||||
for f in req:
|
||||
filename = os.path.basename(f.name)
|
||||
if filename != '': # Skip own folder name
|
||||
files.append(dict(
|
||||
path=os.path.relpath(f.name, self.subdir),
|
||||
text=filename,
|
||||
type=f.content_type))
|
||||
|
||||
directories = []
|
||||
for dir_path in req.prefixes:
|
||||
directory_name = os.path.basename(os.path.normpath(dir_path))
|
||||
directories.append(dict(
|
||||
text=directory_name,
|
||||
path=os.path.relpath(dir_path, self.subdir),
|
||||
type='group_storage',
|
||||
children=True))
|
||||
# print os.path.basename(os.path.normpath(path))
|
||||
|
||||
list_dict = dict(
|
||||
name=os.path.basename(os.path.normpath(path)),
|
||||
type='group_storage',
|
||||
children=files + directories
|
||||
)
|
||||
|
||||
return list_dict
|
||||
|
||||
def blob_to_dict(self, blob):
|
||||
blob.reload()
|
||||
expiration = datetime.datetime.now() + datetime.timedelta(days=1)
|
||||
expiration = int(time.mktime(expiration.timetuple()))
|
||||
return dict(
|
||||
updated=blob.updated,
|
||||
name=os.path.basename(blob.name),
|
||||
size=blob.size,
|
||||
content_type=blob.content_type,
|
||||
signed_url=blob.generate_signed_url(expiration),
|
||||
public_url=blob.public_url)
|
||||
|
||||
def Get(self, path, to_dict=True):
|
||||
"""Get selected file info if the path matches.
|
||||
|
||||
:type path: string
|
||||
:param path: The relative path to the file.
|
||||
:type to_dict: bool
|
||||
:param to_dict: Return the object as a dictionary.
|
||||
"""
|
||||
path = os.path.join(self.subdir, path)
|
||||
blob = self.bucket.blob(path)
|
||||
if blob.exists():
|
||||
if to_dict:
|
||||
return self.blob_to_dict(blob)
|
||||
else:
|
||||
return blob
|
||||
else:
|
||||
return None
|
||||
|
||||
def Post(self, full_path, path=None):
|
||||
"""Create new blob and upload data to it.
|
||||
"""
|
||||
path = path if path else os.path.join('_', os.path.basename(full_path))
|
||||
blob = self.bucket.blob(path)
|
||||
if blob.exists():
|
||||
return None
|
||||
blob.upload_from_filename(full_path)
|
||||
return blob
|
||||
# return self.blob_to_dict(blob) # Has issues with threading
|
||||
|
||||
def Delete(self, path):
|
||||
"""Delete blob (when removing an asset or replacing a preview)"""
|
||||
|
||||
# We want to get the actual blob to delete
|
||||
blob = self.Get(path, to_dict=False)
|
||||
try:
|
||||
blob.delete()
|
||||
return True
|
||||
except NotFound:
|
||||
return None
|
||||
|
||||
def update_name(self, blob, name):
|
||||
"""Set the ContentDisposition metadata so that when a file is downloaded
|
||||
it has a human-readable name.
|
||||
"""
|
||||
blob.content_disposition = u'attachment; filename="{0}"'.format(name)
|
||||
blob.patch()
|
||||
|
||||
|
||||
def update_file_name(node):
|
||||
"""Assign to the CGS blob the same name of the asset node. This way when
|
||||
downloading an asset we get a human-readable name.
|
||||
"""
|
||||
|
||||
# Process only files that are not processing
|
||||
if node['properties'].get('status', '') == 'processing':
|
||||
return
|
||||
|
||||
def _format_name(name, override_ext, size=None, map_type=u''):
|
||||
root, _ = os.path.splitext(name)
|
||||
size = u'-{}'.format(size) if size else u''
|
||||
map_type = u'-{}'.format(map_type) if map_type else u''
|
||||
return u'{}{}{}{}'.format(root, size, map_type, override_ext)
|
||||
|
||||
def _update_name(file_id, file_props):
|
||||
files_collection = current_app.data.driver.db['files']
|
||||
file_doc = files_collection.find_one({'_id': ObjectId(file_id)})
|
||||
|
||||
if file_doc is None or file_doc.get('backend') != 'gcs':
|
||||
return
|
||||
|
||||
# For textures -- the map type should be part of the name.
|
||||
map_type = file_props.get('map_type', u'')
|
||||
|
||||
storage = GoogleCloudStorageBucket(str(node['project']))
|
||||
blob = storage.Get(file_doc['file_path'], to_dict=False)
|
||||
# Pick file extension from original filename
|
||||
_, ext = os.path.splitext(file_doc['filename'])
|
||||
name = _format_name(node['name'], ext, map_type=map_type)
|
||||
storage.update_name(blob, name)
|
||||
|
||||
# Assign the same name to variations
|
||||
for v in file_doc.get('variations', []):
|
||||
_, override_ext = os.path.splitext(v['file_path'])
|
||||
name = _format_name(node['name'], override_ext, v['size'], map_type=map_type)
|
||||
blob = storage.Get(v['file_path'], to_dict=False)
|
||||
if blob is None:
|
||||
log.info('Unable to find blob for file %s in project %s. This can happen if the '
|
||||
'video encoding is still processing.', v['file_path'], node['project'])
|
||||
continue
|
||||
storage.update_name(blob, name)
|
||||
|
||||
# Currently we search for 'file' and 'files' keys in the object properties.
|
||||
# This could become a bit more flexible and rely on a true reference of the
|
||||
# file object type from the schema.
|
||||
if 'file' in node['properties']:
|
||||
_update_name(node['properties']['file'], {})
|
||||
|
||||
if 'files' in node['properties']:
|
||||
for file_props in node['properties']['files']:
|
||||
_update_name(file_props['file'], file_props)
|
||||
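The nested _format_name() helper above builds the human-readable download name from the node name, an optional variation size and an optional texture map type; a standalone copy with made-up example filenames:

import os


def _format_name(name, override_ext, size=None, map_type=u''):
    root, _ = os.path.splitext(name)
    size = u'-{}'.format(size) if size else u''
    map_type = u'-{}'.format(map_type) if map_type else u''
    return u'{}{}{}{}'.format(root, size, map_type, override_ext)


print(_format_name('Rock Wall.blend', '.png'))                            # Rock Wall.png
print(_format_name('Rock Wall.blend', '.jpg', size='t', map_type='col'))  # Rock Wall-t-col.jpg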
@@ -1,83 +0,0 @@
import os
import subprocess

from flask import current_app
from application.utils.gcs import GoogleCloudStorageBucket


def get_sizedata(filepath):
    outdata = dict(
        size=int(os.stat(filepath).st_size)
    )
    return outdata


def rsync(path, remote_dir=''):
    BIN_SSH = current_app.config['BIN_SSH']
    BIN_RSYNC = current_app.config['BIN_RSYNC']

    DRY_RUN = False
    arguments = ['--verbose', '--ignore-existing', '--recursive', '--human-readable']
    logs_path = current_app.config['CDN_SYNC_LOGS']
    storage_address = current_app.config['CDN_STORAGE_ADDRESS']
    user = current_app.config['CDN_STORAGE_USER']
    rsa_key_path = current_app.config['CDN_RSA_KEY']
    known_hosts_path = current_app.config['CDN_KNOWN_HOSTS']

    if DRY_RUN:
        arguments.append('--dry-run')
    folder_arguments = list(arguments)
    if rsa_key_path:
        folder_arguments.append(
            '-e ' + BIN_SSH + ' -i ' + rsa_key_path + ' -o "StrictHostKeyChecking=no"')
    # if known_hosts_path:
    #     folder_arguments.append("-o UserKnownHostsFile " + known_hosts_path)
    folder_arguments.append("--log-file=" + logs_path + "/rsync.log")
    folder_arguments.append(path)
    folder_arguments.append(user + "@" + storage_address + ":/public/" + remote_dir)
    # print (folder_arguments)
    devnull = open(os.devnull, 'wb')
    # DEBUG CONFIG
    # print folder_arguments
    # proc = subprocess.Popen(['rsync'] + folder_arguments)
    # stdout, stderr = proc.communicate()
    subprocess.Popen(['nohup', BIN_RSYNC] + folder_arguments, stdout=devnull, stderr=devnull)


def remote_storage_sync(path):  # can be both folder and file
    if os.path.isfile(path):
        filename = os.path.split(path)[1]
        rsync(path, filename[:2] + '/')
    else:
        if os.path.exists(path):
            rsync(path)
        else:
            raise IOError('ERROR: path not found')


def push_to_storage(project_id, full_path, backend='cgs'):
    """Move a file from temporary/processing local storage to a storage endpoint.
    By default we store items in a Google Cloud Storage bucket named after the
    project id.
    """

    def push_single_file(project_id, full_path, backend):
        if backend == 'cgs':
            storage = GoogleCloudStorageBucket(project_id, subdir='_')
            blob = storage.Post(full_path)
            # XXX Make public on the fly if it's an image and small preview.
            # This should happen by reading the database (push to storage
            # should change to accommodate it).
            if blob is not None and full_path.endswith('-t.jpg'):
                blob.make_public()
            os.remove(full_path)

    if os.path.isfile(full_path):
        push_single_file(project_id, full_path, backend)
    else:
        if os.path.exists(full_path):
            for root, dirs, files in os.walk(full_path):
                for name in files:
                    push_single_file(project_id, os.path.join(root, name), backend)
        else:
            raise IOError('ERROR: path not found')
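A hypothetical call sequence for the helpers above (the project id and paths are made up; both calls require an application context with the relevant config keys):

# Push one processed file to the project's Cloud Storage bucket.
push_to_storage('5672beecc0261b2005ed1a33', '/tmp/processing/preview-t.jpg')

# Mirror a local folder to the CDN origin server over rsync.
remote_storage_sync('/data/storage/shared/textures')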
20 pillar/attrs_extra.py Normal file
@@ -0,0 +1,20 @@
"""Extra functionality for attrs."""

import functools
import logging

import attr

string = functools.partial(attr.ib, validator=attr.validators.instance_of(str))


def log(name):
    """Returns a logger attr.ib

    :param name: name to pass to logging.getLogger()
    :rtype: attr.ib
    """
    return attr.ib(default=logging.getLogger(name),
                   repr=False,
                   hash=False,
                   cmp=False)
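A minimal sketch of how these two helpers are meant to be used inside an attrs class (the Task class is made up for illustration):

import attr

from pillar import attrs_extra


@attr.s
class Task(object):
    name = attrs_extra.string()        # must be a str, enforced by the validator
    _log = attrs_extra.log(__name__)   # logger, excluded from repr/hash/cmp

    def run(self):
        self._log.info('running task %r', self.name)


Task(name='encode-video').run()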
274 pillar/auth/__init__.py Normal file
@@ -0,0 +1,274 @@
|
||||
"""Authentication code common to the web and api modules."""
|
||||
|
||||
import collections
|
||||
import logging
|
||||
import typing
|
||||
|
||||
import blinker
|
||||
import bson
|
||||
from flask import session, g
|
||||
import flask_login
|
||||
from werkzeug.local import LocalProxy
|
||||
|
||||
from pillar import current_app
|
||||
|
||||
user_authenticated = blinker.Signal('Sent whenever a user was authenticated')
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# Mapping from user role to capabilities obtained by users with that role.
|
||||
CAPABILITIES = collections.defaultdict(frozenset, **{
    'subscriber': {'subscriber', 'home-project'},
    'demo': {'subscriber', 'home-project'},
    'admin': {'video-encoding', 'admin',
              'view-pending-nodes', 'edit-project-node-types'},
})  # frozenset is the default factory, so unknown roles map to an empty set
|
||||
|
||||
|
||||
class UserClass(flask_login.UserMixin):
|
||||
def __init__(self, token: typing.Optional[str]):
|
||||
# We store the Token instead of ID
|
||||
self.id = token
|
||||
self.username: str = None
|
||||
self.full_name: str = None
|
||||
self.user_id: bson.ObjectId = None
|
||||
self.objectid: str = None
|
||||
self.gravatar: str = None
|
||||
self.email: str = None
|
||||
self.roles: typing.List[str] = []
|
||||
self.groups: typing.List[str] = [] # NOTE: these are stringified object IDs.
|
||||
self.group_ids: typing.List[bson.ObjectId] = []
|
||||
self.capabilities: typing.Set[str] = set()
|
||||
|
||||
# Lazily evaluated
|
||||
self._has_organizations: typing.Optional[bool] = None
|
||||
|
||||
@classmethod
|
||||
def construct(cls, token: str, db_user: dict) -> 'UserClass':
|
||||
"""Constructs a new UserClass instance from a Mongo user document."""
|
||||
|
||||
from ..api import utils
|
||||
|
||||
user = cls(token)
|
||||
|
||||
user.user_id = db_user.get('_id')
|
||||
user.roles = db_user.get('roles') or []
|
||||
user.group_ids = db_user.get('groups') or []
|
||||
user.email = db_user.get('email') or ''
|
||||
user.username = db_user.get('username') or ''
|
||||
user.full_name = db_user.get('full_name') or ''
|
||||
|
||||
# Derived properties
|
||||
user.objectid = str(user.user_id or '')
|
||||
user.gravatar = utils.gravatar(user.email)
|
||||
user.groups = [str(g) for g in user.group_ids]
|
||||
user.collect_capabilities()
|
||||
|
||||
return user
|
||||
|
||||
def __repr__(self):
|
||||
return f'UserClass(user_id={self.user_id})'
|
||||
|
||||
def __str__(self):
|
||||
return f'{self.__class__.__name__}(id={self.user_id}, email={self.email!r}'
|
||||
|
||||
def __getitem__(self, item):
|
||||
"""Compatibility layer with old dict-based g.current_user object."""
|
||||
|
||||
if item == 'user_id':
|
||||
return self.user_id
|
||||
if item == 'groups':
|
||||
return self.group_ids
|
||||
if item == 'roles':
|
||||
return set(self.roles)
|
||||
|
||||
raise KeyError(f'No such key {item!r}')
|
||||
|
||||
def get(self, key, default=None):
|
||||
"""Compatibility layer with old dict-based g.current_user object."""
|
||||
|
||||
try:
|
||||
return self[key]
|
||||
except KeyError:
|
||||
return default
|
||||
|
||||
def collect_capabilities(self):
|
||||
"""Constructs the capabilities set given the user's current roles.
|
||||
|
||||
Requires an application context to be active.
|
||||
"""
|
||||
|
||||
app_caps = current_app.user_caps
|
||||
|
||||
self.capabilities = set().union(*(app_caps[role] for role in self.roles))
|
||||
|
||||
def has_role(self, *roles):
|
||||
"""Returns True iff the user has one or more of the given roles."""
|
||||
|
||||
if not self.roles:
|
||||
return False
|
||||
|
||||
return bool(set(self.roles).intersection(set(roles)))
|
||||
|
||||
def has_cap(self, *capabilities: typing.Iterable[str]) -> bool:
|
||||
"""Returns True iff the user has one or more of the given capabilities."""
|
||||
|
||||
if not self.capabilities:
|
||||
return False
|
||||
|
||||
return bool(set(self.capabilities).intersection(set(capabilities)))
|
||||
|
||||
def matches_roles(self,
|
||||
require_roles=set(),
|
||||
require_all=False) -> bool:
|
||||
"""Returns True iff the user's roles matches the query.
|
||||
|
||||
:param require_roles: set of roles.
|
||||
:param require_all:
|
||||
When False (the default): if the user's roles have a
|
||||
non-empty intersection with the given roles, returns True.
|
||||
When True: require the user to have all given roles before
|
||||
returning True.
|
||||
"""
|
||||
|
||||
if not isinstance(require_roles, set):
|
||||
raise TypeError(f'require_roles param should be a set, but is {type(require_roles)!r}')
|
||||
|
||||
if require_all and not require_roles:
|
||||
raise ValueError('require_login(require_all=True) cannot be used with '
|
||||
'empty require_roles.')
|
||||
|
||||
intersection = require_roles.intersection(self.roles)
|
||||
if require_all:
|
||||
return len(intersection) == len(require_roles)
|
||||
|
||||
return not bool(require_roles) or bool(intersection)
|
||||
|
||||
def has_organizations(self) -> bool:
|
||||
"""Returns True iff this user administers or is member of any organization."""
|
||||
|
||||
if self._has_organizations is None:
|
||||
assert self.user_id
|
||||
self._has_organizations = current_app.org_manager.user_has_organizations(self.user_id)
|
||||
|
||||
return bool(self._has_organizations)
|
||||
|
||||
|
||||
class AnonymousUser(flask_login.AnonymousUserMixin, UserClass):
|
||||
def __init__(self):
|
||||
super().__init__(token=None)
|
||||
|
||||
def has_role(self, *roles):
|
||||
return False
|
||||
|
||||
def has_cap(self, *capabilities):
|
||||
return False
|
||||
|
||||
def has_organizations(self) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def _load_user(token) -> typing.Union[UserClass, AnonymousUser]:
|
||||
"""Loads a user by their token.
|
||||
|
||||
:returns: returns a UserClass instance if logged in, or an AnonymousUser() if not.
|
||||
"""
|
||||
|
||||
from ..api.utils import authentication
|
||||
|
||||
if not token:
|
||||
return AnonymousUser()
|
||||
|
||||
db_user = authentication.validate_this_token(token)
|
||||
if not db_user:
|
||||
# There is a token, but it's not valid. We should reset the user's session.
|
||||
session.clear()
|
||||
return AnonymousUser()
|
||||
|
||||
user = UserClass.construct(token, db_user)
|
||||
|
||||
return user
|
||||
|
||||
|
||||
def config_login_manager(app):
|
||||
"""Configures the Flask-Login manager, used for the web endpoints."""
|
||||
|
||||
login_manager = flask_login.LoginManager()
|
||||
login_manager.init_app(app)
|
||||
login_manager.login_view = "users.login"
|
||||
login_manager.login_message = ''
|
||||
login_manager.anonymous_user = AnonymousUser
|
||||
# noinspection PyTypeChecker
|
||||
login_manager.user_loader(_load_user)
|
||||
|
||||
return login_manager
|
||||
|
||||
|
||||
def login_user(oauth_token: str, *, load_from_db=False):
|
||||
"""Log in the user identified by the given token."""
|
||||
|
||||
if load_from_db:
|
||||
user = _load_user(oauth_token)
|
||||
else:
|
||||
user = UserClass(oauth_token)
|
||||
flask_login.login_user(user, remember=True)
|
||||
g.current_user = user
|
||||
user_authenticated.send(None)
|
||||
|
||||
|
||||
def logout_user():
|
||||
"""Forces a logout of the current user."""
|
||||
|
||||
from ..api.utils import authentication
|
||||
|
||||
token = get_blender_id_oauth_token()
|
||||
if token:
|
||||
authentication.remove_token(token)
|
||||
|
||||
session.clear()
|
||||
flask_login.logout_user()
|
||||
g.current_user = AnonymousUser()
|
||||
|
||||
|
||||
def get_blender_id_oauth_token() -> str:
|
||||
"""Returns the Blender ID auth token, or an empty string if there is none."""
|
||||
|
||||
from flask import request
|
||||
|
||||
token = session.get('blender_id_oauth_token')
|
||||
if token:
|
||||
if isinstance(token, (tuple, list)):
|
||||
# In a past version of Pillar we accidentally stored tuples in the session.
|
||||
# Such sessions should be actively fixed.
|
||||
# TODO(anyone, after 2017-12-01): refactor this if-block so that it just converts
|
||||
# the token value to a string and use that instead.
|
||||
token = token[0]
|
||||
session['blender_id_oauth_token'] = token
|
||||
return token
|
||||
|
||||
if request.authorization and request.authorization.username:
|
||||
return request.authorization.username
|
||||
|
||||
if current_user.is_authenticated and current_user.id:
|
||||
return current_user.id
|
||||
|
||||
return ''
|
||||
|
||||
|
||||
def get_current_user() -> UserClass:
|
||||
"""Returns the current user as a UserClass instance.
|
||||
|
||||
Never returns None; returns an AnonymousUser() instance instead.
|
||||
|
||||
This function is intended to be used when pillar.auth.current_user is
|
||||
accessed many times in the same scope. Calling this function is then
|
||||
more efficient, since it doesn't have to resolve the LocalProxy for
|
||||
each access to the returned object.
|
||||
"""
|
||||
|
||||
from ..api.utils.authentication import current_user
|
||||
|
||||
return current_user()
|
||||
|
||||
|
||||
current_user: UserClass = LocalProxy(get_current_user)
|
||||
"""The current user."""
|
||||
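collect_capabilities() above unions the capability sets of all of the user's roles; a condensed standalone sketch of that union (the 'org-member' role is made up and has no explicit entry, so it falls back to the empty frozenset):

import collections

CAPABILITIES = collections.defaultdict(frozenset, {
    'subscriber': {'subscriber', 'home-project'},
    'admin': {'admin', 'video-encoding'},
})

roles = ['subscriber', 'org-member']
capabilities = set().union(*(CAPABILITIES[role] for role in roles))
print(sorted(capabilities))  # ['home-project', 'subscriber']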
219 pillar/auth/oauth.py Normal file
@@ -0,0 +1,219 @@
|
||||
import abc
|
||||
import attr
|
||||
import json
|
||||
import logging
|
||||
|
||||
from rauth import OAuth2Service
|
||||
from flask import current_app, url_for, request, redirect, session, Response
|
||||
|
||||
|
||||
@attr.s
|
||||
class OAuthUserResponse:
|
||||
"""Represents user information requested to an OAuth provider after
|
||||
authenticating.
|
||||
"""
|
||||
|
||||
id = attr.ib(validator=attr.validators.instance_of(str))
|
||||
email = attr.ib(validator=attr.validators.instance_of(str))
|
||||
|
||||
|
||||
class OAuthError(Exception):
|
||||
"""Superclass of all exceptions raised by this module."""
|
||||
|
||||
|
||||
class ProviderConfigurationMissing(OAuthError):
|
||||
"""Raised when an OAuth provider is used but not configured."""
|
||||
|
||||
|
||||
class ProviderNotImplemented(OAuthError):
|
||||
"""Raised when a provider is requested that does not exist."""
|
||||
|
||||
|
||||
class OAuthCodeNotProvided(OAuthError):
|
||||
"""Raised when the 'code' arg is not provided in the OAuth callback."""
|
||||
|
||||
|
||||
class ProviderNotConfigured:
|
||||
"""Dummy class that indicates a provider isn't configured."""
|
||||
|
||||
|
||||
class OAuthSignIn(metaclass=abc.ABCMeta):
|
||||
provider_name: str = None # set in each subclass.
|
||||
|
||||
_providers = None # initialized in get_provider()
|
||||
_log = logging.getLogger(f'{__name__}.OAuthSignIn')
|
||||
|
||||
def __init__(self):
|
||||
credentials = current_app.config['OAUTH_CREDENTIALS'].get(self.provider_name)
|
||||
if not credentials:
|
||||
raise ProviderConfigurationMissing(
|
||||
f'Missing OAuth credentials for {self.provider_name}')
|
||||
|
||||
self.consumer_id = credentials['id']
|
||||
self.consumer_secret = credentials['secret']
|
||||
|
||||
# Set in a subclass
|
||||
self.service: OAuth2Service = None
|
||||
|
||||
@abc.abstractmethod
|
||||
def authorize(self) -> Response:
|
||||
"""Redirect to the correct authorization endpoint for the current provider.
|
||||
|
||||
Depending on the provider, we sometimes have to specify a different
|
||||
'scope'.
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def callback(self) -> OAuthUserResponse:
|
||||
"""Callback performed after authorizing the user.
|
||||
|
||||
This is usually a request to a protected /me endpoint to query for
|
||||
user information, such as user id and email address.
|
||||
"""
|
||||
pass
|
||||
|
||||
def get_callback_url(self):
|
||||
return url_for('users.oauth_callback', provider=self.provider_name,
|
||||
_external=True, _scheme=current_app.config['SCHEME'])
|
||||
|
||||
@staticmethod
|
||||
def auth_code_from_request() -> str:
|
||||
try:
|
||||
return request.args['code']
|
||||
except KeyError:
|
||||
raise OAuthCodeNotProvided('A code argument was not provided in the request')
|
||||
|
||||
@staticmethod
|
||||
def decode_json(payload):
|
||||
return json.loads(payload.decode('utf-8'))
|
||||
|
||||
def make_oauth_session(self):
|
||||
return self.service.get_auth_session(
|
||||
data={'code': self.auth_code_from_request(),
|
||||
'grant_type': 'authorization_code',
|
||||
'redirect_uri': self.get_callback_url()},
|
||||
decoder=self.decode_json
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_provider(cls, provider_name) -> 'OAuthSignIn':
|
||||
if cls._providers is None:
|
||||
cls._init_providers()
|
||||
|
||||
try:
|
||||
provider = cls._providers[provider_name]
|
||||
except KeyError:
|
||||
raise ProviderNotImplemented(f'No such OAuth provider {provider_name}')
|
||||
|
||||
if provider is ProviderNotConfigured:
|
||||
raise ProviderConfigurationMissing(f'OAuth provider {provider_name} not configured')
|
||||
|
||||
return provider
|
||||
|
||||
@classmethod
|
||||
def _init_providers(cls):
|
||||
cls._providers = {}
|
||||
|
||||
for provider_class in cls.__subclasses__():
|
||||
try:
|
||||
provider = provider_class()
|
||||
except ProviderConfigurationMissing:
|
||||
cls._log.info('OAuth provider %s not configured',
|
||||
provider_class.provider_name)
|
||||
provider = ProviderNotConfigured
|
||||
cls._providers[provider_class.provider_name] = provider
|
||||
|
||||
|
||||
class BlenderIdSignIn(OAuthSignIn):
|
||||
provider_name = 'blender-id'
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
base_url = current_app.config['BLENDER_ID_ENDPOINT']
|
||||
|
||||
self.service = OAuth2Service(
|
||||
name='blender-id',
|
||||
client_id=self.consumer_id,
|
||||
client_secret=self.consumer_secret,
|
||||
authorize_url='%s/oauth/authorize' % base_url,
|
||||
access_token_url='%s/oauth/token' % base_url,
|
||||
base_url='%s/api/' % base_url
|
||||
)
|
||||
|
||||
def authorize(self):
|
||||
return redirect(self.service.get_authorize_url(
|
||||
scope='email',
|
||||
response_type='code',
|
||||
redirect_uri=self.get_callback_url())
|
||||
)
|
||||
|
||||
def callback(self):
|
||||
oauth_session = self.make_oauth_session()
|
||||
|
||||
# TODO handle exception for failed oauth or not authorized
|
||||
access_token = oauth_session.access_token
|
||||
assert isinstance(access_token, str), f'oauth token must be str, not {type(access_token)}'
|
||||
|
||||
session['blender_id_oauth_token'] = access_token
|
||||
me = oauth_session.get('user').json()
|
||||
return OAuthUserResponse(str(me['id']), me['email'])
|
||||
|
||||
|
||||
class FacebookSignIn(OAuthSignIn):
|
||||
provider_name = 'facebook'
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.service = OAuth2Service(
|
||||
name='facebook',
|
||||
client_id=self.consumer_id,
|
||||
client_secret=self.consumer_secret,
|
||||
authorize_url='https://graph.facebook.com/oauth/authorize',
|
||||
access_token_url='https://graph.facebook.com/oauth/access_token',
|
||||
base_url='https://graph.facebook.com/'
|
||||
)
|
||||
|
||||
def authorize(self):
|
||||
return redirect(self.service.get_authorize_url(
|
||||
scope='email',
|
||||
response_type='code',
|
||||
redirect_uri=self.get_callback_url())
|
||||
)
|
||||
|
||||
def callback(self):
|
||||
oauth_session = self.make_oauth_session()
|
||||
|
||||
me = oauth_session.get('me?fields=id,email').json()
|
||||
# TODO handle case when user chooses not to disclose an email address
|
||||
# see https://developers.facebook.com/docs/graph-api/reference/user/
|
||||
return OAuthUserResponse(me['id'], me.get('email'))
|
||||
|
||||
|
||||
class GoogleSignIn(OAuthSignIn):
|
||||
provider_name = 'google'
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.service = OAuth2Service(
|
||||
name='google',
|
||||
client_id=self.consumer_id,
|
||||
client_secret=self.consumer_secret,
|
||||
authorize_url='https://accounts.google.com/o/oauth2/auth',
|
||||
access_token_url='https://accounts.google.com/o/oauth2/token',
|
||||
base_url='https://www.googleapis.com/oauth2/v1/'
|
||||
)
|
||||
|
||||
def authorize(self):
|
||||
return redirect(self.service.get_authorize_url(
|
||||
scope='https://www.googleapis.com/auth/userinfo.email',
|
||||
response_type='code',
|
||||
redirect_uri=self.get_callback_url())
|
||||
)
|
||||
|
||||
def callback(self):
|
||||
oauth_session = self.make_oauth_session()
|
||||
|
||||
me = oauth_session.get('userinfo').json()
|
||||
return OAuthUserResponse(str(me['id']), me['email'])
|
||||
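A sketch of how the provider registry above might be wired into login views (the blueprint and routes are hypothetical, but the endpoint name matches the url_for() call in get_callback_url(); error handling is omitted):

from flask import Blueprint

from pillar.auth import oauth

blueprint = Blueprint('users', __name__)


@blueprint.route('/oauth/<provider>/authorize')
def oauth_authorize(provider):
    # Redirect to the provider's authorization URL.
    return oauth.OAuthSignIn.get_provider(provider).authorize()


@blueprint.route('/oauth/<provider>/authorized')
def oauth_callback(provider):
    # Exchange the ?code=... argument for an access token and fetch id/email.
    user_info = oauth.OAuthSignIn.get_provider(provider).callback()
    return 'Logged in as %s' % user_info.email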
52 pillar/bugsnag_extra.py Normal file
@@ -0,0 +1,52 @@
|
||||
# Keys in the user's session dictionary that are removed before sending to Bugsnag.
|
||||
SESSION_KEYS_TO_REMOVE = ('blender_id_oauth_token', 'user_id')
|
||||
|
||||
|
||||
def add_pillar_request_to_notification(notification):
|
||||
"""Adds request metadata to the Bugsnag notifications.
|
||||
|
||||
This basically copies bugsnag.flask.add_flask_request_to_notification,
|
||||
but is altered to include Pillar-specific metadata.
|
||||
"""
|
||||
from flask import request, session
|
||||
from bugsnag.wsgi import request_path
|
||||
import pillar.auth
|
||||
|
||||
if not request:
|
||||
return
|
||||
|
||||
notification.context = "%s %s" % (request.method,
|
||||
request_path(request.environ))
|
||||
|
||||
if 'id' not in notification.user:
|
||||
user: pillar.auth.UserClass = pillar.auth.current_user._get_current_object()
|
||||
notification.set_user(id=user.user_id,
|
||||
email=user.email,
|
||||
name=user.username)
|
||||
notification.user['roles'] = sorted(user.roles)
|
||||
notification.user['capabilities'] = sorted(user.capabilities)
|
||||
|
||||
session_dict = dict(session)
|
||||
for key in SESSION_KEYS_TO_REMOVE:
|
||||
try:
|
||||
del session_dict[key]
|
||||
except KeyError:
|
||||
pass
|
||||
notification.add_tab("session", session_dict)
|
||||
notification.add_tab("environment", dict(request.environ))
|
||||
|
||||
remote_addr = request.remote_addr
|
||||
forwarded_for = request.headers.get('X-Forwarded-For')
|
||||
if forwarded_for:
|
||||
remote_addr = f'{forwarded_for} (proxied via {remote_addr})'
|
||||
|
||||
notification.add_tab("request", {
|
||||
"method": request.method,
|
||||
"url": request.base_url,
|
||||
"headers": dict(request.headers),
|
||||
"params": dict(request.form),
|
||||
"data": {'request.data': request.data,
|
||||
'request.json': request.get_json()},
|
||||
"endpoint": request.endpoint,
|
||||
"remote_addr": remote_addr,
|
||||
})
|
||||
6 pillar/celery/__init__.py Normal file
@@ -0,0 +1,6 @@
"""Tasks to be run by the Celery worker.

If you create a new submodule/subpackage, be sure to add it to
PillarServer._config_celery() too.
"""
Some files were not shown because too many files have changed in this diff.