2 Commits

SHA1 Message Date
ec8f5032e9 Embed iframe shortcodes into container 2018-08-05 14:23:05 +02:00
7a5af9282c WIP on new landing pages or projects 2018-08-05 14:22:47 +02:00
314 changed files with 19390 additions and 28701 deletions


@@ -1,3 +0,0 @@
{
"presets": ["@babel/preset-env"]
}

.gitignore vendored (2 lines changed)

@@ -12,7 +12,6 @@ config_local.py
/build
/.cache
/.pytest_cache/
/*.egg-info/
profile.stats
/dump/
@@ -27,7 +26,6 @@ profile.stats
pillar/web/static/assets/css/*.css
pillar/web/static/assets/js/*.min.js
pillar/web/static/assets/js/vendor/video.min.js
pillar/web/static/storage/
pillar/web/static/uploads/
pillar/web/templates/


@@ -65,12 +65,6 @@ You can run the Celery Worker using `manage.py celery worker`.
Find other Celery operations with the `manage.py celery` command.
## Elasticsearch
Pillar uses [Elasticsearch](https://www.elastic.co/products/elasticsearch) to power the search engine.
You will need to run the `manage.py elastic reset_index` command to initialize the search index.
If you need to reindex your documents in Elasticsearch, run the `manage.py elastic reindex` command.
## Translations
If the language you want to support doesn't exist, you need to run: `translations init es_AR`.
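For quick reference, the management commands mentioned above in one place, exactly as the README gives them (assuming they are run from the project root):

    manage.py celery worker        # run the Celery Worker
    manage.py elastic reset_index  # initialize the search index
    manage.py elastic reindex      # reindex documents in Elasticsearch
    translations init es_AR        # add support for a new language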


@@ -1,51 +1,37 @@
let argv = require('minimist')(process.argv.slice(2));
let autoprefixer = require('gulp-autoprefixer');
let cache = require('gulp-cached');
let chmod = require('gulp-chmod');
let concat = require('gulp-concat');
let git = require('gulp-git');
let gulpif = require('gulp-if');
let gulp = require('gulp');
let livereload = require('gulp-livereload');
let plumber = require('gulp-plumber');
let pug = require('gulp-pug');
let rename = require('gulp-rename');
let sass = require('gulp-sass');
let sourcemaps = require('gulp-sourcemaps');
let uglify = require('gulp-uglify-es').default;
let browserify = require('browserify');
let babelify = require('babelify');
let sourceStream = require('vinyl-source-stream');
let glob = require('glob');
let es = require('event-stream');
let path = require('path');
let buffer = require('vinyl-buffer');
var argv = require('minimist')(process.argv.slice(2));
var autoprefixer = require('gulp-autoprefixer');
var cache = require('gulp-cached');
var chmod = require('gulp-chmod');
var concat = require('gulp-concat');
var git = require('gulp-git');
var gulpif = require('gulp-if');
var gulp = require('gulp');
var livereload = require('gulp-livereload');
var plumber = require('gulp-plumber');
var pug = require('gulp-pug');
var rename = require('gulp-rename');
var sass = require('gulp-sass');
var sourcemaps = require('gulp-sourcemaps');
var uglify = require('gulp-uglify');
let enabled = {
var enabled = {
uglify: argv.production,
maps: !argv.production,
maps: argv.production,
failCheck: !argv.production,
prettyPug: !argv.production,
cachify: !argv.production,
cleanup: argv.production,
chmod: argv.production,
};
let destination = {
var destination = {
css: 'pillar/web/static/assets/css',
pug: 'pillar/web/templates',
js: 'pillar/web/static/assets/js',
}
let source = {
bootstrap: 'node_modules/bootstrap/',
jquery: 'node_modules/jquery/',
popper: 'node_modules/popper.js/',
vue: 'node_modules/vue/',
}
/* Stylesheets */
gulp.task('styles', function(done) {
/* CSS */
gulp.task('styles', function() {
gulp.src('src/styles/**/*.sass')
.pipe(gulpif(enabled.failCheck, plumber()))
.pipe(gulpif(enabled.maps, sourcemaps.init()))
@@ -56,12 +42,11 @@ gulp.task('styles', function(done) {
.pipe(gulpif(enabled.maps, sourcemaps.write(".")))
.pipe(gulp.dest(destination.css))
.pipe(gulpif(argv.livereload, livereload()));
done();
});
/* Templates */
gulp.task('templates', function(done) {
/* Templates - Pug */
gulp.task('templates', function() {
gulp.src('src/templates/**/*.pug')
.pipe(gulpif(enabled.failCheck, plumber()))
.pipe(gulpif(enabled.cachify, cache('templating')))
@@ -70,12 +55,11 @@ gulp.task('templates', function(done) {
}))
.pipe(gulp.dest(destination.pug))
.pipe(gulpif(argv.livereload, livereload()));
done();
});
/* Individual Uglified Scripts */
gulp.task('scripts', function(done) {
gulp.task('scripts', function() {
gulp.src('src/scripts/*.js')
.pipe(gulpif(enabled.failCheck, plumber()))
.pipe(gulpif(enabled.cachify, cache('scripting')))
@@ -83,131 +67,56 @@ gulp.task('scripts', function(done) {
.pipe(gulpif(enabled.uglify, uglify()))
.pipe(rename({suffix: '.min'}))
.pipe(gulpif(enabled.maps, sourcemaps.write(".")))
.pipe(gulpif(enabled.chmod, chmod(0o644)))
.pipe(chmod(644))
.pipe(gulp.dest(destination.js))
.pipe(gulpif(argv.livereload, livereload()));
done();
});
function browserify_base(entry) {
let pathSplited = path.dirname(entry).split(path.sep);
let moduleName = pathSplited[pathSplited.length - 1];
return browserify({
entries: [entry],
standalone: 'pillar.' + moduleName,
})
.transform(babelify, { "presets": ["@babel/preset-env"] })
.bundle()
.pipe(gulpif(enabled.failCheck, plumber()))
.pipe(sourceStream(path.basename(entry)))
.pipe(buffer())
.pipe(rename({
basename: moduleName,
extname: '.min.js'
}));
}
/**
* Transcompile and package common modules to be included in tutti.js.
*
* Example:
* src/scripts/js/es6/common/api/init.js
* src/scripts/js/es6/common/events/init.js
* Everything exported in api/init.js will end up in module pillar.api.*, and everything exported in events/init.js
* will end up in pillar.events.*
*/
function browserify_common() {
return glob.sync('src/scripts/js/es6/common/**/init.js').map(browserify_base);
}
/**
* Transcompile and package individual modules.
*
* Example:
* src/scripts/js/es6/individual/coolstuff/init.js
* Will create a coolstuff.js and everything exported in init.js will end up in namespace pillar.coolstuff.*
*/
gulp.task('scripts_browserify', function(done) {
glob('src/scripts/js/es6/individual/**/init.js', function(err, files) {
if(err) done(err);
var tasks = files.map(function(entry) {
return browserify_base(entry)
.pipe(gulpif(enabled.maps, sourcemaps.init()))
.pipe(gulpif(enabled.uglify, uglify()))
.pipe(gulpif(enabled.maps, sourcemaps.write(".")))
.pipe(gulp.dest(destination.js));
});
es.merge(tasks).on('end', done);
})
});
/* Collection of scripts in src/scripts/tutti/ and src/scripts/js/es6/common/ to merge into tutti.min.js
* Since it's always loaded, it's only for functions that we want site-wide.
* It also includes jQuery and Bootstrap (and its dependency popper), since
* the site doesn't work without it anyway.*/
gulp.task('scripts_concat_tutti', function(done) {
let toUglify = [
source.jquery + 'dist/jquery.min.js',
source.vue + (enabled.uglify ? 'dist/vue.min.js' : 'dist/vue.js'),
source.popper + 'dist/umd/popper.min.js',
source.bootstrap + 'js/dist/index.js',
source.bootstrap + 'js/dist/util.js',
source.bootstrap + 'js/dist/alert.js',
source.bootstrap + 'js/dist/collapse.js',
source.bootstrap + 'js/dist/dropdown.js',
source.bootstrap + 'js/dist/tooltip.js',
'src/scripts/tutti/**/*.js'
];
es.merge(gulp.src(toUglify), ...browserify_common())
/* Collection of scripts in src/scripts/tutti/ to merge into tutti.min.js */
/* Since it's always loaded, it's only for functions that we want site-wide */
gulp.task('scripts_concat_tutti', function() {
gulp.src('src/scripts/tutti/**/*.js')
.pipe(gulpif(enabled.failCheck, plumber()))
.pipe(gulpif(enabled.maps, sourcemaps.init()))
.pipe(concat("tutti.min.js"))
.pipe(gulpif(enabled.uglify, uglify()))
.pipe(gulpif(enabled.maps, sourcemaps.write(".")))
.pipe(gulpif(enabled.chmod, chmod(0o644)))
.pipe(chmod(644))
.pipe(gulp.dest(destination.js))
.pipe(gulpif(argv.livereload, livereload()));
done();
});
/* Simply move these vendor scripts from node_modules. */
gulp.task('scripts_move_vendor', function(done) {
let toMove = [
'node_modules/video.js/dist/video.min.js',
];
gulp.src(toMove)
.pipe(gulp.dest(destination.js + '/vendor/'));
done();
gulp.task('scripts_concat_markdown', function() {
gulp.src('src/scripts/markdown/**/*.js')
.pipe(gulpif(enabled.failCheck, plumber()))
.pipe(gulpif(enabled.maps, sourcemaps.init()))
.pipe(concat("markdown.min.js"))
.pipe(gulpif(enabled.uglify, uglify()))
.pipe(gulpif(enabled.maps, sourcemaps.write(".")))
.pipe(chmod(644))
.pipe(gulp.dest(destination.js))
.pipe(gulpif(argv.livereload, livereload()));
});
// While developing, run 'gulp watch'
gulp.task('watch',function(done) {
gulp.task('watch',function() {
// Only listen for live reloads if ran with --livereload
if (argv.livereload){
livereload.listen();
}
gulp.watch('src/styles/**/*.sass',gulp.series('styles'));
gulp.watch('src/templates/**/*.pug',gulp.series('templates'));
gulp.watch('src/scripts/*.js',gulp.series('scripts'));
gulp.watch('src/scripts/tutti/**/*.js',gulp.series('scripts_concat_tutti'));
gulp.watch('src/scripts/js/**/*.js',gulp.series(['scripts_browserify', 'scripts_concat_tutti']));
done();
gulp.watch('src/styles/**/*.sass',['styles']);
gulp.watch('src/templates/**/*.pug',['templates']);
gulp.watch('src/scripts/*.js',['scripts']);
gulp.watch('src/scripts/tutti/**/*.js',['scripts_concat_tutti']);
gulp.watch('src/scripts/markdown/**/*.js',['scripts_concat_markdown']);
});
// Erases all generated files in output directories.
gulp.task('cleanup', function(done) {
let paths = [];
gulp.task('cleanup', function() {
var paths = [];
for (attr in destination) {
paths.push(destination[attr]);
}
@@ -215,20 +124,17 @@ gulp.task('cleanup', function(done) {
git.clean({ args: '-f -X ' + paths.join(' ') }, function (err) {
if(err) throw err;
});
done();
});
// Run 'gulp' to build everything at once
let tasks = [];
var tasks = [];
if (enabled.cleanup) tasks.push('cleanup');
// gulp.task('default', gulp.parallel('styles', 'templates', 'scripts', 'scripts_tutti'));
gulp.task('default', gulp.parallel(tasks.concat([
gulp.task('default', tasks.concat([
'styles',
'templates',
'scripts',
'scripts_concat_tutti',
'scripts_move_vendor',
'scripts_browserify',
])));
'scripts_concat_markdown',
]));


@@ -1,180 +0,0 @@
// For a detailed explanation regarding each configuration property, visit:
// https://jestjs.io/docs/en/configuration.html
module.exports = {
// All imported modules in your tests should be mocked automatically
// automock: false,
// Stop running tests after the first failure
// bail: false,
// Respect "browser" field in package.json when resolving modules
// browser: false,
// The directory where Jest should store its cached dependency information
// cacheDirectory: "/tmp/jest_rs",
// Automatically clear mock calls and instances between every test
clearMocks: true,
// Indicates whether the coverage information should be collected while executing the test
// collectCoverage: false,
// An array of glob patterns indicating a set of files for which coverage information should be collected
// collectCoverageFrom: null,
// The directory where Jest should output its coverage files
// coverageDirectory: null,
// An array of regexp pattern strings used to skip coverage collection
// coveragePathIgnorePatterns: [
// "/node_modules/"
// ],
// A list of reporter names that Jest uses when writing coverage reports
// coverageReporters: [
// "json",
// "text",
// "lcov",
// "clover"
// ],
// An object that configures minimum threshold enforcement for coverage results
// coverageThreshold: null,
// Make calling deprecated APIs throw helpful error messages
// errorOnDeprecated: false,
// Force coverage collection from ignored files using an array of glob patterns
// forceCoverageMatch: [],
// A path to a module which exports an async function that is triggered once before all test suites
// globalSetup: null,
// A path to a module which exports an async function that is triggered once after all test suites
// globalTeardown: null,
// A set of global variables that need to be available in all test environments
// globals: {},
// An array of directory names to be searched recursively up from the requiring module's location
// moduleDirectories: [
// "node_modules"
// ],
// An array of file extensions your modules use
// moduleFileExtensions: [
// "js",
// "json",
// "jsx",
// "node"
// ],
// A map from regular expressions to module names that allow to stub out resources with a single module
// moduleNameMapper: {},
// An array of regexp pattern strings, matched against all module paths before considered 'visible' to the module loader
// modulePathIgnorePatterns: [],
// Activates notifications for test results
// notify: false,
// An enum that specifies notification mode. Requires { notify: true }
// notifyMode: "always",
// A preset that is used as a base for Jest's configuration
// preset: null,
// Run tests from one or more projects
// projects: null,
// Use this configuration option to add custom reporters to Jest
// reporters: undefined,
// Automatically reset mock state between every test
// resetMocks: false,
// Reset the module registry before running each individual test
// resetModules: false,
// A path to a custom resolver
// resolver: null,
// Automatically restore mock state between every test
// restoreMocks: false,
// The root directory that Jest should scan for tests and modules within
// rootDir: null,
// A list of paths to directories that Jest should use to search for files in
// roots: [
// "<rootDir>"
// ],
// Allows you to use a custom runner instead of Jest's default test runner
// runner: "jest-runner",
// The paths to modules that run some code to configure or set up the testing environment before each test
setupFiles: ["<rootDir>/src/scripts/js/es6/test_config/test-env.js"],
// The path to a module that runs some code to configure or set up the testing framework before each test
// setupTestFrameworkScriptFile: null,
// A list of paths to snapshot serializer modules Jest should use for snapshot testing
// snapshotSerializers: [],
// The test environment that will be used for testing
testEnvironment: "jsdom",
// Options that will be passed to the testEnvironment
// testEnvironmentOptions: {},
// Adds a location field to test results
// testLocationInResults: false,
// The glob patterns Jest uses to detect test files
// testMatch: [
// "**/__tests__/**/*.js?(x)",
// "**/?(*.)+(spec|test).js?(x)"
// ],
// An array of regexp pattern strings that are matched against all test paths, matched tests are skipped
// testPathIgnorePatterns: [
// "/node_modules/"
// ],
// The regexp pattern Jest uses to detect test files
// testRegex: "",
// This option allows the use of a custom results processor
// testResultsProcessor: null,
// This option allows use of a custom test runner
// testRunner: "jasmine2",
// This option sets the URL for the jsdom environment. It is reflected in properties such as location.href
// testURL: "http://localhost",
// Setting this value to "fake" allows the use of fake timers for functions such as "setTimeout"
// timers: "real",
// A map from regular expressions to paths to transformers
// transform: null,
// An array of regexp pattern strings that are matched against all source file paths, matched files will skip transformation
// transformIgnorePatterns: [
// "/node_modules/"
// ],
// An array of regexp pattern strings that are matched against all modules before the module loader will automatically return a mock for them
// unmockedModulePathPatterns: undefined,
// Indicates whether each individual test should be reported during the run
// verbose: null,
// An array of regexp patterns that are matched against all source file paths before re-running tests in watch mode
// watchPathIgnorePatterns: [],
// Whether to use watchman for file crawling
// watchman: true,
};

package-lock.json generated (11968 lines changed)

File diff suppressed because it is too large.


@@ -4,51 +4,23 @@
"author": "Blender Institute",
"repository": {
"type": "git",
"url": "git://git.blender.org/pillar.git"
"url": "https://github.com/armadillica/pillar.git"
},
"devDependencies": {
"@babel/core": "7.1.6",
"@babel/preset-env": "7.1.6",
"acorn": "5.7.3",
"babel-core": "7.0.0-bridge.0",
"babelify": "10.0.0",
"browserify": "16.2.3",
"gulp": "4.0.0",
"gulp-autoprefixer": "6.0.0",
"gulp-babel": "8.0.0",
"gulp-cached": "1.1.1",
"gulp-chmod": "2.0.0",
"gulp-concat": "2.6.1",
"gulp-git": "2.8.0",
"gulp-if": "2.0.2",
"gulp-livereload": "4.0.0",
"gulp-plumber": "1.2.0",
"gulp-pug": "4.0.1",
"gulp-rename": "1.4.0",
"gulp-sass": "4.0.1",
"gulp-sourcemaps": "2.6.4",
"gulp-uglify-es": "1.0.4",
"jest": "23.6.0",
"minimist": "1.2.0",
"vinyl-buffer": "1.0.1",
"vinyl-source-stream": "2.0.0"
},
"dependencies": {
"bootstrap": "4.1.3",
"glob": "7.1.3",
"jquery": "3.3.1",
"natives": "^1.1.6",
"popper.js": "1.14.4",
"video.js": "7.2.2",
"vue": "2.5.17"
},
"scripts": {
"test": "jest"
},
"__COMMENTS__": [
"natives@1.1.6 for Gulp 3.x on Node 10.x: https://github.com/gulpjs/gulp/issues/2162#issuecomment-385197164"
],
"resolutions": {
"natives": "1.1.6"
"gulp": "~3.9.1",
"gulp-autoprefixer": "~2.3.1",
"gulp-cached": "~1.1.0",
"gulp-chmod": "~1.3.0",
"gulp-concat": "~2.6.0",
"gulp-if": "^2.0.1",
"gulp-git": "~2.4.2",
"gulp-livereload": "~3.8.1",
"gulp-plumber": "~1.1.0",
"gulp-pug": "~3.2.0",
"gulp-rename": "~1.2.2",
"gulp-sass": "~2.3.1",
"gulp-sourcemaps": "~1.6.0",
"gulp-uglify": "~1.5.3",
"minimist": "^1.2.0"
}
}


@@ -140,6 +140,8 @@ class PillarServer(BlinkerCompatibleEve):
self.org_manager = pillar.api.organizations.OrgManager()
self.before_first_request(self.setup_db_indices)
# Make CSRF protection available to the application. By default it is
# disabled on all endpoints. More info at WTF_CSRF_CHECK_DEFAULT in config.py
self.csrf = CSRFProtect(self)
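The comment above refers to Flask-WTF's opt-in CSRF mode. A minimal standalone sketch of that mode (assuming WTF_CSRF_CHECK_DEFAULT = False as the comment describes; the route and secret key are hypothetical):

    from flask import Flask
    from flask_wtf.csrf import CSRFProtect

    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'change-me'        # Flask-WTF needs a secret key
    app.config['WTF_CSRF_CHECK_DEFAULT'] = False  # make CSRF opt-in per endpoint
    csrf = CSRFProtect(app)

    @app.route('/example', methods=['POST'])
    def example():
        # With the default check disabled, protection is enabled explicitly.
        csrf.protect()  # aborts with 400 when the CSRF token is missing or invalid
        return '', 204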
@@ -278,7 +280,7 @@ class PillarServer(BlinkerCompatibleEve):
self.encoding_service_client = Zencoder(self.config['ZENCODER_API_KEY'])
def _config_caching(self):
from flask_caching import Cache
from flask_cache import Cache
self.cache = Cache(self)
def set_languages(self, translations_folder: pathlib.Path):
@@ -477,11 +479,10 @@ class PillarServer(BlinkerCompatibleEve):
# Pillar-defined Celery task modules:
celery_task_modules = [
'pillar.celery.badges',
'pillar.celery.email_tasks',
'pillar.celery.file_link_tasks',
'pillar.celery.search_index_tasks',
'pillar.celery.tasks',
'pillar.celery.search_index_tasks',
'pillar.celery.file_link_tasks',
'pillar.celery.email_tasks',
]
# Allow Pillar extensions from defining their own Celery tasks.
@@ -703,8 +704,6 @@ class PillarServer(BlinkerCompatibleEve):
def finish_startup(self):
self.log.info('Using MongoDB database %r', self.config['MONGO_DBNAME'])
with self.app_context():
self.setup_db_indices()
self._config_celery()
api.setup_app(self)
@@ -712,10 +711,6 @@ class PillarServer(BlinkerCompatibleEve):
authentication.setup_app(self)
# Register Flask Debug Toolbar (disabled by default).
from flask_debugtoolbar import DebugToolbarExtension
DebugToolbarExtension(self)
for ext in self.pillar_extensions.values():
self.log.info('Setting up extension %s', ext.name)
ext.setup_app(self)
@@ -726,7 +721,6 @@ class PillarServer(BlinkerCompatibleEve):
self._config_user_caps()
# Only enable this when debugging.
# TODO(fsiddi): Consider removing this in favor of the routes tab in Flask Debug Toolbar.
# self._list_routes()
def setup_db_indices(self):
@@ -766,8 +760,6 @@ class PillarServer(BlinkerCompatibleEve):
coll.create_index([('properties.status', pymongo.ASCENDING),
('node_type', pymongo.ASCENDING),
('_created', pymongo.DESCENDING)])
# Used for asset tags
coll.create_index([('properties.tags', pymongo.ASCENDING)])
coll = db['projects']
# This index is used for statistics, and for fetching public projects.
@@ -790,7 +782,7 @@ class PillarServer(BlinkerCompatibleEve):
return 'basic ' + base64.b64encode('%s:%s' % (username, subclient_id))
def post_internal(self, resource: str, payl=None, skip_validation=False):
"""Workaround for Eve issue https://github.com/pyeve/eve/issues/810"""
"""Workaround for Eve issue https://github.com/nicolaiarocci/eve/issues/810"""
from eve.methods.post import post_internal
url = self.config['URLS'][resource]
@@ -800,7 +792,7 @@ class PillarServer(BlinkerCompatibleEve):
def put_internal(self, resource: str, payload=None, concurrency_check=False,
skip_validation=False, **lookup):
"""Workaround for Eve issue https://github.com/pyeve/eve/issues/810"""
"""Workaround for Eve issue https://github.com/nicolaiarocci/eve/issues/810"""
from eve.methods.put import put_internal
url = self.config['URLS'][resource]
@@ -811,7 +803,7 @@ class PillarServer(BlinkerCompatibleEve):
def patch_internal(self, resource: str, payload=None, concurrency_check=False,
skip_validation=False, **lookup):
"""Workaround for Eve issue https://github.com/pyeve/eve/issues/810"""
"""Workaround for Eve issue https://github.com/nicolaiarocci/eve/issues/810"""
from eve.methods.patch import patch_internal
url = self.config['URLS'][resource]
@@ -822,7 +814,7 @@ class PillarServer(BlinkerCompatibleEve):
def delete_internal(self, resource: str, concurrency_check=False,
suppress_callbacks=False, **lookup):
"""Workaround for Eve issue https://github.com/pyeve/eve/issues/810"""
"""Workaround for Eve issue https://github.com/nicolaiarocci/eve/issues/810"""
from eve.methods.delete import deleteitem_internal
url = self.config['URLS'][resource]


@@ -1,6 +1,6 @@
def setup_app(app):
from . import encoding, blender_id, projects, local_auth, file_storage
from . import users, nodes, latest, blender_cloud, service, activities, timeline
from . import users, nodes, latest, blender_cloud, service, activities
from . import organizations
from . import search
@@ -11,7 +11,6 @@ def setup_app(app):
local_auth.setup_app(app, url_prefix='/auth')
file_storage.setup_app(app, url_prefix='/storage')
latest.setup_app(app, url_prefix='/latest')
timeline.setup_app(app, url_prefix='/timeline')
blender_cloud.setup_app(app, url_prefix='/bcloud')
users.setup_app(app, api_prefix='/users')
service.setup_app(app, api_prefix='/service')


@@ -1,5 +1,4 @@
import logging
from html.parser import HTMLParser
from flask import request, current_app
from pillar.api.utils import gravatar
@@ -8,15 +7,6 @@ from pillar.auth import current_user
log = logging.getLogger(__name__)
class CommentHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.data = []
def handle_data(self, data):
self.data.append(data)
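On its own, CommentHTMLParser simply collects the text nodes of an HTML fragment and drops the tags; a minimal sketch of its behaviour (the sample input is made up):

    from html.parser import HTMLParser

    class CommentHTMLParser(HTMLParser):
        def __init__(self):
            HTMLParser.__init__(self)
            self.data = []

        def handle_data(self, data):
            self.data.append(data)

    parser = CommentHTMLParser()
    parser.feed('<p>Looks <em>great</em>!</p>')
    print(parser.data)  # ['Looks ', 'great', '!']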
def notification_parse(notification):
activities_collection = current_app.data.driver.db['activities']
activities_subscriptions_collection = \
@@ -40,14 +30,9 @@ def notification_parse(notification):
object_type = 'comment'
object_name = ''
object_id = activity['object']
context_object_type = node['parent']['node_type']
# If node_type is 'dillo_post', just call it 'post'
node_type = 'post' if context_object_type.endswith('_post') else \
context_object_type
if node['parent']['user'] == current_user.user_id:
owner = f"your {node_type}"
owner = "your {0}".format(node['parent']['node_type'])
else:
parent_comment_user = users_collection.find_one(
{'_id': node['parent']['user']})
@@ -55,22 +40,10 @@ def notification_parse(notification):
user_name = 'their'
else:
user_name = "{0}'s".format(parent_comment_user['username'])
owner = "{0} {1}".format(user_name, node['parent']['node_type'])
owner = f"{user_name} {node_type}"
context_object_name = f"{node['parent']['name'][:50]}..."
if context_object_type == 'comment':
# Parse the comment content, which might be HTML and extract
# some text from it.
parser = CommentHTMLParser()
# Trim the comment content to 50 chars, the parser will handle it
parser.feed(node['properties']['content'][:50])
try:
comment_content = parser.data[0]
except KeyError:
comment_content = '...'
# Trim the parsed text down to 50 chars
context_object_name = f"{comment_content[:50]}..."
context_object_type = node['parent']['node_type']
context_object_name = owner
context_object_id = activity['context_object']
if activity['verb'] == 'replied':
action = 'replied to'
@@ -79,15 +52,13 @@ def notification_parse(notification):
else:
action = activity['verb']
action = f'{action} {owner}'
lookup = {
'user': current_user.user_id,
'context_object_type': 'node',
'context_object': context_object_id,
}
subscription = activities_subscriptions_collection.find_one(lookup)
if subscription and subscription['notifications']['web'] is True:
if subscription and subscription['notifications']['web'] == True:
is_subscribed = True
else:
is_subscribed = False
@@ -148,8 +119,6 @@ def activity_subscribe(user_id, context_object_type, context_object_id):
# If no subscription exists, we create one
if not subscription:
# Workaround for issue: https://github.com/pyeve/eve/issues/1174
lookup['notifications'] = {}
current_app.post_internal('activities-subscriptions', lookup)


@@ -6,7 +6,6 @@ with Blender ID.
import datetime
import logging
from urllib.parse import urljoin
import requests
from bson import tz_util
@@ -115,14 +114,13 @@ def validate_token(user_id, token, oauth_subclient_id):
# We only want to accept Blender Cloud tokens.
payload['client_id'] = current_app.config['OAUTH_CREDENTIALS']['blender-id']['id']
blender_id_endpoint = current_app.config['BLENDER_ID_ENDPOINT']
url = urljoin(blender_id_endpoint, 'u/validate_token')
url = '{0}/u/validate_token'.format(current_app.config['BLENDER_ID_ENDPOINT'])
log.debug('POSTing to %r', url)
# Retry a few times when POSTing to BlenderID fails.
# Source: http://stackoverflow.com/a/15431343/875379
s = requests.Session()
s.mount(blender_id_endpoint, HTTPAdapter(max_retries=5))
s.mount(current_app.config['BLENDER_ID_ENDPOINT'], HTTPAdapter(max_retries=5))
# POST to Blender ID, handling errors as negative verification results.
try:
@@ -220,7 +218,7 @@ def fetch_blenderid_user() -> dict:
my_log = log.getChild('fetch_blenderid_user')
bid_url = urljoin(current_app.config['BLENDER_ID_ENDPOINT'], 'api/user')
bid_url = '%s/api/user' % current_app.config['BLENDER_ID_ENDPOINT']
my_log.debug('Fetching user info from %s', bid_url)
credentials = current_app.config['OAUTH_CREDENTIALS']['blender-id']
@@ -265,7 +263,7 @@ def setup_app(app, url_prefix):
def switch_user_url(next_url: str) -> str:
from urllib.parse import quote
base_url = urljoin(current_app.config['BLENDER_ID_ENDPOINT'], 'switch')
base_url = '%s/switch' % current_app.config['BLENDER_ID_ENDPOINT']
if next_url:
return '%s?next=%s' % (base_url, quote(next_url))
return base_url
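The retry setup in validate_token above follows a standard requests pattern; a self-contained sketch (the endpoint URL is a placeholder, not Pillar's actual configuration):

    import requests
    from requests.adapters import HTTPAdapter

    BLENDER_ID_ENDPOINT = 'https://id.example.com/'  # placeholder

    s = requests.Session()
    # Requests to this URL prefix are retried up to 5 times on connection errors.
    s.mount(BLENDER_ID_ENDPOINT, HTTPAdapter(max_retries=5))
    r = s.post(BLENDER_ID_ENDPOINT + 'u/validate_token',
               data={'token': 'sometoken'}, timeout=5)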


@@ -1,17 +1,17 @@
from datetime import datetime
import logging
from bson import ObjectId, tz_util
from datetime import datetime
import cerberus.errors
from eve.io.mongo import Validator
from flask import current_app
from pillar import markdown
import pillar.markdown
log = logging.getLogger(__name__)
class ValidateCustomFields(Validator):
# TODO: split this into a convert_property(property, schema) and call that from this function.
def convert_properties(self, properties, node_schema):
"""Converts datetime strings and ObjectId strings to actual Python objects."""
@@ -73,11 +73,6 @@ class ValidateCustomFields(Validator):
dict_property[key] = self.convert_properties(item_prop, item_schema)['item']
def _validate_valid_properties(self, valid_properties, field, value):
"""Fake property that triggers node dynamic property validation.
The rule's arguments are validated against this schema:
{'type': 'boolean'}
"""
from pillar.api.utils import project_get_node_type
projects_collection = current_app.data.driver.db['projects']
@@ -112,7 +107,7 @@ class ValidateCustomFields(Validator):
if val:
# This ensures the modifications made by v's coercion rules are
# visible to this validator's output.
self.document[field] = v.document
self.current[field] = v.current
return True
log.warning('Error validating properties for node %s: %s', self.document, v.errors)
@@ -123,9 +118,6 @@ class ValidateCustomFields(Validator):
Combine "required_after_creation=True" with "required=False" to allow
pre-insert hooks to set default values.
The rule's arguments are validated against this schema:
{'type': 'boolean'}
"""
if not required_after_creation:
@@ -133,14 +125,14 @@ class ValidateCustomFields(Validator):
# validator at all.
return
if self.document_id is None:
if self._id is None:
# This is a creation call, in which case this validator shouldn't run.
return
if not value:
self._error(field, "Value is required once the document was created")
def _validator_iprange(self, field_name: str, value: str):
def _validate_type_iprange(self, field_name: str, value: str):
"""Ensure the field contains a valid IP address.
Supports both IPv6 and IPv4 ranges. Requires the IPy module.
@@ -157,19 +149,40 @@ class ValidateCustomFields(Validator):
if ip.prefixlen() == 0:
self._error(field_name, 'Zero-length prefix is not allowed')
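The same check as a standalone function (a sketch; Pillar's validator reports errors through Cerberus instead of returning a bool):

    from IPy import IP  # the IPy module the docstring requires

    def is_valid_iprange(value: str) -> bool:
        try:
            ip = IP(value)  # handles both IPv4 and IPv6 ranges
        except ValueError:
            return False
        # A zero-length prefix (e.g. 0.0.0.0/0) is not allowed.
        return ip.prefixlen() > 0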
def _normalize_coerce_markdown(self, markdown_field: str) -> str:
"""
Cache markdown as html.
def _validate_type_binary(self, field_name: str, value: bytes):
"""Add support for binary type.
:param markdown_field: name of the field containing Markdown
:return: html string
This type was actually introduced in Cerberus 1.0, so we can drop
support for this once Eve starts using that version (or newer).
"""
my_log = log.getChild('_normalize_coerce_markdown')
mdown = self.document.get(markdown_field, '')
html = markdown.markdown(mdown)
my_log.debug('Generated html for markdown field %s in doc with id %s',
markdown_field, id(self.document))
return html
if not isinstance(value, (bytes, bytearray)):
self._error(field_name, f'wrong value type {type(value)}, expected bytes or bytearray')
def _validate_coerce(self, coerce, field: str, value):
"""Override Cerberus' _validate_coerce method for richer features.
This now supports named coercion functions (available in Cerberus 1.0+)
and passes the field name to coercion functions as well.
"""
if isinstance(coerce, str):
coerce = getattr(self, f'_normalize_coerce_{coerce}')
try:
return coerce(field, value)
except (TypeError, ValueError):
self._error(field, cerberus.errors.ERROR_COERCION_FAILED.format(field))
def _normalize_coerce_markdown(self, field: str, value):
"""Render Markdown from this field into {field}_html.
The field name MUST NOT end in `_html`. The Markdown is read from this
field and the rendered HTML is written to the field `{field}_html`.
"""
html = pillar.markdown.markdown(value)
field_name = pillar.markdown.cache_field_name(field)
self.current[field_name] = html
return value
if __name__ == '__main__':
@@ -177,12 +190,12 @@ if __name__ == '__main__':
v = ValidateCustomFields()
v.schema = {
'foo': {'type': 'string', 'validator': 'markdown'},
'foo': {'type': 'string', 'coerce': 'markdown'},
'foo_html': {'type': 'string'},
'nested': {
'type': 'dict',
'schema': {
'bar': {'type': 'string', 'validator': 'markdown'},
'bar': {'type': 'string', 'coerce': 'markdown'},
'bar_html': {'type': 'string'},
}
}


@@ -1,8 +1,5 @@
import os
from pillar.api.node_types.utils import markdown_fields
STORAGE_BACKENDS = ["local", "pillar", "cdnsun", "gcs", "unittest"]
URL_PREFIX = 'api'
# Enable reads (GET), inserts (POST) and DELETE for resources/collections
@@ -124,43 +121,12 @@ users_schema = {
'service': {
'type': 'dict',
'allow_unknown': True,
},
# Node-specific information for this user.
'nodes': {
'type': 'dict',
'schema': {
# Per watched video info about where the user left off, both in time and in percent.
'view_progress': {
'type': 'dict',
# Keyed by Node ID of the video asset. MongoDB doesn't support using
# ObjectIds as key, so we cast them to string instead.
'keyschema': {'type': 'string'},
'valueschema': {
'type': 'dict',
'schema': {
'progress_in_sec': {'type': 'float', 'min': 0},
'progress_in_percent': {'type': 'integer', 'min': 0, 'max': 100},
# When the progress was last updated, so we can limit this history to
# the last-watched N videos if we want, or show stuff in chrono order.
'last_watched': {'type': 'datetime'},
# True means progress_in_percent = 100, for easy querying
'done': {'type': 'boolean', 'default': False},
},
},
},
},
},
'badges': {
'type': 'dict',
'schema': {
'html': {'type': 'string'}, # HTML fetched from Blender ID.
'expires': {'type': 'datetime'}, # When we should fetch it again.
},
'badger': {
'type': 'list',
'schema': {'type': 'string'}
}
}
},
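To illustrate the view_progress schema shown above, a user document fragment matching it would look like this (the ObjectId string and values are made up):

    import datetime

    user_fragment = {
        'nodes': {
            'view_progress': {
                # Keyed by the stringified ObjectId of the video node.
                '5b66ed91e9682d00015e2aee': {
                    'progress_in_sec': 263.0,
                    'progress_in_percent': 42,
                    'last_watched': datetime.datetime(2018, 8, 5, 14, 0),
                    'done': False,
                },
            },
        },
    }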
# Properties defined by extensions. Extensions should use their name (see the
@@ -186,7 +152,12 @@ organizations_schema = {
'maxlength': 128,
'required': True
},
**markdown_fields('description', maxlength=256),
'description': {
'type': 'string',
'maxlength': 256,
'coerce': 'markdown',
},
'_description_html': {'type': 'string'},
'website': {
'type': 'string',
'maxlength': 256,
@@ -256,7 +227,7 @@ organizations_schema = {
'start': {'type': 'binary', 'required': True},
'end': {'type': 'binary', 'required': True},
'prefix': {'type': 'integer', 'required': True},
'human': {'type': 'string', 'required': True, 'validator': 'iprange'},
'human': {'type': 'iprange', 'required': True},
}
},
},
@@ -319,7 +290,11 @@ nodes_schema = {
'maxlength': 128,
'required': True,
},
**markdown_fields('description'),
'description': {
'type': 'string',
'coerce': 'markdown',
},
'_description_html': {'type': 'string'},
'picture': _file_embedded_schema,
'order': {
'type': 'integer',
@@ -352,7 +327,7 @@ nodes_schema = {
'properties': {
'type': 'dict',
'valid_properties': True,
'required': True
'required': True,
},
'permissions': {
'type': 'dict',
@@ -370,11 +345,11 @@ tokens_schema = {
},
'token': {
'type': 'string',
'required': True,
'required': False,
},
'token_hashed': {
'type': 'string',
'required': False,
'required': True,
},
'expire_time': {
'type': 'datetime',
@@ -393,13 +368,6 @@ tokens_schema = {
'type': 'string',
},
},
# OAuth scopes granted to this token.
'oauth_scopes': {
'type': 'list',
'default': [],
'schema': {'type': 'string'},
}
}
files_schema = {
@@ -457,7 +425,7 @@ files_schema = {
'backend': {
'type': 'string',
'required': True,
'allowed': STORAGE_BACKENDS,
'allowed': ["local", "pillar", "cdnsun", "gcs", "unittest"]
},
# Where the file is in the backend storage itself. In the case of GCS,
@@ -569,7 +537,11 @@ projects_schema = {
'maxlength': 128,
'required': True,
},
**markdown_fields('description'),
'description': {
'type': 'string',
'coerce': 'markdown',
},
'_description_html': {'type': 'string'},
# Short summary for the project
'summary': {
'type': 'string',
@@ -579,8 +551,6 @@ projects_schema = {
'picture_square': _file_embedded_schema,
# Header
'picture_header': _file_embedded_schema,
# Picture with a 16:9 aspect ratio (for Open Graph)
'picture_16_9': _file_embedded_schema,
'header_node': dict(
nullable=True,
**_node_embedded_schema
@@ -863,9 +833,4 @@ UPSERT_ON_PUT = False  # do not create new document on PUT of non-existent URL.
X_DOMAINS = '*'
X_ALLOW_CREDENTIALS = True
X_HEADERS = 'Authorization'
RENDERERS = ['eve.render.JSONRenderer']
# TODO(Sybren): this is a quick workaround to make /p/{url}/jstree work again.
# Apparently Eve is now stricter in checking against MONGO_QUERY_BLACKLIST, and
# blocks our use of $regex.
MONGO_QUERY_BLACKLIST = ['$where']
XML = False


@@ -5,7 +5,6 @@ import mimetypes
import os
import pathlib
import tempfile
import time
import typing
import uuid
from hashlib import md5
@@ -131,67 +130,6 @@ def _process_image(bucket: Bucket,
src_file['status'] = 'complete'
def _video_duration_seconds(filename: pathlib.Path) -> typing.Optional[int]:
"""Get the duration of a video file using ffprobe
https://superuser.com/questions/650291/how-to-get-video-duration-in-seconds
:param filename: file path to video
:return: video duration in seconds
"""
import subprocess
def run(cli_args):
if log.isEnabledFor(logging.INFO):
import shlex
cmd = ' '.join(shlex.quote(s) for s in cli_args)
log.info('Calling %s', cmd)
ffprobe = subprocess.run(
cli_args,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=10, # seconds
)
if ffprobe.returncode:
import shlex
cmd = ' '.join(shlex.quote(s) for s in cli_args)
log.error('Error running %s: stopped with return code %i',
cmd, ffprobe.returncode)
log.error('Output was: %s', ffprobe.stdout)
return None
try:
return int(float(ffprobe.stdout))
except ValueError as e:
log.exception('ffprobe produced invalid number: %s', ffprobe.stdout)
return None
ffprobe_from_container_args = [
current_app.config['BIN_FFPROBE'],
'-v', 'error',
'-show_entries', 'format=duration',
'-of', 'default=noprint_wrappers=1:nokey=1',
str(filename),
]
ffprobe_from_stream_args = [
current_app.config['BIN_FFPROBE'],
'-v', 'error',
'-hide_banner',
'-select_streams', 'v:0', # we only care about the first video stream
'-show_entries', 'stream=duration',
'-of', 'default=noprint_wrappers=1:nokey=1',
str(filename),
]
duration = run(ffprobe_from_stream_args) or \
run(ffprobe_from_container_args) or \
None
return duration
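The container-level probe above can be reproduced by hand on the command line (assuming ffprobe is on the PATH and some_video.mp4 is a stand-in filename):

    ffprobe -v error -show_entries format=duration \
        -of default=noprint_wrappers=1:nokey=1 some_video.mp4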
def _video_size_pixels(filename: pathlib.Path) -> typing.Tuple[int, int]:
"""Figures out the size (in pixels) of the video file.
@@ -282,10 +220,8 @@ def _process_video(gcs,
# by determining the video size here we already have this information in the file
# document before Zencoder calls our notification URL. It also opens up possibilities
# for other encoding backends that don't support this functionality.
video_path = pathlib.Path(local_file.name)
video_width, video_height = _video_size_pixels(video_path)
video_width, video_height = _video_size_pixels(pathlib.Path(local_file.name))
capped_video_width, capped_video_height = _video_cap_at_1080(video_width, video_height)
video_duration = _video_duration_seconds(video_path)
# Create variations
root, _ = os.path.splitext(src_file['file_path'])
@@ -298,13 +234,12 @@ def _process_video(gcs,
content_type='video/{}'.format(v),
file_path='{}-{}.{}'.format(root, v, v),
size='',
duration=0,
width=capped_video_width,
height=capped_video_height,
length=0,
md5='',
)
if video_duration:
file_variation['duration'] = video_duration
# Append file variation. Originally mp4 and webm were the available options,
# that's why we build a list.
src_file['variations'].append(file_variation)
@@ -610,7 +545,6 @@ def refresh_links_for_backend(backend_name, chunk_size, expiry_seconds):
import gcloud.exceptions
my_log = log.getChild(f'refresh_links_for_backend.{backend_name}')
start_time = time.time()
# Retrieve expired links.
files_collection = current_app.data.driver.db['files']
@@ -634,10 +568,10 @@ def refresh_links_for_backend(backend_name, chunk_size, expiry_seconds):
return
if 0 < chunk_size == document_count:
my_log.info('Found %d documents to refresh, probably limited by the chunk size %d',
document_count, chunk_size)
my_log.info('Found %d documents to refresh, probably limited by the chunk size.',
document_count)
else:
my_log.info('Found %d documents to refresh, chunk size=%d', document_count, chunk_size)
my_log.info('Found %d documents to refresh.', document_count)
refreshed = 0
report_chunks = min(max(5, document_count // 25), 100)
@@ -681,10 +615,8 @@ def refresh_links_for_backend(backend_name, chunk_size, expiry_seconds):
'links', refreshed)
return
if refreshed % report_chunks != 0:
my_log.info('Refreshed %i links', refreshed)
my_log.info('Refresh took %s', datetime.timedelta(seconds=time.time() - start_time))
@require_login()
def create_file_doc(name, filename, content_type, length, project,


@@ -90,11 +90,12 @@ class Blob(metaclass=abc.ABCMeta):
def __init__(self, name: str, bucket: Bucket) -> None:
self.name = name
"""Name of this blob in the bucket."""
self.bucket = bucket
self._size_in_bytes: typing.Optional[int] = None
self.filename: str = None
"""Name of the file for the Content-Disposition header when downloading it."""
self._log = logging.getLogger(f'{__name__}.Blob')
def __repr__(self):
@@ -132,19 +133,12 @@ class Blob(metaclass=abc.ABCMeta):
file_size=file_size)
@abc.abstractmethod
def update_filename(self, filename: str, *, is_attachment=True):
def update_filename(self, filename: str):
"""Sets the filename which is used when downloading the file.
Not all storage backends support this, and will use the on-disk filename instead.
"""
@abc.abstractmethod
def update_content_type(self, content_type: str, content_encoding: str = ''):
"""Set the content type (and optionally content encoding).
Not all storage backends support this.
"""
@abc.abstractmethod
def get_url(self, *, is_public: bool) -> str:
"""Returns the URL to access this blob.


@@ -174,7 +174,7 @@ class GoogleCloudStorageBlob(Blob):
self.gblob.reload()
self._size_in_bytes = self.gblob.size
def update_filename(self, filename: str, *, is_attachment=True):
def update_filename(self, filename: str):
"""Set the ContentDisposition metadata so that when a file is downloaded
it has a human-readable name.
"""
@@ -182,17 +182,7 @@ class GoogleCloudStorageBlob(Blob):
if '"' in filename:
raise ValueError(f'Filename is not allowed to have double quote in it: {filename!r}')
if is_attachment:
self.gblob.content_disposition = f'attachment; filename="{filename}"'
else:
self.gblob.content_disposition = f'filename="{filename}"'
self.gblob.patch()
def update_content_type(self, content_type: str, content_encoding: str = ''):
"""Set the content type (and optionally content encoding)."""
self.gblob.content_type = content_type
self.gblob.content_encoding = content_encoding
self.gblob.patch()
def get_url(self, *, is_public: bool) -> str:


@@ -113,13 +113,10 @@ class LocalBlob(Blob):
self._size_in_bytes = file_size
def update_filename(self, filename: str, *, is_attachment=True):
def update_filename(self, filename: str):
# TODO: implement this for local storage.
self._log.info('update_filename(%r) not supported', filename)
def update_content_type(self, content_type: str, content_encoding: str = ''):
self._log.info('update_content_type(%r, %r) not supported', content_type, content_encoding)
def make_public(self):
# No-op on this storage backend.
pass


@@ -29,6 +29,7 @@ def latest_nodes(db_filter, projection, limit):
proj = {
'_created': 1,
'_updated': 1,
'user.full_name': 1,
'project._id': 1,
'project.url': 1,
'project.name': 1,
@@ -69,7 +70,6 @@ def latest_assets():
{'name': 1, 'node_type': 1,
'parent': 1, 'picture': 1, 'properties.status': 1,
'properties.content_type': 1,
'properties.duration_seconds': 1,
'permissions.world': 1},
12)
@@ -80,7 +80,7 @@ def latest_assets():
def latest_comments():
latest = latest_nodes({'node_type': 'comment',
'properties.status': 'published'},
{'parent': 1, 'user.full_name': 1,
{'parent': 1,
'properties.content': 1, 'node_type': 1,
'properties.status': 1,
'properties.is_reply': 1},


@@ -94,10 +94,17 @@ def generate_and_store_token(user_id, days=15, prefix=b'') -> dict:
# Use 'xy' as altargs to prevent + and / characters from appearing.
# We never have to b64decode the string anyway.
token = prefix + base64.b64encode(random_bits, altchars=b'xy').strip(b'=')
token_bytes = prefix + base64.b64encode(random_bits, altchars=b'xy').strip(b'=')
token = token_bytes.decode('ascii')
token_expiry = utcnow() + datetime.timedelta(days=days)
return store_token(user_id, token.decode('ascii'), token_expiry)
token_data = store_token(user_id, token, token_expiry)
# Include the token in the returned document so that it can be stored client-side,
# in configuration, etc.
token_data['token'] = token
return token_data
def hash_password(password: str, salt: typing.Union[str, bytes]) -> str:
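The token scheme above in isolation (a sketch; os.urandom stands in for however Pillar actually obtains its random bits):

    import base64
    import os

    def generate_token(prefix: bytes = b'') -> str:
        random_bits = os.urandom(32)
        # altchars=b'xy' keeps '+' and '/' out of the token; the '=' padding
        # can be stripped because the token is never base64-decoded again.
        token_bytes = prefix + base64.b64encode(random_bits, altchars=b'xy').strip(b'=')
        return token_bytes.decode('ascii')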


@@ -12,7 +12,7 @@ ATTACHMENT_SLUG_REGEX = r'[a-zA-Z0-9_\-]+'
attachments_embedded_schema = {
'type': 'dict',
# TODO: will be renamed to 'keyschema' in Cerberus 1.0
'keyschema': {
'propertyschema': {
'type': 'string',
'regex': '^%s$' % ATTACHMENT_SLUG_REGEX,
},
@@ -23,6 +23,14 @@ attachments_embedded_schema = {
'type': 'objectid',
'required': True,
},
'link': {
'type': 'string',
'allowed': ['self', 'none', 'custom'],
'default': 'self',
},
'link_custom': {
'type': 'string',
},
'collection': {
'type': 'string',
'allowed': ['files'],
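A value matching this attachments schema would look like the following (slug and ObjectId are made up):

    from bson import ObjectId

    attachments = {
        'header-image': {  # key must match ATTACHMENT_SLUG_REGEX
            'oid': ObjectId('5b66ed91e9682d00015e2aee'),
            'link': 'self',        # one of 'self', 'none', 'custom'
            'collection': 'files',
        },
    }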


@@ -24,10 +24,6 @@ node_type_asset = {
'content_type': {
'type': 'string'
},
# The duration of a video asset in seconds.
'duration_seconds': {
'type': 'integer'
},
# We point to the original file (and use it to extract any relevant
# variation useful for our scope).
'file': _file_embedded_schema,
@@ -62,7 +58,6 @@ node_type_asset = {
},
'form_schema': {
'content_type': {'visible': False},
'duration_seconds': {'visible': False},
'order': {'visible': False},
'tags': {'visible': False},
'categories': {'visible': False},


@@ -1,15 +1,15 @@
from pillar.api.node_types import attachments_embedded_schema
from pillar.api.node_types.utils import markdown_fields
node_type_comment = {
'name': 'comment',
'description': 'Comments for asset nodes, pages, etc.',
'dyn_schema': {
# The actual comment content
**markdown_fields(
'content',
minlength=5,
required=True),
'content': {
'type': 'string',
'minlength': 5,
'required': True,
'coerce': 'markdown',
},
'_content_html': {'type': 'string'},
'status': {
'type': 'string',
'allowed': [
@@ -51,8 +51,7 @@ node_type_comment = {
}
},
'confidence': {'type': 'float'},
'is_reply': {'type': 'boolean'},
'attachments': attachments_embedded_schema,
'is_reply': {'type': 'boolean'}
},
'form_schema': {},
'parent': ['asset', 'comment'],


@@ -3,7 +3,7 @@ node_type_group = {
'description': 'Folder node type',
'parent': ['group', 'project'],
'dyn_schema': {
# Used for sorting within the context of a group
'order': {
'type': 'integer'
},
@@ -20,8 +20,7 @@ node_type_group = {
'notes': {
'type': 'string',
'maxlength': 256,
}
},
},
'form_schema': {
'url': {'visible': False},


@@ -1,14 +1,17 @@
from pillar.api.node_types import attachments_embedded_schema
from pillar.api.node_types.utils import markdown_fields
node_type_post = {
'name': 'post',
'description': 'A blog post, for any project',
'dyn_schema': {
**markdown_fields('content',
minlength=5,
maxlength=90000,
required=True),
'content': {
'type': 'string',
'minlength': 5,
'maxlength': 90000,
'required': True,
'coerce': 'markdown',
},
'_content_html': {'type': 'string'},
'status': {
'type': 'string',
'allowed': [


@@ -1,34 +0,0 @@
from pillar import markdown
def markdown_fields(field: str, **kwargs) -> dict:
"""
Creates a field for the markdown, and a field for the cached html.
Example usage:
schema = {'myDoc': {
'type': 'list',
'schema': {
'type': 'dict',
'schema': {
**markdown_fields('content', required=True),
}
},
}}
:param field:
:return:
"""
cache_field = markdown.cache_field_name(field)
return {
field: {
'type': 'string',
**kwargs
},
cache_field: {
'type': 'string',
'readonly': True,
'default': field, # Name of the field containing the markdown. Will be input to the coerce function.
'coerce': 'markdown',
}
}
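For reference, **markdown_fields('content', required=True) from the helper above expands to this pair of fields; the cache field name comes from markdown.cache_field_name, which for 'content' is '_content_html', as the comment node type shows:

    expanded = {
        'content': {
            'type': 'string',
            'required': True,
        },
        '_content_html': {
            'type': 'string',
            'readonly': True,
            'default': 'content',  # tells the 'markdown' coercion which field to read
            'coerce': 'markdown',
        },
    }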


@@ -1,20 +1,56 @@
import base64
import datetime
import functools
import logging
import urllib.parse
import pymongo.errors
import werkzeug.exceptions as wz_exceptions
from bson import ObjectId
from flask import current_app, Blueprint, request
from pillar.api.nodes import eve_hooks, comments, activities
from pillar.api.activities import activity_subscribe, activity_object_add
from pillar.api.node_types import PILLAR_NAMED_NODE_TYPES
from pillar.api.file_storage_backends.gcs import update_file_name
from pillar.api.utils import str2id, jsonify
from pillar.api.utils.authorization import check_permissions, require_login
from pillar.web.utils import pretty_date
log = logging.getLogger(__name__)
blueprint = Blueprint('nodes_api', __name__)
# TODO(fsiddi) Propose changes to make commenting roles a configuration value.
ROLES_FOR_SHARING = ROLES_FOR_COMMENTING = set()
ROLES_FOR_SHARING = {'subscriber', 'demo'}
def only_for_node_type_decorator(*required_node_type_names):
"""Returns a decorator that checks its first argument's node type.
If the node type is not of the required node type, returns None,
otherwise calls the wrapped function.
>>> deco = only_for_node_type_decorator('comment')
>>> @deco
... def handle_comment(node): pass
>>> deco = only_for_node_type_decorator('comment', 'post')
>>> @deco
... def handle_comment_or_post(node): pass
"""
# Convert to a set for efficient 'x in required_node_type_names' queries.
required_node_type_names = set(required_node_type_names)
def only_for_node_type(wrapped):
@functools.wraps(wrapped)
def wrapper(node, *args, **kwargs):
if node.get('node_type') not in required_node_type_names:
return
return wrapped(node, *args, **kwargs)
return wrapper
only_for_node_type.__doc__ = "Decorator, immediately returns when " \
"the first argument is not of type %s." % required_node_type_names
return only_for_node_type
@blueprint.route('/<node_id>/share', methods=['GET', 'POST'])
@@ -49,121 +85,7 @@ def share_node(node_id):
else:
return '', 204
return jsonify(eve_hooks.short_link_info(short_code), status=status)
@blueprint.route('/<string(length=24):node_path>/comments', methods=['GET'])
def get_node_comments(node_path: str):
node_id = str2id(node_path)
return comments.get_node_comments(node_id)
@blueprint.route('/<string(length=24):node_path>/comments', methods=['POST'])
@require_login(require_roles=ROLES_FOR_COMMENTING)
def post_node_comment(node_path: str):
node_id = str2id(node_path)
msg = request.json['msg']
attachments = request.json.get('attachments', {})
return comments.post_node_comment(node_id, msg, attachments)
@blueprint.route('/<string(length=24):node_path>/comments/<string(length=24):comment_path>', methods=['PATCH'])
@require_login(require_roles=ROLES_FOR_COMMENTING)
def patch_node_comment(node_path: str, comment_path: str):
node_id = str2id(node_path)
comment_id = str2id(comment_path)
msg = request.json['msg']
attachments = request.json.get('attachments', {})
return comments.patch_node_comment(node_id, comment_id, msg, attachments)
@blueprint.route('/<string(length=24):node_path>/comments/<string(length=24):comment_path>/vote', methods=['POST'])
@require_login(require_roles=ROLES_FOR_COMMENTING)
def post_node_comment_vote(node_path: str, comment_path: str):
node_id = str2id(node_path)
comment_id = str2id(comment_path)
vote_str = request.json['vote']
vote = int(vote_str)
return comments.post_node_comment_vote(node_id, comment_id, vote)
@blueprint.route('/<string(length=24):node_path>/activities', methods=['GET'])
def activities_for_node(node_path: str):
node_id = str2id(node_path)
return jsonify(activities.for_node(node_id))
@blueprint.route('/tagged/')
@blueprint.route('/tagged/<tag>')
def tagged(tag=''):
"""Return all tagged nodes of public projects as JSON."""
from pillar.auth import current_user
# We explicitly register the tagless endpoint to raise a 404, otherwise the PATCH
# handler on /api/nodes/<node_id> will return a 405 Method Not Allowed.
if not tag:
raise wz_exceptions.NotFound()
# Build the (cached) list of tagged nodes
agg_list = _tagged(tag)
for node in agg_list:
if node['properties'].get('duration_seconds'):
node['properties']['duration'] = datetime.timedelta(seconds=node['properties']['duration_seconds'])
if node.get('_created') is not None:
node['pretty_created'] = pretty_date(node['_created'])
# If the user is anonymous, no more information is needed and we return
if current_user.is_anonymous:
return jsonify(agg_list)
# If the user is authenticated, attach view_progress for video assets
view_progress = current_user.nodes['view_progress']
for node in agg_list:
node_id = str(node['_id'])
# View progress should be added only for nodes of type 'asset' and
# with content_type 'video', only if the video was already in the watched
# list for the current user.
if node_id in view_progress:
node['view_progress'] = view_progress[node_id]
return jsonify(agg_list)
def _tagged(tag: str):
"""Fetch all public nodes with the given tag.
This function is cached, see setup_app().
"""
nodes_coll = current_app.db('nodes')
agg = nodes_coll.aggregate([
{'$match': {'properties.tags': tag,
'_deleted': {'$ne': True}}},
# Only get nodes from public projects. This is done after matching the
# tagged nodes, because most likely nobody else will be able to tag
# nodes anyway.
{'$lookup': {
'from': 'projects',
'localField': 'project',
'foreignField': '_id',
'as': '_project',
}},
{'$unwind': '$_project'},
{'$match': {'_project.is_private': False}},
{'$addFields': {
'project._id': '$_project._id',
'project.name': '$_project.name',
'project.url': '$_project.url',
}},
# Don't return the entire project/file for each node.
{'$project': {'_project': False}},
{'$sort': {'_created': -1}}
])
return list(agg)
return jsonify(short_link_info(short_code), status=status)
def generate_and_store_short_code(node):
@@ -241,35 +163,265 @@ def create_short_code(node) -> str:
return short_code
def short_link_info(short_code):
"""Returns the short link info in a dict."""
short_link = urllib.parse.urljoin(
current_app.config['SHORT_LINK_BASE_URL'], short_code)
return {
'short_code': short_code,
'short_link': short_link,
}
def before_replacing_node(item, original):
check_permissions('nodes', original, 'PUT')
update_file_name(item)
def after_replacing_node(item, original):
"""Push an update to the Algolia index when a node item is updated. If the
project is private, prevent public indexing.
"""
from pillar.celery import search_index_tasks as index
projects_collection = current_app.data.driver.db['projects']
project = projects_collection.find_one({'_id': item['project']})
if project.get('is_private', False):
# Skip index updating and return
return
status = item['properties'].get('status', 'unpublished')
node_id = str(item['_id'])
if status == 'published':
index.node_save.delay(node_id)
else:
index.node_delete.delay(node_id)
def before_inserting_nodes(items):
"""Before inserting a node in the collection we check if the user is allowed
and we append the project id to it.
"""
from pillar.auth import current_user
nodes_collection = current_app.data.driver.db['nodes']
def find_parent_project(node):
"""Recursive function that finds the ultimate parent of a node."""
if node and 'parent' in node:
parent = nodes_collection.find_one({'_id': node['parent']})
return find_parent_project(parent)
if node:
return node
else:
return None
for item in items:
check_permissions('nodes', item, 'POST')
if 'parent' in item and 'project' not in item:
parent = nodes_collection.find_one({'_id': item['parent']})
project = find_parent_project(parent)
if project:
item['project'] = project['_id']
# Default the 'user' property to the current user.
item.setdefault('user', current_user.user_id)
def after_inserting_nodes(items):
for item in items:
# Skip subscriptions for first level items (since the context is not a
# node, but a project).
# TODO: support should be added for mixed context
if 'parent' not in item:
return
context_object_id = item['parent']
if item['node_type'] == 'comment':
nodes_collection = current_app.data.driver.db['nodes']
parent = nodes_collection.find_one({'_id': item['parent']})
# Always subscribe to the parent node
activity_subscribe(item['user'], 'node', item['parent'])
if parent['node_type'] == 'comment':
# If the parent is a comment, we provide its own parent as
# context. We do this in order to point the user to an asset
# or group when viewing the notification.
verb = 'replied'
context_object_id = parent['parent']
# Subscribe to the parent of the parent comment (post or group)
activity_subscribe(item['user'], 'node', parent['parent'])
else:
activity_subscribe(item['user'], 'node', item['_id'])
verb = 'commented'
elif item['node_type'] in PILLAR_NAMED_NODE_TYPES:
verb = 'posted'
activity_subscribe(item['user'], 'node', item['_id'])
else:
# Don't automatically create activities for non-Pillar node types,
# as we don't know what would be a suitable verb (among other things).
continue
activity_object_add(
item['user'],
verb,
'node',
item['_id'],
'node',
context_object_id
)
def deduct_content_type(node_doc, original=None):
"""Deduct the content type from the attached file, if any."""
if node_doc['node_type'] != 'asset':
log.debug('deduct_content_type: called on node type %r, ignoring', node_doc['node_type'])
return
node_id = node_doc.get('_id')
try:
file_id = ObjectId(node_doc['properties']['file'])
except KeyError:
if node_id is None:
# Creation of a file-less node is allowed, but updates aren't.
return
log.warning('deduct_content_type: Asset without properties.file, rejecting.')
raise wz_exceptions.UnprocessableEntity('Missing file property for asset node')
files = current_app.data.driver.db['files']
file_doc = files.find_one({'_id': file_id},
{'content_type': 1})
if not file_doc:
log.warning('deduct_content_type: Node %s refers to non-existing file %s, rejecting.',
node_id, file_id)
raise wz_exceptions.UnprocessableEntity('File property refers to non-existing file')
# Guess the node content type from the file content type
file_type = file_doc['content_type']
if file_type.startswith('video/'):
content_type = 'video'
elif file_type.startswith('image/'):
content_type = 'image'
else:
content_type = 'file'
node_doc['properties']['content_type'] = content_type
def nodes_deduct_content_type(nodes):
for node in nodes:
deduct_content_type(node)
def before_returning_node(node):
# Run validation process, since GET on nodes entry point is public
check_permissions('nodes', node, 'GET', append_allowed_methods=True)
# Embed short_link_info if the node has a short_code.
short_code = node.get('short_code')
if short_code:
node['short_link'] = short_link_info(short_code)['short_link']
def before_returning_nodes(nodes):
for node in nodes['_items']:
before_returning_node(node)
def node_set_default_picture(node, original=None):
"""Uses the image of an image asset or colour map of texture node as picture."""
if node.get('picture'):
log.debug('Node %s already has a picture, not overriding', node.get('_id'))
return
node_type = node.get('node_type')
props = node.get('properties', {})
content = props.get('content_type')
if node_type == 'asset' and content == 'image':
image_file_id = props.get('file')
elif node_type == 'texture':
# Find the colour map, defaulting to the first image map available.
image_file_id = None
for image in props.get('files', []):
if image_file_id is None or image.get('map_type') == 'color':
image_file_id = image.get('file')
else:
log.debug('Not setting default picture on node type %s content type %s',
node_type, content)
return
if image_file_id is None:
log.debug('Nothing to set the picture to.')
return
log.debug('Setting default picture for node %s to %s', node.get('_id'), image_file_id)
node['picture'] = image_file_id
def nodes_set_default_picture(nodes):
for node in nodes:
node_set_default_picture(node)
def before_deleting_node(node: dict):
check_permissions('nodes', node, 'DELETE')
def after_deleting_node(item):
from pillar.celery import search_index_tasks as index
index.node_delete.delay(str(item['_id']))
only_for_textures = only_for_node_type_decorator('texture')
@only_for_textures
def texture_sort_files(node, original=None):
"""Sort files alphabetically by map type, with colour map first."""
try:
files = node['properties']['files']
except KeyError:
return
# Sort the map types alphabetically, ensuring 'color' comes first.
as_dict = {f['map_type']: f for f in files}
types = sorted(as_dict.keys(), key=lambda k: '\0' if k == 'color' else k)
node['properties']['files'] = [as_dict[map_type] for map_type in types]
def textures_sort_files(nodes):
for node in nodes:
texture_sort_files(node)
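A quick illustration of the sort key above: the '\0' sentinel makes 'color' sort before every other map type, while the rest stay alphabetical. A standalone sketch with made-up map types:

files = [{'map_type': 'normal'}, {'map_type': 'color'}, {'map_type': 'bump'}]
as_dict = {f['map_type']: f for f in files}
types = sorted(as_dict.keys(), key=lambda k: '\0' if k == 'color' else k)
assert types == ['color', 'bump', 'normal']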
def setup_app(app, url_prefix):
global _tagged
cached = app.cache.memoize(timeout=300)
_tagged = cached(_tagged)
from . import patch
patch.setup_app(app, url_prefix=url_prefix)
app.on_fetched_item_nodes += eve_hooks.before_returning_node
app.on_fetched_resource_nodes += eve_hooks.before_returning_nodes
app.on_fetched_item_nodes += before_returning_node
app.on_fetched_resource_nodes += before_returning_nodes
app.on_replace_nodes += eve_hooks.before_replacing_node
app.on_replace_nodes += eve_hooks.texture_sort_files
app.on_replace_nodes += eve_hooks.deduct_content_type_and_duration
app.on_replace_nodes += eve_hooks.node_set_default_picture
app.on_replaced_nodes += eve_hooks.after_replacing_node
app.on_replace_nodes += before_replacing_node
app.on_replace_nodes += texture_sort_files
app.on_replace_nodes += deduct_content_type
app.on_replace_nodes += node_set_default_picture
app.on_replaced_nodes += after_replacing_node
app.on_insert_nodes += eve_hooks.before_inserting_nodes
app.on_insert_nodes += eve_hooks.nodes_deduct_content_type_and_duration
app.on_insert_nodes += eve_hooks.nodes_set_default_picture
app.on_insert_nodes += eve_hooks.textures_sort_files
app.on_inserted_nodes += eve_hooks.after_inserting_nodes
app.on_insert_nodes += before_inserting_nodes
app.on_insert_nodes += nodes_deduct_content_type
app.on_insert_nodes += nodes_set_default_picture
app.on_insert_nodes += textures_sort_files
app.on_inserted_nodes += after_inserting_nodes
app.on_update_nodes += eve_hooks.texture_sort_files
app.on_update_nodes += texture_sort_files
app.on_delete_item_nodes += eve_hooks.before_deleting_node
app.on_deleted_item_nodes += eve_hooks.after_deleting_node
app.on_delete_item_nodes += before_deleting_node
app.on_deleted_item_nodes += after_deleting_node
app.register_api_blueprint(blueprint, url_prefix=url_prefix)
activities.setup_app(app)

View File

@@ -1,43 +0,0 @@
from eve.methods import get
from pillar.api.utils import gravatar
def for_node(node_id):
activities, _, _, status, _ =\
get('activities',
{
'$or': [
{'object_type': 'node',
'object': node_id},
{'context_object_type': 'node',
'context_object': node_id},
],
},)
for act in activities['_items']:
act['actor_user'] = _user_info(act['actor_user'])
return activities
def _user_info(user_id):
users, _, _, status, _ = get('users', {'_id': user_id})
if len(users['_items']) > 0:
user = users['_items'][0]
user['gravatar'] = gravatar(user['email'])
public_fields = {'full_name', 'username', 'gravatar'}
for field in list(user.keys()):
if field not in public_fields:
del user[field]
return user
return {}
def setup_app(app):
global _user_info
decorator = app.cache.memoize(timeout=300, make_name='%s.public_user_info' % __name__)
_user_info = decorator(_user_info)
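This setup_app() pattern, rebinding a module-level function to its memoized wrapper, recurs throughout Pillar. A minimal standalone sketch with functools.lru_cache standing in for app.cache.memoize:

import functools

def _user_info(user_id):
    print('cache miss for', user_id)
    return {'id': user_id}

def setup_app():
    global _user_info
    _user_info = functools.lru_cache(maxsize=None)(_user_info)

setup_app()
_user_info(1)  # computes the value, prints 'cache miss for 1'
_user_info(1)  # served from the cache, no print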

View File

@@ -1,298 +0,0 @@
import logging
from datetime import datetime
import pymongo
import typing
import bson
import attr
import werkzeug.exceptions as wz_exceptions
import pillar
from pillar import current_app, shortcodes
from pillar.api.nodes.custom.comment import patch_comment
from pillar.api.utils import jsonify, gravatar
from pillar.auth import current_user
log = logging.getLogger(__name__)
@attr.s(auto_attribs=True)
class UserDO:
id: str
full_name: str
gravatar: str
badges_html: str
@attr.s(auto_attribs=True)
class CommentPropertiesDO:
attachments: typing.Dict
rating_positive: int = 0
rating_negative: int = 0
@attr.s(auto_attribs=True)
class CommentDO:
id: bson.ObjectId
parent: bson.ObjectId
project: bson.ObjectId
user: UserDO
msg_html: str
msg_markdown: str
properties: CommentPropertiesDO
created: datetime
updated: datetime
etag: str
replies: typing.List['CommentDO'] = []
current_user_rating: typing.Optional[bool] = None
@attr.s(auto_attribs=True)
class CommentTreeDO:
node_id: bson.ObjectId
project: bson.ObjectId
nbr_of_comments: int = 0
comments: typing.List[CommentDO] = []
def _get_markdowned_html(document: dict, field_name: str) -> str:
cache_field_name = pillar.markdown.cache_field_name(field_name)
html = document.get(cache_field_name)
if html is None:
markdown_src = document.get(field_name) or ''
html = pillar.markdown.markdown(markdown_src)
return html
def jsonify_data_object(data_object: attr):
return jsonify(
attr.asdict(data_object,
recurse=True)
)
class CommentTreeBuilder:
def __init__(self, node_id: bson.ObjectId):
self.node_id = node_id
self.nbr_of_Comments: int = 0
def build(self) -> CommentTreeDO:
enriched_comments = self.child_comments(self.node_id,
sort={'properties.rating_positive': pymongo.DESCENDING,
'_created': pymongo.DESCENDING})
project_id = self.get_project_id()
return CommentTreeDO(
node_id=self.node_id,
project=project_id,
nbr_of_comments=self.nbr_of_Comments,
comments=enriched_comments
)
def child_comments(self, node_id: bson.ObjectId, sort: dict) -> typing.List[CommentDO]:
raw_comments = self.mongodb_comments(node_id, sort)
return [self.enrich(comment) for comment in raw_comments]
def enrich(self, mongo_comment: dict) -> CommentDO:
self.nbr_of_Comments += 1
comment = to_comment_data_object(mongo_comment)
comment.replies = self.child_comments(mongo_comment['_id'],
sort={'_created': pymongo.ASCENDING})
return comment
def get_project_id(self):
nodes_coll = current_app.db('nodes')
result = nodes_coll.find_one({'_id': self.node_id})
return result['project']
@classmethod
def mongodb_comments(cls, node_id: bson.ObjectId, sort: dict) -> typing.Iterator:
nodes_coll = current_app.db('nodes')
return nodes_coll.aggregate([
{'$match': {'node_type': 'comment',
'_deleted': {'$ne': True},
'properties.status': 'published',
'parent': node_id}},
{'$lookup': {"from": "users",
"localField": "user",
"foreignField": "_id",
"as": "user"}},
{'$unwind': {'path': "$user"}},
{'$sort': sort},
])
def get_node_comments(node_id: bson.ObjectId):
comments_tree = CommentTreeBuilder(node_id).build()
return jsonify_data_object(comments_tree)
def post_node_comment(parent_id: bson.ObjectId, markdown_msg: str, attachments: dict):
parent_node = find_node_or_raise(parent_id,
'User %s tried to update comment with bad parent_id %s',
current_user.objectid,
parent_id)
is_reply = parent_node['node_type'] == 'comment'
comment = dict(
parent=parent_id,
project=parent_node['project'],
name='Comment',
user=current_user.objectid,
node_type='comment',
properties=dict(
content=markdown_msg,
status='published',
is_reply=is_reply,
confidence=0,
rating_positive=0,
rating_negative=0,
attachments=attachments,
),
permissions=dict(
users=[dict(
user=current_user.objectid,
methods=['PUT'])
]
)
)
r, _, _, status = current_app.post_internal('nodes', comment)
if status != 201:
log.warning('Unable to post comment on %s as %s: %s',
parent_id, current_user.objectid, r)
raise wz_exceptions.InternalServerError('Unable to create comment')
comment_do = get_comment(parent_id, r['_id'])
return jsonify_data_object(comment_do), 201
def find_node_or_raise(node_id, *args):
nodes_coll = current_app.db('nodes')
node_to_comment = nodes_coll.find_one({
'_id': node_id,
'_deleted': {'$ne': True},
})
if not node_to_comment:
log.warning(args)
raise wz_exceptions.UnprocessableEntity()
return node_to_comment
def patch_node_comment(parent_id: bson.ObjectId, comment_id: bson.ObjectId, markdown_msg: str, attachments: dict):
_, _ = find_parent_and_comment_or_raise(parent_id, comment_id)
patch = dict(
op='edit',
content=markdown_msg,
attachments=attachments
)
json_result = patch_comment(comment_id, patch)
if json_result.json['result'] != 200:
raise wz_exceptions.InternalServerError('Failed to update comment')
comment_do = get_comment(parent_id, comment_id)
return jsonify_data_object(comment_do), 200
def find_parent_and_comment_or_raise(parent_id, comment_id):
parent = find_node_or_raise(parent_id,
'User %s tried to update comment with bad parent_id %s',
current_user.objectid,
parent_id)
comment = find_node_or_raise(comment_id,
'User %s tried to update comment with bad id %s',
current_user.objectid,
comment_id)
validate_comment_parent_relation(comment, parent)
return parent, comment
def validate_comment_parent_relation(comment, parent):
if comment['parent'] != parent['_id']:
log.warning('User %s tried to update comment with bad parent/comment pair. parent_id: %s comment_id: %s',
current_user.objectid,
parent['_id'],
comment['_id'])
raise wz_exceptions.BadRequest()
def get_comment(parent_id: bson.ObjectId, comment_id: bson.ObjectId) -> CommentDO:
nodes_coll = current_app.db('nodes')
mongo_comment = list(nodes_coll.aggregate([
{'$match': {'node_type': 'comment',
'_deleted': {'$ne': True},
'properties.status': 'published',
'parent': parent_id,
'_id': comment_id}},
{'$lookup': {"from": "users",
"localField": "user",
"foreignField": "_id",
"as": "user"}},
{'$unwind': {'path': "$user"}},
]))[0]
return to_comment_data_object(mongo_comment)
def to_comment_data_object(mongo_comment: dict) -> CommentDO:
def current_user_rating():
if current_user.is_authenticated:
for rating in mongo_comment['properties'].get('ratings', ()):
if str(rating['user']) != current_user.objectid:
continue
return rating['is_positive']
return None
user_dict = mongo_comment['user']
user = UserDO(
id=str(mongo_comment['user']['_id']),
full_name=user_dict['full_name'],
gravatar=gravatar(user_dict['email']),
badges_html=user_dict.get('badges', {}).get('html', '')
)
html = _get_markdowned_html(mongo_comment['properties'], 'content')
html = shortcodes.render_commented(html, context=mongo_comment['properties'])
return CommentDO(
id=mongo_comment['_id'],
parent=mongo_comment['parent'],
project=mongo_comment['project'],
user=user,
msg_html=html,
msg_markdown=mongo_comment['properties']['content'],
current_user_rating=current_user_rating(),
created=mongo_comment['_created'],
updated=mongo_comment['_updated'],
etag=mongo_comment['_etag'],
properties=CommentPropertiesDO(
attachments=mongo_comment['properties'].get('attachments', {}),
rating_positive=mongo_comment['properties']['rating_positive'],
rating_negative=mongo_comment['properties']['rating_negative']
)
)
def post_node_comment_vote(parent_id: bson.ObjectId, comment_id: bson.ObjectId, vote: int):
normalized_vote = min(max(vote, -1), 1)
_, _ = find_parent_and_comment_or_raise(parent_id, comment_id)
actions = {
1: 'upvote',
0: 'revoke',
-1: 'downvote',
}
patch = dict(
op=actions[normalized_vote]
)
json_result = patch_comment(comment_id, patch)
if json_result.json['_status'] != 'OK':
raise wz_exceptions.InternalServerError('Failed to vote on comment')
comment_do = get_comment(parent_id, comment_id)
return jsonify_data_object(comment_do), 200
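The min/max clamp above folds any integer vote into the three supported PATCH operations; in isolation:

def vote_action(vote: int) -> str:
    normalized = min(max(vote, -1), 1)
    return {1: 'upvote', 0: 'revoke', -1: 'downvote'}[normalized]

assert vote_action(5) == 'upvote'
assert vote_action(0) == 'revoke'
assert vote_action(-3) == 'downvote'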

View File

@@ -5,7 +5,7 @@ import logging
from flask import current_app
import werkzeug.exceptions as wz_exceptions
from pillar.api.utils import authorization, authentication, jsonify, remove_private_keys
from pillar.api.utils import authorization, authentication, jsonify
from . import register_patch_handler
@@ -135,7 +135,10 @@ def edit_comment(user_id, node_id, patch):
# we can pass this stuff to Eve's patch_internal; that way the validation &
# authorisation system has enough info to work.
nodes_coll = current_app.data.driver.db['nodes']
node = nodes_coll.find_one(node_id)
projection = {'user': 1,
'project': 1,
'node_type': 1}
node = nodes_coll.find_one(node_id, projection=projection)
if node is None:
log.warning('User %s wanted to patch non-existing node %s' % (user_id, node_id))
raise wz_exceptions.NotFound('Node %s not found' % node_id)
@@ -143,12 +146,12 @@ def edit_comment(user_id, node_id, patch):
if node['user'] != user_id and not authorization.user_has_role('admin'):
raise wz_exceptions.Forbidden('You can only edit your own comments.')
node = remove_private_keys(node)
node['properties']['content'] = patch['content']
node['properties']['attachments'] = patch.get('attachments', {})
# Use Eve to PUT this node, as that also updates the etag and we want to replace attachments.
r, _, _, status = current_app.put_internal('nodes',
node,
# Use Eve to PATCH this node, as that also updates the etag.
r, _, _, status = current_app.patch_internal('nodes',
{'properties.content': patch['content'],
'project': node['project'],
'user': node['user'],
'node_type': node['node_type']},
concurrency_check=False,
_id=node_id)
if status != 200:

View File

@@ -1,352 +0,0 @@
import collections
import functools
import logging
import urllib.parse
from bson import ObjectId
from werkzeug import exceptions as wz_exceptions
from pillar import current_app
from pillar.api.activities import activity_subscribe, activity_object_add
from pillar.api.file_storage_backends.gcs import update_file_name
from pillar.api.node_types import PILLAR_NAMED_NODE_TYPES
from pillar.api.utils import random_etag
from pillar.api.utils.authorization import check_permissions
log = logging.getLogger(__name__)
def before_returning_node(node):
# Run validation process, since GET on nodes entry point is public
check_permissions('nodes', node, 'GET', append_allowed_methods=True)
# Embed short_link_info if the node has a short_code.
short_code = node.get('short_code')
if short_code:
node['short_link'] = short_link_info(short_code)['short_link']
def before_returning_nodes(nodes):
for node in nodes['_items']:
before_returning_node(node)
def only_for_node_type_decorator(*required_node_type_names):
"""Returns a decorator that checks its first argument's node type.
If the node type is not of the required node type, returns None,
otherwise calls the wrapped function.
>>> deco = only_for_node_type_decorator('comment')
>>> @deco
... def handle_comment(node): pass
>>> deco = only_for_node_type_decorator('comment', 'post')
>>> @deco
... def handle_comment_or_post(node): pass
"""
# Convert to a set for efficient 'x in required_node_type_names' queries.
required_node_type_names = set(required_node_type_names)
def only_for_node_type(wrapped):
@functools.wraps(wrapped)
def wrapper(node, *args, **kwargs):
if node.get('node_type') not in required_node_type_names:
return
return wrapped(node, *args, **kwargs)
return wrapper
only_for_node_type.__doc__ = "Decorator, immediately returns when " \
"the first argument is not of type %s." % required_node_type_names
return only_for_node_type
def before_replacing_node(item, original):
check_permissions('nodes', original, 'PUT')
update_file_name(item)
# XXX Dillo specific feature (for Graphicall)
if 'download' in original['properties']:
# Check if the file referenced in the download property was updated.
# If so, mark the old file as deleted. A cronjob will take care of
# removing the actual file based on the _delete status of file docs.
original_file_id = original['properties']['download']
new_file_id = item['properties']['download']
if original_file_id == new_file_id:
return
# Mark the original file as _deleted
files = current_app.data.driver.db['files']
files.update_one({'_id': original_file_id}, {'$set': {'_deleted': True}})
log.info('Marking file %s as _deleted' % original_file_id)
def after_replacing_node(item, original):
"""Push an update to the Algolia index when a node item is updated. If the
project is private, prevent public indexing.
"""
from pillar.celery import search_index_tasks as index
projects_collection = current_app.data.driver.db['projects']
project = projects_collection.find_one({'_id': item['project']})
if project.get('is_private', False):
# Skip index updating and return
return
status = item['properties'].get('status', 'unpublished')
node_id = str(item['_id'])
if status == 'published':
index.node_save.delay(node_id)
else:
index.node_delete.delay(node_id)
def before_inserting_nodes(items):
"""Before inserting a node in the collection we check if the user is allowed
and we append the project id to it.
"""
from pillar.auth import current_user
nodes_collection = current_app.data.driver.db['nodes']
def find_parent_project(node):
"""Recursive function that finds the ultimate parent of a node."""
if node and 'parent' in node:
parent = nodes_collection.find_one({'_id': node['parent']})
return find_parent_project(parent)
if node:
return node
else:
return None
for item in items:
check_permissions('nodes', item, 'POST')
if 'parent' in item and 'project' not in item:
parent = nodes_collection.find_one({'_id': item['parent']})
project = find_parent_project(parent)
if project:
item['project'] = project['_id']
# Default the 'user' property to the current user.
item.setdefault('user', current_user.user_id)
def get_comment_verb_and_context_object_id(comment):
nodes_collection = current_app.data.driver.db['nodes']
verb = 'commented'
parent = nodes_collection.find_one({'_id': comment['parent']})
context_object_id = comment['parent']
while parent['node_type'] == 'comment':
# If the parent is a comment, we provide its own parent as
# context. We do this in order to point the user to an asset
# or group when viewing the notification.
verb = 'replied'
context_object_id = parent['parent']
parent = nodes_collection.find_one({'_id': parent['parent']})
return verb, context_object_id
def after_inserting_nodes(items):
for item in items:
context_object_id = None
# TODO: support should be added for mixed context
if item['node_type'] in PILLAR_NAMED_NODE_TYPES:
activity_subscribe(item['user'], 'node', item['_id'])
verb = 'posted'
context_object_id = item.get('parent')
if item['node_type'] == 'comment':
# Always subscribe to the parent node
activity_subscribe(item['user'], 'node', item['parent'])
verb, context_object_id = get_comment_verb_and_context_object_id(item)
# Subscribe to the parent of the parent comment (post or group)
activity_subscribe(item['user'], 'node', context_object_id)
if context_object_id and item['node_type'] in PILLAR_NAMED_NODE_TYPES:
# * Skip activity for first level items (since the context is not a
# node, but a project).
# * Don't automatically create activities for non-Pillar node types,
# as we don't know what would be a suitable verb (among other things).
activity_object_add(
item['user'],
verb,
'node',
item['_id'],
'node',
context_object_id
)
def deduct_content_type_and_duration(node_doc, original=None):
"""Deduct the content type from the attached file, if any."""
if node_doc['node_type'] != 'asset':
log.debug('deduct_content_type: called on node type %r, ignoring', node_doc['node_type'])
return
node_id = node_doc.get('_id')
try:
file_id = ObjectId(node_doc['properties']['file'])
except KeyError:
if node_id is None:
# Creation of a file-less node is allowed, but updates aren't.
return
log.warning('deduct_content_type: Asset without properties.file, rejecting.')
raise wz_exceptions.UnprocessableEntity('Missing file property for asset node')
files = current_app.data.driver.db['files']
file_doc = files.find_one({'_id': file_id},
{'content_type': 1,
'variations': 1})
if not file_doc:
log.warning('deduct_content_type: Node %s refers to non-existing file %s, rejecting.',
node_id, file_id)
raise wz_exceptions.UnprocessableEntity('File property refers to non-existing file')
# Guess the node content type from the file content type
file_type = file_doc['content_type']
if file_type.startswith('video/'):
content_type = 'video'
elif file_type.startswith('image/'):
content_type = 'image'
else:
content_type = 'file'
node_doc['properties']['content_type'] = content_type
if content_type == 'video':
duration = file_doc['variations'][0].get('duration')
if duration:
node_doc['properties']['duration_seconds'] = duration
else:
log.warning('Video file %s has no duration', file_id)
def nodes_deduct_content_type_and_duration(nodes):
for node in nodes:
deduct_content_type_and_duration(node)
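The MIME-prefix mapping used above, isolated as a pure function for reference (a sketch, not part of the module):

def guess_content_type(mime: str) -> str:
    if mime.startswith('video/'):
        return 'video'
    if mime.startswith('image/'):
        return 'image'
    return 'file'

assert guess_content_type('video/mp4') == 'video'
assert guess_content_type('image/png') == 'image'
assert guess_content_type('application/zip') == 'file'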
def node_set_default_picture(node, original=None):
"""Uses the image of an image asset or colour map of texture node as picture."""
if node.get('picture'):
log.debug('Node %s already has a picture, not overriding', node.get('_id'))
return
node_type = node.get('node_type')
props = node.get('properties', {})
content = props.get('content_type')
if node_type == 'asset' and content == 'image':
image_file_id = props.get('file')
elif node_type == 'texture':
# Find the colour map, defaulting to the first image map available.
image_file_id = None
for image in props.get('files', []):
if image_file_id is None or image.get('map_type') == 'color':
image_file_id = image.get('file')
else:
log.debug('Not setting default picture on node type %s content type %s',
node_type, content)
return
if image_file_id is None:
log.debug('Nothing to set the picture to.')
return
log.debug('Setting default picture for node %s to %s', node.get('_id'), image_file_id)
node['picture'] = image_file_id
def nodes_set_default_picture(nodes):
for node in nodes:
node_set_default_picture(node)
def before_deleting_node(node: dict):
check_permissions('nodes', node, 'DELETE')
remove_project_references(node)
def remove_project_references(node):
project_id = node.get('project')
if not project_id:
return
node_id = node['_id']
log.info('Removing references to node %s from project %s', node_id, project_id)
projects_col = current_app.db('projects')
project = projects_col.find_one({'_id': project_id})
updates = collections.defaultdict(dict)
if project.get('header_node') == node_id:
updates['$unset']['header_node'] = node_id
project_reference_lists = ('nodes_blog', 'nodes_featured', 'nodes_latest')
for list_name in project_reference_lists:
references = project.get(list_name)
if not references:
continue
try:
references.remove(node_id)
except ValueError:
continue
updates['$set'][list_name] = references
if not updates:
return
updates['$set']['_etag'] = random_etag()
result = projects_col.update_one({'_id': project_id}, updates)
if result.modified_count != 1:
log.warning('Removing references to node %s from project %s resulted in %d modified documents (expected 1)',
node_id, project_id, result.modified_count)
def after_deleting_node(item):
from pillar.celery import search_index_tasks as index
index.node_delete.delay(str(item['_id']))
only_for_textures = only_for_node_type_decorator('texture')
@only_for_textures
def texture_sort_files(node, original=None):
"""Sort files alphabetically by map type, with colour map first."""
try:
files = node['properties']['files']
except KeyError:
return
# Sort the map types alphabetically, ensuring 'color' comes first.
as_dict = {f['map_type']: f for f in files}
types = sorted(as_dict.keys(), key=lambda k: '\0' if k == 'color' else k)
node['properties']['files'] = [as_dict[map_type] for map_type in types]
def textures_sort_files(nodes):
for node in nodes:
texture_sort_files(node)
def short_link_info(short_code):
"""Returns the short link info in a dict."""
short_link = urllib.parse.urljoin(
current_app.config['SHORT_LINK_BASE_URL'], short_code)
return {
'short_code': short_code,
'short_link': short_link,
}

View File

@@ -1,7 +1,7 @@
"""Code for moving around nodes."""
import attr
import pymongo.database
import flask_pymongo.wrappers
from bson import ObjectId
from pillar import attrs_extra
@@ -10,7 +10,7 @@ import pillar.api.file_storage.moving
@attr.s
class NodeMover(object):
db = attr.ib(validator=attr.validators.instance_of(pymongo.database.Database))
db = attr.ib(validator=attr.validators.instance_of(flask_pymongo.wrappers.Database))
skip_gcs = attr.ib(default=False, validator=attr.validators.instance_of(bool))
_log = attrs_extra.log('%s.NodeMover' % __name__)

View File

@@ -9,7 +9,6 @@ def setup_app(app, api_prefix):
app.on_replace_projects += hooks.override_is_private_field
app.on_replace_projects += hooks.before_edit_check_permissions
app.on_replace_projects += hooks.protect_sensitive_fields
app.on_replace_projects += hooks.parse_markdown
app.on_update_projects += hooks.override_is_private_field
app.on_update_projects += hooks.before_edit_check_permissions
@@ -20,8 +19,6 @@ def setup_app(app, api_prefix):
app.on_insert_projects += hooks.before_inserting_override_is_private_field
app.on_insert_projects += hooks.before_inserting_projects
app.on_insert_projects += hooks.parse_markdowns
app.on_inserted_projects += hooks.after_inserting_projects
app.on_fetched_item_projects += hooks.before_returning_project_permissions

View File

@@ -3,7 +3,6 @@ import logging
from flask import request, abort
import pillar
from pillar import current_app
from pillar.api.node_types.asset import node_type_asset
from pillar.api.node_types.comment import node_type_comment
@@ -72,19 +71,14 @@ def before_delete_project(document):
def after_delete_project(project: dict):
"""Perform delete on the project's files too."""
from werkzeug.exceptions import NotFound
from eve.methods.delete import delete
pid = project['_id']
log.info('Project %s was deleted, also deleting its files.', pid)
try:
r, _, _, status = delete('files', {'project': pid})
except NotFound:
# There were no files, and that's fine.
return
if status != 204:
# Will never happen because bloody Eve always returns 204 or raises an exception.
log.warning('Unable to delete files of project %s: %s', pid, r)
@@ -247,37 +241,3 @@ def project_node_type_has_method(response):
def projects_node_type_has_method(response):
for project in response['_items']:
project_node_type_has_method(project)
def parse_markdown(project, original=None):
schema = current_app.config['DOMAIN']['projects']['schema']
def find_markdown_fields(schema, project):
"""Find and process all Markdown coerced fields.
- look for fields with a 'coerce': 'markdown' property
- parse the name of the field and generate the sibling field name (_<field_name>_html -> <field_name>)
- parse the content of the <field_name> field as markdown and save it in _<field_name>_html
"""
for field_name, field_value in schema.items():
if not isinstance(field_value, dict):
continue
if field_value.get('coerce') != 'markdown':
continue
if field_name not in project:
continue
# Construct markdown source field name (strip the leading '_' and the trailing '_html')
source_field_name = field_name[1:-5]
html = pillar.markdown.markdown(project[source_field_name])
project[field_name] = html
if isinstance(project, dict) and field_name in project:
find_markdown_fields(field_value, project[field_name])
find_markdown_fields(schema, project)
def parse_markdowns(items):
for item in items:
parse_markdown(item)

View File

@@ -7,7 +7,6 @@ from werkzeug.exceptions import abort
from pillar import current_app
from pillar.auth import current_user
from pillar.api import file_storage_backends
log = logging.getLogger(__name__)
@@ -156,18 +155,6 @@ def project_id(project_url: str) -> ObjectId:
return proj['_id']
def get_project_url(project_id: ObjectId) -> str:
"""Returns the project URL, or raises a ValueError when not found."""
proj_coll = current_app.db('projects')
proj = proj_coll.find_one({'_id': project_id, '_deleted': {'$ne': True}},
projection={'url': True})
if not proj:
raise ValueError(f'project with id={project_id} not found')
return proj['url']
def get_project(project_url: str) -> dict:
"""Find a project in the database, raises ValueError if not found.
@@ -200,14 +187,3 @@ def put_project(project: dict):
if status_code != 200:
raise ValueError(f"Can't update project {pid}, "
f"status {status_code} with issues: {result}")
def storage(project_id: ObjectId) -> file_storage_backends.Bucket:
"""Return the storage bucket for this project.
For now this returns a bucket in the default storage backend, since
individual projects do not have a 'storage backend' setting (this is
set per file, not per project).
"""
return file_storage_backends.default_storage_backend(str(project_id))

View File

@@ -81,7 +81,6 @@ class Node(es.DocType):
fields={
'id': es.Keyword(),
'name': es.Keyword(),
'url': es.Keyword(),
}
)
@@ -154,21 +153,18 @@ def create_doc_from_node_data(node_to_index: dict) -> typing.Optional[Node]:
doc.objectID = str(node_to_index['objectID'])
doc.node_type = node_to_index['node_type']
doc.name = node_to_index['name']
doc.description = node_to_index.get('description')
doc.user.id = str(node_to_index['user']['_id'])
doc.user.name = node_to_index['user']['full_name']
doc.project.id = str(node_to_index['project']['_id'])
doc.project.name = node_to_index['project']['name']
doc.project.url = node_to_index['project']['url']
if node_to_index['node_type'] == 'asset':
doc.media = node_to_index['media']
doc.picture = str(node_to_index.get('picture'))
doc.picture = node_to_index.get('picture')
doc.tags = node_to_index.get('tags')
doc.license_notes = node_to_index.get('license_notes')
doc.is_free = node_to_index.get('is_free')
doc.created_at = node_to_index['created']
doc.updated_at = node_to_index['updated']

View File

@@ -3,18 +3,16 @@ import logging
import typing
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search, Q, MultiSearch
from elasticsearch_dsl import Search, Q
from elasticsearch_dsl.query import Query
from pillar import current_app
log = logging.getLogger(__name__)
BOOLEAN_TERMS = ['is_free']
NODE_AGG_TERMS = ['node_type', 'media', 'tags', *BOOLEAN_TERMS]
NODE_AGG_TERMS = ['node_type', 'media', 'tags', 'is_free']
USER_AGG_TERMS = ['roles', ]
ITEMS_PER_PAGE = 10
USER_SOURCE_INCLUDE = ['full_name', 'objectID', 'username']
# Will be set in setup_app()
client: Elasticsearch = None
@@ -29,25 +27,26 @@ def add_aggs_to_search(search, agg_terms):
search.aggs.bucket(term, 'terms', field=term)
def make_filter(must: list, terms: dict) -> list:
def make_must(must: list, terms: dict) -> list:
""" Given term parameters append must queries to the must list """
for field, value in terms.items():
if value not in (None, ''):
must.append({'term': {field: value}})
if value:
must.append({'match': {field: value}})
return must
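For reference, the entries built here are plain Elasticsearch term clauses; given terms={'media': 'video', 'is_free': True} the list becomes (standalone sketch):

must = []
for field, value in {'media': 'video', 'is_free': True}.items():
    if value not in (None, ''):
        must.append({'term': {field: value}})
assert must == [{'term': {'media': 'video'}}, {'term': {'is_free': True}}]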
def nested_bool(filters: list, should: list, terms: dict, *, index_alias: str) -> Search:
def nested_bool(must: list, should: list, terms: dict, *, index_alias: str) -> Search:
"""
Create a nested bool, where the aggregation selection is a must.
:param index_alias: 'USER' or 'NODE', see ELASTIC_INDICES config.
"""
filters = make_filter(filters, terms)
must = make_must(must, terms)
bool_query = Q('bool', should=should)
bool_query = Q('bool', must=bool_query, filter=filters)
must.append(bool_query)
bool_query = Q('bool', must=must)
index = current_app.config['ELASTIC_INDICES'][index_alias]
search = Search(using=client, index=index)
@@ -56,34 +55,12 @@ def nested_bool(filters: list, should: list, terms: dict, *, index_alias: str) -
return search
def do_multi_node_search(queries: typing.List[dict]) -> typing.List[dict]:
"""
Given user query input and term refinements
search for public published nodes
"""
search = create_multi_node_search(queries)
return _execute_multi(search)
def do_node_search(query: str, terms: dict, page: int, project_id: str='') -> dict:
"""
Given user query input and term refinements
search for public published nodes
"""
search = create_node_search(query, terms, page, project_id)
return _execute(search)
def create_multi_node_search(queries: typing.List[dict]) -> MultiSearch:
search = MultiSearch(using=client)
for q in queries:
search = search.add(create_node_search(**q))
return search
def create_node_search(query: str, terms: dict, page: int, project_id: str='') -> Search:
terms = _transform_terms(terms)
should = [
Q('match', name=query),
@@ -94,30 +71,52 @@ def create_node_search(query: str, terms: dict, page: int, project_id: str='') -
Q('term', media=query),
Q('term', tags=query),
]
filters = []
must = []
if project_id:
filters.append({'term': {'project.id': project_id}})
must.append({'term': {'project.id': project_id}})
if not query:
should = []
search = nested_bool(filters, should, terms, index_alias='NODE')
search = nested_bool(must, should, terms, index_alias='NODE')
if not query:
search = search.sort('-created_at')
add_aggs_to_search(search, NODE_AGG_TERMS)
search = paginate(search, page)
if log.isEnabledFor(logging.DEBUG):
log.debug(json.dumps(search.to_dict(), indent=4))
return search
response = search.execute()
if log.isEnabledFor(logging.DEBUG):
log.debug(json.dumps(response.to_dict(), indent=4))
return response.to_dict()
def do_user_search(query: str, terms: dict, page: int) -> dict:
""" return user objects represented in elasicsearch result dict"""
search = create_user_search(query, terms, page)
return _execute(search)
must, should = _common_user_search(query)
search = nested_bool(must, should, terms, index_alias='USER')
add_aggs_to_search(search, USER_AGG_TERMS)
search = paginate(search, page)
if log.isEnabledFor(logging.DEBUG):
log.debug(json.dumps(search.to_dict(), indent=4))
response = search.execute()
if log.isEnabledFor(logging.DEBUG):
log.debug(json.dumps(response.to_dict(), indent=4))
return response.to_dict()
def _common_user_search(query: str) -> (typing.List[Query], typing.List[Query]):
"""Construct (filter,should) for regular + admin user search."""
"""Construct (must,shoud) for regular + admin user search."""
if not query:
return [], []
@@ -145,31 +144,8 @@ def do_user_search_admin(query: str, terms: dict, page: int) -> dict:
search all user fields and provide aggregation information
"""
search = create_user_admin_search(query, terms, page)
return _execute(search)
must, should = _common_user_search(query)
def _execute(search: Search) -> dict:
if log.isEnabledFor(logging.DEBUG):
log.debug(json.dumps(search.to_dict(), indent=4))
resp = search.execute()
if log.isEnabledFor(logging.DEBUG):
log.debug(json.dumps(resp.to_dict(), indent=4))
return resp.to_dict()
def _execute_multi(search: typing.List[Search]) -> typing.List[dict]:
if log.isEnabledFor(logging.DEBUG):
log.debug(json.dumps(search.to_dict(), indent=4))
resp = search.execute()
if log.isEnabledFor(logging.DEBUG):
log.debug(json.dumps(resp.to_dict(), indent=4))
return [r.to_dict() for r in resp]
def create_user_admin_search(query: str, terms: dict, page: int) -> Search:
terms = _transform_terms(terms)
filters, should = _common_user_search(query)
if query:
# We most likely got an ID field; we should find it.
if len(query) == len('563aca02c379cf0005e8e17d'):
@@ -179,34 +155,26 @@ def create_user_admin_search(query: str, terms: dict, page: int) -> Search:
'boost': 100, # how much more it counts for the score
}
}})
search = nested_bool(filters, should, terms, index_alias='USER')
search = nested_bool(must, should, terms, index_alias='USER')
add_aggs_to_search(search, USER_AGG_TERMS)
search = paginate(search, page)
return search
if log.isEnabledFor(logging.DEBUG):
log.debug(json.dumps(search.to_dict(), indent=4))
def create_user_search(query: str, terms: dict, page: int) -> Search:
search = create_user_admin_search(query, terms, page)
return search.source(include=USER_SOURCE_INCLUDE)
response = search.execute()
if log.isEnabledFor(logging.DEBUG):
log.debug(json.dumps(response.to_dict(), indent=4))
return response.to_dict()
def paginate(search: Search, page_idx: int) -> Search:
return search[page_idx * ITEMS_PER_PAGE:(page_idx + 1) * ITEMS_PER_PAGE]
def _transform_terms(terms: dict) -> dict:
"""
Ugly hack! Elastic uses 1/0 for boolean values in its aggregate response,
but expects true/false in queries.
"""
transformed = terms.copy()
for t in BOOLEAN_TERMS:
orig = transformed.get(t)
if orig in ('1', '0'):
transformed[t] = bool(int(orig))
return transformed
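An isolated example of the 1/0-to-boolean round trip performed above (assuming 'is_free' is the only boolean term):

terms = {'is_free': '1', 'media': 'video'}
transformed = terms.copy()
for t in ('is_free',):
    orig = transformed.get(t)
    if orig in ('1', '0'):
        transformed[t] = bool(int(orig))
assert transformed == {'is_free': True, 'media': 'video'}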
def setup_app(app):
global client

View File

@@ -18,7 +18,7 @@ TERMS = [
]
def _term_filters(args) -> dict:
def _term_filters() -> dict:
"""
Check if the frontend wants to filter
on specific fields, AKA facets;
@@ -26,52 +26,35 @@ def _term_filters(args) -> dict:
return a mapping from term field name
to the provided user term value.
"""
return {term: args.get(term, '') for term in TERMS}
return {term: request.args.get(term, '') for term in TERMS}
def _page_index(page) -> int:
def _page_index() -> int:
"""Return the page index from the query string."""
try:
page_idx = int(page)
page_idx = int(request.args.get('page') or '0')
except (TypeError, ValueError):
log.info('invalid page number %r received', request.args.get('page'))
raise wz_exceptions.BadRequest()
return page_idx
@blueprint_search.route('/', methods=['GET'])
@blueprint_search.route('/')
def search_nodes():
searchword = request.args.get('q', '')
project_id = request.args.get('project', '')
terms = _term_filters(request.args)
page_idx = _page_index(request.args.get('page', 0))
terms = _term_filters()
page_idx = _page_index()
result = queries.do_node_search(searchword, terms, page_idx, project_id)
return jsonify(result)
@blueprint_search.route('/multisearch', methods=['POST'])
def multi_search_nodes():
if len(request.args) != 1:
log.info(f'Expected 1 argument, received {len(request.args)}')
json_obj = request.json
q = []
for row in json_obj:
q.append({
'query': row.get('q', ''),
'project_id': row.get('project', ''),
'terms': _term_filters(row),
'page': _page_index(row.get('page', 0))
})
result = queries.do_multi_node_search(q)
return jsonify(result)
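The multisearch endpoint expects a JSON array of rows, each carrying its own query string, optional project id, facet terms and page index; a hedged example payload (all values illustrative):

import json

payload = [
    {'q': 'tree'},  # plain query, defaults for everything else
    {'q': 'rock', 'media': 'image', 'is_free': '1', 'page': 0},  # with facet terms
]
# POST this as the JSON request body; the URL prefix depends on where
# blueprint_search is registered.
print(json.dumps(payload))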
@blueprint_search.route('/user')
def search_user():
searchword = request.args.get('q', '')
terms = _term_filters(request.args)
page_idx = _page_index(request.args.get('page', 0))
terms = _term_filters()
page_idx = _page_index()
# result is the raw elasticseach output.
# we need to filter fields in case of user objects.
@@ -82,6 +65,27 @@ def search_user():
resp.status_code = 500
return resp
# filter sensitive stuff
# we only need objectID, full_name and username.
hits = result.get('hits', {})
new_hits = []
for hit in hits.get('hits'):
source = hit['_source']
single_hit = {
'_source': {
'objectID': source.get('objectID'),
'username': source.get('username'),
'full_name': source.get('full_name'),
}
}
new_hits.append(single_hit)
# replace search result with safe subset
result['hits']['hits'] = new_hits
return jsonify(result)
@@ -93,8 +97,8 @@ def search_user_admin():
"""
searchword = request.args.get('q', '')
terms = _term_filters(request.args)
page_idx = _page_index(request.args.get('page', 0))
terms = _term_filters()
page_idx = _page_index()
try:
result = queries.do_user_search_admin(searchword, terms, page_idx)

View File

@@ -1,374 +0,0 @@
import itertools
import typing
from datetime import datetime
from operator import itemgetter
import attr
import bson
import pymongo
from flask import Blueprint, current_app, request, url_for
import pillar
from pillar import shortcodes
from pillar.api.utils import jsonify, pretty_duration, str2id
blueprint = Blueprint('timeline', __name__)
@attr.s(auto_attribs=True)
class TimelineDO:
groups: typing.List['GroupDO'] = []
continue_from: typing.Optional[float] = None
@attr.s(auto_attribs=True)
class GroupDO:
label: typing.Optional[str] = None
url: typing.Optional[str] = None
items: typing.Dict = {}
groups: typing.Iterable['GroupDO'] = []
class SearchHelper:
def __init__(self, nbr_of_weeks: int, continue_from: typing.Optional[datetime],
project_ids: typing.List[bson.ObjectId], sort_direction: str):
self._nbr_of_weeks = nbr_of_weeks
self._continue_from = continue_from
self._project_ids = project_ids
self.sort_direction = sort_direction
def _match(self, continue_from: typing.Optional[datetime]) -> dict:
created = {}
if continue_from:
if self.sort_direction == 'desc':
created = {'_created': {'$lt': continue_from}}
else:
created = {'_created': {'$gt': continue_from}}
return {'_deleted': {'$ne': True},
'node_type': {'$in': ['asset', 'post']},
'properties.status': {'$eq': 'published'},
'project': {'$in': self._project_ids},
**created,
}
def raw_weeks_from_mongo(self) -> pymongo.collection.Collection:
direction = pymongo.DESCENDING if self.sort_direction == 'desc' else pymongo.ASCENDING
nodes_coll = current_app.db('nodes')
return nodes_coll.aggregate([
{'$match': self._match(self._continue_from)},
{'$lookup': {"from": "projects",
"localField": "project",
"foreignField": "_id",
"as": "project"}},
{'$unwind': {'path': "$project"}},
{'$lookup': {"from": "users",
"localField": "user",
"foreignField": "_id",
"as": "user"}},
{'$unwind': {'path': "$user"}},
{'$project': {
'_created': 1,
'project._id': 1,
'project.url': 1,
'project.name': 1,
'user._id': 1,
'user.full_name': 1,
'name': 1,
'node_type': 1,
'picture': 1,
'properties': 1,
'permissions': 1,
}},
{'$group': {
'_id': {'year': {'$isoWeekYear': '$_created'},
'week': {'$isoWeek': '$_created'}},
'nodes': {'$push': '$$ROOT'}
}},
{'$sort': {'_id.year': direction,
'_id.week': direction}},
{'$limit': self._nbr_of_weeks}
])
def has_more(self, continue_from: datetime) -> bool:
nodes_coll = current_app.db('nodes')
result = nodes_coll.count(self._match(continue_from))
return bool(result)
class Grouper:
@classmethod
def label(cls, node):
return None
@classmethod
def url(cls, node):
return None
@classmethod
def group_key(cls) -> typing.Callable[[dict], typing.Any]:
raise NotImplementedError()
@classmethod
def sort_key(cls) -> typing.Callable[[dict], typing.Any]:
raise NotImplementedError()
class ProjectGrouper(Grouper):
@classmethod
def label(cls, project: dict):
return project['name']
@classmethod
def url(cls, project: dict):
return url_for('projects.view', project_url=project['url'])
@classmethod
def group_key(cls) -> typing.Callable[[dict], typing.Any]:
return itemgetter('project')
@classmethod
def sort_key(cls) -> typing.Callable[[dict], typing.Any]:
return lambda node: node['project']['_id']
class UserGrouper(Grouper):
@classmethod
def label(cls, user):
return user['full_name']
@classmethod
def group_key(cls) -> typing.Callable[[dict], typing.Any]:
return itemgetter('user')
@classmethod
def sort_key(cls) -> typing.Callable[[dict], typing.Any]:
return lambda node: node['user']['_id']
class TimeLineBuilder:
def __init__(self, search_helper: SearchHelper, grouper: typing.Type[Grouper]):
self.search_helper = search_helper
self.grouper = grouper
self.continue_from = None
def build(self) -> TimelineDO:
raw_weeks = self.search_helper.raw_weeks_from_mongo()
clean_weeks = (self.create_week_group(week) for week in raw_weeks)
return TimelineDO(
groups=list(clean_weeks),
continue_from=self.continue_from.timestamp() if self.search_helper.has_more(self.continue_from) else None
)
def create_week_group(self, week: dict) -> GroupDO:
nodes = week['nodes']
nodes.sort(key=itemgetter('_created'), reverse=True)
self.update_continue_from(nodes)
groups = self.create_groups(nodes)
return GroupDO(
label=f'Week {week["_id"]["week"]}, {week["_id"]["year"]}',
groups=groups
)
def create_groups(self, nodes: typing.List[dict]) -> typing.List[GroupDO]:
self.sort_nodes(nodes) # groupby assumes that the list is sorted
nodes_grouped = itertools.groupby(nodes, self.grouper.group_key())
groups = (self.clean_group(grouped_by, group) for grouped_by, group in nodes_grouped)
groups_sorted = sorted(groups, key=self.group_row_sorter, reverse=True)
return groups_sorted
def sort_nodes(self, nodes: typing.List[dict]):
nodes.sort(key=itemgetter('node_type'))
nodes.sort(key=self.grouper.sort_key())
def update_continue_from(self, sorted_nodes: typing.List[dict]):
if self.search_helper.sort_direction == 'desc':
first_created = sorted_nodes[-1]['_created']
candidate = self.continue_from or first_created
self.continue_from = min(candidate, first_created)
else:
last_created = sorted_nodes[0]['_created']
candidate = self.continue_from or last_created
self.continue_from = max(candidate, last_created)
def clean_group(self, grouped_by: typing.Any, group: typing.Iterable[dict]) -> GroupDO:
items = self.create_items(group)
return GroupDO(
label=self.grouper.label(grouped_by),
url=self.grouper.url(grouped_by),
items=items
)
def create_items(self, group) -> typing.List[dict]:
by_node_type = itertools.groupby(group, key=itemgetter('node_type'))
items = {}
for node_type, nodes in by_node_type:
items[node_type] = [self.node_prettyfy(n) for n in nodes]
return items
@classmethod
def node_prettyfy(cls, node: dict) -> dict:
duration_seconds = node['properties'].get('duration_seconds')
if duration_seconds is not None:
node['properties']['duration'] = pretty_duration(duration_seconds)
if node['node_type'] == 'post':
html = _get_markdowned_html(node['properties'], 'content')
html = shortcodes.render_commented(html, context=node['properties'])
node['properties']['pretty_content'] = html
return node
@classmethod
def group_row_sorter(cls, row: GroupDO) -> typing.Tuple[datetime, datetime]:
'''
Groups that contain posts are more interesting, and are therefore sorted higher up.
:param row: the group to compute the sort key for
:return: tuple with newest post date and newest asset date
'''
def newest_created(nodes: typing.List[dict]) -> datetime:
if nodes:
return nodes[0]['_created']
return datetime.fromtimestamp(0, tz=bson.tz_util.utc)
newest_post_date = newest_created(row.items.get('post'))
newest_asset_date = newest_created(row.items.get('asset'))
return newest_post_date, newest_asset_date
def _public_project_ids() -> typing.List[bson.ObjectId]:
"""Returns a list of ObjectIDs of public projects.
Memoized in setup_app().
"""
proj_coll = current_app.db('projects')
result = proj_coll.find({'is_private': False}, {'_id': 1})
return [p['_id'] for p in result]
def _get_markdowned_html(document: dict, field_name: str) -> str:
cache_field_name = pillar.markdown.cache_field_name(field_name)
html = document.get(cache_field_name)
if html is None:
markdown_src = document.get(field_name) or ''
html = pillar.markdown.markdown(markdown_src)
return html
@blueprint.route('/', methods=['GET'])
def global_timeline():
continue_from_str = request.args.get('from')
continue_from = parse_continue_from(continue_from_str)
nbr_of_weeks_str = request.args.get('weeksToLoad')
nbr_of_weeks = parse_nbr_of_weeks(nbr_of_weeks_str)
sort_direction = request.args.get('dir', 'desc')
return _global_timeline(continue_from, nbr_of_weeks, sort_direction)
@blueprint.route('/p/<string(length=24):pid_path>', methods=['GET'])
def project_timeline(pid_path: str):
continue_from_str = request.args.get('from')
continue_from = parse_continue_from(continue_from_str)
nbr_of_weeks_str = request.args.get('weeksToLoad')
nbr_of_weeks = parse_nbr_of_weeks(nbr_of_weeks_str)
sort_direction = request.args.get('dir', 'desc')
pid = str2id(pid_path)
return _project_timeline(continue_from, nbr_of_weeks, sort_direction, pid)
def parse_continue_from(from_arg) -> typing.Optional[datetime]:
try:
from_float = float(from_arg)
except (TypeError, ValueError):
return None
return datetime.fromtimestamp(from_float, tz=bson.tz_util.utc)
def parse_nbr_of_weeks(weeks_to_load: str) -> int:
try:
return int(weeks_to_load)
except (TypeError, ValueError):
return 3
def _global_timeline(continue_from: typing.Optional[datetime], nbr_of_weeks: int, sort_direction: str):
"""Returns an aggregated view of what has happened on the site
Memoized in setup_app().
:param continue_from: Python utc timestamp where to begin aggregation
:param nbr_of_weeks: Number of weeks to return
Example output:
{
groups: [{
label: 'Week 32',
groups: [{
label: 'Spring',
url: '/p/spring',
items:{
post: [blogPostDoc, blogPostDoc],
asset: [assetDoc, assetDoc]
},
groups: ...
}]
}],
continue_from: 123456.2 // python timestamp
}
"""
builder = TimeLineBuilder(
SearchHelper(nbr_of_weeks, continue_from, _public_project_ids(), sort_direction),
ProjectGrouper
)
return jsonify_timeline(builder.build())
def jsonify_timeline(timeline: TimelineDO):
return jsonify(
attr.asdict(timeline,
recurse=True,
filter=lambda att, value: value is not None)
)
def _project_timeline(continue_from: typing.Optional[datetime], nbr_of_weeks: int, sort_direction, pid: bson.ObjectId):
"""Returns an aggregated view of what has happened on the site
Memoized in setup_app().
:param continue_from: Python utc timestamp where to begin aggregation
:param nbr_of_weeks: Number of weeks to return
Example output:
{
groups: [{
label: 'Week 32',
groups: [{
label: 'Tobias Johansson',
items:{
post: [blogPostDoc, blogPostDoc],
asset: [assetDoc, assetDoc]
},
groups: ...
}]
}],
continue_from: 123456.2 // python timestamp
}
"""
builder = TimeLineBuilder(
SearchHelper(nbr_of_weeks, continue_from, [pid], sort_direction),
UserGrouper
)
return jsonify_timeline(builder.build())
def setup_app(app, url_prefix):
global _public_project_ids
global _global_timeline
global _project_timeline
app.register_api_blueprint(blueprint, url_prefix=url_prefix)
cached = app.cache.cached(timeout=3600)
_public_project_ids = cached(_public_project_ids)
memoize = app.cache.memoize(timeout=60)
_global_timeline = memoize(_global_timeline)
_project_timeline = memoize(_project_timeline)

View File

@@ -142,7 +142,7 @@ def after_fetching_user(user):
return
# Remove all fields except public ones.
public_fields = {'full_name', 'username', 'email', 'extension_props_public', 'badges'}
public_fields = {'full_name', 'username', 'email', 'extension_props_public'}
for field in list(user.keys()):
if field not in public_fields:
del user[field]

View File

@@ -1,11 +1,9 @@
import logging
from eve.methods.get import get
from flask import Blueprint, request
import werkzeug.exceptions as wz_exceptions
from flask import Blueprint
from pillar import current_app
from pillar.api import utils
from pillar.api.utils import jsonify
from pillar.api.utils.authorization import require_login
from pillar.auth import current_user
@@ -17,128 +15,7 @@ blueprint_api = Blueprint('users_api', __name__)
@require_login()
def my_info():
eve_resp, _, _, status, _ = get('users', {'_id': current_user.user_id})
resp = utils.jsonify(eve_resp['_items'][0], status=status)
resp = jsonify(eve_resp['_items'][0], status=status)
return resp
@blueprint_api.route('/video/<video_id>/progress')
@require_login()
def get_video_progress(video_id: str):
"""Return video progress information.
Either a `204 No Content` is returned (no information stored),
or a `200 Ok` with JSON from Eve's 'users' schema, from the key
video.view_progress.<video_id>.
"""
# Validation of the video ID; raises a BadRequest when it's not an ObjectID.
# This isn't strictly necessary, but it makes this function behave symmetrical
# to the set_video_progress() function.
utils.str2id(video_id)
users_coll = current_app.db('users')
user_doc = users_coll.find_one(current_user.user_id, projection={'nodes.view_progress': True})
try:
progress = user_doc['nodes']['view_progress'][video_id]
except KeyError:
return '', 204
if not progress:
return '', 204
return utils.jsonify(progress)
@blueprint_api.route('/video/<video_id>/progress', methods=['POST'])
@require_login()
def set_video_progress(video_id: str):
"""Save progress information about a certain video.
Expected parameters:
- progress_in_sec: float number of seconds
- progress_in_perc: integer percentage of video watched (interval [0-100])
"""
my_log = log.getChild('set_video_progress')
my_log.debug('Setting video progress for user %r video %r', current_user.user_id, video_id)
# Constructing this response requires an active app, and thus can't be done on module load.
no_video_response = utils.jsonify({'_message': 'No such video'}, status=404)
try:
progress_in_sec = float(request.form['progress_in_sec'])
progress_in_perc = int(request.form['progress_in_perc'])
except KeyError as ex:
my_log.debug('Missing POST field in request: %s', ex)
raise wz_exceptions.BadRequest(f'missing a form field')
except ValueError as ex:
my_log.debug('Invalid value for POST field in request: %s', ex)
raise wz_exceptions.BadRequest(f'Invalid value for field: {ex}')
users_coll = current_app.db('users')
nodes_coll = current_app.db('nodes')
# First check whether this is actually an existing video
video_oid = utils.str2id(video_id)
video_doc = nodes_coll.find_one(video_oid, projection={
'node_type': True,
'properties.content_type': True,
'properties.file': True,
})
if not video_doc:
my_log.debug('Node %r not found, unable to set progress for user %r',
video_oid, current_user.user_id)
return no_video_response
try:
is_video = (video_doc['node_type'] == 'asset'
and video_doc['properties']['content_type'] == 'video')
except KeyError:
is_video = False
if not is_video:
my_log.info('Node %r is not a video, unable to set progress for user %r',
video_oid, current_user.user_id)
# There is no video found at this URL, so act as if it doesn't even exist.
return no_video_response
# Compute the progress
percent = min(100, max(0, progress_in_perc))
progress = {
'progress_in_sec': progress_in_sec,
'progress_in_percent': percent,
'last_watched': utils.utcnow(),
}
# After watching a certain percentage of the video, we consider it 'done'
#
# Total Credit start Total Credit Percent
# HH:MM:SS HH:MM:SS sec sec of duration
# Sintel 00:14:48 00:12:24 888 744 83.78%
# Tears of Steel 00:12:14 00:09:49 734 589 80.25%
# Cosmos Laundro 00:12:10 00:10:05 730 605 82.88%
# Agent 327 00:03:51 00:03:26 231 206 89.18%
# Caminandes 3 00:02:30 00:02:18 150 138 92.00%
# Glass Half 00:03:13 00:02:52 193 172 89.12%
# Big Buck Bunny 00:09:56 00:08:11 596 491 82.38%
# Elephants Drea 00:10:54 00:09:25 654 565 86.39%
#
# Median 85.09%
# Average 85.75%
#
# For training videos, marking as done at 85% of the video may be a bit
# early, since those probably won't have (long) credits. This is why we
# stick to 90% here.
if percent >= 90:
progress['done'] = True
# Setting each property individually prevents us from overwriting any
# existing {done: true} fields.
updates = {f'nodes.view_progress.{video_id}.{k}': v
for k, v in progress.items()}
result = users_coll.update_one({'_id': current_user.user_id},
{'$set': updates})
if result.matched_count == 0:
my_log.error('Current user %r could not be updated', current_user.user_id)
raise wz_exceptions.InternalServerError('Unable to find logged-in user')
return '', 204
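A hedged usage sketch of the two endpoints above; the base URL, token and blueprint prefix are placeholders (the video id reuses the example ObjectId that appears elsewhere in this codebase):

import requests

base = 'https://example.com/api/users'  # assumption: depends on blueprint registration
video_id = '563aca02c379cf0005e8e17d'   # placeholder 24-character ObjectId
auth = {'Authorization': 'Bearer <token>'}  # placeholder token

# Store progress: 120.5 seconds in, 40% watched; expects 204 on success.
requests.post(f'{base}/video/{video_id}/progress',
              data={'progress_in_sec': 120.5, 'progress_in_perc': 40},
              headers=auth)

# Read it back; 204 means no progress stored yet, 200 returns the JSON blob.
requests.get(f'{base}/video/{video_id}/progress', headers=auth)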

View File

@@ -44,16 +44,10 @@ def remove_private_keys(document):
"""Removes any key that starts with an underscore, returns result as new
dictionary.
"""
def do_remove(doc):
for key in list(doc.keys()):
if key.startswith('_'):
del doc[key]
elif isinstance(doc[key], dict):
doc[key] = do_remove(doc[key])
return doc
doc_copy = copy.deepcopy(document)
do_remove(doc_copy)
for key in list(doc_copy.keys()):
if key.startswith('_'):
del doc_copy[key]
try:
del doc_copy['allowed_methods']
@@ -63,39 +57,6 @@ def remove_private_keys(document):
return doc_copy
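With the recursive variant above, underscore-prefixed keys are stripped at every nesting level, not just the top; a standalone re-implementation for illustration:

import copy

def _strip(doc):
    for key in list(doc.keys()):
        if key.startswith('_'):
            del doc[key]
        elif isinstance(doc[key], dict):
            doc[key] = _strip(doc[key])
    return doc

doc = {'_id': 1, 'name': 'n', 'properties': {'_private': 2, 'file': 3}}
assert _strip(copy.deepcopy(doc)) == {'name': 'n', 'properties': {'file': 3}}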
def pretty_duration(seconds: typing.Union[None, int, float]):
if seconds is None:
return ''
seconds = round(seconds)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if hours > 0:
return f'{hours:02}:{minutes:02}:{seconds:02}'
else:
return f'{minutes:02}:{seconds:02}'
def pretty_duration_fractional(seconds: typing.Union[None, int, float]):
if seconds is None:
return ''
# Remove fraction of seconds from the seconds so that the rest is done as integers.
seconds, fracs = divmod(seconds, 1)
hours, seconds = divmod(int(seconds), 3600)
minutes, seconds = divmod(seconds, 60)
msec = int(round(fracs * 1000))
if msec == 0:
msec_str = ''
else:
msec_str = f'.{msec:03}'
if hours > 0:
return f'{hours:02}:{minutes:02}:{seconds:02}{msec_str}'
else:
return f'{minutes:02}:{seconds:02}{msec_str}'
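Expected formatting for both helpers, following directly from the divmod logic above:

# pretty_duration(75)               -> '01:15'
# pretty_duration(3675)             -> '01:01:15'
# pretty_duration(None)             -> ''
# pretty_duration_fractional(75.25) -> '01:15.250'
# pretty_duration_fractional(75.0)  -> '01:15'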
class PillarJSONEncoder(json.JSONEncoder):
"""JSON encoder with support for Pillar resources."""
@@ -103,9 +64,6 @@ class PillarJSONEncoder(json.JSONEncoder):
if isinstance(obj, datetime.datetime):
return obj.strftime(RFC1123_DATE_FORMAT)
if isinstance(obj, datetime.timedelta):
return pretty_duration(obj.total_seconds())
if isinstance(obj, bson.ObjectId):
return str(obj)
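A minimal sketch of the encoder in use (the ObjectId value is arbitrary):

import datetime
import json

import bson

doc = {'_id': bson.ObjectId('5672beecc0261b2005ed1a33'),
       'duration': datetime.timedelta(seconds=888)}
json.dumps(doc, cls=PillarJSONEncoder)
# -> '{"_id": "5672beecc0261b2005ed1a33", "duration": "14:48"}'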
@@ -223,8 +181,7 @@ def doc_diff(doc1, doc2, *, falsey_is_equal=True, superkey: str = None):
function won't report differences between DoesNotExist, False, '', and 0.
"""
def is_private(key):
return str(key).startswith('_')
private_keys = {'_id', '_etag', '_deleted', '_updated', '_created'}
def combine_key(some_key):
"""Combine this key with the superkey.
@@ -245,7 +202,7 @@ def doc_diff(doc1, doc2, *, falsey_is_equal=True, superkey: str = None):
if isinstance(doc1, dict) and isinstance(doc2, dict):
for key in set(doc1.keys()).union(set(doc2.keys())):
if is_private(key):
if key in private_keys:
continue
val1 = doc1.get(key, DoesNotExist)
@@ -288,10 +245,4 @@ def random_etag() -> str:
def utcnow() -> datetime.datetime:
"""Construct timezone-aware 'now' in UTC with millisecond precision."""
now = datetime.datetime.now(tz=bson.tz_util.utc)
# MongoDB stores in millisecond precision, so truncate the microseconds.
# This way the returned datetime can be round-tripped via MongoDB and stay the same.
trunc_now = now.replace(microsecond=now.microsecond - (now.microsecond % 1000))
return trunc_now
return datetime.datetime.now(tz=bson.tz_util.utc)
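The truncation arithmetic in the removed lines is easy to verify in isolation (a sketch with an arbitrary timestamp):

import datetime

import bson.tz_util

now = datetime.datetime(2018, 1, 1, 12, 0, 0, 123456, tzinfo=bson.tz_util.utc)
trunc = now.replace(microsecond=now.microsecond - (now.microsecond % 1000))
assert trunc.microsecond == 123000  # 123456 us truncated to whole milliseconds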

View File

@@ -13,7 +13,7 @@ import logging
import typing
import bson
from flask import g, current_app, session
from flask import g, current_app
from flask import request
from werkzeug import exceptions as wz_exceptions
@@ -103,7 +103,7 @@ def find_user_in_db(user_info: dict, provider='blender-id') -> dict:
return db_user
def validate_token(*, force=False) -> bool:
def validate_token(*, force=False):
"""Validate the token provided in the request and populate the current_user
flask.g object, so that permissions and access to a resource can be defined
from it.
@@ -115,7 +115,7 @@ def validate_token(*, force=False) -> bool:
:returns: True iff the user is logged in with a valid Blender ID token.
"""
import pillar.auth
from pillar.auth import AnonymousUser
# Trust a pre-existing g.current_user
if not force:
@@ -133,22 +133,16 @@ def validate_token(*, force=False) -> bool:
oauth_subclient = ''
else:
# Check the session, the user might be logged in through Flask-Login.
from pillar import auth
# The user has a logged-in session; trust only if this request passes a CSRF check.
# FIXME(Sybren): we should stop saving the token as 'user_id' in the session.
token = session.get('user_id')
if token:
log.debug('skipping token check because current user already has a session')
current_app.csrf.protect()
else:
token = pillar.auth.get_blender_id_oauth_token()
token = auth.get_blender_id_oauth_token()
oauth_subclient = None
if not token:
# If no authorization headers are provided, we are getting a request
# from a non-logged-in user. Proceed accordingly.
log.debug('No authentication headers, so not logged in.')
g.current_user = pillar.auth.AnonymousUser()
g.current_user = AnonymousUser()
return False
return validate_this_token(token, oauth_subclient) is not None
@@ -189,7 +183,7 @@ def validate_this_token(token, oauth_subclient=None):
return None
g.current_user = UserClass.construct(token, db_user)
user_authenticated.send(g.current_user)
user_authenticated.send(None)
return db_user
@@ -200,7 +194,7 @@ def remove_token(token: str):
tokens_coll = current_app.db('tokens')
token_hashed = hash_auth_token(token)
# TODO: remove matching on hashed tokens once all hashed tokens have expired.
# TODO: remove matching on unhashed tokens once all tokens have been hashed.
lookup = {'$or': [{'token': token}, {'token_hashed': token_hashed}]}
del_res = tokens_coll.delete_many(lookup)
log.debug('Removed token %r, matched %d documents', token, del_res.deleted_count)
@@ -212,7 +206,7 @@ def find_token(token, is_subclient_token=False, **extra_filters):
tokens_coll = current_app.db('tokens')
token_hashed = hash_auth_token(token)
# TODO: remove matching on hashed tokens once all hashed tokens have expired.
# TODO: remove matching on unhashed tokens once all tokens have been hashed.
lookup = {'$or': [{'token': token}, {'token_hashed': token_hashed}],
'is_subclient_token': True if is_subclient_token else {'$in': [False, None]},
'expire_time': {"$gt": utcnow()}}
@@ -235,14 +229,8 @@ def hash_auth_token(token: str) -> str:
return base64.b64encode(digest).decode('ascii')
def store_token(user_id,
token: str,
token_expiry,
oauth_subclient_id=False,
*,
org_roles: typing.Set[str] = frozenset(),
oauth_scopes: typing.Optional[typing.List[str]] = None,
):
def store_token(user_id, token: str, token_expiry, oauth_subclient_id=False,
org_roles: typing.Set[str] = frozenset()):
"""Stores an authentication token.
:returns: the token document from MongoDB
@@ -252,15 +240,13 @@ def store_token(user_id,
token_data = {
'user': user_id,
'token': token,
'token_hashed': hash_auth_token(token),
'expire_time': token_expiry,
}
if oauth_subclient_id:
token_data['is_subclient_token'] = True
if org_roles:
token_data['org_roles'] = sorted(org_roles)
if oauth_scopes:
token_data['oauth_scopes'] = oauth_scopes
r, _, _, status = current_app.post_internal('tokens', token_data)
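A hypothetical call site (the user ObjectId and token value are made up; the 'badge' scope is what the badge-sync aggregation further down looks for):

token_doc = store_token(
    user_oid,                               # assumed: a bson.ObjectId
    'secret-token-value',                   # assumed: opaque token string
    utcnow() + datetime.timedelta(hours=1),
    oauth_scopes=['email', 'badge'],        # lets badge sync find this token
)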

View File

@@ -1,6 +1,5 @@
import logging
import functools
import typing
from bson import ObjectId
from flask import g
@@ -13,9 +12,8 @@ CHECK_PERMISSIONS_IMPLEMENTED_FOR = {'projects', 'nodes', 'flamenco_jobs'}
log = logging.getLogger(__name__)
def check_permissions(collection_name: str, resource: dict, method: str,
append_allowed_methods=False,
check_node_type: typing.Optional[str] = None):
def check_permissions(collection_name, resource, method, append_allowed_methods=False,
check_node_type=None):
"""Check user permissions to access a node. We look up node permissions from
world to groups to users and match them with the computed user permissions.
If there is no match, we raise 403.
@@ -95,9 +93,8 @@ def compute_allowed_methods(collection_name, resource, check_node_type=None):
return allowed_methods
def has_permissions(collection_name: str, resource: dict, method: str,
append_allowed_methods=False,
check_node_type: typing.Optional[str] = None):
def has_permissions(collection_name, resource, method, append_allowed_methods=False,
check_node_type=None):
"""Check user permissions to access a node. We look up node permissions from
world to groups to users and match them with the computed user permissions.
@@ -331,9 +328,8 @@ def require_login(*, require_roles=set(),
def render_error() -> Response:
if error_view is None:
resp = Forbidden().get_response()
else:
resp = error_view()
abort(403)
resp: Response = error_view()
resp.status_code = 403
return resp

View File

@@ -9,8 +9,12 @@ string = functools.partial(attr.ib, validator=attr.validators.instance_of(str))
def log(name):
"""Returns a logger
"""Returns a logger attr.ib
:param name: name to pass to logging.getLogger()
:rtype: attr.ib
"""
return logging.getLogger(name)
return attr.ib(default=logging.getLogger(name),
repr=False,
hash=False,
cmp=False)
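A sketch of how this helper would be used in an attrs class (the module path pillar.attrs_extra is assumed):

import attr

from pillar import attrs_extra  # assumed module path for the helpers above

@attr.s
class Downloader:
    url = attrs_extra.string()    # the functools.partial defined above
    _log = attrs_extra.log(__name__)

    def download(self):
        self._log.debug('downloading %s', self.url)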

View File

@@ -12,10 +12,7 @@ from werkzeug.local import LocalProxy
from pillar import current_app
# The sender is the user that was just authenticated.
user_authenticated = blinker.Signal('Sent whenever a user was authenticated')
user_logged_in = blinker.Signal('Sent whenever a user logged in on the web')
log = logging.getLogger(__name__)
# Mapping from user role to capabilities obtained by users with that role.
@@ -41,8 +38,6 @@ class UserClass(flask_login.UserMixin):
self.groups: typing.List[str] = [] # NOTE: these are stringified object IDs.
self.group_ids: typing.List[bson.ObjectId] = []
self.capabilities: typing.Set[str] = set()
self.nodes: dict = {} # see the 'nodes' key in eve_settings.py::user_schema.
self.badges_html: str = ''
# Lazily evaluated
self._has_organizations: typing.Optional[bool] = None
@@ -61,12 +56,6 @@ class UserClass(flask_login.UserMixin):
user.email = db_user.get('email') or ''
user.username = db_user.get('username') or ''
user.full_name = db_user.get('full_name') or ''
user.badges_html = db_user.get('badges', {}).get('html') or ''
# Be a little more specific than just db_user['nodes']
user.nodes = {
'view_progress': db_user.get('nodes', {}).get('view_progress', {}),
}
# Derived properties
user.objectid = str(user.user_id or '')
@@ -221,15 +210,9 @@ def login_user(oauth_token: str, *, load_from_db=False):
user = _load_user(oauth_token)
else:
user = UserClass(oauth_token)
login_user_object(user)
def login_user_object(user: UserClass):
"""Log in the given user."""
flask_login.login_user(user, remember=True)
g.current_user = user
user_authenticated.send(user)
user_logged_in.send(user)
user_authenticated.send(None)
def logout_user():

View File

@@ -1,48 +0,0 @@
"""Support for adding CORS headers to responses."""
import functools
import flask
import werkzeug.wrappers as wz_wrappers
import werkzeug.exceptions as wz_exceptions
def allow(*, allow_credentials=False):
"""Flask endpoint decorator, adds CORS headers to the response.
If the request has a non-empty 'Origin' header, the response header
'Access-Control-Allow-Origin' is set to the value of that request header,
and some other CORS headers are set.
"""
def decorator(wrapped):
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
request_origin = flask.request.headers.get('Origin')
if not request_origin:
# No CORS headers requested, so don't bother touching the response.
return wrapped(*args, **kwargs)
try:
response = wrapped(*args, **kwargs)
except wz_exceptions.HTTPException as ex:
response = ex.get_response()
else:
if isinstance(response, tuple):
response = flask.make_response(*response)
elif isinstance(response, str):
response = flask.make_response(response)
elif isinstance(response, wz_wrappers.Response):
pass
else:
raise TypeError(f'unknown response type {type(response)}')
assert isinstance(response, wz_wrappers.Response)
response.headers.set('Access-Control-Allow-Origin', request_origin)
response.headers.set('Access-Control-Allow-Headers', 'x-requested-with')
if allow_credentials:
response.headers.set('Access-Control-Allow-Credentials', 'true')
return response
return wrapper
return decorator
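A hypothetical endpoint wearing the decorator (blueprint and route are made up):

import flask

from pillar.api.utils import cors  # assumed module path

blueprint = flask.Blueprint('example', __name__)

@blueprint.route('/badges/<user_id>/html')
@cors.allow(allow_credentials=True)
def badge_html(user_id: str) -> str:
    # A plain string return is wrapped by flask.make_response() in the decorator.
    return '<p>badges</p>'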

View File

@@ -1,9 +1,8 @@
import abc
import attr
import json
import logging
import typing
import attr
from rauth import OAuth2Service
from flask import current_app, url_for, request, redirect, session, Response
@@ -16,8 +15,6 @@ class OAuthUserResponse:
id = attr.ib(validator=attr.validators.instance_of(str))
email = attr.ib(validator=attr.validators.instance_of(str))
access_token = attr.ib(validator=attr.validators.instance_of(str))
scopes: typing.List[str] = attr.ib(validator=attr.validators.instance_of(list))
class OAuthError(Exception):
@@ -130,10 +127,8 @@ class OAuthSignIn(metaclass=abc.ABCMeta):
class BlenderIdSignIn(OAuthSignIn):
provider_name = 'blender-id'
scopes = ['email', 'badge']
def __init__(self):
from urllib.parse import urljoin
super().__init__()
base_url = current_app.config['BLENDER_ID_ENDPOINT']
@@ -142,14 +137,14 @@ class BlenderIdSignIn(OAuthSignIn):
name='blender-id',
client_id=self.consumer_id,
client_secret=self.consumer_secret,
authorize_url=urljoin(base_url, 'oauth/authorize'),
access_token_url=urljoin(base_url, 'oauth/token'),
base_url=urljoin(base_url, 'api/'),
authorize_url='%s/oauth/authorize' % base_url,
access_token_url='%s/oauth/token' % base_url,
base_url='%s/api/' % base_url
)
def authorize(self):
return redirect(self.service.get_authorize_url(
scope=' '.join(self.scopes),
scope='email',
response_type='code',
redirect_uri=self.get_callback_url())
)
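The move to urljoin() is why BLENDER_ID_ENDPOINT now carries a trailing slash (see the config change further down); urljoin only treats the base as a directory when it ends in one:

from urllib.parse import urljoin

urljoin('http://id.local:8000/', 'oauth/authorize')
# -> 'http://id.local:8000/oauth/authorize'
urljoin('http://id.local:8000/api', 'badges')
# -> 'http://id.local:8000/badges'  (the last path segment is replaced)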
@@ -163,11 +158,7 @@ class BlenderIdSignIn(OAuthSignIn):
session['blender_id_oauth_token'] = access_token
me = oauth_session.get('user').json()
# Blender ID doesn't tell us which scopes were granted by the user, so
# for now assume we got all the scopes we requested.
# (see https://github.com/jazzband/django-oauth-toolkit/issues/644)
return OAuthUserResponse(str(me['id']), me['email'], access_token, self.scopes)
return OAuthUserResponse(str(me['id']), me['email'])
class FacebookSignIn(OAuthSignIn):
@@ -197,7 +188,7 @@ class FacebookSignIn(OAuthSignIn):
me = oauth_session.get('me?fields=id,email').json()
# TODO handle case when user chooses not to disclose an email
# see https://developers.facebook.com/docs/graph-api/reference/user/
return OAuthUserResponse(me['id'], me.get('email'), '', [])
return OAuthUserResponse(me['id'], me.get('email'))
class GoogleSignIn(OAuthSignIn):
@@ -225,4 +216,4 @@ class GoogleSignIn(OAuthSignIn):
oauth_session = self.make_oauth_session()
me = oauth_session.get('userinfo').json()
return OAuthUserResponse(str(me['id']), me['email'], '', [])
return OAuthUserResponse(str(me['id']), me['email'])

View File

@@ -1,266 +0,0 @@
import collections
import datetime
import logging
import typing
from urllib.parse import urljoin
import bson
import requests
from pillar import current_app, auth
from pillar.api.utils import utcnow
SyncUser = collections.namedtuple('SyncUser', 'user_id token bid_user_id')
BadgeHTML = collections.namedtuple('BadgeHTML', 'html expires')
log = logging.getLogger(__name__)
class StopRefreshing(Exception):
"""Indicates that Blender ID is having problems.
Further badge refreshes should be put on hold to avoid bludgeoning
a suffering Blender ID.
"""
def find_user_to_sync(user_id: bson.ObjectId) -> typing.Optional[SyncUser]:
"""Return user information for syncing badges for a specific user.
Returns None if the user cannot be synced (no 'badge' scope on a token,
or no Blender ID user_id known).
"""
my_log = log.getChild('refresh_single_user')
now = utcnow()
tokens_coll = current_app.db('tokens')
users_coll = current_app.db('users')
token_info = tokens_coll.find_one({
'user': user_id,
'token': {'$exists': True},
'oauth_scopes': 'badge',
'expire_time': {'$gt': now},
})
if not token_info:
my_log.debug('No token with scope "badge" for user %s', user_id)
return None
user_info = users_coll.find_one({'_id': user_id})
# TODO(Sybren): do this filtering in the MongoDB query:
bid_user_ids = [auth_info.get('user_id')
for auth_info in user_info.get('auth', [])
if auth_info.get('provider', '') == 'blender-id' and auth_info.get('user_id')]
if not bid_user_ids:
my_log.debug('No Blender ID user_id for user %s', user_id)
return None
bid_user_id = bid_user_ids[0]
return SyncUser(user_id=user_id, token=token_info['token'], bid_user_id=bid_user_id)
def find_users_to_sync() -> typing.Iterable[SyncUser]:
"""Return user information of syncable users with badges."""
now = utcnow()
tokens_coll = current_app.db('tokens')
cursor = tokens_coll.aggregate([
# Find all users who have a 'badge' scope in their OAuth token.
{'$match': {
'token': {'$exists': True},
'oauth_scopes': 'badge',
'expire_time': {'$gt': now},
# TODO(Sybren): save real token expiry time but keep checking tokens hourly when they are used!
}},
{'$lookup': {
'from': 'users',
'localField': 'user',
'foreignField': '_id',
'as': 'user'
}},
# Prevent 'user' from being an array.
{'$unwind': {'path': '$user'}},
# Get the Blender ID user ID only.
{'$unwind': {'path': '$user.auth'}},
{'$match': {'user.auth.provider': 'blender-id'}},
# Only select those users whose badge doesn't exist or has expired.
{'$match': {
'user.badges.expires': {'$not': {'$gt': now}}
}},
# Make sure that the badges that expire last are also refreshed last.
{'$sort': {'user.badges.expires': 1}},
# Reduce the document to the info we're after.
{'$project': {
'token': True,
'user._id': True,
'user.auth.user_id': True,
}},
])
log.debug('Aggregating tokens and users')
for user_info in cursor:
log.debug('User %s has badges %s',
user_info['user']['_id'], user_info['user'].get('badges'))
yield SyncUser(
user_id=user_info['user']['_id'],
token=user_info['token'],
bid_user_id=user_info['user']['auth']['user_id'])
def fetch_badge_html(session: requests.Session, user: SyncUser, size: str) \
-> str:
"""Fetch a Blender ID badge for this user.
:param session:
:param user:
:param size: Size indication for the badge images, see the Blender ID
documentation/code. As of this writing valid sizes are {'s', 'm', 'l'}.
"""
my_log = log.getChild('fetch_badge_html')
blender_id_endpoint = current_app.config['BLENDER_ID_ENDPOINT']
url = urljoin(blender_id_endpoint, f'api/badges/{user.bid_user_id}/html/{size}')
my_log.debug('Fetching badge HTML at %s for user %s', url, user.user_id)
try:
resp = session.get(url, headers={'Authorization': f'Bearer {user.token}'})
except requests.ConnectionError as ex:
my_log.warning('Unable to connect to Blender ID at %s: %s', url, ex)
raise StopRefreshing()
if resp.status_code == 204:
my_log.debug('No badges for user %s', user.user_id)
return ''
if resp.status_code == 403:
# TODO(Sybren): this indicates the token is invalid, so we could just as well delete it.
my_log.warning('Tried fetching %s for user %s but received a 403: %s',
url, user.user_id, resp.text)
return ''
if resp.status_code == 400:
my_log.warning('Blender ID did not accept our GET request at %s for user %s: %s',
url, user.user_id, resp.text)
return ''
if resp.status_code == 500:
my_log.warning('Blender ID returned an internal server error on %s for user %s, '
'aborting all badge refreshes: %s', url, user.user_id, resp.text)
raise StopRefreshing()
if resp.status_code == 404:
my_log.warning('Blender ID has no user %s for our user %s', user.bid_user_id, user.user_id)
return ''
resp.raise_for_status()
return resp.text
def refresh_all_badges(only_user_id: typing.Optional[bson.ObjectId] = None, *,
dry_run=False,
timelimit: datetime.timedelta):
"""Re-fetch all badges for all users, except when already refreshed recently.
:param only_user_id: Only refresh this user. This is expected to be used
sparingly during manual maintenance / debugging sessions only. It does
fetch all users to refresh, and in Python code skips all except the
given one.
:param dry_run: if True the changes are described in the log, but not performed.
:param timelimit: Refreshing will stop after this time. This allows for cron(-like)
jobs to run without overlapping, even when the number of badges to refresh
becomes larger than possible within the period of the cron job.
"""
my_log = log.getChild('refresh_all_badges')
# Test the config before we start looping over the world.
badge_expiry = badge_expiry_config()
if not badge_expiry or not isinstance(badge_expiry, datetime.timedelta):
raise ValueError('BLENDER_ID_BADGE_EXPIRY not configured properly, should be a timedelta')
session = _get_requests_session()
deadline = utcnow() + timelimit
num_updates = 0
for user_info in find_users_to_sync():
if utcnow() > deadline:
my_log.info('Stopping badge refresh because the timelimit %s (H:MM:SS) was hit.',
timelimit)
break
if only_user_id and user_info.user_id != only_user_id:
my_log.debug('Skipping user %s', user_info.user_id)
continue
try:
badge_html = fetch_badge_html(session, user_info, 's')
except StopRefreshing:
my_log.error('Blender ID has internal problems, stopping badge refreshing at user %s',
user_info)
break
num_updates += 1
update_badges(user_info, badge_html, badge_expiry, dry_run=dry_run)
my_log.info('Updated badges of %d users%s', num_updates, ' (dry-run)' if dry_run else '')
def _get_requests_session() -> requests.Session:
from requests.adapters import HTTPAdapter
session = requests.Session()
session.mount('https://', HTTPAdapter(max_retries=5))
return session
def refresh_single_user(user_id: bson.ObjectId):
"""Refresh badges for a single user."""
my_log = log.getChild('refresh_single_user')
badge_expiry = badge_expiry_config()
if not badge_expiry:
my_log.warning('Skipping badge fetching, BLENDER_ID_BADGE_EXPIRY not configured')
my_log.debug('Fetching badges for user %s', user_id)
session = _get_requests_session()
user_info = find_user_to_sync(user_id)
if not user_info:
return
try:
badge_html = fetch_badge_html(session, user_info, 's')
except StopRefreshing:
my_log.error('Blender ID has internal problems, stopping badge refreshing at user %s',
user_info)
return
update_badges(user_info, badge_html, badge_expiry, dry_run=False)
my_log.info('Updated badges of user %s', user_id)
def update_badges(user_info: SyncUser, badge_html: str, badge_expiry: datetime.timedelta,
*, dry_run: bool):
my_log = log.getChild('update_badges')
users_coll = current_app.db('users')
update = {'badges': {
'html': badge_html,
'expires': utcnow() + badge_expiry,
}}
my_log.info('Updating badges HTML for Blender ID %s, user %s',
user_info.bid_user_id, user_info.user_id)
if dry_run:
return
result = users_coll.update_one({'_id': user_info.user_id},
{'$set': update})
if result.matched_count != 1:
my_log.warning('Unable to update badges for user %s', user_info.user_id)
def badge_expiry_config() -> datetime.timedelta:
return current_app.config.get('BLENDER_ID_BADGE_EXPIRY')
@auth.user_logged_in.connect
def sync_badge_upon_login(sender: auth.UserClass, **kwargs):
"""Auto-sync badges when a user logs in."""
log.info('Refreshing badge of %s because they logged in', sender.user_id)
refresh_single_user(sender.user_id)

View File

@@ -0,0 +1,38 @@
import logging
from algoliasearch.helpers import AlgoliaException
log = logging.getLogger(__name__)
def push_updated_user(user_to_index: dict):
"""Push an update to the Algolia index when a user item is updated"""
from pillar.api.utils.algolia import index_user_save
try:
index_user_save(user_to_index)
except AlgoliaException as ex:
log.warning(
'Unable to push user info to Algolia for user "%s", id=%s; %s', # noqa
user_to_index.get('username'),
user_to_index.get('objectID'), ex)
def index_node_save(node_to_index: dict):
from pillar.api.utils import algolia
try:
algolia.index_node_save(node_to_index)
except AlgoliaException as ex:
log.warning(
'Unable to push node info to Algolia for node %s; %s', node_to_index, ex) # noqa
def index_node_delete(delete_id: str):
from pillar.api.utils import algolia
try:
algolia.index_node_delete(delete_id)
except AlgoliaException as ex:
log.warning('Unable to delete node info from Algolia for node %s; %s', delete_id, ex) # noqa

View File

@@ -1,20 +0,0 @@
"""Badge HTML synchronisation.
Note that this module can only be imported when an application context is
active. Best to late-import this in the functions where it's needed.
"""
import datetime
import logging
from pillar import current_app, badge_sync
log = logging.getLogger(__name__)
@current_app.celery.task(ignore_result=True)
def sync_badges_for_users(timelimit_seconds: int):
"""Synchronises Blender ID badges for the most-urgent users."""
timelimit = datetime.timedelta(seconds=timelimit_seconds)
log.info('Refreshing badges, timelimit is %s (H:MM:SS)', timelimit)
badge_sync.refresh_all_badges(timelimit=timelimit)

View File

@@ -1,6 +1,4 @@
import logging
import bleach
from bson import ObjectId
from pillar import current_app
@@ -12,7 +10,7 @@ from pillar.api.search import algolia_indexing
log = logging.getLogger(__name__)
INDEX_ALLOWED_NODE_TYPES = {'asset', 'texture', 'group', 'hdri', 'post'}
INDEX_ALLOWED_NODE_TYPES = {'asset', 'texture', 'group', 'hdri'}
SEARCH_BACKENDS = {
@@ -30,6 +28,34 @@ def _get_node_from_id(node_id: str):
return node
def _handle_picture(node: dict, to_index: dict):
"""Add picture URL in-place to the to-be-indexed node."""
picture_id = node.get('picture')
if not picture_id:
return
files_collection = current_app.data.driver.db['files']
lookup = {'_id': ObjectId(picture_id)}
picture = files_collection.find_one(lookup)
for item in picture.get('variations', []):
if item['size'] != 't':
continue
# Not all files have a project...
pid = picture.get('project')
if pid:
link = generate_link(picture['backend'],
item['file_path'],
str(pid),
is_public=True)
else:
link = item['link']
to_index['picture'] = link
break
def prepare_node_data(node_id: str, node: dict=None) -> dict:
"""Given a node id or a node document, return an indexable version of it.
@@ -60,30 +86,25 @@ def prepare_node_data(node_id: str, node: dict=None) -> dict:
users_collection = current_app.data.driver.db['users']
user = users_collection.find_one({'_id': ObjectId(node['user'])})
clean_description = bleach.clean(node.get('_description_html') or '', strip=True)
if not clean_description and node['node_type'] == 'post':
clean_description = bleach.clean(node['properties'].get('_content_html') or '', strip=True)
to_index = {
'objectID': node['_id'],
'name': node['name'],
'project': {
'_id': project['_id'],
'name': project['name'],
'url': project['url'],
'name': project['name']
},
'created': node['_created'],
'updated': node['_updated'],
'node_type': node['node_type'],
'picture': node.get('picture') or '',
'user': {
'_id': user['_id'],
'full_name': user['full_name']
},
'description': clean_description or None,
'is_free': False
'description': node.get('description'),
}
_handle_picture(node, to_index)
# If the node has world permissions, compute the Free permission
if 'world' in node.get('permissions', {}):
if 'GET' in node['permissions']['world']:

View File

@@ -13,7 +13,6 @@ from pillar.cli.maintenance import manager_maintenance
from pillar.cli.operations import manager_operations
from pillar.cli.setup import manager_setup
from pillar.cli.elastic import manager_elastic
from . import badges
from pillar.cli import translations
@@ -25,4 +24,3 @@ manager.add_command("maintenance", manager_maintenance)
manager.add_command("setup", manager_setup)
manager.add_command("operations", manager_operations)
manager.add_command("elastic", manager_elastic)
manager.add_command("badges", badges.manager)

View File

@@ -1,39 +0,0 @@
import datetime
import logging
from flask_script import Manager
from pillar import current_app, badge_sync
from pillar.api.utils import utcnow
log = logging.getLogger(__name__)
manager = Manager(current_app, usage="Badge operations")
@manager.option('-u', '--user', dest='email', default='', help='Email address of the user to sync')
@manager.option('-a', '--all', dest='sync_all', action='store_true', default=False,
help='Sync all users')
@manager.option('--go', action='store_true', default=False,
help='Actually perform the sync; otherwise it is a dry-run.')
def sync(email: str = '', sync_all: bool=False, go: bool=False):
if bool(email) == bool(sync_all):
raise ValueError('Use either --user or --all.')
if email:
users_coll = current_app.db('users')
db_user = users_coll.find_one({'email': email}, projection={'_id': True})
if not db_user:
raise ValueError(f'No user with email {email!r} found')
specific_user = db_user['_id']
else:
specific_user = None
if not go:
log.info('Performing dry-run, not going to change the user database.')
start_time = utcnow()
badge_sync.refresh_all_badges(specific_user, dry_run=not go,
timelimit=datetime.timedelta(hours=1))
end_time = utcnow()
log.info('%s took %s (H:MM:SS)',
'Updating user badges' if go else 'Dry-run',
end_time - start_time)

View File

@@ -1,9 +1,7 @@
import collections
import copy
import datetime
import json
import logging
from pathlib import PurePosixPath, Path
from pathlib import PurePosixPath
import re
import typing
@@ -14,7 +12,6 @@ from flask_script import Manager
import pymongo
from pillar import current_app
import pillar.api.utils
# Collections to skip when finding file references (during orphan file detection).
# This collection can be added to from PillarExtension.setup_app().
@@ -562,6 +559,50 @@ def replace_pillar_node_type_schemas(project_url=None, all_projects=False, missi
projects_changed, projects_seen)
@manager_maintenance.command
def remarkdown_comments():
"""Retranslates all Markdown to HTML for all comment nodes.
"""
from pillar.api.nodes import convert_markdown
nodes_collection = current_app.db()['nodes']
comments = nodes_collection.find({'node_type': 'comment'},
projection={'properties.content': 1,
'node_type': 1})
updated = identical = skipped = errors = 0
for node in comments:
convert_markdown(node)
node_id = node['_id']
try:
content_html = node['properties']['content_html']
except KeyError:
log.warning('Node %s has no content_html', node_id)
skipped += 1
continue
result = nodes_collection.update_one(
{'_id': node_id},
{'$set': {'properties.content_html': content_html}}
)
if result.matched_count != 1:
log.error('Unable to update node %s', node_id)
errors += 1
continue
if result.modified_count:
updated += 1
else:
identical += 1
log.info('updated : %i', updated)
log.info('identical: %i', identical)
log.info('skipped : %i', skipped)
log.info('errors : %i', errors)
@manager_maintenance.option('-p', '--project', dest='proj_url', nargs='?',
help='Project URL')
@manager_maintenance.option('-a', '--all', dest='all_projects', action='store_true', default=False,
@@ -643,7 +684,7 @@ def upgrade_attachment_schema(proj_url=None, all_projects=False, go=False):
log_proj()
log.info('Removed %d empty attachment dicts', res.modified_count)
else:
to_remove = nodes_coll.count_documents({'properties.attachments': {},
to_remove = nodes_coll.count({'properties.attachments': {},
'project': project['_id']})
if to_remove:
log_proj()
@@ -726,9 +767,7 @@ def iter_markdown(proj_node_types: dict, some_node: dict, callback: typing.Calla
continue
to_visit.append((subdoc, definition['schema']))
continue
coerce = definition.get('coerce') # Eve < 0.8
validator = definition.get('validator') # Eve >= 0.8
if coerce != 'markdown' and validator != 'markdown':
if definition.get('coerce') != 'markdown':
continue
my_log.debug('I have to change %r of %s', key, doc)
@@ -739,6 +778,113 @@ def iter_markdown(proj_node_types: dict, some_node: dict, callback: typing.Calla
doc[key] = new_value
@manager_maintenance.option('-p', '--project', dest='proj_url', nargs='?',
help='Project URL')
@manager_maintenance.option('-a', '--all', dest='all_projects', action='store_true', default=False,
help='Replace on all projects.')
@manager_maintenance.option('-g', '--go', dest='go', action='store_true', default=False,
help='Actually perform the changes (otherwise just show as dry-run).')
def upgrade_attachment_usage(proj_url=None, all_projects=False, go=False):
"""Replaces '@[slug]' with '{attachment slug}'.
Also moves links from the attachment dict to the attachment shortcode.
"""
if bool(proj_url) == all_projects:
log.error('Use either --project or --all.')
return 1
import html
from pillar.api.projects.utils import node_type_dict
from pillar.api.utils import remove_private_keys
from pillar.api.utils.authentication import force_cli_user
force_cli_user()
nodes_coll = current_app.db('nodes')
total_nodes = 0
failed_node_ids = set()
# Use a mixture of the old slug RE that still allows spaces in the slug
# name and the new RE that allows dashes.
old_slug_re = re.compile(r'@\[([a-zA-Z0-9_\- ]+)\]')
for proj in _db_projects(proj_url, all_projects, go=go):
proj_id = proj['_id']
proj_url = proj.get('url', '-no-url-')
nodes = nodes_coll.find({
'_deleted': {'$ne': True},
'project': proj_id,
'properties.attachments': {'$exists': True},
})
node_count = nodes.count()
if node_count == 0:
log.debug('Skipping project %s (%s)', proj_url, proj_id)
continue
proj_node_types = node_type_dict(proj)
for node in nodes:
attachments = node['properties']['attachments']
replaced = False
# Inner functions because of access to the node's attachments.
def replace(match):
nonlocal replaced
slug = match.group(1)
log.debug(' - OLD STYLE attachment slug %r', slug)
try:
att = attachments[slug]
except KeyError:
log.info("Attachment %r not found for node %s", slug, node['_id'])
link = ''
else:
link = att.get('link', '')
if link == 'self':
link = " link='self'"
elif link == 'custom':
url = att.get('link_custom')
if url:
link = " link='%s'" % html.escape(url)
replaced = True
return '{attachment %r%s}' % (slug.replace(' ', '-'), link)
def update_markdown(value: str) -> str:
return old_slug_re.sub(replace, value)
iter_markdown(proj_node_types, node, update_markdown)
# Remove no longer used properties from attachments
new_attachments = {}
for slug, attachment in attachments.items():
replaced |= 'link' in attachment # link_custom implies link
attachment.pop('link', None)
attachment.pop('link_custom', None)
new_attachments[slug.replace(' ', '-')] = attachment
node['properties']['attachments'] = new_attachments
if replaced:
total_nodes += 1
else:
# Nothing got replaced, so there is nothing to save.
continue
if go:
# Use Eve to PUT, so we have schema checking.
db_node = remove_private_keys(node)
r, _, _, status = current_app.put_internal('nodes', db_node, _id=node['_id'])
if status != 200:
log.error('Error %i storing altered node %s %s', status, node['_id'], r)
failed_node_ids.add(node['_id'])
# raise SystemExit('Error storing node; see log.')
log.debug('Updated node %s: %s', node['_id'], r)
log.info('Project %s (%s) has %d nodes with attachments',
proj_url, proj_id, node_count)
log.info('%s %d nodes', 'Updated' if go else 'Would update', total_nodes)
if failed_node_ids:
log.warning('Failed to update %d of %d nodes: %s', len(failed_node_ids), total_nodes,
', '.join(str(nid) for nid in failed_node_ids))
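A minimal sketch of the rewrite itself, mirroring the inner replace() above minus the attachment-link lookup:

import re

old_slug_re = re.compile(r'@\[([a-zA-Z0-9_\- ]+)\]')

def demo_replace(match):
    slug = match.group(1)
    # Same output format as replace() above, without the link handling.
    return '{attachment %r}' % slug.replace(' ', '-')

old_slug_re.sub(demo_replace, 'See @[my image] for details.')
# -> "See {attachment 'my-image'} for details."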
def _db_projects(proj_url: str, all_projects: bool, project_id='', *, go: bool) \
-> typing.Iterable[dict]:
"""Yields a subset of the projects in the database.
@@ -778,12 +924,25 @@ def _db_projects(proj_url: str, all_projects: bool, project_id='', *, go: bool)
log.info('Command took %s', duration)
def find_object_ids(something: typing.Any) -> typing.Iterable[bson.ObjectId]:
"""Generator, yields all ObjectIDs referenced by the given object.
def _find_orphan_files() -> typing.Set[bson.ObjectId]:
"""Finds all non-referenced files for the given project.
Assumes 'something' comes from a MongoDB. This function wasn't made for
generic Python objects.
Returns an iterable of all orphan file IDs.
"""
log.debug('Finding orphan files')
# Get all file IDs that belong to this project.
files_coll = current_app.db('files')
cursor = files_coll.find({'_deleted': {'$ne': True}}, projection={'_id': 1})
file_ids = {doc['_id'] for doc in cursor}
if not file_ids:
log.debug('No files found')
return set()
total_file_count = len(file_ids)
log.debug('Found %d files in total', total_file_count)
def find_object_ids(something: typing.Any) -> typing.Iterable[bson.ObjectId]:
if isinstance(something, bson.ObjectId):
yield something
elif isinstance(something, str) and len(something) == 24:
@@ -796,30 +955,9 @@ def find_object_ids(something: typing.Any) -> typing.Iterable[bson.ObjectId]:
for item in something:
yield from find_object_ids(item)
elif isinstance(something, dict):
for item in something.keys():
yield from find_object_ids(item)
for item in something.values():
yield from find_object_ids(item)
def _find_orphan_files() -> typing.Set[bson.ObjectId]:
"""Finds all non-referenced files.
Returns an iterable of all orphan file IDs.
"""
log.debug('Finding orphan files')
# Get all file IDs and make a set; we'll remove any referenced object ID later.
files_coll = current_app.db('files')
cursor = files_coll.find({'_deleted': {'$ne': True}}, projection={'_id': 1})
file_ids = {doc['_id'] for doc in cursor}
if not file_ids:
log.debug('No files found')
return set()
total_file_count = len(file_ids)
log.debug('Found %d files in total', total_file_count)
# Find all references by iterating through the project itself and every document that has a
# 'project' key set to this ObjectId.
db = current_app.db()
@@ -849,6 +987,7 @@ def find_orphan_files():
This is a heavy operation that inspects *everything* in MongoDB. Use with care.
"""
from jinja2.filters import do_filesizeformat
from pathlib import Path
output_fpath = Path(current_app.config['STORAGE_DIR']) / 'orphan-files.txt'
if output_fpath.exists():
@@ -894,6 +1033,7 @@ def delete_orphan_files():
Use 'find_orphan_files' first to generate orphan-files.txt.
"""
import pymongo.results
from pathlib import Path
output_fpath = Path(current_app.config['STORAGE_DIR']) / 'orphan-files.txt'
with output_fpath.open('r', encoding='ascii') as infile:
@@ -924,412 +1064,3 @@ def delete_orphan_files():
log.warning('Soft-deletion modified %d of %d files', res.modified_count, file_count)
log.info('%d files have been soft-deleted', res.modified_count)
@manager_maintenance.command
def find_video_files_without_duration():
"""Finds video files without any duration
This is a heavy operation. Use with care.
"""
output_fpath = Path(current_app.config['STORAGE_DIR']) / 'video_files_without_duration.txt'
if output_fpath.exists():
log.error('Output filename %s already exists, remove it first.', output_fpath)
return 1
start_timestamp = datetime.datetime.now()
files_coll = current_app.db('files')
starts_with_video = re.compile("^video", re.IGNORECASE)
aggr = files_coll.aggregate([
{'$match': {'content_type': starts_with_video,
'_deleted': {'$ne': True}}},
{'$unwind': '$variations'},
{'$match': {
'variations.duration': {'$not': {'$gt': 0}}
}},
{'$project': {'_id': 1}}
])
file_ids = [str(f['_id']) for f in aggr]
nbr_files = len(file_ids)
log.info('Total nbr video files without duration: %d', nbr_files)
end_timestamp = datetime.datetime.now()
duration = end_timestamp - start_timestamp
log.info('Finding files took %s', duration)
log.info('Writing Object IDs to %s', output_fpath)
with output_fpath.open('w', encoding='ascii') as outfile:
outfile.write('\n'.join(sorted(file_ids)))
@manager_maintenance.command
def find_video_nodes_without_duration():
"""Finds video nodes without any duration
This is a heavy operation. Use with care.
"""
output_fpath = Path(current_app.config['STORAGE_DIR']) / 'video_nodes_without_duration.txt'
if output_fpath.exists():
log.error('Output filename %s already exists, remove it first.', output_fpath)
return 1
start_timestamp = datetime.datetime.now()
nodes_coll = current_app.db('nodes')
aggr = nodes_coll.aggregate([
{'$match': {'node_type': 'asset',
'properties.content_type': 'video',
'_deleted': {'$ne': True},
'properties.duration_seconds': {'$not': {'$gt': 0}}}},
{'$project': {'_id': 1}}
])
file_ids = [str(f['_id']) for f in aggr]
nbr_files = len(file_ids)
log.info('Total nbr video nodes without duration: %d', nbr_files)
end_timestamp = datetime.datetime.now()
duration = end_timestamp - start_timestamp
log.info('Finding nodes took %s', duration)
log.info('Writing Object IDs to %s', output_fpath)
with output_fpath.open('w', encoding='ascii') as outfile:
outfile.write('\n'.join(sorted(file_ids)))
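The {'$not': {'$gt': 0}} filter used in both aggregations above is deliberately broad; a Python equivalent (sketch) makes the matched cases explicit:

def matches(doc: dict) -> bool:
    """Python equivalent (sketch) of {'properties.duration_seconds': {'$not': {'$gt': 0}}}."""
    value = doc.get('properties', {}).get('duration_seconds')
    return not (value is not None and value > 0)

assert matches({})                                             # field missing
assert matches({'properties': {'duration_seconds': 0}})       # zero duration
assert not matches({'properties': {'duration_seconds': 12}})  # has a duration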
@manager_maintenance.option('-n', '--nodes', dest='nodes_to_update', nargs='*',
help='List of nodes to update')
@manager_maintenance.option('-a', '--all', dest='all_nodes', action='store_true', default=False,
help='Update on all video nodes.')
@manager_maintenance.option('-g', '--go', dest='go', action='store_true', default=False,
help='Actually perform the changes (otherwise just show as dry-run).')
def reconcile_node_video_duration(nodes_to_update=None, all_nodes=False, go=False):
"""Copy video duration from file.variations.duration to node.properties.duraion_seconds
This is a heavy operation. Use with care.
"""
from pillar.api.utils import random_etag, utcnow
if bool(nodes_to_update) == all_nodes:
log.error('Use either --nodes or --all.')
return 1
start_timestamp = datetime.datetime.now()
nodes_coll = current_app.db('nodes')
node_subset = []
if nodes_to_update:
node_subset = [{'$match': {'_id': {'$in': [ObjectId(nid) for nid in nodes_to_update]}}}]
files = nodes_coll.aggregate(
[
*node_subset,
{'$match': {
'node_type': 'asset',
'properties.content_type': 'video',
'_deleted': {'$ne': True}}
},
{'$lookup': {
'from': 'files',
'localField': 'properties.file',
'foreignField': '_id',
'as': '_files',
}},
{'$unwind': '$_files'},
{'$unwind': '$_files.variations'},
{'$match': {'_files.variations.duration': {'$gt': 0}}},
{'$addFields': {
'need_update': {
'$ne': ['$_files.variations.duration', '$properties.duration_seconds']}
}},
{'$match': {'need_update': True}},
{'$project': {
'_id': 1,
'duration': '$_files.variations.duration',
}}]
)
if not go:
log.info('Would try to update %d nodes', len(list(files)))
return 0
modified_count = 0
for f in files:
log.debug('Updating node %s with duration %d', f['_id'], f['duration'])
new_etag = random_etag()
now = utcnow()
resp = nodes_coll.update_one(
{'_id': f['_id']},
{'$set': {
'properties.duration_seconds': f['duration'],
'_etag': new_etag,
'_updated': now,
}}
)
if resp.modified_count == 0:
log.debug('Node %s was already up to date', f['_id'])
modified_count += resp.modified_count
log.info('Updated %d nodes', modified_count)
end_timestamp = datetime.datetime.now()
duration = end_timestamp - start_timestamp
log.info('Operation took %s', duration)
return 0
@manager_maintenance.option('-g', '--go', dest='go', action='store_true', default=False,
help='Actually perform the changes (otherwise just show as dry-run).')
def delete_projectless_files(go=False):
"""Soft-deletes file documents of projects that have been deleted.
WARNING: this also soft-deletes file documents that do not have a project
property at all.
"""
start_timestamp = datetime.datetime.now()
files_coll = current_app.db('files')
aggr = files_coll.aggregate([
{'$match': {'_deleted': {'$ne': True}}},
{'$lookup': {
'from': 'projects',
'localField': 'project',
'foreignField': '_id',
'as': '_project'
}},
{'$match': {'$or': [
{'_project': []},
{'_project._deleted': True},
]}},
{'$project': {'_id': True}},
])
files_to_delete: typing.List[ObjectId] = [doc['_id'] for doc in aggr]
orphan_count = len(files_to_delete)
log.info('Total number of files to soft-delete: %d', orphan_count)
total_count = files_coll.count_documents({'_deleted': {'$ne': True}})
log.info('Total nr of orphan files: %d', orphan_count)
log.info('Total nr of files : %d', total_count)
log.info('Orphan percentage : %d%%', 100 * orphan_count / total_count)
if go:
log.info('Soft-deleting all %d projectless files', orphan_count)
now = pillar.api.utils.utcnow()
etag = pillar.api.utils.random_etag()
result = files_coll.update_many(
{'_id': {'$in': files_to_delete}},
{'$set': {
'_deleted': True,
'_updated': now,
'_etag': etag,
}},
)
log.info('Matched count: %d', result.matched_count)
log.info('Modified count: %d', result.modified_count)
end_timestamp = datetime.datetime.now()
duration = end_timestamp - start_timestamp
if go:
verb = 'Soft-deleting'
else:
verb = 'Finding'
log.info('%s orphans took %s', verb, duration)
@manager_maintenance.command
def find_projects_for_files():
"""For file documents without project, tries to find in which project files are used.
This is a heavy operation that inspects *everything* in MongoDB. Use with care.
"""
output_fpath = Path(current_app.config['STORAGE_DIR']) / 'files-without-project.json'
if output_fpath.exists():
log.error('Output filename %s already exists, remove it first.', output_fpath)
return 1
start_timestamp = datetime.datetime.now()
log.info('Finding files to fix...')
files_coll = current_app.db('files')
query = {'project': {'$exists': False},
'_deleted': {'$ne': True}}
files_to_fix = {file_doc['_id']: None for file_doc in files_coll.find(query)}
if not files_to_fix:
log.info('No files without projects found, congratulations.')
return 0
# Find all references by iterating through every node and project, and
# hoping that they reference the file.
projects_coll = current_app.db('projects')
existing_projects: typing.MutableSet[ObjectId] = set()
for doc in projects_coll.find():
project_id = doc['_id']
existing_projects.add(project_id)
for obj_id in find_object_ids(doc):
if obj_id not in files_to_fix:
continue
files_to_fix[obj_id] = project_id
nodes_coll = current_app.db('nodes')
for doc in nodes_coll.find():
project_id = doc.get('project')
if not project_id:
log.warning('Skipping node %s, as it is not part of any project', doc['_id'])
continue
if project_id not in existing_projects:
log.warning('Skipping node %s, as its project %s does not exist',
doc['_id'], project_id)
continue
for obj_id in find_object_ids(doc):
if obj_id not in files_to_fix:
continue
files_to_fix[obj_id] = project_id
orphans = {oid for oid, project_id in files_to_fix.items()
if project_id is None}
fixable = {str(oid): str(project_id)
for oid, project_id in files_to_fix.items()
if project_id is not None}
log.info('Total nr of orphan files : %d', len(orphans))
log.info('Total nr of fixable files: %d', len(fixable))
projects = set(fixable.values())
log.info('Fixable project count : %d', len(projects))
for project_id in projects:
project = projects_coll.find_one(ObjectId(project_id))
log.info(' - %40s /p/%-20s created on %s, ',
project['name'], project['url'], project['_created'])
end_timestamp = datetime.datetime.now()
duration = end_timestamp - start_timestamp
log.info('Finding projects took %s', duration)
log.info('Writing {file_id: project_id} mapping to %s', output_fpath)
with output_fpath.open('w', encoding='ascii') as outfile:
json.dump(fixable, outfile, indent=4, sort_keys=True)
@manager_maintenance.option('filepath', type=Path,
help='JSON file produced by find_projects_for_files')
@manager_maintenance.option('-g', '--go', dest='go', action='store_true', default=False,
help='Actually perform the changes (otherwise just show as dry-run).')
def fix_projects_for_files(filepath: Path, go=False):
"""Assigns file documents to projects.
Use 'manage.py maintenance find_projects_for_files` to produce the JSON
file that contains the file ID to project ID mapping.
"""
log.info('Loading %s', filepath)
with filepath.open('r', encoding='ascii') as infile:
mapping: typing.Mapping[str, str] = json.load(infile)
# Group IDs per project for more efficient querying.
log.info('Grouping per project')
project_to_file_ids: typing.Mapping[ObjectId, typing.List[ObjectId]] = \
collections.defaultdict(list)
for file_id, project_id in mapping.items():
project_to_file_ids[ObjectId(project_id)].append(ObjectId(file_id))
MockUpdateResult = collections.namedtuple('MockUpdateResult', 'matched_count modified_count')
files_coll = current_app.db('files')
total_matched = total_modified = 0
for project_oid, file_oids in project_to_file_ids.items():
query = {'_id': {'$in': file_oids}}
if go:
result = files_coll.update_many(query, {'$set': {'project': project_oid}})
else:
found = files_coll.count_documents(query)
result = MockUpdateResult(found, 0)
total_matched += result.matched_count
total_modified += result.modified_count
if result.matched_count != len(file_oids):
log.warning('Matched only %d of %d files; modified %d; for project %s',
result.matched_count, len(file_oids), result.modified_count, project_oid)
else:
log.info('Matched all %d files; modified %d; for project %s',
result.matched_count, result.modified_count, project_oid)
log.info('Done updating %d files (found %d, modified %d) on %d projects',
len(mapping), total_matched, total_modified, len(project_to_file_ids))
@manager_maintenance.option('-u', '--user', dest='user', nargs='?',
help='Update subscriptions for single user.')
@manager_maintenance.option('-o', '--object', dest='context_object', nargs='?',
help='Update subscriptions for context_object.')
@manager_maintenance.option('-g', '--go', dest='go', action='store_true', default=False,
help='Actually perform the changes (otherwise just show as dry-run).')
def fix_missing_activities_subscription_defaults(user=None, context_object=None, go=False):
"""Assign default values to activities-subscriptions documents where values are missing.
"""
subscriptions_collection = current_app.db('activities-subscriptions')
lookup_is_subscribed = {
'is_subscribed': {'$exists': False},
}
lookup_notifications = {
'notifications.web': {'$exists': False},
}
if user:
lookup_is_subscribed['user'] = ObjectId(user)
lookup_notifications['user'] = ObjectId(user)
if context_object:
lookup_is_subscribed['context_object'] = ObjectId(context_object)
lookup_notifications['context_object'] = ObjectId(context_object)
num_need_is_subscribed_update = subscriptions_collection.count(lookup_is_subscribed)
log.info("Found %d documents that needs to be update 'is_subscribed'", num_need_is_subscribed_update)
num_need_notification_web_update = subscriptions_collection.count(lookup_notifications)
log.info("Found %d documents that needs to be update 'notifications.web'", num_need_notification_web_update)
if not go:
return
if num_need_is_subscribed_update > 0:
log.info("Updating 'is_subscribed'")
resp = subscriptions_collection.update(
lookup_is_subscribed,
{
'$set': {'is_subscribed': True}
},
multi=True,
upsert=False
)
if resp['nModified'] != num_need_is_subscribed_update:
log.warning("Expected %d documents to be updated, was %d",
num_need_is_subscribed_update, resp['nModified'])
if num_need_notification_web_update > 0:
log.info("Updating 'notifications.web'")
resp = subscriptions_collection.update(
lookup_notifications,
{
'$set': {'notifications.web': True}
},
multi=True,
upsert=False
)
if resp['nModified'] != num_need_notification_web_update:
log.warning("Expected %d documents to be updated, was %d",
num_need_notification_web_update, resp['nModified'])
log.info("Done updating 'activities-subscriptions' documents")

View File

@@ -1,8 +1,6 @@
from collections import defaultdict
import datetime
import os.path
from os import getenv
from collections import defaultdict
import requests.certs
# Certificate file for communication with other systems.
@@ -31,11 +29,10 @@ DEBUG = False
SECRET_KEY = ''
# Authentication token hashing key. If empty falls back to UTF8-encoded SECRET_KEY with a warning.
# Not used to hash new tokens, but it is used to check pre-existing hashed tokens.
AUTH_TOKEN_HMAC_KEY = b''
# Authentication settings
BLENDER_ID_ENDPOINT = 'http://id.local:8000/'
BLENDER_ID_ENDPOINT = 'http://id.local:8000'
CDN_USE_URL_SIGNING = True
CDN_SERVICE_DOMAIN_PROTOCOL = 'https'
@@ -195,7 +192,7 @@ BLENDER_CLOUD_ADDON_VERSION = '1.4'
TLS_CERT_FILE = requests.certs.where()
CELERY_BACKEND = 'redis://redis/1'
CELERY_BROKER = 'redis://redis/2'
CELERY_BROKER = 'amqp://guest:guest@rabbit//'
# This configures the Celery task scheduler in such a way that we don't
# have to import the pillar.celery.XXX modules. Remember to run
@@ -206,18 +203,8 @@ CELERY_BEAT_SCHEDULE = {
'schedule': 600, # every N seconds
'args': ('gcs', 100)
},
'refresh-blenderid-badges': {
'task': 'pillar.celery.badges.sync_badges_for_users',
'schedule': 10 * 60, # every N seconds
'args': (9 * 60, ), # time limit in seconds, keep shorter than 'schedule'
}
}
# Badges will be re-fetched every timedelta.
# TODO(Sybren): A proper value should be determined after we actually have users with badges.
BLENDER_ID_BADGE_EXPIRY = datetime.timedelta(hours=4)
# Mapping from user role to capabilities obtained by users with that role.
USER_CAPABILITIES = defaultdict(**{
'subscriber': {'subscriber', 'home-project'},
@@ -270,14 +257,3 @@ STATIC_FILE_HASH = ''
# all API endpoints do not need it. On the views that require it, we use the
# current_app.csrf.protect() method.
WTF_CSRF_CHECK_DEFAULT = False
# Flask Debug Toolbar. Enable it by overriding DEBUG_TB_ENABLED in config_local.py.
DEBUG_TB_ENABLED = False
DEBUG_TB_PANELS = [
'flask_debugtoolbar.panels.versions.VersionDebugPanel',
'flask_debugtoolbar.panels.headers.HeaderDebugPanel',
'flask_debugtoolbar.panels.request_vars.RequestVarsDebugPanel',
'flask_debugtoolbar.panels.config_vars.ConfigVarsDebugPanel',
'flask_debugtoolbar.panels.template.TemplateDebugPanel',
'flask_debugtoolbar.panels.logger.LoggingPanel',
'flask_debugtoolbar.panels.route_list.RouteListDebugPanel']

View File

@@ -1,5 +1,3 @@
import flask
import raven.breadcrumbs
from raven.contrib.flask import Sentry
from .auth import current_user
@@ -16,14 +14,16 @@ class PillarSentry(Sentry):
def init_app(self, app, *args, **kwargs):
super().init_app(app, *args, **kwargs)
flask.request_started.connect(self.__add_sentry_breadcrumbs, self)
# We perform authentication of the user while handling the request,
# so Sentry calls get_user_info() too early.
def __add_sentry_breadcrumbs(self, sender, **extra):
raven.breadcrumbs.record(
message='Request started',
category='http',
data={'url': flask.request.url}
)
def get_user_context_again(self, ):
from flask import request
try:
self.client.user_context(self.get_user_info(request))
except Exception as e:
self.client.logger.exception(str(e))
def get_user_info(self, request):
user_info = super().get_user_info(request)

View File

@@ -163,11 +163,11 @@ class YouTube:
return html_module.escape('{youtube invalid YouTube ID/URL}')
src = f'https://www.youtube.com/embed/{youtube_id}?rel=0'
html = f'<div class="embed-responsive embed-responsive-16by9">' \
f'<iframe class="shortcode youtube embed-responsive-item"' \
f' width="{width}" height="{height}" src="{src}"' \
f' frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>' \
f'</div>'
iframe_tag = f'<iframe class="shortcode youtube embed-responsive-item" width="{width}"' \
f' height="{height}" src="{src}" frameborder="0" allow="autoplay; encrypted-media"' \
f' allowfullscreen></iframe>'
# Embed the iframe in a container, to allow easier styling
html = f'<div class="embed-responsive embed-responsive-16by9">{iframe_tag}</div>'
return html
@@ -228,25 +228,12 @@ class Attachment:
return self.render(file_doc, pargs, kwargs)
def sdk_file(self, slug: str, document: dict) -> pillarsdk.File:
def sdk_file(self, slug: str, node_properties: dict) -> pillarsdk.File:
"""Return the file document for the attachment with this slug."""
from pillar.web import system_util
# TODO (fsiddi) Make explicit what 'document' is.
# In some cases we pass the entire node or project documents, in other cases
# we pass node.properties. This should be unified at the level of do_markdown.
# For now we do a quick hack and first look for 'properties' in the doc,
# then we look for 'attachments'.
doc_properties = document.get('properties')
if doc_properties:
# We passed an entire document (all nodes must have 'properties')
attachments = doc_properties.get('attachments', {})
else:
# The value of document could have been defined as 'node.properties'
attachments = document.get('attachments', {})
attachments = node_properties.get('attachments', {})
attachment = attachments.get(slug)
if not attachment:
raise self.NoSuchSlug(slug)

View File

@@ -1,7 +1,6 @@
# -*- encoding: utf-8 -*-
import base64
import contextlib
import copy
import datetime
import json
@@ -11,7 +10,11 @@ import pathlib
import sys
import typing
import unittest.mock
from urllib.parse import urlencode, urljoin
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode  # Python 2 fallback
from bson import ObjectId, tz_util
@@ -24,7 +27,6 @@ from eve.tests import TestMinimal
import pymongo.collection
from flask.testing import FlaskClient
import flask.ctx
import flask.wrappers
import responses
import pillar
@@ -174,10 +176,6 @@ class AbstractPillarTest(TestMinimal):
for modname in remove:
del sys.modules[modname]
def url_for(self, endpoint, **values):
with self.app.app_context():
return flask.url_for(endpoint, **values)
def ensure_file_exists(self, file_overrides=None, *, example_file=None) -> (ObjectId, dict):
if example_file is None:
example_file = ctd.EXAMPLE_FILE
@@ -187,7 +185,7 @@ class AbstractPillarTest(TestMinimal):
else:
self.ensure_project_exists()
with self.app.app_context():
with self.app.test_request_context():
files_collection = self.app.data.driver.db['files']
assert isinstance(files_collection, pymongo.collection.Collection)
@@ -328,48 +326,15 @@ class AbstractPillarTest(TestMinimal):
return user
@contextlib.contextmanager
def login_as(self, user_id: typing.Union[str, ObjectId]):
"""Context manager, within the context the app context is active and the user logged in.
The logging-in happens when a request starts, so it only takes effect when
a request such as self.get() or self.post() is performed.
"""
from pillar.auth import UserClass, login_user_object
if isinstance(user_id, str):
user_oid = ObjectId(user_id)
elif isinstance(user_id, ObjectId):
user_oid = user_id
else:
raise TypeError(f'invalid type {type(user_id)} for parameter user_id')
user_doc = self.fetch_user_from_db(user_oid)
def signal_handler(sender, **kwargs):
login_user_object(user)
with self.app.app_context():
user = UserClass.construct('', user_doc)
with flask.request_started.connected_to(signal_handler, self.app):
yield
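Typical use inside a test case (a sketch; create_user() and its default ID are assumed from this test helper class):

def test_me_endpoint(self):
    user_id = self.create_user(user_id='cafef00dcafef00dcafef00d')  # assumed helper
    with self.login_as(user_id):
        resp = self.get('/api/users/me')
    self.assertEqual(resp.get_json()['_id'], 'cafef00dcafef00dcafef00d')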
# TODO: rename to 'create_auth_token' now that 'expire_in_days' can be negative.
def create_valid_auth_token(self,
user_id: typing.Union[str, ObjectId],
token='token',
*,
oauth_scopes: typing.Optional[typing.List[str]]=None,
expire_in_days=1) -> dict:
def create_valid_auth_token(self, user_id, token='token'):
from pillar.api.utils import utcnow
if isinstance(user_id, str):
user_id = ObjectId(user_id)
future = utcnow() + datetime.timedelta(days=expire_in_days)
future = utcnow() + datetime.timedelta(days=1)
with self.app.test_request_context():
from pillar.api.utils import authentication as auth
token_data = auth.store_token(user_id, token, future, oauth_scopes=oauth_scopes)
token_data = auth.store_token(user_id, token, future, None)
return token_data
@@ -399,7 +364,7 @@ class AbstractPillarTest(TestMinimal):
return user_id
def create_node(self, node_doc) -> ObjectId:
def create_node(self, node_doc):
"""Creates a node, returning its ObjectId. """
with self.app.test_request_context():
@@ -441,7 +406,7 @@ class AbstractPillarTest(TestMinimal):
"""Sets up Responses to mock unhappy validation flow."""
responses.add(responses.POST,
urljoin(self.app.config['BLENDER_ID_ENDPOINT'], 'u/validate_token'),
'%s/u/validate_token' % self.app.config['BLENDER_ID_ENDPOINT'],
json={'status': 'fail'},
status=403)
@@ -449,7 +414,7 @@ class AbstractPillarTest(TestMinimal):
"""Sets up Responses to mock happy validation flow."""
responses.add(responses.POST,
urljoin(self.app.config['BLENDER_ID_ENDPOINT'], 'u/validate_token'),
'%s/u/validate_token' % self.app.config['BLENDER_ID_ENDPOINT'],
json=BLENDER_ID_USER_RESPONSE,
status=200)
@@ -520,10 +485,11 @@ class AbstractPillarTest(TestMinimal):
def client_request(self, method, path, qs=None, expected_status=200, auth_token=None, json=None,
data=None, headers=None, files=None, content_type=None, etag=None,
environ_overrides=None) -> flask.wrappers.Response:
environ_overrides=None):
"""Performs a HTTP request to the server."""
from pillar.api.utils import dumps
import json as mod_json
headers = headers or {}
environ_overrides = environ_overrides or {}
@@ -556,21 +522,29 @@ class AbstractPillarTest(TestMinimal):
expected_status, resp.status_code, resp.data
))
def get_json():
if resp.mimetype != 'application/json':
raise TypeError('Unable to load JSON from mimetype %r' % resp.mimetype)
return mod_json.loads(resp.data)
resp.json = get_json
resp.get_json = get_json
return resp
def get(self, *args, **kwargs) -> flask.wrappers.Response:
def get(self, *args, **kwargs):
return self.client_request('GET', *args, **kwargs)
def post(self, *args, **kwargs) -> flask.wrappers.Response:
def post(self, *args, **kwargs):
return self.client_request('POST', *args, **kwargs)
def put(self, *args, **kwargs) -> flask.wrappers.Response:
def put(self, *args, **kwargs):
return self.client_request('PUT', *args, **kwargs)
def delete(self, *args, **kwargs) -> flask.wrappers.Response:
def delete(self, *args, **kwargs):
return self.client_request('DELETE', *args, **kwargs)
def patch(self, *args, **kwargs) -> flask.wrappers.Response:
def patch(self, *args, **kwargs):
return self.client_request('PATCH', *args, **kwargs)
def assertAllowsAccess(self,
@@ -587,7 +561,7 @@ class AbstractPillarTest(TestMinimal):
raise TypeError('expected_user_id should be a string or ObjectId, '
f'but is {expected_user_id!r}')
resp = self.get('/api/users/me', expected_status=200, auth_token=token).get_json()
resp = self.get('/api/users/me', expected_status=200, auth_token=token).json()
if expected_user_id:
self.assertEqual(resp['_id'], str(expected_user_id))
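A hedged usage sketch of the helpers above; create_user() is assumed to be an existing helper on the same base class:

def test_users_me(self):
    user_id = self.create_user()  # assumed helper from AbstractPillarTest
    with self.login_as(user_id):
        # login_as() only takes effect once a request starts:
        resp = self.get('/api/users/me')
    self.assertEqual(resp.get_json()['_id'], str(user_id))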

View File

@@ -1,9 +1,9 @@
"""Flask configuration file for unit testing."""
BLENDER_ID_ENDPOINT = 'http://id.local:8001/' # Non-existent server
BLENDER_ID_ENDPOINT = 'http://id.local:8001' # Non-existent server
SERVER_NAME = 'localhost.local'
PILLAR_SERVER_ENDPOINT = 'http://localhost.local/api/'
SERVER_NAME = 'localhost'
PILLAR_SERVER_ENDPOINT = 'http://localhost/api/'
MAIN_PROJECT_ID = '5672beecc0261b2005ed1a33'
@@ -44,5 +44,3 @@ ELASTIC_INDICES = {
# MUST be 8 characters long, see pillar.flask_extra.HashedPathConverter
STATIC_FILE_HASH = 'abcd1234'
CACHE_NO_NULL_WARNING = True
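The trailing slash added to BLENDER_ID_ENDPOINT above matters once the code builds URLs with urljoin(); a quick illustration of the standard-library behaviour:

from urllib.parse import urljoin
urljoin('http://id.local:8001/api/', 'u/validate_token')  # -> 'http://id.local:8001/api/u/validate_token'
urljoin('http://id.local:8001/api', 'u/validate_token')   # -> 'http://id.local:8001/u/validate_token' (last segment replaced)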

View File

@@ -1,7 +1,6 @@
from pillar.api.eve_settings import *
MONGO_DBNAME = 'pillar_test'
MONGO_USERNAME = None
def override_eve():
@@ -11,7 +10,5 @@ def override_eve():
test_settings.MONGO_HOST = MONGO_HOST
test_settings.MONGO_PORT = MONGO_PORT
test_settings.MONGO_DBNAME = MONGO_DBNAME
test_settings.MONGO_USERNAME = MONGO_USERNAME
tests.MONGO_HOST = MONGO_HOST
tests.MONGO_DBNAME = MONGO_DBNAME
tests.MONGO_USERNAME = MONGO_USERNAME

View File

@@ -1,7 +1,6 @@
"""Our custom Jinja filters and other template stuff."""
import functools
import json
import logging
import typing
import urllib.parse
@@ -14,8 +13,6 @@ import werkzeug.exceptions as wz_exceptions
import pillarsdk
import pillar.api.utils
from pillar import auth
from pillar.api.utils import pretty_duration
from pillar.web.utils import pretty_date
from pillar.web.nodes.routes import url_for_node
import pillar.markdown
@@ -31,14 +28,6 @@ def format_pretty_date_time(d):
return pretty_date(d, detail=True)
def format_pretty_duration(s):
return pretty_duration(s)
def format_pretty_duration_fractional(s):
return pillar.api.utils.pretty_duration_fractional(s)
def format_undertitle(s):
"""Underscore-replacing title filter.
@@ -211,32 +200,9 @@ def do_yesno(value, arg=None):
return no
def user_to_dict(user: auth.UserClass) -> dict:
return dict(
user_id=str(user.user_id),
username=user.username,
full_name=user.full_name,
gravatar=user.gravatar,
email=user.email,
capabilities=list(user.capabilities),
badges_html=user.badges_html,
is_authenticated=user.is_authenticated
)
def do_json(some_object) -> str:
if isinstance(some_object, pillarsdk.Resource):
some_object = some_object.to_dict()
if isinstance(some_object, auth.UserClass):
some_object = user_to_dict(some_object)
return json.dumps(some_object)
def setup_jinja_env(jinja_env, app_config: dict):
jinja_env.filters['pretty_date'] = format_pretty_date
jinja_env.filters['pretty_date_time'] = format_pretty_date_time
jinja_env.filters['pretty_duration'] = format_pretty_duration
jinja_env.filters['pretty_duration_fractional'] = format_pretty_duration_fractional
jinja_env.filters['undertitle'] = format_undertitle
jinja_env.filters['hide_none'] = do_hide_none
jinja_env.filters['pluralize'] = do_pluralize
@@ -246,7 +212,6 @@ def setup_jinja_env(jinja_env, app_config: dict):
jinja_env.filters['yesno'] = do_yesno
jinja_env.filters['repr'] = repr
jinja_env.filters['urljoin'] = functools.partial(urllib.parse.urljoin, allow_fragments=True)
jinja_env.filters['json'] = do_json
jinja_env.globals['url_for_node'] = do_url_for_node
jinja_env.globals['abs_url'] = functools.partial(flask.url_for,
_external=True,
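A minimal sketch of the json filter in use, with a throwaway Jinja environment (the rendered string is illustrative):

import jinja2
from pillar.web.jinja import do_json
env = jinja2.Environment()
env.filters['json'] = do_json  # as registered by setup_jinja_env() above
print(env.from_string('var user = {{ u | json }};').render(u={'name': 'Ada'}))
# var user = {"name": "Ada"};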

View File

@@ -21,7 +21,7 @@ def attachment_form_group_create(schema_prop):
def _attachment_build_single_field(schema_prop):
# Ugly hard-coded schema.
fake_schema = {
'slug': schema_prop['keyschema'],
'slug': schema_prop['propertyschema'],
'oid': schema_prop['valueschema']['schema']['oid'],
}
file_select_form_group = build_file_select_form(fake_schema)
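For context, attachments are validated as a dict-of-dicts; a hedged sketch of such a schema using the two field names from the hunk above (Cerberus 1.x renamed propertyschema to keyschema, hence the swap; 'objectid' is Eve's custom type):

attachments_schema = {
    'type': 'dict',
    'keyschema': {'type': 'string'},  # 'propertyschema' on the older Cerberus
    'valueschema': {
        'type': 'dict',
        'schema': {'oid': {'type': 'objectid', 'required': True}},
    },
}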

View File

@@ -0,0 +1,236 @@
import logging
from flask import current_app
from flask import request
from flask import jsonify
from flask import render_template
from flask_login import login_required, current_user
from pillarsdk import Node
from pillarsdk import Project
import werkzeug.exceptions as wz_exceptions
from pillar.api.utils import utcnow
from pillar.web import subquery
from pillar.web.nodes.routes import blueprint
from pillar.web.utils import gravatar
from pillar.web.utils import pretty_date
from pillar.web.utils import system_util
log = logging.getLogger(__name__)
@blueprint.route('/comments/create', methods=['POST'])
@login_required
def comments_create():
content = request.form['content']
parent_id = request.form.get('parent_id')
if not parent_id:
log.warning('User %s tried to create comment without parent_id', current_user.objectid)
raise wz_exceptions.UnprocessableEntity()
api = system_util.pillar_api()
parent_node = Node.find(parent_id, api=api)
if not parent_node:
log.warning('Unable to create comment for user %s, parent node %r not found',
current_user.objectid, parent_id)
raise wz_exceptions.UnprocessableEntity()
log.info('Creating comment for user %s on parent node %r',
current_user.objectid, parent_id)
comment_props = dict(
project=parent_node.project,
name='Comment',
user=current_user.objectid,
node_type='comment',
properties=dict(
content=content,
status='published',
confidence=0,
rating_positive=0,
rating_negative=0))
if parent_id:
comment_props['parent'] = parent_id
# Get the parent node and check if it's a comment. In which case we flag
# the current comment as a reply.
parent_node = Node.find(parent_id, api=api)
if parent_node.node_type == 'comment':
comment_props['properties']['is_reply'] = True
comment = Node(comment_props)
comment.create(api=api)
return jsonify({'node_id': comment._id}), 201
@blueprint.route('/comments/<string(length=24):comment_id>', methods=['POST'])
@login_required
def comment_edit(comment_id):
"""Allows a user to edit their comment."""
from pillar.web import jinja
api = system_util.pillar_api()
comment = Node({'_id': comment_id})
result = comment.patch({'op': 'edit', 'content': request.form['content']}, api=api)
assert result['_status'] == 'OK'
return jsonify({
'status': 'success',
'data': {
'content': result.properties.content or '',
'content_html': jinja.do_markdowned(result.properties, 'content'),
}})
def format_comment(comment, is_reply=False, is_team=False, replies=None):
"""Format a comment node into a simpler dictionary.
:param comment: the comment object
:param is_reply: True if the comment is a reply to another comment
:param is_team: True if the author belongs to the group that owns the node
:param replies: list of replies (formatted with this function)
"""
try:
is_own = (current_user.objectid == comment.user._id) \
if current_user.is_authenticated else False
except AttributeError:
current_app.bugsnag.notify(Exception(
'Missing user for embedded user ObjectId'),
meta_data={'nodes_info': {'node_id': comment['_id']}})
return
is_rated = False
is_rated_positive = None
if comment.properties.ratings:
for rating in comment.properties.ratings:
if current_user.is_authenticated and rating.user == current_user.objectid:
is_rated = True
is_rated_positive = rating.is_positive
break
return dict(_id=comment._id,
gravatar=gravatar(comment.user.email, size=32),
time_published=pretty_date(comment._created or utcnow(), detail=True),
rating=comment.properties.rating_positive - comment.properties.rating_negative,
author=comment.user.full_name,
author_username=comment.user.username,
content=comment.properties.content,
is_reply=is_reply,
is_own=is_own,
is_rated=is_rated,
is_rated_positive=is_rated_positive,
is_team=is_team,
replies=replies)
@blueprint.route('/<string(length=24):node_id>/comments')
def comments_for_node(node_id):
"""Shows the comments attached to the given node.
The URL can be overridden in order to define can_post_comments in a different way
"""
api = system_util.pillar_api()
node = Node.find(node_id, api=api)
project = Project({'_id': node.project})
can_post_comments = project.node_type_has_method('comment', 'POST', api=api)
can_comment_override = request.args.get('can_comment', 'True') == 'True'
can_post_comments = can_post_comments and can_comment_override
return render_comments_for_node(node_id, can_post_comments=can_post_comments)
def render_comments_for_node(node_id: str, *, can_post_comments: bool):
"""Render the list of comments for a node."""
api = system_util.pillar_api()
# Query for all children, i.e. comments on the node.
comments = Node.all({
'where': {'node_type': 'comment', 'parent': node_id},
}, api=api)
def enrich(some_comment):
some_comment['_user'] = subquery.get_user_info(some_comment['user'])
some_comment['_is_own'] = some_comment['user'] == current_user.objectid
some_comment['_current_user_rating'] = None # tri-state boolean
some_comment[
'_rating'] = some_comment.properties.rating_positive - some_comment.properties.rating_negative
if current_user.is_authenticated:
for rating in some_comment.properties.ratings or ():
if rating.user != current_user.objectid:
continue
some_comment['_current_user_rating'] = rating.is_positive
for comment in comments['_items']:
# Query for all grandchildren, i.e. replies to comments on the node.
comment['_replies'] = Node.all({
'where': {'node_type': 'comment', 'parent': comment['_id']},
}, api=api)
enrich(comment)
for reply in comment['_replies']['_items']:
enrich(reply)
nr_of_comments = sum(1 + comment['_replies']['_meta']['total']
for comment in comments['_items'])
return render_template('nodes/custom/comment/list_embed.html',
node_id=node_id,
comments=comments,
nr_of_comments=nr_of_comments,
show_comments=True,
can_post_comments=can_post_comments)
@blueprint.route('/<string(length=24):node_id>/commentform')
def commentform_for_node(node_id):
"""Shows only the comment for for comments attached to the given node.
i.e. does not show the comments themselves, just the form to post a new comment.
"""
api = system_util.pillar_api()
node = Node.find(node_id, api=api)
project = Project({'_id': node.project})
can_post_comments = project.node_type_has_method('comment', 'POST', api=api)
return render_template('nodes/custom/comment/list_embed.html',
node_id=node_id,
show_comments=False,
can_post_comments=can_post_comments)
@blueprint.route("/comments/<comment_id>/rate/<operation>", methods=['POST'])
@login_required
def comments_rate(comment_id, operation):
"""Comment rating function
:param comment_id: the comment id
:type comment_id: str
:param operation: the rating operation: 'revoke', 'upvote' or 'downvote'
:type operation: str
"""
if operation not in {'revoke', 'upvote', 'downvote'}:
raise wz_exceptions.BadRequest('Invalid operation')
api = system_util.pillar_api()
# PATCH the node and return the result.
comment = Node({'_id': comment_id})
result = comment.patch({'op': operation}, api=api)
assert result['_status'] == 'OK'
return jsonify({
'status': 'success',
'data': {
'op': operation,
'rating_positive': result.properties.rating_positive,
'rating_negative': result.properties.rating_negative,
}})
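A hedged sketch of calling the new create endpoint, assuming the nodes blueprint is mounted under /nodes (host, session cookie, and parent id are placeholders):

import requests
resp = requests.post('https://example.test/nodes/comments/create',
                     data={'content': 'Nice work!', 'parent_id': '5672beecc0261b2005ed1a33'},
                     cookies={'session': '...'})  # must be an authenticated session
assert resp.status_code == 201
print(resp.json()['node_id'])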

View File

@@ -19,7 +19,6 @@ from pillar.web.nodes.routes import url_for_node
from pillar.web.nodes.forms import get_node_form
import pillar.web.nodes.attachments
from pillar.web.projects.routes import project_update_nodes_list
from pillar.web.projects.routes import project_navigation_links
log = logging.getLogger(__name__)
@@ -62,10 +61,16 @@ def posts_view(project_id=None, project_url=None, url=None, *, archive=False, pa
post.picture = get_file(post.picture, api=api)
post.url = url_for_node(node=post)
# Use the *_main_project.html template for the main blog
is_main_project = project_id == current_app.config['MAIN_PROJECT_ID']
main_project_template = '_main_project' if is_main_project else ''
main_project_template = '_main_project'
index_arch = 'archive' if archive else 'index'
template_path = f'nodes/custom/blog/{index_arch}.html',
template_path = f'nodes/custom/blog/{index_arch}{main_project_template}.html',
if url:
template_path = f'nodes/custom/post/view{main_project_template}.html',
post = Node.find_one({
'where': {'parent': blog._id, 'properties.url': url},
'embedded': {'node_type': 1, 'user': 1},
@@ -90,7 +95,6 @@ def posts_view(project_id=None, project_url=None, url=None, *, archive=False, pa
can_create_blog_posts = project.node_type_has_method('post', 'POST', api=api)
# Use functools.partial so we can later pass page=X.
is_main_project = project_id == current_app.config['MAIN_PROJECT_ID']
if is_main_project:
url_func = functools.partial(url_for, 'main.main_blog_archive')
else:
@@ -108,21 +112,24 @@ def posts_view(project_id=None, project_url=None, url=None, *, archive=False, pa
else:
project.blog_archive_prev = None
navigation_links = project_navigation_links(project, api)
extension_sidebar_links = current_app.extension_sidebar_links(project)
title = 'blog_main' if is_main_project else 'blog'
pages = Node.all({
'where': {'project': project._id, 'node_type': 'page'},
'projection': {'name': 1}}, api=api)
return render_template(
template_path,
blog=blog,
node=post, # node is used by the generic comments rendering (see custom/_scripts.pug)
node=post,
posts=posts._items,
posts_meta=pmeta,
more_posts_available=pmeta['total'] > pmeta['max_results'],
project=project,
title=title,
node_type_post=project.get_node_type('post'),
can_create_blog_posts=can_create_blog_posts,
navigation_links=navigation_links,
extension_sidebar_links=extension_sidebar_links,
pages=pages._items,
api=api)
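The hunk above picks templates along two axes (archive vs index, main project vs regular); sketched as a standalone function for clarity:

def blog_template(archive: bool, is_main: bool, single_post: bool) -> str:
    suffix = '_main_project' if is_main else ''
    if single_post:
        return f'nodes/custom/post/view{suffix}.html'
    index_arch = 'archive' if archive else 'index'
    return f'nodes/custom/blog/{index_arch}{suffix}.html'

blog_template(archive=True, is_main=True, single_post=False)
# -> 'nodes/custom/blog/archive_main_project.html'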

View File

@@ -48,12 +48,9 @@ def find_for_comment(project, node):
continue
try:
parent = Node.find_one({'where': {
'_id': parent.parent,
'_deleted': {'$ne': True}
}}, api=api)
parent = Node.find(parent.parent, api=api)
except ResourceNotFound:
log.debug(
log.warning(
'url_for_node(node_id=%r): Unable to find parent node %r',
node['_id'], parent.parent)
raise ValueError('Unable to find parent node %r' % parent.parent)
@@ -97,16 +94,6 @@ def find_for_post(project, node):
url=node.properties.url)
@register_node_finder('page')
def find_for_page(project, node):
"""Returns the URL for a page."""
project_id = project['_id']
the_project = project_url(project_id, project=project)
return url_for('projects.view_node', project_url=the_project.url, node_id=node.properties.url)
def find_for_other(project, node):
"""Fallback: Assets, textures, and other node types.

View File

@@ -1,10 +1,9 @@
import functools
import logging
import typing
from datetime import datetime
from datetime import date
import pillarsdk
from flask import current_app
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms import DateField
@@ -18,8 +17,6 @@ from wtforms import DateTimeField
from wtforms import SelectMultipleField
from wtforms import FieldList
from wtforms.validators import DataRequired
from pillar import current_app
from pillar.web.utils import system_util
from pillar.web.utils.forms import FileSelectField
from pillar.web.utils.forms import CustomFormField
@@ -47,14 +44,6 @@ def iter_node_properties(node_type):
yield prop_name, prop_schema, prop_fschema
@functools.lru_cache(maxsize=1)
def tag_choices() -> typing.List[typing.Tuple[str, str]]:
"""Return (value, label) tuples for the NODE_TAGS config setting."""
#TODO(fsiddi) consider allowing tags based on custom_properties in the project.
tags = current_app.config.get('NODE_TAGS') or []
return [(tag, tag.title()) for tag in tags] # (value, label) tuples
def add_form_properties(form_class, node_type):
"""Add fields to a form based on the node and form schema provided.
:type node_schema: dict

View File

@@ -1,9 +1,9 @@
import os
import json
import logging
from datetime import datetime
import pillarsdk
from pillar import shortcodes
from pillarsdk import Node
from pillarsdk import Project
from pillarsdk.exceptions import ResourceNotFound
@@ -17,12 +17,15 @@ from flask import request
from flask import jsonify
from flask import abort
from flask_login import current_user
from flask_wtf.csrf import validate_csrf
import werkzeug.exceptions as wz_exceptions
from wtforms import SelectMultipleField
from flask_login import login_required
from jinja2.exceptions import TemplateNotFound
from pillar.api.utils.authorization import check_permissions
from pillar.web.utils import caching
from pillar.markdown import markdown
from pillar.web.nodes.forms import get_node_form
from pillar.web.nodes.forms import process_node_form
@@ -105,11 +108,6 @@ def view(node_id, extra_template_args: dict=None):
node_type_name = node.node_type
if node_type_name == 'page':
# HACK: The 'edit node' page GETs this endpoint, but for pages it's plain wrong,
# so we just redirect to the correct URL.
return redirect(url_for_node(node=node))
if node_type_name == 'post' and not request.args.get('embed'):
# Posts shouldn't be shown at this route (unless viewed embedded, typically
# after an edit). Redirect to the correct one.
@@ -489,14 +487,11 @@ def preview_markdown():
current_app.csrf.protect()
try:
content = request.json['content']
content = request.form['content']
except KeyError:
return jsonify({'_status': 'ERR',
'message': 'The field "content" was not specified.'}), 400
html = markdown(content)
attachmentsdict = request.json.get('attachments', {})
html = shortcodes.render_commented(html, context={'attachments': attachmentsdict})
return jsonify(content=html)
return jsonify(content=markdown(content))
def ensure_lists_exist_as_empty(node_doc, node_type):
@@ -609,94 +604,5 @@ def url_for_node(node_id=None, node=None):
return finders.find_url_for_node(node)
@blueprint.route("/<node_id>/breadcrumbs")
def breadcrumbs(node_id: str):
"""Return breadcrumbs for the given node, as JSON.
Note that a missing parent is still returned in the breadcrumbs,
but with `{_exists: false, name: '-unknown-'}`.
The breadcrumbs start with the top-level parent, and end with the node
itself (marked by {_self: true}). Returns JSON like this:
{breadcrumbs: [
...,
{_id: "parentID",
name: "The Parent Node",
node_type: "group",
url: "/p/project/parentID"},
{_id: "deadbeefbeefbeefbeeffeee",
_self: true,
name: "The Node Itself",
node_type: "asset",
url: "/p/project/nodeID"},
]}
When a parent node is missing, it has a breadcrumb like this:
{_id: "deadbeefbeefbeefbeeffeee",
_exists': false,
name': '-unknown-'}
"""
api = system_util.pillar_api()
is_self = True
def make_crumb(some_node) -> dict:
"""Construct a breadcrumb for this node."""
nonlocal is_self
crumb = {
'_id': some_node._id,
'name': some_node.name,
'node_type': some_node.node_type,
'url': finders.find_url_for_node(some_node),
}
if is_self:
crumb['_self'] = True
is_self = False
return crumb
def make_missing_crumb(some_node_id) -> dict:
"""Construct 'missing parent' breadcrumb."""
return {
'_id': some_node_id,
'_exists': False,
'name': '-unknown-',
}
# The first node MUST exist.
try:
node = Node.find(node_id, api=api)
except ResourceNotFound:
log.warning('breadcrumbs(node_id=%r): Unable to find node', node_id)
raise wz_exceptions.NotFound(f'Unable to find node {node_id}')
except ForbiddenAccess:
log.warning('breadcrumbs(node_id=%r): access denied to current user', node_id)
raise wz_exceptions.Forbidden(f'No access to node {node_id}')
crumbs = []
while True:
crumbs.append(make_crumb(node))
child_id = node._id
node_id = node.parent
if not node_id:
break
# If a subsequent node doesn't exist any more, include that in the breadcrumbs.
# Forbidden nodes are handled as if they don't exist.
try:
node = Node.find(node_id, api=api)
except (ResourceNotFound, ForbiddenAccess):
log.warning('breadcrumbs: Unable to find node %r but it is marked as parent of %r',
node_id, child_id)
crumbs.append(make_missing_crumb(node_id))
break
return jsonify({'breadcrumbs': list(reversed(crumbs))})
# Import of custom modules (using the same nodes decorator)
from .custom import groups, storage, posts
from .custom import comments, groups, storage, posts
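A hedged sketch of the updated preview endpoint, which now takes JSON and renders shortcodes against the supplied attachments (host, CSRF token, and shortcode syntax are placeholders):

import requests
payload = {'content': 'Hello {attachment cover}',  # shortcode syntax is illustrative
           'attachments': {'cover': {'oid': '5672beecc0261b2005ed1a33'}}}
resp = requests.post('https://example.test/nodes/preview-markdown',
                     json=payload, headers={'X-CSRFToken': '...'})
print(resp.json()['content'])  # rendered HTML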

View File

@@ -30,7 +30,6 @@ class ProjectForm(FlaskForm):
('deleted', 'Deleted')])
picture_header = FileSelectField('Picture header', file_format='image')
picture_square = FileSelectField('Picture square', file_format='image')
picture_16_9 = FileSelectField('Picture 16:9', file_format='image')
def validate(self):
rv = FlaskForm.validate(self)

View File

@@ -24,7 +24,6 @@ from pillar import current_app
from pillar.api.utils import utcnow
from pillar.web import system_util
from pillar.web import utils
from pillar.web.nodes import finders
from pillar.web.utils.jstree import jstree_get_children
import pillar.extension
@@ -303,53 +302,9 @@ def view(project_url):
'header_video_node': header_video_node})
def project_navigation_links(project: typing.Type[Project], api) -> list:
"""Returns a list of nodes for the project, for top navigation display.
Args:
project: A Project object.
api: the api client credential.
Returns:
A list of links for the Project.
For example we display a link to the project blog if present, as well
as pages. The list is structured as follows:
[{'url': '/p/spring/about', 'label': 'About'},
{'url': '/p/spring/blog', 'label': 'Blog'}]
"""
links = []
# Fetch the blog
blog = Node.find_first({
'where': {'project': project._id, 'node_type': 'blog', '_deleted': {'$ne': True}},
'projection': {
'name': 1,
}
}, api=api)
if blog:
links.append({'url': finders.find_url_for_node(blog), 'label': blog.name, 'slug': 'blog'})
# Fetch pages
pages = Node.all({
'where': {'project': project._id, 'node_type': 'page', '_deleted': {'$ne': True}},
'projection': {
'name': 1,
'properties.url': 1
}
}, api=api)
# Process the results and append the links to the list
for p in pages._items:
links.append({'url': finders.find_url_for_node(p), 'label': p.name, 'slug': p.properties.url})
return links
def render_project(project, api, extra_context=None, template_name=None):
utils.attach_project_pictures(project, api)
project.picture_square = utils.get_file(project.picture_square, api=api)
project.picture_header = utils.get_file(project.picture_header, api=api)
def load_latest(list_of_ids, node_type=None):
"""Loads a list of IDs in reversed order."""
@@ -360,7 +315,6 @@ def render_project(project, api, extra_context=None, template_name=None):
# Construct query parameters outside the loop.
projection = {'name': 1, 'user': 1, 'node_type': 1, 'project': 1,
'properties.url': 1, 'properties.content_type': 1,
'properties.duration_seconds': 1,
'picture': 1}
params = {'projection': projection, 'embedded': {'user': 1}}
@@ -414,7 +368,6 @@ def render_project(project, api, extra_context=None, template_name=None):
embed_string = ''
template_name = "projects/view{0}.html".format(embed_string)
navigation_links = project_navigation_links(project, api)
extension_sidebar_links = current_app.extension_sidebar_links(project)
return render_template(template_name,
@@ -423,9 +376,8 @@ def render_project(project, api, extra_context=None, template_name=None):
node=None,
show_node=False,
show_project=True,
og_picture=project.picture_16_9,
og_picture=project.picture_header,
activity_stream=activity_stream,
navigation_links=navigation_links,
extension_sidebar_links=extension_sidebar_links,
**extra_context)
@@ -464,7 +416,6 @@ def view_node(project_url, node_id):
api = system_util.pillar_api()
# First we check if it's a simple string, in which case we are looking for
# a static page. Maybe we could use bson.objectid.ObjectId.is_valid(node_id)
project: typing.Optional[Project] = None
if not utils.is_valid_id(node_id):
# raise wz_exceptions.NotFound('No such node')
project, node = render_node_page(project_url, node_id, api)
@@ -482,33 +433,34 @@ def view_node(project_url, node_id):
project = Project.find_one({'where': {"url": project_url, '_id': node.project}},
api=api)
except ResourceNotFound:
# In theatre mode, we don't need access to the project at all.
if theatre_mode:
pass # In theatre mode, we don't need access to the project at all.
project = None
else:
raise wz_exceptions.NotFound('No such project')
navigation_links = []
extension_sidebar_links = ''
og_picture = node.picture = utils.get_file(node.picture, api=api)
if project:
utils.attach_project_pictures(project, api)
if not node.picture:
og_picture = project.picture_16_9
navigation_links = project_navigation_links(project, api)
extension_sidebar_links = current_app.extension_sidebar_links(project)
og_picture = utils.get_file(project.picture_header, api=api)
project.picture_square = utils.get_file(project.picture_square, api=api)
# Append _theatre to load the proper template
theatre = '_theatre' if theatre_mode else ''
if node.node_type == 'page':
pages = Node.all({
'where': {'project': project._id, 'node_type': 'page'},
'projection': {'name': 1}}, api=api)
return render_template('nodes/custom/page/view_embed.html',
api=api,
node=node,
project=project,
navigation_links=navigation_links,
extension_sidebar_links=extension_sidebar_links,
pages=pages._items,
og_picture=og_picture,)
extension_sidebar_links = current_app.extension_sidebar_links(project)
return render_template('projects/view{}.html'.format(theatre),
api=api,
project=project,
@@ -516,8 +468,7 @@ def view_node(project_url, node_id):
show_node=True,
show_project=False,
og_picture=og_picture,
navigation_links=navigation_links,
extension_sidebar_links=extension_sidebar_links,)
extension_sidebar_links=extension_sidebar_links)
def find_project_or_404(project_url, embedded=None, api=None):
@@ -540,7 +491,8 @@ def search(project_url):
"""Search into a project"""
api = system_util.pillar_api()
project = find_project_or_404(project_url, api=api)
utils.attach_project_pictures(project, api)
project.picture_square = utils.get_file(project.picture_square, api=api)
project.picture_header = utils.get_file(project.picture_header, api=api)
return render_template('nodes/search.html',
project=project,
@@ -581,8 +533,6 @@ def edit(project_url):
project.picture_square = form.picture_square.data
if form.picture_header.data:
project.picture_header = form.picture_header.data
if form.picture_16_9.data:
project.picture_16_9 = form.picture_16_9.data
# Update world permissions from is_private checkbox
if form.is_private.data:
@@ -598,8 +548,6 @@ def edit(project_url):
form.picture_square.data = project.picture_square._id
if project.picture_header:
form.picture_header.data = project.picture_header._id
if project.picture_16_9:
form.picture_16_9.data = project.picture_16_9._id
# List of fields from the form that should be hidden to regular users
if current_user.has_role('admin'):

File diff suppressed because one or more lines are too long

View File

@@ -872,6 +872,12 @@
"code": 61930,
"src": "fontawesome"
},
{
"uid": "31972e4e9d080eaa796290349ae6c1fd",
"css": "users",
"code": 59502,
"src": "fontawesome"
},
{
"uid": "c8585e1e5b0467f28b70bce765d5840c",
"css": "clipboard-copy",
@@ -984,30 +990,6 @@
"code": 59394,
"src": "entypo"
},
{
"uid": "347c38a8b96a509270fdcabc951e7571",
"css": "database",
"code": 61888,
"src": "fontawesome"
},
{
"uid": "3a6f0140c3a390bdb203f56d1bfdefcb",
"css": "speed",
"code": 59471,
"src": "entypo"
},
{
"uid": "4c1ef492f1d2c39a2250ae457cee2a6e",
"css": "social-instagram",
"code": 61805,
"src": "fontawesome"
},
{
"uid": "e36d581e4f2844db345bddc205d15dda",
"css": "users",
"code": 59507,
"src": "elusive"
},
{
"uid": "053a214a098a9453877363eeb45f004e",
"css": "log-in",

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -33,8 +33,7 @@ def get_user_info(user_id):
# TODO: put those fields into a config var or module-level global.
return {'email': user.email,
'full_name': user.full_name,
'username': user.username,
'badges_html': (user.badges and user.badges.html) or ''}
'username': user.username}
def setup_app(app):

View File

@@ -31,10 +31,8 @@ def check_oauth_provider(provider):
@blueprint.route('/authorize/<provider>')
def oauth_authorize(provider):
if current_user.is_authenticated:
next_after_login = session.pop('next_after_login', None) or url_for('main.homepage')
log.debug('Redirecting user to %s', next_after_login)
return redirect(next_after_login)
if not current_user.is_anonymous:
return redirect(url_for('main.homepage'))
try:
oauth = OAuthSignIn.get_provider(provider)
@@ -50,14 +48,8 @@ def oauth_authorize(provider):
@blueprint.route('/oauth/<provider>/authorized')
def oauth_callback(provider):
import datetime
from pillar.api.utils.authentication import store_token
from pillar.api.utils import utcnow
next_after_login = session.pop('next_after_login', None) or url_for('main.homepage')
if current_user.is_authenticated:
log.debug('Redirecting user to %s', next_after_login)
return redirect(next_after_login)
return redirect(url_for('main.homepage'))
oauth = OAuthSignIn.get_provider(provider)
try:
@@ -67,25 +59,12 @@ def oauth_callback(provider):
raise wz_exceptions.Forbidden()
if oauth_user.id is None:
log.debug('Authentication failed for user with {}'.format(provider))
return redirect(next_after_login)
return redirect(url_for('main.homepage'))
# Find or create user
user_info = {'id': oauth_user.id, 'email': oauth_user.email, 'full_name': ''}
db_user = find_user_in_db(user_info, provider=provider)
if '_deleted' in db_user and db_user['_deleted'] is True:
log.debug('User has been deleted and will not be logged in')
return redirect(next_after_login)
db_id, status = upsert_user(db_user)
# TODO(Sybren): If the user doesn't have any badges, but the access token
# does have 'badge' scope, we should fetch the badges in the background.
if oauth_user.access_token:
# TODO(Sybren): make nr of days configurable, or get from OAuthSignIn subclass.
token_expiry = utcnow() + datetime.timedelta(days=15)
token = store_token(db_id, oauth_user.access_token, token_expiry,
oauth_scopes=oauth_user.scopes)
else:
token = generate_and_store_token(db_id)
# Login user
@@ -95,8 +74,11 @@ def oauth_callback(provider):
# Check with Blender ID to update certain user roles.
update_subscription()
next_after_login = session.pop('next_after_login', None)
if next_after_login:
log.debug('Redirecting user to %s', next_after_login)
return redirect(next_after_login)
return redirect(url_for('main.homepage'))
@blueprint.route('/login')

View File

@@ -45,7 +45,6 @@ def attach_project_pictures(project, api):
project.picture_square = get_file(project.picture_square, api=api)
project.picture_header = get_file(project.picture_header, api=api)
project.picture_16_9 = get_file(project.picture_16_9, api=api)
def mass_attach_project_pictures(projects: typing.Iterable[pillarsdk.Project], *,

View File

@@ -62,7 +62,7 @@ def jstree_get_children(node_id, project_id=None):
'where': {
'$and': [
{'node_type': {'$regex': '^(?!attract_)'}},
{'node_type': {'$not': {'$in': ['comment', 'post', 'blog', 'page']}}},
{'node_type': {'$not': {'$in': ['comment', 'post']}}},
],
}
}
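The node_type filter above relies on a negative lookahead; checked against a few sample values:

import re
pattern = re.compile('^(?!attract_)')
[bool(pattern.match(s)) for s in ['asset', 'attract_task', 'page']]
# [True, False, True]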

View File

@@ -1,22 +1,21 @@
# Primary requirements
-r ../pillar-python-sdk/requirements.txt
attrs==18.2.0
attrs==16.2.0
algoliasearch==1.12.0
bcrypt==3.1.3
blinker==1.4
bleach==2.1.3
celery[redis]==4.2.1
celery[redis]==4.0.2
CommonMark==0.7.2
elasticsearch==6.1.1
elasticsearch-dsl==6.1.0
Eve==0.8
Flask==1.0.2
Eve==0.7.3
Flask==0.12
Flask-Babel==0.11.2
Flask-Caching==1.4.0
Flask-DebugToolbar==0.10.1
Flask-Cache==0.13.1
Flask-Script==2.0.6
Flask-Login==0.4.1
Flask-Login==0.3.2
Flask-WTF==0.14.2
gcloud==0.12.0
google-apitools==0.4.11
@@ -28,49 +27,37 @@ Pillow==4.1.1
python-dateutil==2.5.3
rauth==0.7.3
raven[flask]==6.3.0
requests==2.13.0
redis==2.10.5
shortcodes==2.5.0
WebOb==1.5.0
wheel==0.29.0
zencoder==0.6.5
# Secondary requirements
amqp==2.3.2
asn1crypto==0.24.0
Babel==2.6.0
billiard==3.5.0.4
Cerberus==1.2
cffi==1.12.2
click==6.7
cryptography==2.6.1
Events==0.3
future==0.16.0
googleapis-common-protos==1.5.3
html5lib==1.0.1
idna==2.5
ipaddress==1.0.22
amqp==2.1.4
billiard==3.5.0.2
Flask-PyMongo==0.4.1
-e git+https://github.com/armadillica/cerberus.git@sybren-0.9#egg=Cerberus
Events==0.2.2
future==0.15.2
html5lib==0.99999999
googleapis-common-protos==1.1.0
itsdangerous==0.24
Jinja2==2.10.1
kombu==4.2.1
oauth2client==4.1.2
oauthlib==2.1.0
olefile==0.45.1
protobuf==3.6.0
protorpc==0.12.0
pyasn1==0.4.4
pyasn1-modules==0.2.2
pycparser==2.19
pymongo==3.7.0
pyOpenSSL==16.2.0
pytz==2018.5
requests-oauthlib==1.0.0
Jinja2==2.9.6
kombu==4.0.2
oauth2client==2.0.2
oauthlib==2.0.1
olefile==0.44
protobuf==3.0.0b2.post2
protorpc==0.11.1
pyasn1-modules==0.0.8
pymongo==3.4.0
pytz==2017.2
requests-oauthlib==0.7.0
rsa==3.4.2
simplejson==3.16.0
six==1.12.0
simplejson==3.10.0
six==1.10.0
urllib3==1.22
vine==1.1.4
webencodings==0.5.1
Werkzeug==0.14.1
WTForms==2.2.1
vine==1.1.3
WTForms==2.1
Werkzeug==0.11.15

View File

@@ -35,7 +35,7 @@ setuptools.setup(
install_requires=[
'Flask>=0.12',
'Eve>=0.7.3',
'Flask-Caching>=1.4.0',
'Flask-Cache>=0.13.1',
'Flask-Script>=2.0.5',
'Flask-Login>=0.3.2',
'Flask-OAuthlib>=0.9.3',

View File

@@ -11,8 +11,10 @@ $(document).ready(function() {
var what = '';
// Templates binding
var hitTemplate = Hogan.compile($('#hit-template').text());
var statsTemplate = Hogan.compile($('#stats-template').text());
var facetTemplate = Hogan.compile($('#facet-template').text());
var sliderTemplate = Hogan.compile($('#slider-template').text());
var paginationTemplate = Hogan.compile($('#pagination-template').text());
// defined in tutti/4_search.js
@@ -45,7 +47,6 @@ $(document).ready(function() {
renderFacets(content);
renderPagination(content);
renderFirstHit($(hits).children('.search-hit:first'));
updateUrlParams();
});
/***************
@@ -65,7 +66,7 @@ $(document).ready(function() {
window.setTimeout(function() {
// Ignore getting that first result when there is none.
var hit_id = firstHit.attr('data-node-id');
var hit_id = firstHit.attr('data-hit-id');
if (hit_id === undefined) {
done();
return;
@@ -86,6 +87,12 @@ $(document).ready(function() {
// Initial search
initWithUrlParams();
function convertTimestamp(iso8601) {
var d = new Date(iso8601)
return d.toLocaleDateString();
}
function renderStats(content) {
var stats = {
nbHits: numberWithDelimiter(content.count),
@@ -96,17 +103,20 @@ $(document).ready(function() {
}
function renderHits(content) {
$hits.empty();
if (content.hits.length === 0) {
$hits.html('<p id="no-hits">We didn\'t find any items. Try searching something else.</p>');
var hitsHtml = '';
for (var i = 0; i < content.hits.length; ++i) {
var created = content.hits[i].created_at;
if (created) {
content.hits[i].created_at = convertTimestamp(created);
}
else {
listof$hits = content.hits.map(function(hit){
return pillar.templates.Component.create$listItem(hit)
.addClass('js-search-hit cursor-pointer search-hit');
})
$hits.append(listof$hits);
var updated = content.hits[i].updated_at;
if (updated) {
content.hits[i].updated_at = convertTimestamp(updated);
}
hitsHtml += hitTemplate.render(content.hits[i]);
}
if (content.hits.length === 0) hitsHtml = '<p id="no-hits">We didn\'t find any items. Try searching something else.</p>';
$hits.html(hitsHtml);
}
function renderFacets(content) {
@@ -123,7 +133,7 @@ $(document).ready(function() {
var refined = search.isRefined(label, item.key);
values.push({
facet: label,
label: item.key_as_string || item.key,
label: item.key,
value: item.key,
count: item.doc_count,
refined: refined,
@@ -143,7 +153,7 @@ $(document).ready(function() {
buckets.forEach(storeValue(values, label));
facets.push({
title: removeUnderscore(label),
title: label,
values: values.slice(0),
});
}
@@ -208,9 +218,6 @@ $(document).ready(function() {
$pagination.html(paginationTemplate.render(pagination));
}
function removeUnderscore(s) {
return s.replace(/_/g, ' ')
}
// Event bindings
// Click binding
@@ -293,46 +300,37 @@ $(document).ready(function() {
};
function initWithUrlParams() {
var pageURL = decodeURIComponent(window.location.search.substring(1)),
urlVariables = pageURL.split('&'),
query,
i;
for (i = 0; i < urlVariables.length; i++) {
var parameterPair = urlVariables[i].split('='),
key = parameterPair[0],
sValue = parameterPair[1];
if (!key) continue;
if (key === 'q') {
query = sValue;
continue;
var sPageURL = location.hash;
if (!sPageURL || sPageURL.length === 0) {
return true;
}
if (key === 'page') {
var page = Number.parseInt(sValue)
search.setCurrentPage(isNaN(page) ? 0 : page)
continue;
}
if (key === 'project') {
continue; // We take the project from the path
}
if (sValue !== undefined) {
var iValue = Number.parseInt(sValue),
value = isNaN(iValue) ? sValue : iValue;
search.toggleTerm(key, value);
continue;
}
console.log('Unhandled url parameter pair:', parameterPair)
var sURLVariables = sPageURL.split('&');
if (!sURLVariables || sURLVariables.length === 0) {
return true;
}
var query = decodeURIComponent(sURLVariables[0].split('=')[1]);
$inputField.val(query);
do_search(query || '');
search.setQuery(query, what);
for (var i = 2; i < sURLVariables.length; i++) {
var sParameterName = sURLVariables[i].split('=');
var facet = decodeURIComponent(sParameterName[0]);
var value = decodeURIComponent(sParameterName[1]);
}
// Page has to be set in the end to avoid being overwritten
var page = decodeURIComponent(sURLVariables[1].split('=')[1]) - 1;
search.setCurrentPage(page);
}
function updateUrlParams() {
var prevState = history.state,
prevTitle = document.title,
params = search.getParams(),
newUrl = window.location.pathname + '?';
delete params['project'] // We take the project from the path
newUrl += jQuery.param(params)
history.replaceState(prevState, prevTitle, newUrl);
function setURLParams(state) {
var urlParams = '?';
var currentQuery = state.query;
urlParams += 'q=' + encodeURIComponent(currentQuery);
var currentPage = state.page + 1;
urlParams += '&page=' + currentPage;
location.replace(urlParams);
}
// do empty search to fill aggregations
do_search('');
});

View File

@@ -1,2 +0,0 @@
Gulp will transpile everything in this folder. Every subfolder containing an init.js file exporting functions/classes
will be packed into a module in tutti.js under the namespace pillar.FOLDER_NAME.

View File

@@ -1,46 +0,0 @@
function thenGetComments(parentId) {
return $.getJSON(`/api/nodes/${parentId}/comments`);
}
function thenCreateComment(parentId, msg, attachments) {
let data = JSON.stringify({
msg: msg,
attachments: attachments
});
return $.ajax({
url: `/api/nodes/${parentId}/comments`,
type: 'POST',
data: data,
dataType: 'json',
contentType: 'application/json; charset=UTF-8'
});
}
function thenUpdateComment(parentId, commentId, msg, attachments) {
let data = JSON.stringify({
msg: msg,
attachments: attachments
});
return $.ajax({
url: `/api/nodes/${parentId}/comments/${commentId}`,
type: 'PATCH',
data: data,
dataType: 'json',
contentType: 'application/json; charset=UTF-8'
});
}
function thenVoteComment(parentId, commentId, vote) {
let data = JSON.stringify({
vote: vote
});
return $.ajax({
url: `/api/nodes/${parentId}/comments/${commentId}/vote`,
type: 'POST',
data: data,
dataType: 'json',
contentType: 'application/json; charset=UTF-8'
});
}
export { thenGetComments, thenCreateComment, thenUpdateComment, thenVoteComment }

View File

@@ -1,54 +0,0 @@
function thenUploadFile(projectId, file, progressCB=(total, loaded)=>{}) {
let formData = createFormData(file)
return $.ajax({
url: `/api/storage/stream/${projectId}`,
type: 'POST',
data: formData,
cache: false,
contentType: false,
processData: false,
xhr: () => {
let myxhr = $.ajaxSettings.xhr();
if (myxhr.upload) {
// For handling the progress of the upload
myxhr.upload.addEventListener('progress', function(e) {
if (e.lengthComputable) {
progressCB(e.total, e.loaded);
}
}, false);
}
return myxhr;
}
});
}
function createFormData(file) {
let formData = new FormData();
formData.append('file', file);
return formData;
}
function thenGetFileDocument(fileId) {
return $.get(`/api/files/${fileId}`);
}
function getFileVariation(fileDoc, size = 'm') {
var show_variation = null;
if (typeof fileDoc.variations != 'undefined') {
for (var variation of fileDoc.variations) {
if (variation.size != size) continue;
show_variation = variation;
break;
}
}
if (show_variation == null) {
throw 'Image not found: ' + fileDoc._id + ' size: ' + size;
}
return show_variation;
}
export { thenUploadFile, thenGetFileDocument, getFileVariation }

View File

@@ -1,7 +0,0 @@
/**
* Functions for communicating with the pillar server api
*/
export { thenMarkdownToHtml } from './markdown'
export { thenGetProject } from './projects'
export { thenGetNodes, thenGetNode, thenGetNodeActivities, thenUpdateNode, thenDeleteNode } from './nodes'
export { thenGetProjectUsers } from './users'

View File

@@ -1,17 +0,0 @@
function thenMarkdownToHtml(markdown, attachments={}) {
let data = JSON.stringify({
content: markdown,
attachments: attachments
});
return $.ajax({
url: "/nodes/preview-markdown",
type: 'POST',
headers: {"X-CSRFToken": csrf_token},
headers: {},
data: data,
dataType: 'json',
contentType: 'application/json; charset=UTF-8'
})
}
export { thenMarkdownToHtml }

View File

@@ -1,82 +0,0 @@
function thenGetNodes(where, embedded={}, sort='') {
let encodedWhere = encodeURIComponent(JSON.stringify(where));
let encodedEmbedded = encodeURIComponent(JSON.stringify(embedded));
let encodedSort = encodeURIComponent(sort);
return $.ajax({
url: `/api/nodes?where=${encodedWhere}&embedded=${encodedEmbedded}&sort=${encodedSort}`,
cache: false,
});
}
function thenGetNode(nodeId) {
return $.ajax({
url: `/api/nodes/${nodeId}`,
cache: false,
});
}
function thenGetNodeActivities(nodeId, sort='[("_created", -1)]', max_results=20, page=1) {
let encodedSort = encodeURIComponent(sort);
return $.ajax({
url: `/api/nodes/${nodeId}/activities?sort=${encodedSort}&max_results=${max_results}&page=${page}`,
cache: false,
});
}
function thenUpdateNode(node) {
let id = node['_id'];
let etag = node['_etag'];
let nodeToSave = removePrivateKeys(node);
let data = JSON.stringify(nodeToSave);
return $.ajax({
url: `/api/nodes/${id}`,
type: 'PUT',
data: data,
dataType: 'json',
contentType: 'application/json; charset=UTF-8',
headers: {'If-Match': etag},
}).then(updatedInfo => {
return thenGetNode(updatedInfo['_id'])
.then(node => {
pillar.events.Nodes.triggerUpdated(node);
return node;
})
});
}
function thenDeleteNode(node) {
let id = node['_id'];
let etag = node['_etag'];
return $.ajax({
url: `/api/nodes/${id}`,
type: 'DELETE',
headers: {'If-Match': etag},
}).then(() => {
pillar.events.Nodes.triggerDeleted(id);
});
}
function removePrivateKeys(doc) {
function doRemove(d) {
for (const key in d) {
if (key.startsWith('_')) {
delete d[key];
continue;
}
let val = d[key];
if(typeof val === 'object') {
doRemove(val);
}
}
}
let docCopy = JSON.parse(JSON.stringify(doc));
doRemove(docCopy);
delete docCopy['allowed_methods']
return docCopy;
}
export { thenGetNodes, thenGetNode, thenGetNodeActivities, thenUpdateNode, thenDeleteNode }
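Eve rejects PUT payloads that still carry its private underscore fields, which is why the helper above strips them first; the same idea sketched in Python (the name and recursion depth are illustrative, not Pillar's own utility):

def remove_private_keys(doc: dict) -> dict:
    # Drop '_id', '_etag', '_created', etc., recursing into nested dicts.
    return {k: (remove_private_keys(v) if isinstance(v, dict) else v)
            for k, v in doc.items()
            if not k.startswith('_')}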

View File

@@ -1,5 +0,0 @@
function thenGetProject(projectId) {
return $.get(`/api/projects/${projectId}`);
}
export { thenGetProject }

View File

@@ -1,7 +0,0 @@
function thenGetProjectUsers(projectId) {
return $.ajax({
url: `/api/p/users?project_id=${projectId}`,
});
}
export { thenGetProjectUsers }

View File

@@ -1,167 +0,0 @@
/**
* Helper class to trigger/listen to global events on new/updated/deleted nodes.
*
* @example
* function myCallback(event) {
* console.log('Updated node:', event.detail);
* }
* // Register a callback:
* Nodes.onUpdated('5c1cc4a5a013573d9787164b', myCallback);
* // When changing the node, notify the listeners:
* Nodes.triggerUpdated(myUpdatedNode);
*/
class EventName {
static parentCreated(parentId, node_type) {
return `pillar:node:${parentId}:created-${node_type}`;
}
static globalCreated(node_type) {
return `pillar:node:created-${node_type}`;
}
static updated(nodeId) {
return `pillar:node:${nodeId}:updated`;
}
static deleted(nodeId) {
return `pillar:node:${nodeId}:deleted`;
}
static loaded() {
return `pillar:node:loaded`;
}
}
function trigger(eventName, data) {
document.dispatchEvent(new CustomEvent(eventName, {detail: data}));
}
function on(eventName, cb) {
document.addEventListener(eventName, cb);
}
function off(eventName, cb) {
document.removeEventListener(eventName, cb);
}
class Nodes {
/**
* Trigger events that node has been created
* @param {Object} node
*/
static triggerCreated(node) {
if (node.parent) {
trigger(
EventName.parentCreated(node.parent, node.node_type),
node);
}
trigger(
EventName.globalCreated(node.node_type),
node);
}
/**
* Get notified when a new node with parent === parentId and node_type === node_type is created
* @param {String} parentId
* @param {String} node_type
* @param {Function(Event)} cb
*/
static onParentCreated(parentId, node_type, cb){
on(
EventName.parentCreated(parentId, node_type),
cb);
}
static offParentCreated(parentId, node_type, cb){
off(
EventName.parentCreated(parentId, node_type),
cb);
}
/**
* Get notified when a new node with node_type === node_type is created
* @param {String} node_type
* @param {Function(Event)} cb
*/
static onCreated(node_type, cb){
on(
EventName.globalCreated(node_type),
cb);
}
static offCreated(node_type, cb){
off(
EventName.globalCreated(node_type),
cb);
}
static triggerUpdated(node) {
trigger(
EventName.updated(node._id),
node);
}
/**
* Get notified when node with _id === nodeId is updated
* @param {String} nodeId
* @param {Function(Event)} cb
*/
static onUpdated(nodeId, cb) {
on(
EventName.updated(nodeId),
cb);
}
static offUpdated(nodeId, cb) {
off(
EventName.updated(nodeId),
cb);
}
/**
* Notify that node has been deleted.
* @param {String} nodeId
*/
static triggerDeleted(nodeId) {
trigger(
EventName.deleted(nodeId),
nodeId);
}
/**
* Listen to events of nodes being deleted where _id === nodeId
* @param {String} nodeId
* @param {Function(Event)} cb
*/
static onDeleted(nodeId, cb) {
on(
EventName.deleted(nodeId),
cb);
}
static offDeleted(nodeId, cb) {
off(
EventName.deleted(nodeId),
cb);
}
static triggerLoaded(nodeId) {
trigger(EventName.loaded(), {nodeId: nodeId});
}
/**
* Listen to events of nodes being loaded for display
* @param {Function(Event)} cb
*/
static onLoaded(cb) {
on(EventName.loaded(), cb);
}
static offLoaded(cb) {
off(EventName.loaded(), cb);
}
}
export { Nodes }

View File

@@ -1,4 +0,0 @@
/**
* Collecting Custom Pillar events here
*/
export {Nodes} from './Nodes'

View File

@@ -1,64 +0,0 @@
import {SearchParams} from './SearchParams';
export class MultiSearch {
constructor(kwargs) {
this.uiUrl = kwargs['uiUrl']; // Url for advanced search
this.apiUrl = kwargs['apiUrl']; // Url for api calls
this.searchParams = MultiSearch.createMultiSearchParams(kwargs['searchParams']);
this.q = '';
}
setSearchWord(q) {
this.q = q;
this.searchParams.forEach((qsParam) => {
qsParam.setSearchWord(q);
});
}
getSearchUrl() {
return this.uiUrl + '?q=' + this.q;
}
getAllParams() {
let retval = $.map(this.searchParams, (msParams) => {
return msParams.params;
});
return retval;
}
parseResult(rawResult) {
return $.map(rawResult, (subResult, index) => {
let name = this.searchParams[index].name;
let pStr = this.searchParams[index].getParamStr();
let result = $.map(subResult.hits.hits, (hit) => {
return hit._source;
});
return {
name: name,
url: this.uiUrl + '?' + pStr,
result: result,
hasResults: !!result.length
};
});
}
thenExecute() {
let data = JSON.stringify(this.getAllParams());
let rawAjax = $.ajax({
url: this.apiUrl,
type: 'POST',
data: data,
dataType: 'json',
contentType: 'application/json; charset=UTF-8'
});
let prettyPromise = rawAjax.then(this.parseResult.bind(this));
prettyPromise['abort'] = rawAjax.abort.bind(rawAjax); // Hack to be able to abort the promise down the road
return prettyPromise;
}
static createMultiSearchParams(argsList) {
return $.map(argsList, (args) => {
return new SearchParams(args);
});
}
}

View File

@@ -1,204 +0,0 @@
import { create$noHits, create$results, create$input } from './templates'
import {SearchFacade} from './SearchFacade';
/**
* QuickSearch : Interacts with the DOM
* 1-SearchFacade : Controls which multisearch is active
* *-MultiSearch : One multi search is typically Project or Cloud
* *-SearchParams : The search params for the individual searches
*/
export class QuickSearch {
/**
* Interacts with the DOM and delegates the input down to the SearchFacade
* @param {selector string} searchToggle The quick-search toggle
* @param {*} kwargs
*/
constructor(searchToggle, kwargs) {
this.$body = $('body');
this.$quickSearch = $('.quick-search');
this.$inputComponent = $(kwargs['inputTarget']);
this.$inputComponent.empty();
this.$inputComponent.append(create$input(kwargs['searches']));
this.$searchInput = this.$inputComponent.find('input');
this.$searchSelect = this.$inputComponent.find('select');
this.$resultTarget = $(kwargs['resultTarget']);
this.$searchSymbol = this.$inputComponent.find('.qs-busy-symbol');
this.searchFacade = new SearchFacade(kwargs['searches'] || {});
this.$searchToggle = $(searchToggle);
this.isBusy = false;
this.attach();
}
attach() {
if (this.$searchSelect.length) {
this.$searchSelect
.change(this.execute.bind(this))
.change(() => this.$searchInput.focus());
this.$searchInput.addClass('multi-scope');
}
this.$searchInput
.keyup(this.onInputKeyUp.bind(this));
this.$inputComponent
.on('pillar:workStart', () => {
this.$searchSymbol.addClass('spinner')
this.$searchSymbol.toggleClass('pi-spin pi-cancel')
})
.on('pillar:workStop', () => {
this.$searchSymbol.removeClass('spinner')
this.$searchSymbol.toggleClass('pi-spin pi-cancel')
});
this.searchFacade.setOnResultCB(this.renderResult.bind(this));
this.searchFacade.setOnFailureCB(this.onSearchFailed.bind(this));
this.$searchToggle
.one('click', this.execute.bind(this)); // Initial search executed once
this.registerShowGui();
this.registerHideGui();
}
registerShowGui() {
this.$searchToggle
.click((e) => {
this.showGUI();
e.stopPropagation();
});
}
registerHideGui() {
this.$searchSymbol
.click(() => {
this.hideGUI();
});
this.$body.click((e) => {
let $target = $(e.target);
let isClickInResult = $target.hasClass('qs-result') || !!$target.parents('.qs-result').length;
let isClickInInput = $target.hasClass('qs-input') || !!$target.parents('.qs-input').length;
if (!isClickInResult && !isClickInInput) {
this.hideGUI();
}
});
$(document).keyup((e) => {
if (e.key === 'Escape') {
this.hideGUI();
}
});
}
showGUI() {
this.$body.addClass('has-overlay');
this.$quickSearch.trigger('pillar:searchShow');
this.$quickSearch.addClass('show');
if (!this.$searchInput.is(':focus')) {
this.$searchInput.focus();
}
}
hideGUI() {
this.$body.removeClass('has-overlay');
this.$searchToggle.addClass('pi-search');
this.$searchInput.blur();
this.$quickSearch.removeClass('show');
this.$quickSearch.trigger('pillar:searchHidden');
}
onInputKeyUp(e) {
let newQ = this.$searchInput.val();
let currQ = this.searchFacade.getSearchWord();
this.searchFacade.setSearchWord(newQ);
let searchUrl = this.searchFacade.getSearchUrl();
if (e.key === 'Enter') {
window.location.href = searchUrl;
return;
}
if (newQ !== currQ) {
this.execute();
}
}
execute() {
this.busy(true);
let scope = this.getScope();
this.searchFacade.setCurrentScope(scope);
let q = this.$searchInput.val();
this.searchFacade.setSearchWord(q);
this.searchFacade.execute();
}
renderResult(results) {
this.$resultTarget.empty();
this.$resultTarget.append(this.create$result(results));
this.busy(false);
}
create$result(results) {
let withHits = results.reduce((aggr, subResult) => {
if (subResult.hasResults) {
aggr.push(subResult);
}
return aggr;
}, []);
if (!withHits.length) {
return create$noHits(this.searchFacade.getSearchUrl());
}
return create$results(results, this.searchFacade.getSearchUrl());
}
onSearchFailed(err) {
toastr.error(xhrErrorResponseMessage(err), 'Unable to perform search:');
this.busy(false);
this.$inputComponent.trigger('pillar:failed', err);
}
getScope() {
return !!this.$searchSelect.length ? this.$searchSelect.val() : 'cloud';
}
busy(val) {
if (val !== this.isBusy) {
var eventType = val ? 'pillar:workStart' : 'pillar:workStop';
this.$inputComponent.trigger(eventType);
}
this.isBusy = val;
}
}
$.fn.extend({
/**
* $('#qs-toggle').quickSearch({
* resultTarget: '#search-overlay',
* inputTarget: '#qs-input',
* searches: {
* project: {
* name: 'Project',
* uiUrl: '{{ url_for("projects.search", project_url=project.url)}}',
* apiUrl: '/api/newsearch/multisearch',
* searchParams: [
* {name: 'Assets', params: {project: '{{ project._id }}', node_type: 'asset'}},
* {name: 'Blog', params: {project: '{{ project._id }}', node_type: 'post'}},
* {name: 'Groups', params: {project: '{{ project._id }}', node_type: 'group'}},
* ]
* },
* cloud: {
* name: 'Cloud',
* uiUrl: '/search',
* apiUrl: '/api/newsearch/multisearch',
* searchParams: [
* {name: 'Assets', params: {node_type: 'asset'}},
* {name: 'Blog', params: {node_type: 'post'}},
* {name: 'Groups', params: {node_type: 'group'}},
* ]
* },
* },
* });
* @param {*} kwargs
*/
quickSearch: function (kwargs) {
$(this).each((i, qsElem) => {
new QuickSearch(qsElem, kwargs);
});
}
})

View File

@@ -1,68 +0,0 @@
import {MultiSearch} from './MultiSearch';
export class SearchFacade {
/**
* One SearchFacade holds n-number of MultiSearch objects, and delegates search requests to the active multisearch
* @param {*} kwargs
*/
constructor(kwargs) {
this.searches = SearchFacade.createMultiSearches(kwargs);
this.currentScope = 'cloud'; // which multisearch to use
this.currRequest = null;
this.resultCB = null;
this.failureCB = null;
this.q = '';
}
setSearchWord(q) {
this.q = q;
$.each(this.searches, (k, mSearch) => {
mSearch.setSearchWord(q);
});
}
getSearchWord() {
return this.q;
}
getSearchUrl() {
return this.searches[this.currentScope].getSearchUrl();
}
setCurrentScope(scope) {
this.currentScope = scope;
}
execute() {
if (this.currRequest) {
this.currRequest.abort();
}
this.currRequest = this.searches[this.currentScope].thenExecute();
this.currRequest
.then((results) => {
this.resultCB(results);
})
.fail((err, reason) => {
if (reason == 'abort') {
return;
}
this.failureCB(err);
});
}
setOnResultCB(cb) {
this.resultCB = cb;
}
setOnFailureCB(cb) {
this.failureCB = cb;
}
static createMultiSearches(kwargs) {
var searches = {};
$.each(kwargs, (key, value) => {
searches[key] = new MultiSearch(value);
});
return searches;
}
}

View File

@@ -1,14 +0,0 @@
export class SearchParams {
constructor(kwargs) {
this.name = kwargs['name'] || '';
this.params = kwargs['params'] || {};
}
setSearchWord(q) {
this.params['q'] = q || '';
}
getParamStr() {
return jQuery.param(this.params);
}
}

View File

@@ -1 +0,0 @@
export { QuickSearch } from './QuickSearch';

View File

@@ -1,93 +0,0 @@
/**
* Creates the jQuery object that is rendered when nothing is found
* @param {String} advancedUrl Url to the advanced search with the current query
* @returns {$element} The jQuery element that is rendered when there are no hits
*/
function create$noHits(advancedUrl) {
return $('<div>')
.addClass('qs-msg text-center p-3')
.append(
$('<div>')
.addClass('h1 pi-displeased'),
$('<div>')
.addClass('h2')
.append(
$('<a>')
.attr('href', advancedUrl)
.text('Advanced search')
)
);
}
/**
* Creates the jQuery object that is rendered as the search input
* @param {Object} searches The searches dict passed in on construction of the QuickSearch
* @returns {$element} The jQuery object that renders the search input components.
*/
function create$input(searches) {
let input = $('<input>')
.addClass('qs-input')
.attr('type', 'search')
.attr('autocomplete', 'off')
.attr('spellcheck', 'false')
.attr('autocorrect', 'off') // Safari expects on/off, not true/false
.attr('placeholder', 'Search...');
let workingSymbol = $('<i>')
.addClass('pi-cancel qs-busy-symbol');
let inputComponent = [input, workingSymbol];
if (Object.keys(searches).length > 1) {
let i = 0;
let select = $('<select>')
.append(
$.map(searches, (it, value) => {
let option = $('<option>')
.attr('value', value)
.text(it['name']);
if (i === 0) {
option.attr('selected', 'selected');
}
i += 1;
return option;
})
);
inputComponent.push(select);
}
return inputComponent;
}
/**
* Creates the search result
* @param {Array} results
* @param {String} advancedUrl
* @returns {$element} The jQuery object that is rendered as the result
*/
function create$results(results, advancedUrl) {
let $results = results.reduce((agg, res) => {
if (res['result'].length) {
agg.push(
$('<a>')
.addClass('h4 mt-4 d-flex')
.attr('href', res['url'])
.text(res['name'])
);
agg.push(
$('<div>')
.addClass('card-deck card-deck-responsive card-padless js-asset-list p-3')
.append(
...pillar.templates.Nodes.createListOf$nodeItems(res['result'], 10, 0)
)
);
}
return agg;
}, []);
$results.push(
$('<a>')
.attr('href', advancedUrl)
.text('Advanced search...')
);
return $('<div>')
.addClass('m-auto qs-result')
.append(...$results)
}
export { create$noHits, create$results, create$input }
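Note that create$input returns an array of jQuery objects rather than a single element, so callers typically spread it into a container; the scope <select> is only appended when more than one search is configured. A minimal sketch, where '#search-container' is a hypothetical element:

let $parts = create$input({
    cloud: {name: 'Cloud'},
    project: {name: 'Project'},
});
$('#search-container').append(...$parts); // input, busy symbol, and scope select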

View File

@@ -1,2 +0,0 @@
This module is used to render nodes/users dynamically. It was written before we introduced Vue.js into the project.
Current best practice is to use Vue for this type of work.
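For new code, the equivalent would be a small Vue component instead of jQuery DOM building. A hypothetical sketch (the component and field names are illustrative, not part of this module):

Vue.component('node-list', {
    props: {nodes: Array},
    template: `
        <div class="card-deck">
            <a v-for="node in nodes" :key="node._id"
               :href="'/nodes/' + node._id + '/redir'"
               class="card asset">{{ node.name }}</a>
        </div>
    `,
});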

View File

@@ -1,124 +0,0 @@
import { Assets } from '../nodes/Assets'
jest.useFakeTimers();
describe('Assets', () => {
describe('create$listItem', () => {
let nodeDoc;
let spyGet;
beforeEach(()=>{
// Mock Date.now to get a stable pretty-printed creation date
Date.now = jest.fn(() => new Date(Date.UTC(2018,
10, // November! JS months are zero-based
28, 11, 46, 30)).valueOf()); // a Tuesday
nodeDoc = {
_id: 'my-asset-id',
name: 'My Asset',
node_type: 'asset',
_created: "Wed, 07 Nov 2018 16:35:09 GMT",
project: {
name: 'My Project',
url: 'url-to-project'
},
properties: {
content_type: 'image'
}
};
spyGet = spyOn($, 'get').and.callFake(function(url) {
let ajaxMock = $.Deferred();
let response = {
variations: [{
size: 'l',
link: 'wrong-img-link',
width: 150,
height: 170,
},{
size: 'm',
link: 'img-link',
width: 50,
height: 70,
},{
size: 's',
link: 'wrong-img-link',
width: 5,
height: 7,
}]
}
ajaxMock.resolve(response);
return ajaxMock.promise();
});
});
describe('image content', () => {
test('node with picture', done => {
nodeDoc.picture = 'picture_id';
let $card = Assets.create$listItem(nodeDoc);
jest.runAllTimers();
expect($card.length).toEqual(1);
expect($card.prop('tagName')).toEqual('A'); // <a>
expect($card.hasClass('asset')).toBeTruthy();
expect($card.hasClass('card')).toBeTruthy();
expect($card.attr('href')).toEqual('/nodes/my-asset-id/redir');
expect($card.attr('title')).toEqual('My Asset');
let $body = $card.find('.card-body');
expect($body.length).toEqual(1);
let $title = $body.find('.card-title');
expect($title.length).toEqual(1);
expect(spyGet).toHaveBeenCalledTimes(1);
expect(spyGet).toHaveBeenLastCalledWith('/api/files/picture_id');
let $image = $card.find('img');
expect($image.length).toEqual(1);
let $imageSubstitute = $card.find('.pi-asset');
expect($imageSubstitute.length).toEqual(0);
let $progress = $card.find('.progress');
expect($progress.length).toEqual(0);
let $watched = $card.find('.card-label');
expect($watched.length).toEqual(0);
expect($card.find(':contains(3 weeks ago)').length).toBeTruthy();
done();
});
test('node without picture', done => {
let $card = Assets.create$listItem(nodeDoc);
expect($card.length).toEqual(1);
expect($card.prop('tagName')).toEqual('A'); // <a>
expect($card.hasClass('asset')).toBeTruthy();
expect($card.hasClass('card')).toBeTruthy();
expect($card.attr('href')).toEqual('/nodes/my-asset-id/redir');
expect($card.attr('title')).toEqual('My Asset');
let $body = $card.find('.card-body');
expect($body.length).toEqual(1);
let $title = $body.find('.card-title');
expect($title.length).toEqual(1);
expect(spyGet).toHaveBeenCalledTimes(0);
let $image = $card.find('img');
expect($image.length).toEqual(0);
let $imageSubstitute = $card.find('.pi-asset');
expect($imageSubstitute.length).toEqual(1);
let $progress = $card.find('.progress');
expect($progress.length).toEqual(0);
let $watched = $card.find('.card-label');
expect($watched.length).toEqual(0);
expect($card.find(':contains(3 weeks ago)').length).toBeTruthy();
done();
});
});
})
});

Some files were not shown because too many files have changed in this diff.