Added Celery task for refreshing file links
This includes a CLI command to kick off a single run for the Celery task. This does *NOT* include a check to see whether the task is already running!
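The commit message above points out that nothing stops two of these runs from overlapping. As a point of reference only, here is a minimal sketch, not part of this commit, of how such a guard could be added around the refresh call. It assumes a Redis server reachable from the Celery workers and uses the redis-py client; the connection URL, lock key, and one-hour lock timeout are illustrative assumptions.

    # Hypothetical guard, NOT part of this commit: a Redis SET NX EX key acts
    # as a simple distributed lock so a second run bails out early while a
    # previous one is still active.
    import redis

    from pillar.api import file_storage

    _redis = redis.StrictRedis.from_url('redis://localhost:6379/0')  # assumed URL
    _LOCK_KEY = 'pillar:file-link-refresh-lock'  # illustrative key name

    def regenerate_all_expired_links_guarded(backend_name: str, chunk_size: int):
        if not _redis.set(_LOCK_KEY, '1', nx=True, ex=3600):
            return  # another run still holds the lock
        try:
            file_storage.refresh_links_for_backend(backend_name, chunk_size,
                                                   expiry_seconds=7200)
        finally:
            _redis.delete(_LOCK_KEY)
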
@@ -449,6 +449,7 @@ class PillarServer(Eve):
         celery_task_modules = [
             'pillar.celery.tasks',
             'pillar.celery.algolia_tasks',
+            'pillar.celery.file_link_tasks',
         ]
 
         # Allow Pillar extensions from defining their own Celery tasks.
pillar/celery/file_link_tasks.py (new file, 19 lines)
@@ -0,0 +1,19 @@
+from pillar import current_app
+
+
+@current_app.celery.task(ignore_result=True)
+def regenerate_all_expired_links(backend_name: str, chunk_size: int):
+    """Regenerate all expired links for all non-deleted file documents.
+
+    Probably only works on Google Cloud Storage ('gcs') backends at
+    the moment, since those are the only links that actually expire.
+
+    :param backend_name: name of the backend to refresh for.
+    :param chunk_size: the maximum number of files to refresh in this run.
+    """
+    from pillar.api import file_storage
+
+    # Refresh all files that already have expired or will expire in the next
+    # two hours. Since this task is intended to run every hour, this should
+    # result in all regular file requests having a valid link.
+    file_storage.refresh_links_for_backend(backend_name, chunk_size, expiry_seconds=7200)
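
The docstring above notes that the task is intended to run every hour. For context, a minimal sketch, not part of this commit, of how such an hourly run could be configured with Celery beat; the schedule entry name, the 'gcs' backend name, and the chunk size of 100 are assumptions, and older Celery versions spell the setting CELERYBEAT_SCHEDULE instead.

    # Hypothetical Celery beat entry (NOT part of this commit) queueing the
    # task at the top of every hour with assumed arguments.
    from celery.schedules import crontab

    CELERY_BEAT_SCHEDULE = {
        'regenerate-expired-file-links': {
            'task': 'pillar.celery.file_link_tasks.regenerate_all_expired_links',
            'schedule': crontab(minute=0),  # every hour, on the hour
            'args': ('gcs', 100),           # backend name, chunk size
        },
    }
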
@@ -283,6 +283,20 @@ def refresh_backend_links(backend_name, chunk_size=50, quiet=False, window=12):
     file_storage.refresh_links_for_backend(backend_name, chunk_size, window * 3600)
 
 
+@manager_maintenance.command
+@manager_maintenance.option('-c', '--chunk', dest='chunk_size', default=50,
+                            help='Number of links to update, use 0 to update all.')
+def refresh_backend_links_celery(backend_name, chunk_size=50):
+    """Starts a Celery task that refreshes all file links that are using a certain storage backend.
+    """
+    from pillar.celery import file_link_tasks
+
+    chunk_size = int(chunk_size)  # CLI parameters are passed as strings
+    file_link_tasks.regenerate_all_expired_links.delay(backend_name, chunk_size)
+
+    log.info('File link regeneration task has been queued for execution.')
+
+
 @manager_maintenance.command
 def expire_all_project_links(project_uuid):
     """Expires all file links for a certain project without refreshing.
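
For reference, the new refresh_backend_links_celery command boils down to the queueing call below; this sketch, not part of this commit, shows the same call issued from a Python shell inside the application context. The 'gcs' backend name and chunk size of 100 are illustrative. Because the task is registered with ignore_result=True, the returned AsyncResult never carries a stored result, only the task id.

    # Hypothetical manual equivalent of the CLI command (NOT part of this commit).
    from pillar.celery import file_link_tasks

    result = file_link_tasks.regenerate_all_expired_links.delay('gcs', 100)
    print('Queued file link refresh, task id:', result.id)
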
@@ -164,7 +164,10 @@ class AbstractPillarTest(TestMinimal):
         for modname in remove:
             del sys.modules[modname]
 
-    def ensure_file_exists(self, file_overrides=None):
+    def ensure_file_exists(self, file_overrides=None, *, example_file=None) -> (ObjectId, dict):
+        if example_file is None:
+            example_file = ctd.EXAMPLE_FILE
+
         if file_overrides and file_overrides.get('project'):
             self.ensure_project_exists({'_id': file_overrides['project']})
         else:
@@ -174,7 +177,7 @@ class AbstractPillarTest(TestMinimal):
         files_collection = self.app.data.driver.db['files']
         assert isinstance(files_collection, pymongo.collection.Collection)
 
-        file = copy.deepcopy(ctd.EXAMPLE_FILE)
+        file = copy.deepcopy(example_file)
         if file_overrides is not None:
             file.update(file_overrides)
         if '_id' in file and file['_id'] is None:
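
To illustrate the new example_file keyword on ensure_file_exists, a hypothetical test sketch follows; it is not part of this commit. The common_test_data import is inferred from the ctd alias used above, and the test name plus the 'backend' field override are assumptions.

    # Hypothetical usage of ensure_file_exists(example_file=...), NOT part of
    # this commit; creates a file document based on a tweaked EXAMPLE_FILE.
    import copy

    from pillar.tests import AbstractPillarTest, common_test_data as ctd

    class FileLinkRefreshTest(AbstractPillarTest):
        def test_ensure_gcs_file(self):
            gcs_file = copy.deepcopy(ctd.EXAMPLE_FILE)
            gcs_file['backend'] = 'gcs'  # assumed field name for the storage backend
            file_id, file_doc = self.ensure_file_exists(example_file=gcs_file)
            self.assertEqual('gcs', file_doc['backend'])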