Don't encode video and don't send uploaded files to GCS when TESTING.

Unit tests shouldn't connect to external systems. We could mock GCS and
Zencoder, but that's a lot more work and takes time to maintain.
Sybren A. Stüvel 2016-07-08 10:56:15 +02:00
parent bfba44e5b8
commit 1c03ea8dec
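
All three hunks below guard external-service calls behind Flask's standard TESTING config flag. For orientation, here is an illustrative snippet (not part of the commit, and the app setup shown is a placeholder rather than Pillar's real test harness) showing how that flag reaches current_app.config inside request handlers:

    # Illustrative only: Flask's TESTING flag is just a config entry, so a test
    # suite sets it on the app before exercising the upload endpoints.
    from flask import Flask, current_app

    app = Flask(__name__)
    app.config['TESTING'] = True  # typically done in the test setUp / app factory

    with app.app_context():
        # This is exactly the check added throughout this commit.
        if current_app.config['TESTING']:
            print('External services (GCS, Zencoder) would be skipped')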


@@ -120,13 +120,16 @@ def _process_image(gcs, file_id, local_file, src_file):
     # TODO: parallelize this at some point.
     for variation in src_file['variations']:
         fname = variation['file_path']
-        log.debug(' - Sending thumbnail %s to GCS', fname)
-        blob = gcs.bucket.blob('_/' + fname, chunk_size=256 * 1024 * 2)
-        blob.upload_from_filename(variation['local_path'],
-                                  content_type=variation['content_type'])
+        if current_app.config['TESTING']:
+            log.warning(' - NOT sending thumbnail %s to GCS', fname)
+        else:
+            log.debug(' - Sending thumbnail %s to GCS', fname)
+            blob = gcs.bucket.blob('_/' + fname, chunk_size=256 * 1024 * 2)
+            blob.upload_from_filename(variation['local_path'],
+                                      content_type=variation['content_type'])
 
         if variation.get('size') == 't':
             blob.make_public()
 
         try:
             os.unlink(variation['local_path'])
@@ -166,10 +169,16 @@ def _process_video(gcs, file_id, local_file, src_file):
         # that's why we build a list.
         src_file['variations'].append(file_variation)
 
-    j = Encoder.job_create(src_file)
-    if j is None:
-        log.warning('_process_video: unable to create encoder job for file %s.', file_id)
-        return
+    if current_app.config['TESTING']:
+        log.warning('_process_video: NOT sending out encoding job due to TESTING=%r',
+                    current_app.config['TESTING'])
+        j = type('EncoderJob', (), {'process_id': 'fake-process-id',
+                                    'backend': 'fake'})
+    else:
+        j = Encoder.job_create(src_file)
+        if j is None:
+            log.warning('_process_video: unable to create encoder job for file %s.', file_id)
+            return
 
     log.info('Created asynchronous Zencoder job %s for file %s', j['process_id'], file_id)
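
The TESTING branch above stands in a fake encoder job via the three-argument form of type(), which avoids defining a dedicated dummy class just for the test path. As a standalone illustration (separate from the repository's code), that call builds an ad-hoc class whose dict entries become class attributes:

    # Standalone illustration of the three-argument type() call used above.
    EncoderJob = type('EncoderJob', (), {'process_id': 'fake-process-id',
                                         'backend': 'fake'})

    # The dict entries end up as class attributes, read with dot access:
    assert EncoderJob.process_id == 'fake-process-id'
    assert EncoderJob.backend == 'fake'

    # Roughly equivalent to spelling the stand-in out by hand:
    class ExplicitEncoderJob:
        process_id = 'fake-process-id'
        backend = 'fake'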
@@ -561,24 +570,30 @@ def stream_to_gcs(project_id):
     else:
         file_size = os.fstat(stream_for_gcs.fileno()).st_size
 
-    # Upload the file to GCS.
-    from gcloud.streaming import transfer
-    # Files larger than this many bytes will be streamed directly from disk, smaller
-    # ones will be read into memory and then uploaded.
-    transfer.RESUMABLE_UPLOAD_THRESHOLD = 102400
-    try:
-        gcs = GoogleCloudStorageBucket(project_id)
-        blob = gcs.bucket.blob('_/' + internal_fname, chunk_size=256 * 1024 * 2)
-        blob.upload_from_file(stream_for_gcs, size=file_size,
-                              content_type=uploaded_file.mimetype)
-    except Exception:
-        log.exception('Error uploading file to Google Cloud Storage (GCS),'
-                      ' aborting handling of uploaded file (id=%s).', file_id)
-        update_file_doc(file_id, status='failed')
-        raise wz_exceptions.InternalServerError('Unable to stream file to Google Cloud Storage')
+    if current_app.config['TESTING']:
+        log.warning('NOT streaming to GCS because TESTING=%r', current_app.config['TESTING'])
+        # Fake a Blob object.
+        gcs = None
+        blob = type('Blob', (), {'size': file_size})
+    else:
+        # Upload the file to GCS.
+        from gcloud.streaming import transfer
+        # Files larger than this many bytes will be streamed directly from disk, smaller
+        # ones will be read into memory and then uploaded.
+        transfer.RESUMABLE_UPLOAD_THRESHOLD = 102400
+        try:
+            gcs = GoogleCloudStorageBucket(project_id)
+            blob = gcs.bucket.blob('_/' + internal_fname, chunk_size=256 * 1024 * 2)
+            blob.upload_from_file(stream_for_gcs, size=file_size,
+                                  content_type=uploaded_file.mimetype)
+        except Exception:
+            log.exception('Error uploading file to Google Cloud Storage (GCS),'
+                          ' aborting handling of uploaded file (id=%s).', file_id)
+            update_file_doc(file_id, status='failed')
+            raise wz_exceptions.InternalServerError('Unable to stream file to Google Cloud Storage')
 
     # Reload the blob to get the file size according to Google.
     blob.reload()
 
     update_file_doc(file_id,
                     status='queued_for_processing',
                     file_path=internal_fname,
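
The same TESTING check now appears in three places (_process_image, _process_video, and stream_to_gcs). Purely as a hypothetical follow-up sketch, not anything in this commit or the Pillar codebase, the guard could be centralized in a small helper:

    # Hypothetical helper, not part of this commit.
    from flask import current_app

    def skip_external_services(logger, what):
        """Return True and log a warning when Flask's TESTING flag is set."""
        if current_app.config['TESTING']:
            logger.warning('NOT %s because TESTING=%r',
                           what, current_app.config['TESTING'])
            return True
        return False

    # Possible call-site usage, mirroring the stream_to_gcs change above:
    #
    #     if skip_external_services(log, 'streaming to GCS'):
    #         gcs = None
    #         blob = type('Blob', (), {'size': file_size})
    #     else:
    #         ...upload to GCS as before...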