diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3a7884f7..ef5bf445 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,11 +4,26 @@
 This file contains the history of changes to Flamenco. Only changes that might be interesting for users are listed here, such as new features and fixes for bugs in actually-released versions.
 
-## 3.5 - in development
+## 3.6 - in development
 
-- Add MQTT support. Flamenco Manager can now send internal events to an MQTT broker.
+- Add `label` to job settings, giving full control over how they are presented in Blender's job submission GUI. If a job setting does not define a label, its `key` is used to generate one (as in Flamenco 3.5 and older).
+- Add a `shellSplit(someString)` function to the job compiler scripts. It splits a string into an array of strings using shell/CLI semantics.
+
+## 3.5 - released 2024-04-16
+
+- Add MQTT support ([docs](https://flamenco.blender.org/usage/manager-configuration/mqtt/)). Flamenco Manager can now send internal events to an MQTT broker.
 - Simplify the preview video filename when a complex set of frames is rendered ([#104285](https://projects.blender.org/studio/flamenco/issues/104285)). Instead of `video-1, 4, 10.mp4` it is now simply `video-1-10.mp4`.
 - Make the `blendfile` parameter of a `blender-render` command optional. This makes it possible to pass, for example, a Python file that loads/constructs the blend file, instead of loading one straight from disk.
+- Show the farm status in the web frontend. This indicates whether the farm is actively working on a job, idle, asleep (all workers are sleeping and no work is queued), waiting (all workers are sleeping and work is queued), or inoperative (no workers, or all workers are offline). The status is also broadcast as an event via the event bus, and is thus available via SocketIO and MQTT.
+- Fix an issue where the columns in the web interface wouldn't resize correctly when the shown information changed.
+- Add-on: replace the separate 'refresh' buttons (for Manager info & storage location, job types, and worker tags) with a single button that refreshes everything in one go. The information obtained from Flamenco Manager is now stored in a JSON file on disk, making it independent of Blender auto-saving the user preferences.
+- Ensure the web frontend connects to the backend correctly when served over HTTPS ([#104296](https://projects.blender.org/studio/flamenco/pulls/104296)).
+- For Workers running on Linux, it is now possible to configure the "OOM score adjustment" for sub-processes. This lets the out-of-memory killer target Blender rather than Flamenco Worker itself.
+- Security updates of some dependencies:
+  - [Incorrect forwarding of sensitive headers and cookies on HTTP redirect in net/http](https://pkg.go.dev/vuln/GO-2024-2600)
+  - [Memory exhaustion in multipart form parsing in net/textproto and net/http](https://pkg.go.dev/vuln/GO-2024-2599)
+  - [Verify panics on certificates with an unknown public key algorithm in crypto/x509](https://pkg.go.dev/vuln/GO-2024-2598)
+  - [HTTP/2 CONTINUATION flood in net/http](https://pkg.go.dev/vuln/GO-2024-2687)
 
 ## 3.4 - released 2024-01-12
 
diff --git a/Makefile b/Makefile
index 1dc4efdf..da8f9ce0 100644
--- a/Makefile
+++ b/Makefile
@@ -4,7 +4,7 @@ PKG := projects.blender.org/studio/flamenco
 
 # To update the version number in all the relevant places, update the VERSION
 # variable below and run `make update-version`.
-VERSION := 3.5-alpha1
+VERSION := 3.6-alpha0
 
 # "alpha", "beta", or "release".
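The `shellSplit()` changelog entry above only names the function. As an illustration of the shell/CLI splitting semantics it describes, Python's standard `shlex.split()` behaves comparably; this is an analogy only, not Flamenco's actual JavaScript implementation:

```python
# Analogy for the shell/CLI splitting semantics described in the changelog
# entry for shellSplit(); Python's shlex implements the same quoting rules.
import shlex

# Quoted arguments stay together; unquoted whitespace separates the rest.
print(shlex.split('--factor 2 --output "render out/frame_#####.png"'))
# ['--factor', '2', '--output', 'render out/frame_#####.png']
```
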
RELEASE_CYCLE := alpha @@ -240,7 +240,7 @@ swagger-ui: test: # Ensure the web-static directory exists, so that `web/web_app.go` can embed something. mkdir -p ${WEB_STATIC} - go test -short ./... + go test -short -failfast ./... clean: @go clean -i -x diff --git a/addon/flamenco/__init__.py b/addon/flamenco/__init__.py index 3898b036..3666df3e 100644 --- a/addon/flamenco/__init__.py +++ b/addon/flamenco/__init__.py @@ -5,14 +5,14 @@ bl_info = { "name": "Flamenco 3", "author": "Sybren A. Stüvel", - "version": (3, 5), + "version": (3, 6), "blender": (3, 1, 0), "description": "Flamenco client for Blender.", "location": "Output Properties > Flamenco", "doc_url": "https://flamenco.blender.org/", "category": "System", "support": "COMMUNITY", - "warning": "This is version 3.5-alpha1 of the add-on, which is not a stable release", + "warning": "This is version 3.6-alpha0 of the add-on, which is not a stable release", } from pathlib import Path @@ -27,6 +27,7 @@ if __is_first_load: preferences, projects, worker_tags, + manager_info, ) else: import importlib @@ -38,6 +39,7 @@ else: preferences = importlib.reload(preferences) projects = importlib.reload(projects) worker_tags = importlib.reload(worker_tags) + manager_info = importlib.reload(manager_info) import bpy @@ -160,6 +162,9 @@ def register() -> None: gui.register() job_types.register() + # Once everything is registered, load the cached manager info from JSON. + manager_info.load_into_cache() + def unregister() -> None: discard_global_flamenco_data(None) diff --git a/addon/flamenco/bat/shaman.py b/addon/flamenco/bat/shaman.py index dba0aa60..4a45a499 100644 --- a/addon/flamenco/bat/shaman.py +++ b/addon/flamenco/bat/shaman.py @@ -286,16 +286,16 @@ class Transferrer(submodules.transfer.FileTransferer): # type: ignore return None self.log.debug(" %s: %s", file_spec.status, file_spec.path) - match file_spec.status.value: - case "unknown": - to_upload.appendleft(file_spec) - case "uploading": - to_upload.append(file_spec) - case _: - msg = "Unknown status in response from Shaman: %r" % file_spec - self.log.error(msg) - self.error_set(msg) - return None + status = file_spec.status.value + if status == "unknown": + to_upload.appendleft(file_spec) + elif status == "uploading": + to_upload.append(file_spec) + else: + msg = "Unknown status in response from Shaman: %r" % file_spec + self.log.error(msg) + self.error_set(msg) + return None return to_upload def _upload_files( @@ -375,25 +375,26 @@ class Transferrer(submodules.transfer.FileTransferer): # type: ignore x_shaman_original_filename=file_spec.path, ) except ApiException as ex: - match ex.status: - case 425: # Too Early, i.e. defer uploading this file. - self.log.info( - " %s: someone else is uploading this file, deferring", - file_spec.path, - ) - defer(file_spec) - continue - case 417: # Expectation Failed; mismatch of checksum or file size. - msg = "Error from Shaman uploading %s, code %d: %s" % ( - file_spec.path, - ex.status, - ex.body, - ) - case _: # Unknown error - msg = "API exception\nHeaders: %s\nBody: %s\n" % ( - ex.headers, - ex.body, - ) + if ex.status == 425: + # Too Early, i.e. defer uploading this file. + self.log.info( + " %s: someone else is uploading this file, deferring", + file_spec.path, + ) + defer(file_spec) + continue + elif ex.status == 417: + # Expectation Failed; mismatch of checksum or file size. 
+ msg = "Error from Shaman uploading %s, code %d: %s" % ( + file_spec.path, + ex.status, + ex.body, + ) + else: # Unknown error + msg = "API exception\nHeaders: %s\nBody: %s\n" % ( + ex.headers, + ex.body, + ) self.log.error(msg) self.error_set(msg) @@ -453,19 +454,15 @@ class Transferrer(submodules.transfer.FileTransferer): # type: ignore checkoutRequest ) except ApiException as ex: - match ex.status: - case 424: # Files were missing - msg = "We did not upload some files, checkout aborted" - case 409: # Checkout already exists - msg = ( - "There is already an existing checkout at %s" - % self.checkout_path - ) - case _: # Unknown error - msg = "API exception\nHeaders: %s\nBody: %s\n" % ( - ex.headers, - ex.body, - ) + if ex.status == 424: # Files were missing + msg = "We did not upload some files, checkout aborted" + elif ex.status == 409: # Checkout already exists + msg = "There is already an existing checkout at %s" % self.checkout_path + else: # Unknown error + msg = "API exception\nHeaders: %s\nBody: %s\n" % ( + ex.headers, + ex.body, + ) self.log.error(msg) self.error_set(msg) return None diff --git a/addon/flamenco/comms.py b/addon/flamenco/comms.py index 4c303391..15eabe05 100644 --- a/addon/flamenco/comms.py +++ b/addon/flamenco/comms.py @@ -3,13 +3,12 @@ # import logging -import dataclasses -import platform -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING -from urllib3.exceptions import HTTPError, MaxRetryError import bpy +from flamenco import manager_info, job_types + _flamenco_client = None _log = logging.getLogger(__name__) @@ -27,23 +26,6 @@ else: _SharedStorageLocation = object -@dataclasses.dataclass(frozen=True) -class ManagerInfo: - version: Optional[_FlamencoVersion] = None - storage: Optional[_SharedStorageLocation] = None - error: str = "" - - @classmethod - def with_error(cls, error: str) -> "ManagerInfo": - return cls(error=error) - - @classmethod - def with_info( - cls, version: _FlamencoVersion, storage: _SharedStorageLocation - ) -> "ManagerInfo": - return cls(version=version, storage=storage) - - def flamenco_api_client(manager_url: str) -> _ApiClient: """Returns an API client for communicating with a Manager.""" global _flamenco_client @@ -87,12 +69,12 @@ def discard_flamenco_data(): _flamenco_client = None -def ping_manager_with_report( +def ping_manager( window_manager: bpy.types.WindowManager, + scene: bpy.types.Scene, api_client: _ApiClient, - prefs: _FlamencoPreferences, ) -> tuple[str, str]: - """Ping the Manager, update preferences, and return a report as string. + """Fetch Manager info, and update the scene for it. :returns: tuple (report, level). The report will be something like " version found", or an error message. The level will be @@ -100,55 +82,49 @@ def ping_manager_with_report( `Operator.report()`. """ - info = ping_manager(window_manager, api_client, prefs) - if info.error: - return info.error, "ERROR" - - assert info.version is not None - report = "%s version %s found" % (info.version.name, info.version.version) - return report, "INFO" - - -def ping_manager( - window_manager: bpy.types.WindowManager, - api_client: _ApiClient, - prefs: _FlamencoPreferences, -) -> ManagerInfo: - """Fetch Manager config & version, and update cached preferences.""" - window_manager.flamenco_status_ping = "..." - # Do a late import, so that the API is only imported when actually used. 
- from flamenco.manager import ApiException - from flamenco.manager.apis import MetaApi - from flamenco.manager.models import FlamencoVersion, SharedStorageLocation + # Remember the old values, as they may have disappeared from the Manager. + old_job_type_name = getattr(scene, "flamenco_job_type", "") + old_tag_name = getattr(scene, "flamenco_worker_tag", "") - meta_api = MetaApi(api_client) - error = "" try: - version: FlamencoVersion = meta_api.get_version() - storage: SharedStorageLocation = meta_api.get_shared_storage( - "users", platform.system().lower() - ) - except ApiException as ex: - error = "Manager cannot be reached: %s" % ex - except MaxRetryError as ex: - # This is the common error, when for example the port number is - # incorrect and nothing is listening. The exception text is not included - # because it's very long and confusing. - error = "Manager cannot be reached" - except HTTPError as ex: - error = "Manager cannot be reached: %s" % ex + info = manager_info.fetch(api_client) + except manager_info.FetchError as ex: + report = str(ex) + window_manager.flamenco_status_ping = report + return report, "ERROR" - if error: - window_manager.flamenco_status_ping = error - return ManagerInfo.with_error(error) + manager_info.save(info) - # Store whether this Manager supports the Shaman API. - prefs.is_shaman_enabled = storage.shaman_enabled - prefs.job_storage = storage.location + report = "%s version %s found" % ( + info.flamenco_version.name, + info.flamenco_version.version, + ) + report_level = "INFO" + + job_types.refresh_scene_properties(scene, info.job_types) + + # Try to restore the old values. + # + # Since you cannot un-set an enum property, and 'empty string' is not a + # valid value either, when the old choice is no longer available we remove + # the underlying ID property. + if old_job_type_name: + try: + scene.flamenco_job_type = old_job_type_name + except TypeError: # Thrown when the old enum value no longer exists. + del scene["flamenco_job_type"] + report = f"Job type {old_job_type_name!r} no longer available, choose another one" + report_level = "WARNING" + + if old_tag_name: + try: + scene.flamenco_worker_tag = old_tag_name + except TypeError: # Thrown when the old enum value no longer exists. 
+ del scene["flamenco_worker_tag"] + report = f"Tag {old_tag_name!r} no longer available, choose another one" + report_level = "WARNING" - report = "%s version %s found" % (version.name, version.version) window_manager.flamenco_status_ping = report - - return ManagerInfo.with_info(version, storage) + return report, report_level diff --git a/addon/flamenco/gui.py b/addon/flamenco/gui.py index b0c2a7ff..f1850021 100644 --- a/addon/flamenco/gui.py +++ b/addon/flamenco/gui.py @@ -43,23 +43,19 @@ class FLAMENCO_PT_job_submission(bpy.types.Panel): col.prop(context.scene, "flamenco_job_name", text="Job Name") col.prop(context.scene, "flamenco_job_priority", text="Priority") - # Worker tag: - row = col.row(align=True) - row.prop(context.scene, "flamenco_worker_tag", text="Tag") - row.operator("flamenco.fetch_worker_tags", text="", icon="FILE_REFRESH") - - layout.separator() - - col = layout.column() + # Refreshables: + col = layout.column(align=True) + col.operator( + "flamenco.ping_manager", text="Refresh from Manager", icon="FILE_REFRESH" + ) if not job_types.are_job_types_available(): - col.operator("flamenco.fetch_job_types", icon="FILE_REFRESH") return + col.prop(context.scene, "flamenco_worker_tag", text="Tag") - row = col.row(align=True) - row.prop(context.scene, "flamenco_job_type", text="") - row.operator("flamenco.fetch_job_types", text="", icon="FILE_REFRESH") - - self.draw_job_settings(context, layout.column(align=True)) + # Job properties: + job_col = layout.column(align=True) + job_col.prop(context.scene, "flamenco_job_type", text="Job Type") + self.draw_job_settings(context, job_col) layout.separator() diff --git a/addon/flamenco/job_submission.py b/addon/flamenco/job_submission.py index dad16a9f..21273399 100644 --- a/addon/flamenco/job_submission.py +++ b/addon/flamenco/job_submission.py @@ -8,7 +8,7 @@ import bpy from .job_types_propgroup import JobTypePropertyGroup from .bat.submodules import bpathlib -from . import preferences +from . import manager_info if TYPE_CHECKING: from .manager import ApiClient as _ApiClient @@ -133,8 +133,11 @@ def is_file_inside_job_storage(context: bpy.types.Context, blendfile: Path) -> b blendfile = bpathlib.make_absolute(blendfile) - prefs = preferences.get(context) - job_storage = bpathlib.make_absolute(Path(prefs.job_storage)) + info = manager_info.load_cached() + if not info: + raise RuntimeError("Flamenco Manager info unknown, please refresh.") + + job_storage = bpathlib.make_absolute(Path(info.shared_storage.location)) log.info("Checking whether the file is already inside the job storage") log.info(" file : %s", blendfile) diff --git a/addon/flamenco/job_types.py b/addon/flamenco/job_types.py index 8150a953..69a855bd 100644 --- a/addon/flamenco/job_types.py +++ b/addon/flamenco/job_types.py @@ -1,14 +1,10 @@ # SPDX-License-Identifier: GPL-3.0-or-later -import json -import logging from typing import TYPE_CHECKING, Optional, Union import bpy -from . import job_types_propgroup - -_log = logging.getLogger(__name__) +from . import job_types_propgroup, manager_info if TYPE_CHECKING: from flamenco.manager import ApiClient as _ApiClient @@ -29,34 +25,34 @@ else: _available_job_types: Optional[list[_AvailableJobType]] = None +# Enum property value that indicates 'no job type selected'. This is used +# because an empty string seems to be handled by Blender as 'nothing', which +# never seems to match an enum item even when there is one with "" as its 'key'. 
+_JOB_TYPE_NOT_SELECTED = "-" +_JOB_TYPE_NOT_SELECTED_ENUM_ITEM = ( + _JOB_TYPE_NOT_SELECTED, + "Select a Job Type", + "", + 0, + 0, +) + # Items for a bpy.props.EnumProperty() _job_type_enum_items: list[ Union[tuple[str, str, str], tuple[str, str, str, int, int]] -] = [] +] = [_JOB_TYPE_NOT_SELECTED_ENUM_ITEM] _selected_job_type_propgroup: Optional[ type[job_types_propgroup.JobTypePropertyGroup] ] = None -def fetch_available_job_types(api_client: _ApiClient, scene: bpy.types.Scene) -> None: - from flamenco.manager import ApiClient - from flamenco.manager.api import jobs_api - from flamenco.manager.model.available_job_types import AvailableJobTypes - - assert isinstance(api_client, ApiClient) - - job_api_instance = jobs_api.JobsApi(api_client) - response: AvailableJobTypes = job_api_instance.get_job_types() - +def refresh_scene_properties( + scene: bpy.types.Scene, available_job_types: _AvailableJobTypes +) -> None: _clear_available_job_types(scene) - - # Store the response JSON on the scene. This is used when the blend file is - # loaded (and thus the _available_job_types global variable is still empty) - # to generate the PropertyGroup of the selected job type. - scene.flamenco_available_job_types_json = json.dumps(response.to_dict()) - - _store_available_job_types(response) + _store_available_job_types(available_job_types) + update_job_type_properties(scene) def setting_is_visible(setting: _AvailableJobSetting) -> bool: @@ -120,36 +116,10 @@ def _store_available_job_types(available_job_types: _AvailableJobTypes) -> None: else: # Convert from API response type to list suitable for an EnumProperty. _job_type_enum_items = [ - (job_type.name, job_type.label, "") for job_type in job_types + (job_type.name, job_type.label, getattr(job_type, "description", "")) + for job_type in job_types ] - _job_type_enum_items.insert(0, ("", "Select a Job Type", "", 0, 0)) - - -def _available_job_types_from_json(job_types_json: str) -> None: - """Convert JSON to AvailableJobTypes object, and update global variables for it.""" - from flamenco.manager.models import AvailableJobTypes - from flamenco.manager.configuration import Configuration - from flamenco.manager.model_utils import validate_and_convert_types - - json_dict = json.loads(job_types_json) - - dummy_cfg = Configuration() - - try: - job_types = validate_and_convert_types( - json_dict, (AvailableJobTypes,), ["job_types"], True, True, dummy_cfg - ) - except TypeError: - _log.warn( - "Flamenco: could not restore cached job types, refresh them from Flamenco Manager" - ) - _store_available_job_types(AvailableJobTypes(job_types=[])) - return - - assert isinstance( - job_types, AvailableJobTypes - ), "expected AvailableJobTypes, got %s" % type(job_types) - _store_available_job_types(job_types) + _job_type_enum_items.insert(0, _JOB_TYPE_NOT_SELECTED_ENUM_ITEM) def are_job_types_available() -> bool: @@ -199,7 +169,7 @@ def _clear_available_job_types(scene: bpy.types.Scene) -> None: _clear_job_type_propgroup() _available_job_types = None - _job_type_enum_items.clear() + _job_type_enum_items = [] scene.flamenco_available_job_types_json = "" @@ -238,26 +208,27 @@ def _get_job_types_enum_items(dummy1, dummy2): @bpy.app.handlers.persistent -def restore_available_job_types(dummy1, dummy2): +def restore_available_job_types(_filepath, _none): scene = bpy.context.scene - job_types_json = getattr(scene, "flamenco_available_job_types_json", "") - if not job_types_json: + info = manager_info.load_cached() + if info is None: _clear_available_job_types(scene) return - 
_available_job_types_from_json(job_types_json) - update_job_type_properties(scene) + refresh_scene_properties(scene, info.job_types) def discard_flamenco_data(): - if _available_job_types: - _available_job_types.clear() - if _job_type_enum_items: - _job_type_enum_items.clear() + global _available_job_types + global _job_type_enum_items + + _available_job_types = None + _job_type_enum_items = [] def register() -> None: bpy.types.Scene.flamenco_job_type = bpy.props.EnumProperty( name="Job Type", + default=0, items=_get_job_types_enum_items, update=_update_job_type, ) diff --git a/addon/flamenco/job_types_propgroup.py b/addon/flamenco/job_types_propgroup.py index e5de0ce2..48e2c3a9 100644 --- a/addon/flamenco/job_types_propgroup.py +++ b/addon/flamenco/job_types_propgroup.py @@ -304,8 +304,8 @@ def _create_property(job_type: _AvailableJobType, setting: _AvailableJobSetting) if not setting.get("editable", True): prop_kwargs["get"] = _create_prop_getter(job_type, setting) - prop_name = _job_setting_key_to_label(setting.key) - prop = prop_type(name=prop_name, **prop_kwargs) + prop_label = _job_setting_label(setting) + prop = prop_type(name=prop_label, **prop_kwargs) return prop @@ -316,10 +316,10 @@ def _create_autoeval_property( assert isinstance(setting, AvailableJobSetting) - setting_name = _job_setting_key_to_label(setting.key) + setting_label = _job_setting_label(setting) prop_descr = ( "Automatically determine the value for %r when the job gets submitted" - % setting_name + % setting_label ) prop = bpy.props.BoolProperty( @@ -379,13 +379,13 @@ def _job_type_to_class_name(job_type_name: str) -> str: return job_type_name.title().replace("-", "") -def _job_setting_key_to_label(setting_key: str) -> str: - """Change 'some_setting_key' to 'Some Setting Key'. 
+def _job_setting_label(setting: _AvailableJobSetting) -> str: + """Return a suitable label for this job setting.""" - >>> _job_setting_key_to_label('some_setting_key') - 'Some Setting Key' - """ - return setting_key.title().replace("_", " ") + label = setting.get("label", default="") + if label: + return label + return setting.key.title().replace("_", " ") def _set_if_available( diff --git a/addon/flamenco/manager/__init__.py b/addon/flamenco/manager/__init__.py index 50885729..543f5756 100644 --- a/addon/flamenco/manager/__init__.py +++ b/addon/flamenco/manager/__init__.py @@ -10,7 +10,7 @@ """ -__version__ = "3.5-alpha1" +__version__ = "3.6-alpha0" # import ApiClient from flamenco.manager.api_client import ApiClient diff --git a/addon/flamenco/manager/api/meta_api.py b/addon/flamenco/manager/api/meta_api.py index d67e0c96..9627d223 100644 --- a/addon/flamenco/manager/api/meta_api.py +++ b/addon/flamenco/manager/api/meta_api.py @@ -24,6 +24,7 @@ from flamenco.manager.model_utils import ( # noqa: F401 from flamenco.manager.model.blender_path_check_result import BlenderPathCheckResult from flamenco.manager.model.blender_path_find_result import BlenderPathFindResult from flamenco.manager.model.error import Error +from flamenco.manager.model.farm_status_report import FarmStatusReport from flamenco.manager.model.flamenco_version import FlamencoVersion from flamenco.manager.model.manager_configuration import ManagerConfiguration from flamenco.manager.model.manager_variable_audience import ManagerVariableAudience @@ -268,6 +269,48 @@ class MetaApi(object): }, api_client=api_client ) + self.get_farm_status_endpoint = _Endpoint( + settings={ + 'response_type': (FarmStatusReport,), + 'auth': [], + 'endpoint_path': '/api/v3/status', + 'operation_id': 'get_farm_status', + 'http_method': 'GET', + 'servers': None, + }, + params_map={ + 'all': [ + ], + 'required': [], + 'nullable': [ + ], + 'enum': [ + ], + 'validation': [ + ] + }, + root_map={ + 'validations': { + }, + 'allowed_values': { + }, + 'openapi_types': { + }, + 'attribute_map': { + }, + 'location_map': { + }, + 'collection_format_map': { + } + }, + headers_map={ + 'accept': [ + 'application/json' + ], + 'content_type': [], + }, + api_client=api_client + ) self.get_shared_storage_endpoint = _Endpoint( settings={ 'response_type': (SharedStorageLocation,), @@ -831,6 +874,78 @@ class MetaApi(object): kwargs['_host_index'] = kwargs.get('_host_index') return self.get_configuration_file_endpoint.call_with_http_info(**kwargs) + def get_farm_status( + self, + **kwargs + ): + """Get the status of this Flamenco farm. # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.get_farm_status(async_req=True) + >>> result = thread.get() + + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. 
+ Default is True. + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + FarmStatusReport + If the method is called asynchronously, returns the request + thread. + """ + kwargs['async_req'] = kwargs.get( + 'async_req', False + ) + kwargs['_return_http_data_only'] = kwargs.get( + '_return_http_data_only', True + ) + kwargs['_preload_content'] = kwargs.get( + '_preload_content', True + ) + kwargs['_request_timeout'] = kwargs.get( + '_request_timeout', None + ) + kwargs['_check_input_type'] = kwargs.get( + '_check_input_type', True + ) + kwargs['_check_return_type'] = kwargs.get( + '_check_return_type', True + ) + kwargs['_spec_property_naming'] = kwargs.get( + '_spec_property_naming', False + ) + kwargs['_content_type'] = kwargs.get( + '_content_type') + kwargs['_host_index'] = kwargs.get('_host_index') + return self.get_farm_status_endpoint.call_with_http_info(**kwargs) + def get_shared_storage( self, audience, diff --git a/addon/flamenco/manager/api_client.py b/addon/flamenco/manager/api_client.py index 08599db8..7d65fe25 100644 --- a/addon/flamenco/manager/api_client.py +++ b/addon/flamenco/manager/api_client.py @@ -76,7 +76,7 @@ class ApiClient(object): self.default_headers[header_name] = header_value self.cookie = cookie # Set default User-Agent. - self.user_agent = 'Flamenco/3.5-alpha1 (Blender add-on)' + self.user_agent = 'Flamenco/3.6-alpha0 (Blender add-on)' def __enter__(self): return self diff --git a/addon/flamenco/manager/configuration.py b/addon/flamenco/manager/configuration.py index 58b78a7d..4f6b9715 100644 --- a/addon/flamenco/manager/configuration.py +++ b/addon/flamenco/manager/configuration.py @@ -404,7 +404,7 @@ conf = flamenco.manager.Configuration( "OS: {env}\n"\ "Python Version: {pyversion}\n"\ "Version of the API: 1.0.0\n"\ - "SDK Package Version: 3.5-alpha1".\ + "SDK Package Version: 3.6-alpha0".\ format(env=sys.platform, pyversion=sys.version) def get_host_settings(self): diff --git a/addon/flamenco/manager/docs/AvailableJobSetting.md b/addon/flamenco/manager/docs/AvailableJobSetting.md index 8f0ab931..da61a737 100644 --- a/addon/flamenco/manager/docs/AvailableJobSetting.md +++ b/addon/flamenco/manager/docs/AvailableJobSetting.md @@ -11,6 +11,7 @@ Name | Type | Description | Notes **choices** | **[str]** | When given, limit the valid values to these choices. Only usable with string type. | [optional] **propargs** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}** | Any extra arguments to the bpy.props.SomeProperty() call used to create this property. | [optional] **description** | **bool, date, datetime, dict, float, int, list, str, none_type** | The description/tooltip shown in the user interface. | [optional] +**label** | **bool, date, datetime, dict, float, int, list, str, none_type** | Label for displaying this setting. If not specified, the key is used to generate a reasonable label. 
| [optional] **default** | **bool, date, datetime, dict, float, int, list, str, none_type** | The default value shown to the user when determining this setting. | [optional] **eval** | **str** | Python expression to be evaluated in order to determine the default value for this setting. | [optional] **eval_info** | [**AvailableJobSettingEvalInfo**](AvailableJobSettingEvalInfo.md) | | [optional] diff --git a/addon/flamenco/manager/docs/AvailableJobType.md b/addon/flamenco/manager/docs/AvailableJobType.md index d2e6b305..5360020c 100644 --- a/addon/flamenco/manager/docs/AvailableJobType.md +++ b/addon/flamenco/manager/docs/AvailableJobType.md @@ -9,6 +9,7 @@ Name | Type | Description | Notes **label** | **str** | | **settings** | [**[AvailableJobSetting]**](AvailableJobSetting.md) | | **etag** | **str** | Hash of the job type. If the job settings or the label change, this etag will change. This is used on job submission to ensure that the submitted job settings are up to date. | +**description** | **str** | The description/tooltip shown in the user interface. | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/addon/flamenco/manager/docs/EventFarmStatus.md b/addon/flamenco/manager/docs/EventFarmStatus.md new file mode 100644 index 00000000..ba47b032 --- /dev/null +++ b/addon/flamenco/manager/docs/EventFarmStatus.md @@ -0,0 +1,11 @@ +# EventFarmStatus + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **FarmStatusReport** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/addon/flamenco/manager/docs/FarmStatus.md b/addon/flamenco/manager/docs/FarmStatus.md new file mode 100644 index 00000000..20c3bdd6 --- /dev/null +++ b/addon/flamenco/manager/docs/FarmStatus.md @@ -0,0 +1,11 @@ +# FarmStatus + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | | must be one of ["active", "idle", "waiting", "asleep", "inoperative", "unknown", "starting", ] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/addon/flamenco/manager/docs/FarmStatusReport.md b/addon/flamenco/manager/docs/FarmStatusReport.md new file mode 100644 index 00000000..4b8972cf --- /dev/null +++ b/addon/flamenco/manager/docs/FarmStatusReport.md @@ -0,0 +1,12 @@ +# FarmStatusReport + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**status** | [**FarmStatus**](FarmStatus.md) | | +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/addon/flamenco/manager/docs/MetaApi.md b/addon/flamenco/manager/docs/MetaApi.md index 9b778a3f..b4d7b5bb 100644 --- 
a/addon/flamenco/manager/docs/MetaApi.md +++ b/addon/flamenco/manager/docs/MetaApi.md @@ -9,6 +9,7 @@ Method | HTTP request | Description [**find_blender_exe_path**](MetaApi.md#find_blender_exe_path) | **GET** /api/v3/configuration/check/blender | Find one or more CLI commands for use as way to start Blender [**get_configuration**](MetaApi.md#get_configuration) | **GET** /api/v3/configuration | Get the configuration of this Manager. [**get_configuration_file**](MetaApi.md#get_configuration_file) | **GET** /api/v3/configuration/file | Retrieve the configuration of Flamenco Manager. +[**get_farm_status**](MetaApi.md#get_farm_status) | **GET** /api/v3/status | Get the status of this Flamenco farm. [**get_shared_storage**](MetaApi.md#get_shared_storage) | **GET** /api/v3/configuration/shared-storage/{audience}/{platform} | Get the shared storage location of this Manager, adjusted for the given audience and platform. [**get_variables**](MetaApi.md#get_variables) | **GET** /api/v3/configuration/variables/{audience}/{platform} | Get the variables of this Manager. Used by the Blender add-on to recognise two-way variables, and for the web interface to do variable replacement based on the browser's platform. [**get_version**](MetaApi.md#get_version) | **GET** /api/v3/version | Get the Flamenco version of this Manager @@ -341,6 +342,67 @@ No authorization required [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) +# **get_farm_status** +> FarmStatusReport get_farm_status() + +Get the status of this Flamenco farm. + +### Example + + +```python +import time +import flamenco.manager +from flamenco.manager.api import meta_api +from flamenco.manager.model.farm_status_report import FarmStatusReport +from pprint import pprint +# Defining the host is optional and defaults to http://localhost +# See configuration.py for a list of all supported configuration parameters. +configuration = flamenco.manager.Configuration( + host = "http://localhost" +) + + +# Enter a context with an instance of the API client +with flamenco.manager.ApiClient() as api_client: + # Create an instance of the API class + api_instance = meta_api.MetaApi(api_client) + + # example, this endpoint has no required or optional parameters + try: + # Get the status of this Flamenco farm. + api_response = api_instance.get_farm_status() + pprint(api_response) + except flamenco.manager.ApiException as e: + print("Exception when calling MetaApi->get_farm_status: %s\n" % e) +``` + + +### Parameters +This endpoint does not need any parameter. 
+ +### Return type + +[**FarmStatusReport**](FarmStatusReport.md) + +### Authorization + +No authorization required + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | normal response | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + # **get_shared_storage** > SharedStorageLocation get_shared_storage(audience, platform) diff --git a/addon/flamenco/manager/model/available_job_setting.py b/addon/flamenco/manager/model/available_job_setting.py index dd023102..40c21ea0 100644 --- a/addon/flamenco/manager/model/available_job_setting.py +++ b/addon/flamenco/manager/model/available_job_setting.py @@ -99,6 +99,7 @@ class AvailableJobSetting(ModelNormal): 'choices': ([str],), # noqa: E501 'propargs': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501 'description': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501 + 'label': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501 'default': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501 'eval': (str,), # noqa: E501 'eval_info': (AvailableJobSettingEvalInfo,), # noqa: E501 @@ -119,6 +120,7 @@ class AvailableJobSetting(ModelNormal): 'choices': 'choices', # noqa: E501 'propargs': 'propargs', # noqa: E501 'description': 'description', # noqa: E501 + 'label': 'label', # noqa: E501 'default': 'default', # noqa: E501 'eval': 'eval', # noqa: E501 'eval_info': 'evalInfo', # noqa: E501 @@ -176,6 +178,7 @@ class AvailableJobSetting(ModelNormal): choices ([str]): When given, limit the valid values to these choices. Only usable with string type.. [optional] # noqa: E501 propargs ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Any extra arguments to the bpy.props.SomeProperty() call used to create this property.. [optional] # noqa: E501 description (bool, date, datetime, dict, float, int, list, str, none_type): The description/tooltip shown in the user interface.. [optional] # noqa: E501 + label (bool, date, datetime, dict, float, int, list, str, none_type): Label for displaying this setting. If not specified, the key is used to generate a reasonable label.. [optional] # noqa: E501 default (bool, date, datetime, dict, float, int, list, str, none_type): The default value shown to the user when determining this setting.. [optional] # noqa: E501 eval (str): Python expression to be evaluated in order to determine the default value for this setting.. [optional] # noqa: E501 eval_info (AvailableJobSettingEvalInfo): [optional] # noqa: E501 @@ -273,6 +276,7 @@ class AvailableJobSetting(ModelNormal): choices ([str]): When given, limit the valid values to these choices. Only usable with string type.. [optional] # noqa: E501 propargs ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Any extra arguments to the bpy.props.SomeProperty() call used to create this property.. [optional] # noqa: E501 description (bool, date, datetime, dict, float, int, list, str, none_type): The description/tooltip shown in the user interface.. [optional] # noqa: E501 + label (bool, date, datetime, dict, float, int, list, str, none_type): Label for displaying this setting. 
If not specified, the key is used to generate a reasonable label.. [optional] # noqa: E501 default (bool, date, datetime, dict, float, int, list, str, none_type): The default value shown to the user when determining this setting.. [optional] # noqa: E501 eval (str): Python expression to be evaluated in order to determine the default value for this setting.. [optional] # noqa: E501 eval_info (AvailableJobSettingEvalInfo): [optional] # noqa: E501 diff --git a/addon/flamenco/manager/model/available_job_type.py b/addon/flamenco/manager/model/available_job_type.py index 2a4015c8..b55af5d1 100644 --- a/addon/flamenco/manager/model/available_job_type.py +++ b/addon/flamenco/manager/model/available_job_type.py @@ -91,6 +91,7 @@ class AvailableJobType(ModelNormal): 'label': (str,), # noqa: E501 'settings': ([AvailableJobSetting],), # noqa: E501 'etag': (str,), # noqa: E501 + 'description': (str,), # noqa: E501 } @cached_property @@ -103,6 +104,7 @@ class AvailableJobType(ModelNormal): 'label': 'label', # noqa: E501 'settings': 'settings', # noqa: E501 'etag': 'etag', # noqa: E501 + 'description': 'description', # noqa: E501 } read_only_vars = { @@ -152,6 +154,7 @@ class AvailableJobType(ModelNormal): Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) + description (str): The description/tooltip shown in the user interface.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) @@ -243,6 +246,7 @@ class AvailableJobType(ModelNormal): Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) + description (str): The description/tooltip shown in the user interface.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) diff --git a/addon/flamenco/manager/model/event_farm_status.py b/addon/flamenco/manager/model/event_farm_status.py new file mode 100644 index 00000000..ca516d18 --- /dev/null +++ b/addon/flamenco/manager/model/event_farm_status.py @@ -0,0 +1,278 @@ +""" + Flamenco manager + + Render Farm manager API # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from flamenco.manager.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel +) +from flamenco.manager.exceptions import ApiAttributeError + + + +class EventFarmStatus(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. 
+ """ + + allowed_values = { + } + + validations = { + } + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + 'value': (FarmStatusReport,), + } + + @cached_property + def discriminator(): + return None + + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set([ + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """EventFarmStatus - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (FarmStatusReport): # noqa: E501 + + Keyword Args: + value (FarmStatusReport): # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop('_path_to_item', ()) + + if 'value' in kwargs: + value = kwargs.pop('value') + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """EventFarmStatus - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (FarmStatusReport): # noqa: E501 + + Keyword Args: + value (FarmStatusReport): # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop('_path_to_item', ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if 'value' in kwargs: + value = kwargs.pop('value') + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
% ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/addon/flamenco/manager/model/farm_status.py b/addon/flamenco/manager/model/farm_status.py new file mode 100644 index 00000000..7a3211ae --- /dev/null +++ b/addon/flamenco/manager/model/farm_status.py @@ -0,0 +1,287 @@ +""" + Flamenco manager + + Render Farm manager API # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from flamenco.manager.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel +) +from flamenco.manager.exceptions import ApiAttributeError + + + +class FarmStatus(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + ('value',): { + 'ACTIVE': "active", + 'IDLE': "idle", + 'WAITING': "waiting", + 'ASLEEP': "asleep", + 'INOPERATIVE': "inoperative", + 'UNKNOWN': "unknown", + 'STARTING': "starting", + }, + } + + validations = { + } + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + 'value': (str,), + } + + @cached_property + def discriminator(): + return None + + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set([ + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """FarmStatus - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. 
+ + Args: + args[0] (str):, must be one of ["active", "idle", "waiting", "asleep", "inoperative", "unknown", "starting", ] # noqa: E501 + + Keyword Args: + value (str):, must be one of ["active", "idle", "waiting", "asleep", "inoperative", "unknown", "starting", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop('_path_to_item', ()) + + if 'value' in kwargs: + value = kwargs.pop('value') + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """FarmStatus - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. 
+ + Args: + args[0] (str):, must be one of ["active", "idle", "waiting", "asleep", "inoperative", "unknown", "starting", ] # noqa: E501 + + Keyword Args: + value (str):, must be one of ["active", "idle", "waiting", "asleep", "inoperative", "unknown", "starting", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop('_path_to_item', ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if 'value' in kwargs: + value = kwargs.pop('value') + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." 
% ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/addon/flamenco/manager/model/farm_status_report.py b/addon/flamenco/manager/model/farm_status_report.py new file mode 100644 index 00000000..e103b4bf --- /dev/null +++ b/addon/flamenco/manager/model/farm_status_report.py @@ -0,0 +1,267 @@ +""" + Flamenco manager + + Render Farm manager API # noqa: E501 + + The version of the OpenAPI document: 1.0.0 + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from flamenco.manager.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel +) +from flamenco.manager.exceptions import ApiAttributeError + + +def lazy_import(): + from flamenco.manager.model.farm_status import FarmStatus + globals()['FarmStatus'] = FarmStatus + + +class FarmStatusReport(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + } + + validations = { + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + lazy_import() + return { + 'status': (FarmStatus,), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + + attribute_map = { + 'status': 'status', # noqa: E501 + } + + read_only_vars = { + } + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, status, *args, **kwargs): # noqa: E501 + """FarmStatusReport - a model defined in OpenAPI + + Args: + status (FarmStatus): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. 
+ Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.status = status + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + '_data_store', + '_check_type', + '_spec_property_naming', + '_path_to_item', + '_configuration', + '_visited_composed_classes', + ]) + + @convert_js_args_to_python_args + def __init__(self, status, *args, **kwargs): # noqa: E501 + """FarmStatusReport - a model defined in OpenAPI + + Args: + status (FarmStatus): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. 
+ _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop('_check_type', True) + _spec_property_naming = kwargs.pop('_spec_property_naming', False) + _path_to_item = kwargs.pop('_path_to_item', ()) + _configuration = kwargs.pop('_configuration', None) + _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.status = status + for var_name, var_value in kwargs.items(): + if var_name not in self.attribute_map and \ + self._configuration is not None and \ + self._configuration.discard_unknown_keys and \ + self.additional_properties_type is None: + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError(f"`{var_name}` is a read-only attribute. 
Use `from_openapi_data` to instantiate " + f"class with read only attributes.") diff --git a/addon/flamenco/manager/models/__init__.py b/addon/flamenco/manager/models/__init__.py index b2675225..374f2511 100644 --- a/addon/flamenco/manager/models/__init__.py +++ b/addon/flamenco/manager/models/__init__.py @@ -22,6 +22,7 @@ from flamenco.manager.model.blender_path_find_result import BlenderPathFindResul from flamenco.manager.model.blender_path_source import BlenderPathSource from flamenco.manager.model.command import Command from flamenco.manager.model.error import Error +from flamenco.manager.model.event_farm_status import EventFarmStatus from flamenco.manager.model.event_job_update import EventJobUpdate from flamenco.manager.model.event_last_rendered_update import EventLastRenderedUpdate from flamenco.manager.model.event_life_cycle import EventLifeCycle @@ -29,6 +30,8 @@ from flamenco.manager.model.event_task_log_update import EventTaskLogUpdate from flamenco.manager.model.event_task_update import EventTaskUpdate from flamenco.manager.model.event_worker_tag_update import EventWorkerTagUpdate from flamenco.manager.model.event_worker_update import EventWorkerUpdate +from flamenco.manager.model.farm_status import FarmStatus +from flamenco.manager.model.farm_status_report import FarmStatusReport from flamenco.manager.model.flamenco_version import FlamencoVersion from flamenco.manager.model.job import Job from flamenco.manager.model.job_all_of import JobAllOf diff --git a/addon/flamenco/manager_README.md b/addon/flamenco/manager_README.md index 248073c5..4208db5f 100644 --- a/addon/flamenco/manager_README.md +++ b/addon/flamenco/manager_README.md @@ -4,7 +4,7 @@ Render Farm manager API The `flamenco.manager` package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project: - API version: 1.0.0 -- Package version: 3.5-alpha1 +- Package version: 3.6-alpha0 - Build package: org.openapitools.codegen.languages.PythonClientCodegen For more information, please visit [https://flamenco.io/](https://flamenco.io/) @@ -101,6 +101,7 @@ Class | Method | HTTP request | Description *MetaApi* | [**find_blender_exe_path**](flamenco/manager/docs/MetaApi.md#find_blender_exe_path) | **GET** /api/v3/configuration/check/blender | Find one or more CLI commands for use as way to start Blender *MetaApi* | [**get_configuration**](flamenco/manager/docs/MetaApi.md#get_configuration) | **GET** /api/v3/configuration | Get the configuration of this Manager. *MetaApi* | [**get_configuration_file**](flamenco/manager/docs/MetaApi.md#get_configuration_file) | **GET** /api/v3/configuration/file | Retrieve the configuration of Flamenco Manager. +*MetaApi* | [**get_farm_status**](flamenco/manager/docs/MetaApi.md#get_farm_status) | **GET** /api/v3/status | Get the status of this Flamenco farm. *MetaApi* | [**get_shared_storage**](flamenco/manager/docs/MetaApi.md#get_shared_storage) | **GET** /api/v3/configuration/shared-storage/{audience}/{platform} | Get the shared storage location of this Manager, adjusted for the given audience and platform. *MetaApi* | [**get_variables**](flamenco/manager/docs/MetaApi.md#get_variables) | **GET** /api/v3/configuration/variables/{audience}/{platform} | Get the variables of this Manager. Used by the Blender add-on to recognise two-way variables, and for the web interface to do variable replacement based on the browser's platform. 
*MetaApi* | [**get_version**](flamenco/manager/docs/MetaApi.md#get_version) | **GET** /api/v3/version | Get the Flamenco version of this Manager @@ -147,6 +148,7 @@ Class | Method | HTTP request | Description - [BlenderPathSource](flamenco/manager/docs/BlenderPathSource.md) - [Command](flamenco/manager/docs/Command.md) - [Error](flamenco/manager/docs/Error.md) + - [EventFarmStatus](flamenco/manager/docs/EventFarmStatus.md) - [EventJobUpdate](flamenco/manager/docs/EventJobUpdate.md) - [EventLastRenderedUpdate](flamenco/manager/docs/EventLastRenderedUpdate.md) - [EventLifeCycle](flamenco/manager/docs/EventLifeCycle.md) @@ -154,6 +156,8 @@ Class | Method | HTTP request | Description - [EventTaskUpdate](flamenco/manager/docs/EventTaskUpdate.md) - [EventWorkerTagUpdate](flamenco/manager/docs/EventWorkerTagUpdate.md) - [EventWorkerUpdate](flamenco/manager/docs/EventWorkerUpdate.md) + - [FarmStatus](flamenco/manager/docs/FarmStatus.md) + - [FarmStatusReport](flamenco/manager/docs/FarmStatusReport.md) - [FlamencoVersion](flamenco/manager/docs/FlamencoVersion.md) - [Job](flamenco/manager/docs/Job.md) - [JobAllOf](flamenco/manager/docs/JobAllOf.md) diff --git a/addon/flamenco/manager_info.py b/addon/flamenco/manager_info.py new file mode 100644 index 00000000..10c2bf68 --- /dev/null +++ b/addon/flamenco/manager_info.py @@ -0,0 +1,210 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# + +import dataclasses +import json +import platform +from pathlib import Path +from typing import TYPE_CHECKING, Optional, Union + +from urllib3.exceptions import HTTPError, MaxRetryError + +import bpy + +if TYPE_CHECKING: + from flamenco.manager import ApiClient as _ApiClient + from flamenco.manager.models import ( + AvailableJobTypes as _AvailableJobTypes, + FlamencoVersion as _FlamencoVersion, + SharedStorageLocation as _SharedStorageLocation, + WorkerTagList as _WorkerTagList, + ) +else: + _ApiClient = object + _AvailableJobTypes = object + _FlamencoVersion = object + _SharedStorageLocation = object + _WorkerTagList = object + + +@dataclasses.dataclass +class ManagerInfo: + """Cached information obtained from a Flamenco Manager. + + This is the root object of what is stored on disk, every time someone + presses a 'refresh' button to update worker tags, job types, etc. + """ + + flamenco_version: _FlamencoVersion + shared_storage: _SharedStorageLocation + job_types: _AvailableJobTypes + worker_tags: _WorkerTagList + + @staticmethod + def type_info() -> dict[str, type]: + # Do a late import, so that the API is only imported when actually used. + from flamenco.manager.models import ( + AvailableJobTypes, + FlamencoVersion, + SharedStorageLocation, + WorkerTagList, + ) + + # These types cannot be obtained by introspecting the ManagerInfo class, as + # at runtime that doesn't use real type annotations. + return { + "flamenco_version": FlamencoVersion, + "shared_storage": SharedStorageLocation, + "job_types": AvailableJobTypes, + "worker_tags": WorkerTagList, + } + + +class FetchError(RuntimeError): + """Raised when the manager info could not be fetched from the Manager.""" + + +class LoadError(RuntimeError): + """Raised when the manager info could not be loaded from disk cache.""" + + +_cached_manager_info: Optional[ManagerInfo] = None + + +def fetch(api_client: _ApiClient) -> ManagerInfo: + global _cached_manager_info + + # Do a late import, so that the API is only imported when actually used. 
+ from flamenco.manager import ApiException + from flamenco.manager.apis import MetaApi, JobsApi, WorkerMgtApi + from flamenco.manager.models import ( + AvailableJobTypes, + FlamencoVersion, + SharedStorageLocation, + WorkerTagList, + ) + + meta_api = MetaApi(api_client) + jobs_api = JobsApi(api_client) + worker_mgt_api = WorkerMgtApi(api_client) + + try: + flamenco_version: FlamencoVersion = meta_api.get_version() + shared_storage: SharedStorageLocation = meta_api.get_shared_storage( + "users", platform.system().lower() + ) + job_types: AvailableJobTypes = jobs_api.get_job_types() + worker_tags: WorkerTagList = worker_mgt_api.fetch_worker_tags() + except ApiException as ex: + raise FetchError("Manager cannot be reached: %s" % ex) from ex + except MaxRetryError as ex: + # This is the common error, when for example the port number is + # incorrect and nothing is listening. The exception text is not included + # because it's very long and confusing. + raise FetchError("Manager cannot be reached") from ex + except HTTPError as ex: + raise FetchError("Manager cannot be reached: %s" % ex) from ex + + _cached_manager_info = ManagerInfo( + flamenco_version=flamenco_version, + shared_storage=shared_storage, + job_types=job_types, + worker_tags=worker_tags, + ) + return _cached_manager_info + + +class Encoder(json.JSONEncoder): + def default(self, o): + from flamenco.manager.model_utils import OpenApiModel + + if isinstance(o, OpenApiModel): + return o.to_dict() + + if isinstance(o, ManagerInfo): + # dataclasses.asdict() creates a copy of the OpenAPI models, + # in a way that just doesn't work, hence this workaround. + return {f.name: getattr(o, f.name) for f in dataclasses.fields(o)} + + return super().default(o) + + +def _to_json(info: ManagerInfo) -> str: + return json.dumps(info, indent=" ", cls=Encoder) + + +def _from_json(contents: Union[str, bytes]) -> ManagerInfo: + # Do a late import, so that the API is only imported when actually used. + from flamenco.manager.configuration import Configuration + from flamenco.manager.model_utils import validate_and_convert_types + + json_dict = json.loads(contents) + dummy_cfg = Configuration() + api_models = {} + + for name, api_type in ManagerInfo.type_info().items(): + api_model = validate_and_convert_types( + json_dict[name], + (api_type,), + [name], + True, + True, + dummy_cfg, + ) + api_models[name] = api_model + + return ManagerInfo(**api_models) + + +def _json_filepath() -> Path: + # This is the '~/.config/blender/{version}' path. 
+ user_path = Path(bpy.utils.resource_path(type="USER")) + return user_path / "config" / "flamenco-manager-info.json" + + +def save(info: ManagerInfo) -> None: + json_path = _json_filepath() + json_path.parent.mkdir(parents=True, exist_ok=True) + + as_json = _to_json(info) + json_path.write_text(as_json, encoding="utf8") + + +def load() -> ManagerInfo: + json_path = _json_filepath() + if not json_path.exists(): + raise FileNotFoundError(f"{json_path.name} not found in {json_path.parent}") + + try: + as_json = json_path.read_text(encoding="utf8") + except OSError as ex: + raise LoadError(f"Could not read {json_path}: {ex}") from ex + + try: + return _from_json(as_json) + except json.JSONDecodeError as ex: + raise LoadError(f"Could not decode JSON in {json_path}") from ex + + +def load_into_cache() -> Optional[ManagerInfo]: + global _cached_manager_info + + _cached_manager_info = None + try: + _cached_manager_info = load() + except FileNotFoundError: + return None + except LoadError as ex: + print(f"Could not load Flamenco Manager info from disk: {ex}") + return None + + return _cached_manager_info + + +def load_cached() -> Optional[ManagerInfo]: + global _cached_manager_info + + if _cached_manager_info is not None: + return _cached_manager_info + + return load_into_cache() diff --git a/addon/flamenco/operators.py b/addon/flamenco/operators.py index 0d94effc..c8f1eea1 100644 --- a/addon/flamenco/operators.py +++ b/addon/flamenco/operators.py @@ -10,7 +10,7 @@ from urllib3.exceptions import HTTPError, MaxRetryError import bpy -from . import job_types, job_submission, preferences, worker_tags +from . import job_types, job_submission, preferences, manager_info from .job_types_propgroup import JobTypePropertyGroup from .bat.submodules import bpathlib @@ -51,80 +51,6 @@ class FlamencoOpMixin: return api_client -class FLAMENCO_OT_fetch_job_types(FlamencoOpMixin, bpy.types.Operator): - bl_idname = "flamenco.fetch_job_types" - bl_label = "Fetch Job Types" - bl_description = "Query Flamenco Manager to obtain the available job types" - - def execute(self, context: bpy.types.Context) -> set[str]: - api_client = self.get_api_client(context) - - from flamenco.manager import ApiException - - scene = context.scene - old_job_type_name = getattr(scene, "flamenco_job_type", "") - - try: - job_types.fetch_available_job_types(api_client, scene) - except ApiException as ex: - self.report({"ERROR"}, "Error getting job types: %s" % ex) - return {"CANCELLED"} - except MaxRetryError as ex: - # This is the common error, when for example the port number is - # incorrect and nothing is listening. - self.report({"ERROR"}, "Unable to reach Manager") - return {"CANCELLED"} - - if old_job_type_name: - try: - scene.flamenco_job_type = old_job_type_name - except TypeError: # Thrown when the old job type no longer exists. - # You cannot un-set an enum property, and 'empty string' is not - # a valid value either, so better to just remove the underlying - # ID property. 
- del scene["flamenco_job_type"] - - self.report( - {"WARNING"}, - "Job type %r no longer available, choose another one" - % old_job_type_name, - ) - - job_types.update_job_type_properties(scene) - return {"FINISHED"} - - -class FLAMENCO_OT_fetch_worker_tags(FlamencoOpMixin, bpy.types.Operator): - bl_idname = "flamenco.fetch_worker_tags" - bl_label = "Fetch Worker Tags" - bl_description = "Query Flamenco Manager to obtain the available worker tags" - - def execute(self, context: bpy.types.Context) -> set[str]: - api_client = self.get_api_client(context) - - from flamenco.manager import ApiException - - scene = context.scene - old_tag = getattr(scene, "flamenco_worker_tag", "") - - try: - worker_tags.refresh(context, api_client) - except ApiException as ex: - self.report({"ERROR"}, "Error getting job types: %s" % ex) - return {"CANCELLED"} - except MaxRetryError as ex: - # This is the common error, when for example the port number is - # incorrect and nothing is listening. - self.report({"ERROR"}, "Unable to reach Manager") - return {"CANCELLED"} - - if old_tag: - # TODO: handle cases where the old tag no longer exists. - scene.flamenco_worker_tag = old_tag - - return {"FINISHED"} - - class FLAMENCO_OT_ping_manager(FlamencoOpMixin, bpy.types.Operator): bl_idname = "flamenco.ping_manager" bl_label = "Flamenco: Ping Manager" @@ -132,13 +58,13 @@ class FLAMENCO_OT_ping_manager(FlamencoOpMixin, bpy.types.Operator): bl_options = {"REGISTER"} # No UNDO. def execute(self, context: bpy.types.Context) -> set[str]: - from . import comms, preferences + from . import comms api_client = self.get_api_client(context) - prefs = preferences.get(context) - - report, level = comms.ping_manager_with_report( - context.window_manager, api_client, prefs + report, level = comms.ping_manager( + context.window_manager, + context.scene, + api_client, ) self.report({level}, report) @@ -259,29 +185,31 @@ class FLAMENCO_OT_submit_job(FlamencoOpMixin, bpy.types.Operator): :return: an error string when something went wrong. """ - from . import comms, preferences + from . import comms - # Get the manager's info. This is cached in the preferences, so - # regardless of whether this function actually responds to version - # mismatches, it has to be called to also refresh the shared storage - # location. + # Get the manager's info. This is cached to disk, so regardless of + # whether this function actually responds to version mismatches, it has + # to be called to also refresh the shared storage location. api_client = self.get_api_client(context) - prefs = preferences.get(context) - mgrinfo = comms.ping_manager(context.window_manager, api_client, prefs) - if mgrinfo.error: - return mgrinfo.error + + report, report_level = comms.ping_manager( + context.window_manager, + context.scene, + api_client, + ) + if report_level != "INFO": + return report # Check the Manager's version. if not self.ignore_version_mismatch: - my_version = comms.flamenco_client_version() - assert mgrinfo.version is not None + mgrinfo = manager_info.load_cached() + + # Safe to assume, as otherwise the ping_manager() call would not have succeeded. + assert mgrinfo is not None + + my_version = comms.flamenco_client_version() + mgrversion = mgrinfo.flamenco_version.shortversion - try: - mgrversion = mgrinfo.version.shortversion - except AttributeError: - # shortversion was introduced in Manager version 3.0-beta2, which - # may not be running here yet. 
- mgrversion = mgrinfo.version.version if mgrversion != my_version: context.window_manager.flamenco_version_mismatch = True return ( @@ -299,6 +227,23 @@ class FLAMENCO_OT_submit_job(FlamencoOpMixin, bpy.types.Operator): # Empty error message indicates 'ok'. return "" + def _manager_info( + self, context: bpy.types.Context + ) -> Optional[manager_info.ManagerInfo]: + """Load the manager info. + + If it cannot be loaded, returns None after emitting an error message and + calling self._quit(context). + """ + manager = manager_info.load_cached() + if not manager: + self.report( + {"ERROR"}, "No information known about Flamenco Manager, refresh first." + ) + self._quit(context) + return None + return manager + def _save_blendfile(self, context): """Save to a different file, specifically for Flamenco. @@ -368,8 +313,11 @@ class FLAMENCO_OT_submit_job(FlamencoOpMixin, bpy.types.Operator): self._quit(context) return {"CANCELLED"} - prefs = preferences.get(context) - if prefs.is_shaman_enabled: + manager = self._manager_info(context) + if not manager: + return {"CANCELLED"} + + if manager.shared_storage.shaman_enabled: # self.blendfile_on_farm will be set when BAT created the checkout, # see _on_bat_pack_msg() below. self.blendfile_on_farm = None @@ -414,11 +362,14 @@ class FLAMENCO_OT_submit_job(FlamencoOpMixin, bpy.types.Operator): raise FileNotFoundError() # Determine where the blend file will be stored. + manager = self._manager_info(context) + if not manager: + raise FileNotFoundError("Manager info not known") unique_dir = "%s-%s" % ( datetime.datetime.now().isoformat("-").replace(":", ""), self.job_name, ) - pack_target_dir = Path(prefs.job_storage) / unique_dir + pack_target_dir = Path(manager.shared_storage.location) / unique_dir # TODO: this should take the blendfile location relative to the project path into account. pack_target_file = pack_target_dir / blendfile.name @@ -690,8 +641,6 @@ class FLAMENCO3_OT_explore_file_path(bpy.types.Operator): classes = ( - FLAMENCO_OT_fetch_job_types, - FLAMENCO_OT_fetch_worker_tags, FLAMENCO_OT_ping_manager, FLAMENCO_OT_eval_setting, FLAMENCO_OT_submit_job, diff --git a/addon/flamenco/preferences.py b/addon/flamenco/preferences.py index 2b5daa27..46e0cc0e 100644 --- a/addon/flamenco/preferences.py +++ b/addon/flamenco/preferences.py @@ -5,7 +5,7 @@ from pathlib import Path import bpy -from . import projects +from . import projects, manager_info def discard_flamenco_client(context): @@ -16,9 +16,7 @@ def discard_flamenco_client(context): context.window_manager.flamenco_status_ping = "" -def _refresh_the_planet( - prefs: "FlamencoPreferences", context: bpy.types.Context -) -> None: +def _refresh_the_planet(context: bpy.types.Context) -> None: """Refresh all GUI areas.""" for win in context.window_manager.windows: for area in win.screen.areas: @@ -35,7 +33,8 @@ def _manager_url_updated(prefs, context): # Warning, be careful what of the context to access here. Accessing / # changing too much can cause crashes, infinite loops, etc. 
- comms.ping_manager_with_report(context.window_manager, api_client, prefs) + comms.ping_manager(context.window_manager, context.scene, api_client) + _refresh_the_planet(context) _project_finder_enum_items = [ @@ -66,22 +65,6 @@ class FlamencoPreferences(bpy.types.AddonPreferences): items=_project_finder_enum_items, ) - is_shaman_enabled: bpy.props.BoolProperty( # type: ignore - name="Shaman Enabled", - description="Whether this Manager has the Shaman protocol enabled", - default=False, - update=_refresh_the_planet, - ) - - # Property that should be editable from Python. It's not exposed to the GUI. - job_storage: bpy.props.StringProperty( # type: ignore - name="Job Storage Directory", - subtype="DIR_PATH", - default="", - options={"HIDDEN"}, - description="Directory where blend files are stored when submitting them to Flamenco. This value is determined by Flamenco Manager", - ) - # Property that gets its value from the above _job_storage, and cannot be # set. This makes it read-only in the GUI. job_storage_for_gui: bpy.props.StringProperty( # type: ignore @@ -90,14 +73,7 @@ class FlamencoPreferences(bpy.types.AddonPreferences): default="", options={"SKIP_SAVE"}, description="Directory where blend files are stored when submitting them to Flamenco. This value is determined by Flamenco Manager", - get=lambda prefs: prefs.job_storage, - ) - - worker_tags: bpy.props.CollectionProperty( # type: ignore - type=WorkerTag, - name="Worker Tags", - description="Cache for the worker tags available on the configured Manager", - options={"HIDDEN"}, + get=lambda prefs: prefs._job_storage(), ) def draw(self, context: bpy.types.Context) -> None: @@ -116,7 +92,9 @@ class FlamencoPreferences(bpy.types.AddonPreferences): split.label(text="") split.label(text=label) - if not self.job_storage: + manager = manager_info.load_cached() + + if not manager: text_row(col, "Press the refresh button before using Flamenco") if context.window_manager.flamenco_status_ping: @@ -126,7 +104,7 @@ class FlamencoPreferences(bpy.types.AddonPreferences): text_row(aligned, "Press the refresh button to check the connection") text_row(aligned, "and update the job storage location") - if self.is_shaman_enabled: + if manager and manager.shared_storage.shaman_enabled: text_row(col, "Shaman enabled") col.prop(self, "job_storage_for_gui", text="Job Storage") @@ -152,6 +130,12 @@ class FlamencoPreferences(bpy.types.AddonPreferences): blendfile = Path(bpy.data.filepath) return projects.for_blendfile(blendfile, self.project_finder) + def _job_storage(self) -> str: + info = manager_info.load_cached() + if not info: + return "Unknown, refresh first." 
+ return str(info.shared_storage.location) + def get(context: bpy.types.Context) -> FlamencoPreferences: """Return the add-on preferences.""" diff --git a/addon/flamenco/projects.py b/addon/flamenco/projects.py index fb551b5e..246a8ba7 100644 --- a/addon/flamenco/projects.py +++ b/addon/flamenco/projects.py @@ -2,7 +2,7 @@ # from pathlib import Path -from typing import Callable, TypeAlias +from typing import Callable import dataclasses from .bat.submodules import bpathlib @@ -45,7 +45,7 @@ def _finder_subversion(blendfile: Path) -> Path: def _search_path_marker(blendfile: Path, marker_path: str) -> Path: """Go up the directory hierarchy until a file or directory 'marker_path' is found.""" - blendfile_dir = bpathlib.make_absolute(blendfile).parent + blendfile_dir: Path = bpathlib.make_absolute(blendfile).parent directory = blendfile_dir while True: @@ -64,7 +64,7 @@ def _search_path_marker(blendfile: Path, marker_path: str) -> Path: return blendfile_dir -Finder: TypeAlias = Callable[[Path], Path] +Finder = Callable[[Path], Path] @dataclasses.dataclass diff --git a/addon/flamenco/worker_tags.py b/addon/flamenco/worker_tags.py index 2ed75cbb..c5d28e94 100644 --- a/addon/flamenco/worker_tags.py +++ b/addon/flamenco/worker_tags.py @@ -1,57 +1,35 @@ # SPDX-License-Identifier: GPL-3.0-or-later -from typing import TYPE_CHECKING, Union +from typing import Union import bpy -from . import preferences - -if TYPE_CHECKING: - from flamenco.manager import ApiClient as _ApiClient -else: - _ApiClient = object +from . import manager_info _enum_items: list[Union[tuple[str, str, str], tuple[str, str, str, int, int]]] = [] -def refresh(context: bpy.types.Context, api_client: _ApiClient) -> None: - """Fetch the available worker tags from the Manager.""" - from flamenco.manager import ApiClient - from flamenco.manager.api import worker_mgt_api - from flamenco.manager.model.worker_tag_list import WorkerTagList - - assert isinstance(api_client, ApiClient) - - api = worker_mgt_api.WorkerMgtApi(api_client) - response: WorkerTagList = api.fetch_worker_tags() - - # Store on the preferences, so a cached version persists until the next refresh. - prefs = preferences.get(context) - prefs.worker_tags.clear() - - for tag in response.tags: - rna_tag = prefs.worker_tags.add() - rna_tag.id = tag.id - rna_tag.name = tag.name - rna_tag.description = getattr(tag, "description", "") - - # Preferences have changed, so make sure that Blender saves them (assuming - # auto-save here). 
- context.preferences.is_dirty = True - - def _get_enum_items(self, context): global _enum_items - prefs = preferences.get(context) + + manager = manager_info.load_cached() + if manager is None: + _enum_items = [ + ( + "-", + "-tags unknown-", + "Refresh to load the available Worker tags from the Manager", + ), + ] + return _enum_items _enum_items = [ ("-", "All", "No specific tag assigned, any worker can handle this job"), ] - _enum_items.extend( - (tag.id, tag.name, tag.description) - for tag in prefs.worker_tags - ) + for tag in manager.worker_tags.tags: + _enum_items.append((tag.id, tag.name, getattr(tag, "description", ""))) + return _enum_items @@ -70,9 +48,3 @@ def unregister() -> None: delattr(ob, attr) except AttributeError: pass - - -if __name__ == "__main__": - import doctest - - print(doctest.testmod()) diff --git a/cmd/flamenco-manager/main.go b/cmd/flamenco-manager/main.go index 8da0198f..1741e25e 100644 --- a/cmd/flamenco-manager/main.go +++ b/cmd/flamenco-manager/main.go @@ -27,6 +27,7 @@ import ( "projects.blender.org/studio/flamenco/internal/manager/api_impl/dummy" "projects.blender.org/studio/flamenco/internal/manager/config" "projects.blender.org/studio/flamenco/internal/manager/eventbus" + "projects.blender.org/studio/flamenco/internal/manager/farmstatus" "projects.blender.org/studio/flamenco/internal/manager/job_compilers" "projects.blender.org/studio/flamenco/internal/manager/job_deleter" "projects.blender.org/studio/flamenco/internal/manager/last_rendered" @@ -55,6 +56,10 @@ const ( developmentWebInterfacePort = 8081 webappEntryPoint = "index.html" + + // dbOpenTimeout is the time the persistence layer gets to open the database. + // This includes database migrations, which can take some time to perform. + dbOpenTimeout = 1 * time.Minute ) type shutdownFunc func() @@ -174,10 +179,12 @@ func runFlamencoManager() bool { shamanServer := buildShamanServer(configService, isFirstRun) jobDeleter := job_deleter.NewService(persist, localStorage, eventBroker, shamanServer) + farmStatus := farmstatus.NewService(persist, eventBroker) + flamenco := api_impl.NewFlamenco( compiler, persist, eventBroker, logStorage, configService, taskStateMachine, shamanServer, timeService, lastRender, - localStorage, sleepScheduler, jobDeleter) + localStorage, sleepScheduler, jobDeleter, farmStatus) e := buildWebService(flamenco, persist, ssdp, socketio, urls, localStorage) @@ -278,6 +285,13 @@ func runFlamencoManager() bool { jobDeleter.Run(mainCtx) }() + // Run the Farm Status service. + wg.Add(1) + go func() { + defer wg.Done() + farmStatus.Run(mainCtx) + }() + // Log the URLs last, hopefully that makes them more visible / encouraging to go to for users. go func() { time.Sleep(100 * time.Millisecond) @@ -369,7 +383,7 @@ func openDB(configService config.Service) *persistence.DB { log.Fatal().Msg("configure the database in flamenco-manager.yaml") } - dbCtx, dbCtxCancel := context.WithTimeout(context.Background(), 5*time.Second) + dbCtx, dbCtxCancel := context.WithTimeout(context.Background(), dbOpenTimeout) defer dbCtxCancel() persist, err := persistence.OpenDB(dbCtx, dsn) if err != nil { diff --git a/cmd/flamenco-worker/find_exes.go b/cmd/flamenco-worker/find_exes.go index d14333ed..cfe14da6 100644 --- a/cmd/flamenco-worker/find_exes.go +++ b/cmd/flamenco-worker/find_exes.go @@ -38,7 +38,7 @@ func findBlender() { result, err := find_blender.Find(ctx) switch { case errors.Is(err, fs.ErrNotExist), errors.Is(err, exec.ErrNotFound): - log.Warn().Msg("Blender could not be found. 
" + helpMsg) + log.Info().Msg("Blender could not be found. " + helpMsg) case err != nil: log.Warn().AnErr("cause", err).Msg("There was an error finding Blender on this system. " + helpMsg) default: diff --git a/cmd/flamenco-worker/main.go b/cmd/flamenco-worker/main.go index d42be507..b4d1ee46 100644 --- a/cmd/flamenco-worker/main.go +++ b/cmd/flamenco-worker/main.go @@ -23,7 +23,9 @@ import ( "projects.blender.org/studio/flamenco/internal/appinfo" "projects.blender.org/studio/flamenco/internal/worker" "projects.blender.org/studio/flamenco/internal/worker/cli_runner" + "projects.blender.org/studio/flamenco/pkg/oomscore" "projects.blender.org/studio/flamenco/pkg/sysinfo" + "projects.blender.org/studio/flamenco/pkg/website" ) var ( @@ -113,6 +115,10 @@ func main() { findBlender() findFFmpeg() + // Create the CLI runner before the auto-discovery, to make any configuration + // problems clear before waiting for the Manager to respond. + cliRunner := createCLIRunner(&configWrangler) + // Give the auto-discovery some time to find a Manager. discoverTimeout := 10 * time.Minute discoverCtx, discoverCancel := context.WithTimeout(context.Background(), discoverTimeout) @@ -148,7 +154,6 @@ func main() { return } - cliRunner := cli_runner.NewCLIRunner() listener = worker.NewListener(client, buffer) cmdRunner := worker.NewCommandExecutor(cliRunner, listener, timeService) taskRunner := worker.NewTaskExecutor(cmdRunner, listener) @@ -296,8 +301,34 @@ func upstreamBufferOrDie(client worker.FlamencoClient, timeService clock.Clock) func logFatalManagerDiscoveryError(err error, discoverTimeout time.Duration) { if errors.Is(err, context.DeadlineExceeded) { - log.Fatal().Str("timeout", discoverTimeout.String()).Msg("could not discover Manager in time") + log.Fatal().Stringer("timeout", discoverTimeout). + Msgf("could not discover Manager in time, see %s", website.CannotFindManagerHelpURL) } else { - log.Fatal().Err(err).Msg("auto-discovery error") + log.Fatal().Err(err). + Msgf("auto-discovery error, see %s", website.CannotFindManagerHelpURL) } } + +func createCLIRunner(configWrangler *worker.FileConfigWrangler) *cli_runner.CLIRunner { + config, err := configWrangler.WorkerConfig() + if err != nil { + log.Fatal().Err(err).Msg("error loading worker configuration") + } + + if config.LinuxOOMScoreAdjust == nil { + log.Debug().Msg("executables will be run without OOM score adjustment") + return cli_runner.NewCLIRunner() + } + + if !oomscore.Available() { + log.Warn(). + Msgf("config: oom_score_adjust configured, but that is only available on Linux, not this platform. See %s for more information.", + website.OOMScoreAdjURL) + return cli_runner.NewCLIRunner() + } + + adjustment := *config.LinuxOOMScoreAdjust + log.Info().Int("oom_score_adjust", adjustment).Msg("executables will be run with OOM score adjustment") + + return cli_runner.NewCLIRunnerWithOOMScoreAdjuster(adjustment) +} diff --git a/cmd/sqlc-export-schema/main.go b/cmd/sqlc-export-schema/main.go new file mode 100644 index 00000000..ba84a798 --- /dev/null +++ b/cmd/sqlc-export-schema/main.go @@ -0,0 +1,189 @@ +package main + +// SPDX-License-Identifier: GPL-3.0-or-later + +import ( + "context" + "database/sql" + "flag" + "fmt" + "os" + "os/signal" + "regexp" + "strings" + "syscall" + "time" + + "github.com/mattn/go-colorable" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "gopkg.in/yaml.v2" + + _ "modernc.org/sqlite" +) + +var ( + // Tables and/or indices to skip when writing the schema. 
+	// Anything that is *not* to be seen by sqlc should be listed here.
+	skips = map[SQLiteSchema]bool{
+		// Goose manages its own versioning table. SQLC should ignore its existence.
+		{Type: "table", Name: "goose_db_version"}: true,
+	}
+
+	tableNameDequoter = regexp.MustCompile("^(?:CREATE TABLE )(\"([^\"]+)\")")
+)
+
+type SQLiteSchema struct {
+	Type      string
+	Name      string
+	TableName string
+	RootPage  int
+	SQL       sql.NullString
+}
+
+func saveSchema(ctx context.Context, sqlOutPath string) error {
+	db, err := sql.Open("sqlite", "flamenco-manager.sqlite")
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+
+	rows, err := db.QueryContext(ctx, "select * from sqlite_schema order by type desc, name asc")
+	if err != nil {
+		return err
+	}
+	defer rows.Close()
+
+	sqlBuilder := strings.Builder{}
+
+	for rows.Next() {
+		var data SQLiteSchema
+		if err := rows.Scan(
+			&data.Type,
+			&data.Name,
+			&data.TableName,
+			&data.RootPage,
+			&data.SQL,
+		); err != nil {
+			return err
+		}
+		if strings.HasPrefix(data.Name, "sqlite_") {
+			continue
+		}
+		if skips[SQLiteSchema{Type: data.Type, Name: data.Name}] {
+			continue
+		}
+		if !data.SQL.Valid {
+			continue
+		}
+
+		sql := tableNameDequoter.ReplaceAllString(data.SQL.String, "CREATE TABLE $2")
+
+		sqlBuilder.WriteString(sql)
+		sqlBuilder.WriteString(";\n")
+	}
+	// Surface any error that ended the row iteration early (e.g. a cancelled
+	// context); without this check a partial schema could be written out as
+	// if it were complete.
+	if err := rows.Err(); err != nil {
+		return err
+	}
+
+	sqlBytes := []byte(sqlBuilder.String())
+	if err := os.WriteFile(sqlOutPath, sqlBytes, os.ModePerm); err != nil {
+		return fmt.Errorf("writing to %s: %w", sqlOutPath, err)
+	}
+
+	log.Info().Str("path", sqlOutPath).Msg("schema written to file")
+	return nil
+}
+
+// SqlcConfig models the minimal subset of the sqlc.yaml we need to parse.
+type SqlcConfig struct {
+	Version string `yaml:"version"`
+	SQL     []struct {
+		Schema string `yaml:"schema"`
+	} `yaml:"sql"`
+}
+
+func main() {
+	output := zerolog.ConsoleWriter{Out: colorable.NewColorableStdout(), TimeFormat: time.RFC3339}
+	log.Logger = log.Output(output)
+	parseCliArgs()
+
+	mainCtx, mainCtxCancel := context.WithCancel(context.Background())
+	defer mainCtxCancel()
+
+	installSignalHandler(mainCtxCancel)
+
+	schemaPath := schemaPathFromSqlcYAML()
+
+	if err := saveSchema(mainCtx, schemaPath); err != nil {
+		log.Fatal().Err(err).Msg("couldn't export schema")
+	}
+}
+
+// installSignalHandler spawns a goroutine that handles incoming POSIX signals.
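+// It only cancels the main context; the running schema query is then aborted
+// via that context instead of the process being killed outright.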
+func installSignalHandler(cancelFunc context.CancelFunc) { + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt) + signal.Notify(signals, syscall.SIGTERM) + go func() { + for signum := range signals { + log.Info().Str("signal", signum.String()).Msg("signal received, shutting down") + cancelFunc() + } + }() +} + +func parseCliArgs() { + var quiet, debug, trace bool + + flag.BoolVar(&quiet, "quiet", false, "Only log warning-level and worse.") + flag.BoolVar(&debug, "debug", false, "Enable debug-level logging.") + flag.BoolVar(&trace, "trace", false, "Enable trace-level logging.") + + flag.Parse() + + var logLevel zerolog.Level + switch { + case trace: + logLevel = zerolog.TraceLevel + case debug: + logLevel = zerolog.DebugLevel + case quiet: + logLevel = zerolog.WarnLevel + default: + logLevel = zerolog.InfoLevel + } + zerolog.SetGlobalLevel(logLevel) +} + +func schemaPathFromSqlcYAML() string { + var sqlcConfig SqlcConfig + + { + sqlcConfigBytes, err := os.ReadFile("sqlc.yaml") + if err != nil { + log.Fatal().Err(err).Msg("cannot read sqlc.yaml") + } + + if err := yaml.Unmarshal(sqlcConfigBytes, &sqlcConfig); err != nil { + log.Fatal().Err(err).Msg("cannot parse sqlc.yaml") + } + } + + if sqlcConfig.Version != "2" { + log.Fatal(). + Str("version", sqlcConfig.Version). + Str("expected", "2"). + Msg("unexpected version in sqlc.yaml") + } + + if len(sqlcConfig.SQL) != 1 { + log.Fatal(). + Int("sql items", len(sqlcConfig.SQL)). + Msg("sqlc.yaml should contain a single item in the 'sql' list") + } + + schema := sqlcConfig.SQL[0].Schema + if schema == "" { + log.Fatal().Msg("sqlc.yaml should have a 'schema' key in the 'sql' item") + } + + return schema +} diff --git a/go.mod b/go.mod index 99662705..a608c7ce 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module projects.blender.org/studio/flamenco -go 1.22 +go 1.22.2 require ( github.com/adrg/xdg v0.4.0 @@ -30,12 +30,12 @@ require ( github.com/tc-hib/go-winres v0.3.1 github.com/zcalusic/sysinfo v1.0.1 github.com/ziflex/lecho/v3 v3.1.0 - golang.org/x/crypto v0.18.0 + golang.org/x/crypto v0.22.0 golang.org/x/image v0.10.0 - golang.org/x/net v0.20.0 - golang.org/x/sync v0.6.0 - golang.org/x/sys v0.16.0 - golang.org/x/vuln v1.0.4 + golang.org/x/net v0.24.0 + golang.org/x/sync v0.7.0 + golang.org/x/sys v0.19.0 + golang.org/x/vuln v1.1.0 gopkg.in/yaml.v2 v2.4.0 gorm.io/gorm v1.25.5 honnef.co/go/tools v0.4.2 @@ -58,6 +58,7 @@ require ( github.com/gorilla/websocket v1.5.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/labstack/gommon v0.4.0 // indirect github.com/mailru/easyjson v0.7.0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -70,12 +71,18 @@ require ( github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.1 // indirect golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect - golang.org/x/mod v0.14.0 // indirect + golang.org/x/mod v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect - golang.org/x/tools v0.17.0 // indirect + golang.org/x/tools v0.20.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + lukechampine.com/uint128 v1.3.0 // indirect + modernc.org/cc/v3 v3.41.0 // indirect + modernc.org/ccgo/v3 v3.16.15 // indirect modernc.org/libc v1.37.6 // indirect modernc.org/mathutil v1.6.0 // indirect modernc.org/memory v1.7.2 // indirect + 
modernc.org/opt v0.1.3 // indirect + modernc.org/strutil v1.2.0 // indirect + modernc.org/token v1.1.0 // indirect ) diff --git a/go.sum b/go.sum index cd705036..866062dc 100644 --- a/go.sum +++ b/go.sum @@ -152,6 +152,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -221,8 +223,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a h1:Jw5wfR+h9mnIYH+OtGT2im5wV1YGGDora5vTv/aa5bE= golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -235,8 +237,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -247,16 +249,16 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -286,8 +288,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -313,10 +315,10 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/vuln v1.0.4 h1:SP0mPeg2PmGCu03V+61EcQiOjmpri2XijexKdzv8Z1I= -golang.org/x/vuln v1.0.4/go.mod h1:NbJdUQhX8jY++FtuhrXs2Eyx0yePo9pF7nPlIjo9aaQ= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/vuln v1.1.0 h1:ECEdI+aEtjpF90eqEcDL5Q11DWSZAw5PJQWlp0+gWqc= +golang.org/x/vuln v1.1.0/go.mod 
h1:HT/Ar8fE34tbxWG2s7PYjVl+iIE4Er36/940Z+K540Y= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -349,6 +351,10 @@ modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q= modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y= modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0= modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI= +modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw= modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE= modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= @@ -361,5 +367,9 @@ modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ= modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0= modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= +modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= +modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE= diff --git a/internal/manager/api_impl/api_impl.go b/internal/manager/api_impl/api_impl.go index 6e109481..b923d3f8 100644 --- a/internal/manager/api_impl/api_impl.go +++ b/internal/manager/api_impl/api_impl.go @@ -28,6 +28,7 @@ type Flamenco struct { localStorage LocalStorage sleepScheduler WorkerSleepScheduler jobDeleter JobDeleter + farmstatus FarmStatusService // The task scheduler can be locked to prevent multiple Workers from getting // the same task. 
It is also used for certain other queries, like @@ -55,6 +56,7 @@ func NewFlamenco( localStorage LocalStorage, wss WorkerSleepScheduler, jd JobDeleter, + farmstatus FarmStatusService, ) *Flamenco { return &Flamenco{ jobCompiler: jc, @@ -69,6 +71,7 @@ func NewFlamenco( localStorage: localStorage, sleepScheduler: wss, jobDeleter: jd, + farmstatus: farmstatus, done: make(chan struct{}), } diff --git a/internal/manager/api_impl/interfaces.go b/internal/manager/api_impl/interfaces.go index fe549259..d965519f 100644 --- a/internal/manager/api_impl/interfaces.go +++ b/internal/manager/api_impl/interfaces.go @@ -15,6 +15,7 @@ import ( "projects.blender.org/studio/flamenco/internal/manager/config" "projects.blender.org/studio/flamenco/internal/manager/eventbus" + "projects.blender.org/studio/flamenco/internal/manager/farmstatus" "projects.blender.org/studio/flamenco/internal/manager/job_compilers" "projects.blender.org/studio/flamenco/internal/manager/job_deleter" "projects.blender.org/studio/flamenco/internal/manager/last_rendered" @@ -26,7 +27,7 @@ import ( ) // Generate mock implementations of these interfaces. -//go:generate go run github.com/golang/mock/mockgen -destination mocks/api_impl_mock.gen.go -package mocks projects.blender.org/studio/flamenco/internal/manager/api_impl PersistenceService,ChangeBroadcaster,JobCompiler,LogStorage,ConfigService,TaskStateMachine,Shaman,LastRendered,LocalStorage,WorkerSleepScheduler,JobDeleter +//go:generate go run github.com/golang/mock/mockgen -destination mocks/api_impl_mock.gen.go -package mocks projects.blender.org/studio/flamenco/internal/manager/api_impl PersistenceService,ChangeBroadcaster,JobCompiler,LogStorage,ConfigService,TaskStateMachine,Shaman,LastRendered,LocalStorage,WorkerSleepScheduler,JobDeleter,FarmStatusService type PersistenceService interface { StoreAuthoredJob(ctx context.Context, authoredJob job_compilers.AuthoredJob) error @@ -239,3 +240,9 @@ type JobDeleter interface { } var _ JobDeleter = (*job_deleter.Service)(nil) + +type FarmStatusService interface { + Report() api.FarmStatusReport +} + +var _ FarmStatusService = (*farmstatus.Service)(nil) diff --git a/internal/manager/api_impl/jobs_query_test.go b/internal/manager/api_impl/jobs_query_test.go index 3fc57158..9be480b1 100644 --- a/internal/manager/api_impl/jobs_query_test.go +++ b/internal/manager/api_impl/jobs_query_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/persistence" "projects.blender.org/studio/flamenco/pkg/api" ) @@ -52,7 +52,7 @@ func TestQueryJobs(t *testing.T) { Return([]*persistence.Job{&activeJob, &deletionQueuedJob}, nil) err := mf.flamenco.QueryJobs(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) expectedJobs := api.JobsQueryResult{ Jobs: []api.Job{ @@ -160,7 +160,7 @@ func TestFetchTask(t *testing.T) { Return([]*persistence.Worker{&taskWorker}, nil) err := mf.flamenco.FetchTask(echoCtx, taskUUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echoCtx, http.StatusOK, expectAPITask) } diff --git a/internal/manager/api_impl/jobs_test.go b/internal/manager/api_impl/jobs_test.go index f75c9861..61b52327 100644 --- a/internal/manager/api_impl/jobs_test.go +++ b/internal/manager/api_impl/jobs_test.go @@ -88,7 +88,7 @@ func TestSubmitJobWithoutSettings(t *testing.T) { echoCtx := mf.prepareMockedJSONRequest(submittedJob) requestWorkerStore(echoCtx, &worker) err := 
mf.flamenco.SubmitJob(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) } func TestSubmitJobWithSettings(t *testing.T) { @@ -177,7 +177,7 @@ func TestSubmitJobWithSettings(t *testing.T) { echoCtx := mf.prepareMockedJSONRequest(submittedJob) requestWorkerStore(echoCtx, &worker) err := mf.flamenco.SubmitJob(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) } func TestSubmitJobWithEtag(t *testing.T) { @@ -202,7 +202,7 @@ func TestSubmitJobWithEtag(t *testing.T) { { echoCtx := mf.prepareMockedJSONRequest(submittedJob) err := mf.flamenco.SubmitJob(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echoCtx, http.StatusPreconditionFailed, "rejecting job because its settings are outdated, refresh the job type") } @@ -240,7 +240,7 @@ func TestSubmitJobWithEtag(t *testing.T) { submittedJob.TypeEtag = ptr("correct etag") echoCtx := mf.prepareMockedJSONRequest(submittedJob) err := mf.flamenco.SubmitJob(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) } } @@ -318,7 +318,7 @@ func TestSubmitJobWithShamanCheckoutID(t *testing.T) { echoCtx := mf.prepareMockedJSONRequest(submittedJob) requestWorkerStore(echoCtx, &worker) err := mf.flamenco.SubmitJob(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) } func TestSubmitJobWithWorkerTag(t *testing.T) { @@ -437,7 +437,33 @@ func TestGetJobTypeHappy(t *testing.T) { echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.GetJobType(echoCtx, "test-job-type") - assert.NoError(t, err) + require.NoError(t, err) + + assertResponseJSON(t, echoCtx, http.StatusOK, jt) +} + +func TestGetJobTypeWithDescriptionHappy(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mf := newMockedFlamenco(mockCtrl) + + // Get an existing job type with a description. + description := "This is a test job type" + jt := api.AvailableJobType{ + Description: &description, + Etag: "some etag", + Name: "test-job-type", + Label: "Test Job Type", + Settings: []api.AvailableJobSetting{ + {Key: "setting", Type: api.AvailableJobSettingTypeString}, + }, + } + mf.jobCompiler.EXPECT().GetJobType("test-job-type"). 
+ Return(jt, nil) + + echoCtx := mf.prepareMockedRequest(nil) + err := mf.flamenco.GetJobType(echoCtx, "test-job-type") + require.NoError(t, err) assertResponseJSON(t, echoCtx, http.StatusOK, jt) } @@ -453,7 +479,7 @@ func TestGetJobTypeUnknown(t *testing.T) { echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.GetJobType(echoCtx, "nonexistent-type") - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echoCtx, http.StatusNotFound, api.Error{ Code: http.StatusNotFound, Message: "no such job type known", @@ -482,7 +508,7 @@ func TestSubmitJobCheckWithEtag(t *testing.T) { { echoCtx := mf.prepareMockedJSONRequest(submittedJob) err := mf.flamenco.SubmitJobCheck(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echoCtx, http.StatusPreconditionFailed, "rejecting job because its settings are outdated, refresh the job type") } @@ -502,7 +528,7 @@ func TestSubmitJobCheckWithEtag(t *testing.T) { submittedJob.TypeEtag = ptr("correct etag") echoCtx := mf.prepareMockedJSONRequest(submittedJob) err := mf.flamenco.SubmitJobCheck(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) } } @@ -516,7 +542,7 @@ func TestGetJobTypeError(t *testing.T) { Return(api.AvailableJobType{}, errors.New("didn't expect this")) echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.GetJobType(echoCtx, "error") - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echoCtx, http.StatusInternalServerError, "error getting job type") } @@ -537,7 +563,7 @@ func TestSetJobStatus_nonexistentJob(t *testing.T) { // Do the call. echoCtx := mf.prepareMockedJSONRequest(statusUpdate) err := mf.flamenco.SetJobStatus(echoCtx, jobID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echoCtx, http.StatusNotFound, "no such job") } @@ -571,7 +597,7 @@ func TestSetJobStatus_happy(t *testing.T) { // Do the call. echoCtx := mf.prepareMockedJSONRequest(statusUpdate) err := mf.flamenco.SetJobStatus(echoCtx, jobID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } @@ -592,7 +618,7 @@ func TestSetJobPrio_nonexistentJob(t *testing.T) { // Do the call. echoCtx := mf.prepareMockedJSONRequest(prioUpdate) err := mf.flamenco.SetJobStatus(echoCtx, jobID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echoCtx, http.StatusNotFound, "no such job") } @@ -634,7 +660,7 @@ func TestSetJobPrio(t *testing.T) { mf.broadcaster.EXPECT().BroadcastJobUpdate(expectUpdate) err := mf.flamenco.SetJobPriority(echoCtx, jobID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } @@ -668,7 +694,7 @@ func TestSetJobStatusFailedToRequeueing(t *testing.T) { // Do the call. err := mf.flamenco.SetJobStatus(echoCtx, jobID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } @@ -714,7 +740,7 @@ func TestSetTaskStatusQueued(t *testing.T) { // Do the call. err := mf.flamenco.SetTaskStatus(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } @@ -748,7 +774,7 @@ func TestFetchTaskLogTail(t *testing.T) { echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.FetchTaskLogTail(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) // Check that a 204 No Content is also returned when the task log file on disk exists, but is empty. 
@@ -758,7 +784,7 @@ func TestFetchTaskLogTail(t *testing.T) { echoCtx = mf.prepareMockedRequest(nil) err = mf.flamenco.FetchTaskLogTail(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } @@ -794,7 +820,7 @@ func TestFetchTaskLogInfo(t *testing.T) { echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.FetchTaskLogInfo(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) // Check that a 204 No Content is also returned when the task log file on disk exists, but is empty. @@ -803,7 +829,7 @@ func TestFetchTaskLogInfo(t *testing.T) { echoCtx = mf.prepareMockedRequest(nil) err = mf.flamenco.FetchTaskLogInfo(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) // Check that otherwise we actually get the info. @@ -813,7 +839,7 @@ func TestFetchTaskLogInfo(t *testing.T) { echoCtx = mf.prepareMockedRequest(nil) err = mf.flamenco.FetchTaskLogInfo(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echoCtx, http.StatusOK, api.TaskLogInfo{ JobId: jobID, TaskId: taskID, @@ -842,7 +868,7 @@ func TestFetchJobLastRenderedInfo(t *testing.T) { echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.FetchJobLastRenderedInfo(echoCtx, jobID) - assert.NoError(t, err) + require.NoError(t, err) expectBody := api.JobLastRenderedImageInfo{ Base: "/job-files/relative/path", @@ -857,7 +883,7 @@ func TestFetchJobLastRenderedInfo(t *testing.T) { echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.FetchJobLastRenderedInfo(echoCtx, jobID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } } @@ -876,7 +902,7 @@ func TestFetchGlobalLastRenderedInfo(t *testing.T) { echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.FetchGlobalLastRenderedInfo(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } @@ -893,7 +919,7 @@ func TestFetchGlobalLastRenderedInfo(t *testing.T) { echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.FetchGlobalLastRenderedInfo(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) expectBody := api.JobLastRenderedImageInfo{ Base: "/job-files/relative/path", @@ -927,7 +953,7 @@ func TestDeleteJob(t *testing.T) { // Do the call. 
err := mf.flamenco.DeleteJob(echoCtx, jobID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } diff --git a/internal/manager/api_impl/meta.go b/internal/manager/api_impl/meta.go index c647270b..000bd5de 100644 --- a/internal/manager/api_impl/meta.go +++ b/internal/manager/api_impl/meta.go @@ -321,6 +321,10 @@ func (f *Flamenco) SaveSetupAssistantConfig(e echo.Context) error { return e.NoContent(http.StatusNoContent) } +func (f *Flamenco) GetFarmStatus(e echo.Context) error { + return e.JSON(http.StatusOK, f.farmstatus.Report()) +} + func flamencoManagerDir() (string, error) { exename, err := os.Executable() if err != nil { diff --git a/internal/manager/api_impl/meta_test.go b/internal/manager/api_impl/meta_test.go index ec0d366a..78581804 100644 --- a/internal/manager/api_impl/meta_test.go +++ b/internal/manager/api_impl/meta_test.go @@ -43,7 +43,7 @@ func TestGetVariables(t *testing.T) { echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.GetVariables(echoCtx, api.ManagerVariableAudienceWorkers, "linux") - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echoCtx, http.StatusOK, api.ManagerVariables{ AdditionalProperties: map[string]api.ManagerVariable{ "blender": {Value: "/usr/local/blender", IsTwoway: false}, @@ -61,7 +61,7 @@ func TestGetVariables(t *testing.T) { echoCtx := mf.prepareMockedRequest(nil) err := mf.flamenco.GetVariables(echoCtx, api.ManagerVariableAudienceUsers, "troll") - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echoCtx, http.StatusOK, api.ManagerVariables{}) } } @@ -208,9 +208,7 @@ func TestCheckSharedStoragePath(t *testing.T) { echoCtx := mf.prepareMockedJSONRequest( api.PathCheckInput{Path: path}) err := mf.flamenco.CheckSharedStoragePath(echoCtx) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) return echoCtx } @@ -230,9 +228,8 @@ func TestCheckSharedStoragePath(t *testing.T) { Cause: "Directory checked successfully", }) files, err := filepath.Glob(filepath.Join(mf.tempdir, "*")) - if assert.NoError(t, err) { - assert.Empty(t, files, "After a query, there should not be any leftovers") - } + require.NoError(t, err) + assert.Empty(t, files, "After a query, there should not be any leftovers") // Test inaccessible path. // For some reason, this doesn't work on Windows, and creating a file in @@ -253,12 +250,9 @@ func TestCheckSharedStoragePath(t *testing.T) { parentPath := filepath.Join(mf.tempdir, "deep") testPath := filepath.Join(parentPath, "nesting") - if err := os.Mkdir(parentPath, fs.ModePerm); !assert.NoError(t, err) { - t.FailNow() - } - if err := os.Mkdir(testPath, fs.FileMode(0)); !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, os.Mkdir(parentPath, fs.ModePerm)) + require.NoError(t, os.Mkdir(testPath, fs.FileMode(0))) + echoCtx := doTest(testPath) result := api.PathCheckResult{} getResponseJSON(t, echoCtx, http.StatusOK, &result) @@ -295,9 +289,7 @@ func TestSaveSetupAssistantConfig(t *testing.T) { // Call the API. 
echoCtx := mf.prepareMockedJSONRequest(body) err := mf.flamenco.SaveSetupAssistantConfig(echoCtx) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) assertResponseNoContent(t, echoCtx) return savedConfig @@ -378,9 +370,7 @@ func metaTestFixtures(t *testing.T) (mockedFlamenco, func()) { mf := newMockedFlamenco(mockCtrl) tempdir, err := os.MkdirTemp("", "test-temp-dir") - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) mf.tempdir = tempdir finish := func() { diff --git a/internal/manager/api_impl/mocks/api_impl_mock.gen.go b/internal/manager/api_impl/mocks/api_impl_mock.gen.go index ccc776b6..a7360e98 100644 --- a/internal/manager/api_impl/mocks/api_impl_mock.gen.go +++ b/internal/manager/api_impl/mocks/api_impl_mock.gen.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: projects.blender.org/studio/flamenco/internal/manager/api_impl (interfaces: PersistenceService,ChangeBroadcaster,JobCompiler,LogStorage,ConfigService,TaskStateMachine,Shaman,LastRendered,LocalStorage,WorkerSleepScheduler,JobDeleter) +// Source: projects.blender.org/studio/flamenco/internal/manager/api_impl (interfaces: PersistenceService,ChangeBroadcaster,JobCompiler,LogStorage,ConfigService,TaskStateMachine,Shaman,LastRendered,LocalStorage,WorkerSleepScheduler,JobDeleter,FarmStatusService) // Package mocks is a generated GoMock package. package mocks @@ -1413,3 +1413,40 @@ func (mr *MockJobDeleterMockRecorder) WhatWouldBeDeleted(arg0 interface{}) *gomo mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WhatWouldBeDeleted", reflect.TypeOf((*MockJobDeleter)(nil).WhatWouldBeDeleted), arg0) } + +// MockFarmStatusService is a mock of FarmStatusService interface. +type MockFarmStatusService struct { + ctrl *gomock.Controller + recorder *MockFarmStatusServiceMockRecorder +} + +// MockFarmStatusServiceMockRecorder is the mock recorder for MockFarmStatusService. +type MockFarmStatusServiceMockRecorder struct { + mock *MockFarmStatusService +} + +// NewMockFarmStatusService creates a new mock instance. +func NewMockFarmStatusService(ctrl *gomock.Controller) *MockFarmStatusService { + mock := &MockFarmStatusService{ctrl: ctrl} + mock.recorder = &MockFarmStatusServiceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockFarmStatusService) EXPECT() *MockFarmStatusServiceMockRecorder { + return m.recorder +} + +// Report mocks base method. +func (m *MockFarmStatusService) Report() api.FarmStatusReport { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Report") + ret0, _ := ret[0].(api.FarmStatusReport) + return ret0 +} + +// Report indicates an expected call of Report. 
+func (mr *MockFarmStatusServiceMockRecorder) Report() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Report", reflect.TypeOf((*MockFarmStatusService)(nil).Report)) +} diff --git a/internal/manager/api_impl/support_test.go b/internal/manager/api_impl/support_test.go index 4a7ea588..23c46553 100644 --- a/internal/manager/api_impl/support_test.go +++ b/internal/manager/api_impl/support_test.go @@ -16,6 +16,7 @@ import ( "github.com/golang/mock/gomock" "github.com/labstack/echo/v4" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/api_impl/mocks" "projects.blender.org/studio/flamenco/internal/manager/config" @@ -37,6 +38,7 @@ type mockedFlamenco struct { localStorage *mocks.MockLocalStorage sleepScheduler *mocks.MockWorkerSleepScheduler jobDeleter *mocks.MockJobDeleter + farmstatus *mocks.MockFarmStatusService // Place for some tests to store a temporary directory. tempdir string @@ -54,6 +56,7 @@ func newMockedFlamenco(mockCtrl *gomock.Controller) mockedFlamenco { localStore := mocks.NewMockLocalStorage(mockCtrl) wss := mocks.NewMockWorkerSleepScheduler(mockCtrl) jd := mocks.NewMockJobDeleter(mockCtrl) + fs := mocks.NewMockFarmStatusService(mockCtrl) clock := clock.NewMock() mockedNow, err := time.Parse(time.RFC3339, "2022-06-09T11:14:41+02:00") @@ -62,7 +65,7 @@ func newMockedFlamenco(mockCtrl *gomock.Controller) mockedFlamenco { } clock.Set(mockedNow) - f := NewFlamenco(jc, ps, cb, logStore, cs, sm, sha, clock, lr, localStore, wss, jd) + f := NewFlamenco(jc, ps, cb, logStore, cs, sm, sha, clock, lr, localStore, wss, jd, fs) return mockedFlamenco{ flamenco: f, @@ -78,6 +81,7 @@ func newMockedFlamenco(mockCtrl *gomock.Controller) mockedFlamenco { localStorage: localStore, sleepScheduler: wss, jobDeleter: jd, + farmstatus: fs, } } @@ -179,14 +183,10 @@ func getResponseJSON(t *testing.T, echoCtx echo.Context, expectStatusCode int, a } actualJSON, err := io.ReadAll(resp.Body) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) err = json.Unmarshal(actualJSON, actualPayloadPtr) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) } // assertResponseJSON asserts that a recorded response is JSON with the given HTTP status code. 
@@ -201,14 +201,10 @@ func assertResponseJSON(t *testing.T, echoCtx echo.Context, expectStatusCode int } expectJSON, err := json.Marshal(expectBody) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) actualJSON, err := io.ReadAll(resp.Body) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) assert.JSONEq(t, string(expectJSON), string(actualJSON)) } diff --git a/internal/manager/api_impl/worker_mgt_test.go b/internal/manager/api_impl/worker_mgt_test.go index 3df92c1a..62942ee3 100644 --- a/internal/manager/api_impl/worker_mgt_test.go +++ b/internal/manager/api_impl/worker_mgt_test.go @@ -33,7 +33,7 @@ func TestFetchWorkers(t *testing.T) { echo := mf.prepareMockedRequest(nil) err := mf.flamenco.FetchWorkers(echo) - assert.NoError(t, err) + require.NoError(t, err) // Check the response workers := api.WorkerList{ @@ -74,7 +74,7 @@ func TestFetchWorker(t *testing.T) { Return(nil, fmt.Errorf("wrapped: %w", persistence.ErrWorkerNotFound)) echo := mf.prepareMockedRequest(nil) err := mf.flamenco.FetchWorker(echo, workerUUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echo, http.StatusNotFound, fmt.Sprintf("worker %q not found", workerUUID)) // Test database error fetching worker. @@ -82,7 +82,7 @@ func TestFetchWorker(t *testing.T) { Return(nil, errors.New("some unknown error")) echo = mf.prepareMockedRequest(nil) err = mf.flamenco.FetchWorker(echo, workerUUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echo, http.StatusInternalServerError, "error fetching worker: some unknown error") // Test with worker that does NOT have a status change requested, and DOES have an assigned task. @@ -97,7 +97,7 @@ func TestFetchWorker(t *testing.T) { echo = mf.prepareMockedRequest(nil) err = mf.flamenco.FetchWorker(echo, workerUUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echo, http.StatusOK, api.Worker{ WorkerSummary: api.WorkerSummary{ Id: workerUUID, @@ -126,7 +126,7 @@ func TestFetchWorker(t *testing.T) { echo = mf.prepareMockedRequest(nil) err = mf.flamenco.FetchWorker(echo, worker.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echo, http.StatusOK, api.Worker{ WorkerSummary: api.WorkerSummary{ Id: workerUUID, @@ -155,7 +155,7 @@ func TestDeleteWorker(t *testing.T) { Return(nil, fmt.Errorf("wrapped: %w", persistence.ErrWorkerNotFound)) echo := mf.prepareMockedRequest(nil) err := mf.flamenco.DeleteWorker(echo, workerUUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echo, http.StatusNotFound, fmt.Sprintf("worker %q not found", workerUUID)) // Test with existing worker. 
@@ -176,7 +176,7 @@ func TestDeleteWorker(t *testing.T) { echo = mf.prepareMockedRequest(nil) err = mf.flamenco.DeleteWorker(echo, workerUUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echo) } @@ -214,7 +214,7 @@ func TestRequestWorkerStatusChange(t *testing.T) { IsLazy: true, }) err := mf.flamenco.RequestWorkerStatusChange(echo, workerUUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echo) } @@ -258,7 +258,7 @@ func TestRequestWorkerStatusChangeRevert(t *testing.T) { IsLazy: true, }) err := mf.flamenco.RequestWorkerStatusChange(echo, workerUUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echo) } diff --git a/internal/manager/api_impl/worker_task_updates_test.go b/internal/manager/api_impl/worker_task_updates_test.go index df918c24..6810bc7a 100644 --- a/internal/manager/api_impl/worker_task_updates_test.go +++ b/internal/manager/api_impl/worker_task_updates_test.go @@ -8,6 +8,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/config" "projects.blender.org/studio/flamenco/internal/manager/persistence" @@ -77,7 +78,7 @@ func TestTaskUpdate(t *testing.T) { err := mf.flamenco.TaskUpdate(echoCtx, taskID) // Check the saved task. - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, mockTask.UUID, statusChangedtask.UUID) assert.Equal(t, mockTask.UUID, actUpdatedTask.UUID) assert.Equal(t, mockTask.UUID, touchedTask.UUID) @@ -148,7 +149,7 @@ func TestTaskUpdateFailed(t *testing.T) { echoCtx := mf.prepareMockedJSONRequest(taskUpdate) requestWorkerStore(echoCtx, &worker) err := mf.flamenco.TaskUpdate(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } @@ -164,7 +165,7 @@ func TestTaskUpdateFailed(t *testing.T) { echoCtx := mf.prepareMockedJSONRequest(taskUpdate) requestWorkerStore(echoCtx, &worker) err := mf.flamenco.TaskUpdate(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } } @@ -248,7 +249,7 @@ func TestBlockingAfterFailure(t *testing.T) { echoCtx := mf.prepareMockedJSONRequest(taskUpdate) requestWorkerStore(echoCtx, &worker) err := mf.flamenco.TaskUpdate(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } @@ -279,7 +280,7 @@ func TestBlockingAfterFailure(t *testing.T) { echoCtx := mf.prepareMockedJSONRequest(taskUpdate) requestWorkerStore(echoCtx, &worker) err := mf.flamenco.TaskUpdate(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } @@ -314,7 +315,7 @@ func TestBlockingAfterFailure(t *testing.T) { echoCtx := mf.prepareMockedJSONRequest(taskUpdate) requestWorkerStore(echoCtx, &worker) err := mf.flamenco.TaskUpdate(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } } @@ -381,6 +382,6 @@ func TestJobFailureAfterWorkerTaskFailure(t *testing.T) { echoCtx := mf.prepareMockedJSONRequest(taskUpdate) requestWorkerStore(echoCtx, &worker) err := mf.flamenco.TaskUpdate(echoCtx, taskID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echoCtx) } diff --git a/internal/manager/api_impl/workers_test.go b/internal/manager/api_impl/workers_test.go index c21d8c91..6340441a 100644 --- a/internal/manager/api_impl/workers_test.go +++ 
b/internal/manager/api_impl/workers_test.go @@ -12,6 +12,7 @@ import ( "github.com/golang/mock/gomock" "github.com/labstack/echo/v4" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/config" "projects.blender.org/studio/flamenco/internal/manager/last_rendered" @@ -61,7 +62,7 @@ func TestTaskScheduleHappy(t *testing.T) { mf.broadcaster.EXPECT().BroadcastWorkerUpdate(gomock.Any()) err := mf.flamenco.ScheduleTask(echo) - assert.NoError(t, err) + require.NoError(t, err) // Check the response assignedTask := api.AssignedTask{ @@ -98,7 +99,7 @@ func TestTaskScheduleNoTaskAvailable(t *testing.T) { mf.persistence.EXPECT().WorkerSeen(bgCtx, &worker) err := mf.flamenco.ScheduleTask(echo) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echo) } @@ -119,7 +120,7 @@ func TestTaskScheduleNonActiveStatus(t *testing.T) { mf.persistence.EXPECT().WorkerSeen(bgCtx, &worker) err := mf.flamenco.ScheduleTask(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) resp := getRecordedResponse(echoCtx) assert.Equal(t, http.StatusConflict, resp.StatusCode) @@ -142,7 +143,7 @@ func TestTaskScheduleOtherStatusRequested(t *testing.T) { mf.persistence.EXPECT().WorkerSeen(bgCtx, &worker) err := mf.flamenco.ScheduleTask(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) expectBody := api.WorkerStateChange{StatusRequested: api.WorkerStatusAsleep} assertResponseJSON(t, echoCtx, http.StatusLocked, expectBody) @@ -169,7 +170,7 @@ func TestTaskScheduleOtherStatusRequestedAndBadState(t *testing.T) { mf.persistence.EXPECT().WorkerSeen(bgCtx, &worker) err := mf.flamenco.ScheduleTask(echoCtx) - assert.NoError(t, err) + require.NoError(t, err) expectBody := api.WorkerStateChange{StatusRequested: api.WorkerStatusAwake} assertResponseJSON(t, echoCtx, http.StatusLocked, expectBody) @@ -206,7 +207,7 @@ func TestWorkerSignOn(t *testing.T) { }) requestWorkerStore(echo, &worker) err := mf.flamenco.SignOn(echo) - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echo, http.StatusOK, api.WorkerStateChange{ StatusRequested: api.WorkerStatusAsleep, @@ -253,7 +254,7 @@ func TestWorkerSignoffTaskRequeue(t *testing.T) { }) err := mf.flamenco.SignOff(echo) - assert.NoError(t, err) + require.NoError(t, err) resp := getRecordedResponse(echo) assert.Equal(t, http.StatusNoContent, resp.StatusCode) @@ -292,7 +293,7 @@ func TestWorkerRememberPreviousStatus(t *testing.T) { echo := mf.prepareMockedRequest(nil) requestWorkerStore(echo, &worker) err := mf.flamenco.SignOff(echo) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echo) assert.Equal(t, api.WorkerStatusAwake, worker.StatusRequested) @@ -329,7 +330,7 @@ func TestWorkerDontRememberPreviousStatus(t *testing.T) { echo := mf.prepareMockedRequest(nil) requestWorkerStore(echo, &worker) err := mf.flamenco.SignOff(echo) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echo) } @@ -347,9 +348,8 @@ func TestWorkerState(t *testing.T) { echo := mf.prepareMockedRequest(nil) requestWorkerStore(echo, &worker) err := mf.flamenco.WorkerState(echo) - if assert.NoError(t, err) { - assertResponseNoContent(t, echo) - } + require.NoError(t, err) + assertResponseNoContent(t, echo) } // State change requested. 
@@ -361,11 +361,10 @@ func TestWorkerState(t *testing.T) { requestWorkerStore(echo, &worker) err := mf.flamenco.WorkerState(echo) - if assert.NoError(t, err) { - assertResponseJSON(t, echo, http.StatusOK, api.WorkerStateChange{ - StatusRequested: requestStatus, - }) - } + require.NoError(t, err) + assertResponseJSON(t, echo, http.StatusOK, api.WorkerStateChange{ + StatusRequested: requestStatus, + }) } } @@ -402,7 +401,7 @@ func TestWorkerStateChanged(t *testing.T) { }) requestWorkerStore(echo, &worker) err := mf.flamenco.WorkerStateChanged(echo) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echo) } @@ -445,7 +444,7 @@ func TestWorkerStateChangedAfterChangeRequest(t *testing.T) { }) requestWorkerStore(echo, &worker) err := mf.flamenco.WorkerStateChanged(echo) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echo) } @@ -475,7 +474,7 @@ func TestWorkerStateChangedAfterChangeRequest(t *testing.T) { }) requestWorkerStore(echo, &worker) err := mf.flamenco.WorkerStateChanged(echo) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoContent(t, echo) } } @@ -514,7 +513,7 @@ func TestMayWorkerRun(t *testing.T) { { echo := prepareRequest() err := mf.flamenco.MayWorkerRun(echo, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echo, http.StatusOK, api.MayKeepRunning{ MayKeepRunning: false, Reason: "task not assigned to this worker", @@ -529,7 +528,7 @@ func TestMayWorkerRun(t *testing.T) { echo := prepareRequest() task.WorkerID = &worker.ID err := mf.flamenco.MayWorkerRun(echo, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echo, http.StatusOK, api.MayKeepRunning{ MayKeepRunning: true, }) @@ -541,7 +540,7 @@ func TestMayWorkerRun(t *testing.T) { task.WorkerID = &worker.ID task.Status = api.TaskStatusCanceled err := mf.flamenco.MayWorkerRun(echo, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echo, http.StatusOK, api.MayKeepRunning{ MayKeepRunning: false, Reason: "task is in non-runnable status \"canceled\"", @@ -555,7 +554,7 @@ func TestMayWorkerRun(t *testing.T) { task.WorkerID = &worker.ID task.Status = api.TaskStatusActive err := mf.flamenco.MayWorkerRun(echo, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echo, http.StatusOK, api.MayKeepRunning{ MayKeepRunning: false, Reason: "worker status change requested", @@ -573,7 +572,7 @@ func TestMayWorkerRun(t *testing.T) { task.WorkerID = &worker.ID task.Status = api.TaskStatusActive err := mf.flamenco.MayWorkerRun(echo, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseJSON(t, echo, http.StatusOK, api.MayKeepRunning{ MayKeepRunning: true, }) @@ -618,7 +617,7 @@ func TestTaskOutputProduced(t *testing.T) { echo := prepareRequest(nil) err := mf.flamenco.TaskOutputProduced(echo, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echo, http.StatusLengthRequired, "Content-Length header required") } @@ -633,7 +632,7 @@ func TestTaskOutputProduced(t *testing.T) { echo := prepareRequest(bytes.NewReader(bodyBytes)) err := mf.flamenco.TaskOutputProduced(echo, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echo, http.StatusRequestEntityTooLarge, "image too large; should be max %v bytes", last_rendered.MaxImageSizeBytes) } @@ -648,7 +647,7 @@ func TestTaskOutputProduced(t *testing.T) { 
mf.lastRender.EXPECT().QueueImage(gomock.Any()).Return(last_rendered.ErrMimeTypeUnsupported) err := mf.flamenco.TaskOutputProduced(echo, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echo, http.StatusUnsupportedMediaType, `unsupported mime type "image/openexr"`) } @@ -661,7 +660,7 @@ func TestTaskOutputProduced(t *testing.T) { mf.lastRender.EXPECT().QueueImage(gomock.Any()).Return(last_rendered.ErrQueueFull) err := mf.flamenco.TaskOutputProduced(echo, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseAPIError(t, echo, http.StatusTooManyRequests, "image processing queue is full") } @@ -687,7 +686,7 @@ func TestTaskOutputProduced(t *testing.T) { }) err := mf.flamenco.TaskOutputProduced(echo, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertResponseNoBody(t, echo, http.StatusAccepted) if assert.NotNil(t, actualPayload) { diff --git a/internal/manager/config/defaults.go b/internal/manager/config/defaults.go index d6182169..24031e25 100644 --- a/internal/manager/config/defaults.go +++ b/internal/manager/config/defaults.go @@ -4,6 +4,7 @@ import ( "runtime" "time" + "projects.blender.org/studio/flamenco/internal/manager/eventbus" shaman_config "projects.blender.org/studio/flamenco/pkg/shaman/config" ) @@ -16,11 +17,10 @@ var defaultConfig = Conf{ Base: Base{ Meta: ConfMeta{Version: latestConfigVersion}, - ManagerName: "Flamenco", - Listen: ":8080", - // ListenHTTPS: ":8433", + ManagerName: "Flamenco", + Listen: ":8080", DatabaseDSN: "flamenco-manager.sqlite", - DBIntegrityCheck: 1 * time.Hour, + DBIntegrityCheck: 10 * time.Minute, SSDPDiscovery: true, LocalManagerStoragePath: "./flamenco-manager-storage", SharedStoragePath: "", // Empty string means "first run", and should trigger the config setup assistant. @@ -38,25 +38,15 @@ var defaultConfig = Conf{ TaskTimeout: 10 * time.Minute, WorkerTimeout: 1 * time.Minute, - // // Days are assumed to be 24 hours long. This is not exactly accurate, but should - // // be accurate enough for this type of cleanup. - // TaskCleanupMaxAge: 14 * 24 * time.Hour, - BlocklistThreshold: 3, TaskFailAfterSoftFailCount: 3, - // WorkerCleanupStatus: []string{string(api.WorkerStatusOffline)}, - - // TestTasks: TestTasks{ - // BlenderRender: BlenderRenderConfig{ - // JobStorage: "{job_storage}/test-jobs", - // RenderOutput: "{render}/test-renders", - // }, - // }, - - // JWT: jwtauth.Config{ - // DownloadKeysInterval: 1 * time.Hour, - // }, + MQTT: MQTTConfig{ + Client: eventbus.MQTTClientConfig{ + ClientID: eventbus.MQTTDefaultClientID, + TopicPrefix: eventbus.MQTTDefaultTopicPrefix, + }, + }, }, Variables: map[string]Variable{ diff --git a/internal/manager/eventbus/eventbus.go b/internal/manager/eventbus/eventbus.go index 8d533dd6..27392020 100644 --- a/internal/manager/eventbus/eventbus.go +++ b/internal/manager/eventbus/eventbus.go @@ -10,17 +10,25 @@ type ( EventTopic string ) +// Listener is the interface for internal components that want to respond to events. +type Listener interface { + OnEvent(topic EventTopic, payload interface{}) +} + +// Forwarder is the interface for components that forward events to external systems. 
type Forwarder interface { Broadcast(topic EventTopic, payload interface{}) } type Broker struct { + listeners []Listener forwarders []Forwarder mutex sync.Mutex } func NewBroker() *Broker { return &Broker{ + listeners: []Listener{}, forwarders: []Forwarder{}, mutex: sync.Mutex{}, } @@ -32,10 +40,20 @@ func (b *Broker) AddForwarder(forwarder Forwarder) { b.forwarders = append(b.forwarders, forwarder) } +func (b *Broker) AddListener(listener Listener) { + b.mutex.Lock() + defer b.mutex.Unlock() + b.listeners = append(b.listeners, listener) +} + func (b *Broker) broadcast(topic EventTopic, payload interface{}) { b.mutex.Lock() defer b.mutex.Unlock() + for _, listener := range b.listeners { + listener.OnEvent(topic, payload) + } + for _, forwarder := range b.forwarders { forwarder.Broadcast(topic, payload) } diff --git a/internal/manager/eventbus/events_farmstatus.go b/internal/manager/eventbus/events_farmstatus.go new file mode 100644 index 00000000..6a03d39a --- /dev/null +++ b/internal/manager/eventbus/events_farmstatus.go @@ -0,0 +1,17 @@ +package eventbus + +// SPDX-License-Identifier: GPL-3.0-or-later + +import ( + "github.com/rs/zerolog/log" + "projects.blender.org/studio/flamenco/pkg/api" +) + +func NewFarmStatusEvent(farmstatus api.FarmStatusReport) api.EventFarmStatus { + return api.EventFarmStatus(farmstatus) +} + +func (b *Broker) BroadcastFarmStatusEvent(event api.EventFarmStatus) { + log.Debug().Interface("event", event).Msg("eventbus: broadcasting FarmStatus event") + b.broadcast(TopicFarmStatus, event) +} diff --git a/internal/manager/eventbus/mqtt_client.go b/internal/manager/eventbus/mqtt_client.go index a2a6b51c..50ef496e 100644 --- a/internal/manager/eventbus/mqtt_client.go +++ b/internal/manager/eventbus/mqtt_client.go @@ -13,10 +13,14 @@ import ( "github.com/eclipse/paho.golang/paho" "github.com/rs/zerolog" "github.com/rs/zerolog/log" + + "projects.blender.org/studio/flamenco/pkg/api" ) const ( - defaultClientID = "flamenco" + MQTTDefaultTopicPrefix = "flamenco" + MQTTDefaultClientID = "flamenco" + keepAlive = 30 // seconds connectRetryDelay = 10 * time.Second @@ -61,7 +65,7 @@ func NewMQTTForwarder(config MQTTClientConfig) *MQTTForwarder { return nil } if config.ClientID == "" { - config.ClientID = defaultClientID + config.ClientID = MQTTDefaultClientID } brokerURL, err := url.Parse(config.BrokerURL) @@ -150,6 +154,11 @@ func (m *MQTTForwarder) queueRunner(queueRunnerCtx context.Context) { } func (m *MQTTForwarder) Broadcast(topic EventTopic, payload interface{}) { + if _, ok := payload.(api.EventTaskLogUpdate); ok { + // Task log updates aren't sent through MQTT, as that can generate a lot of traffic. 
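+		// Everything else is marshalled to JSON below and published under the
+		// configured topic prefix.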
+ return + } + fullTopic := m.topicPrefix + string(topic) asJSON, err := json.Marshal(payload) diff --git a/internal/manager/eventbus/socketio.go b/internal/manager/eventbus/socketio.go index d86c97a4..57f59405 100644 --- a/internal/manager/eventbus/socketio.go +++ b/internal/manager/eventbus/socketio.go @@ -14,6 +14,7 @@ import ( "github.com/rs/zerolog/log" "projects.blender.org/studio/flamenco/internal/uuid" "projects.blender.org/studio/flamenco/pkg/api" + "projects.blender.org/studio/flamenco/pkg/website" ) type SocketIOEventType string @@ -23,6 +24,8 @@ const ( ) var socketIOEventTypes = map[string]string{ + reflect.TypeOf(api.EventLifeCycle{}).Name(): "/lifecycle", + reflect.TypeOf(api.EventFarmStatus{}).Name(): "/status", reflect.TypeOf(api.EventJobUpdate{}).Name(): "/jobs", reflect.TypeOf(api.EventTaskUpdate{}).Name(): "/task", reflect.TypeOf(api.EventLastRenderedUpdate{}).Name(): "/last-rendered", @@ -59,7 +62,17 @@ func (s *SocketIOForwarder) Broadcast(topic EventTopic, payload interface{}) { // SocketIO has a concept of 'event types'. MQTT doesn't have this, and thus the Flamenco event // system doesn't rely on it. We use the payload type name as event type. payloadType := reflect.TypeOf(payload).Name() - eventType := socketIOEventTypes[payloadType] + + eventType, ok := socketIOEventTypes[payloadType] + if !ok { + log.Error(). + Str("topic", string(topic)). + Str("payloadType", payloadType). + Interface("event", payload). + Msgf("socketIO: payload type does not have an event type, please copy-paste this message into a bug report at %s", website.BugReportURL) + return + } + log.Debug(). Str("topic", string(topic)). Str("eventType", eventType). @@ -80,6 +93,10 @@ func (s *SocketIOForwarder) registerSIOEventHandlers() { _ = sio.On(gosocketio.OnConnection, func(c *gosocketio.Channel) { logger := sioLogger(c) logger.Debug().Msg("socketIO: connected") + + // All SocketIO connections get these events, regardless of their subscription. + _ = c.Join(string(TopicLifeCycle)) + _ = c.Join(string(TopicFarmStatus)) }) // socket disconnection diff --git a/internal/manager/eventbus/topics.go b/internal/manager/eventbus/topics.go index ea2b6056..4f00407d 100644 --- a/internal/manager/eventbus/topics.go +++ b/internal/manager/eventbus/topics.go @@ -6,7 +6,9 @@ import "fmt" const ( // Topics on which events are published. + // NOTE: when adding here, also add to socketIOEventTypes in socketio.go. TopicLifeCycle EventTopic = "/lifecycle" // sends api.EventLifeCycle + TopicFarmStatus EventTopic = "/status" // sends api.EventFarmStatus TopicJobUpdate EventTopic = "/jobs" // sends api.EventJobUpdate TopicLastRenderedImage EventTopic = "/last-rendered" // sends api.EventLastRenderedUpdate TopicTaskUpdate EventTopic = "/task" // sends api.EventTaskUpdate diff --git a/internal/manager/farmstatus/farmstatus.go b/internal/manager/farmstatus/farmstatus.go new file mode 100644 index 00000000..5fd86d9d --- /dev/null +++ b/internal/manager/farmstatus/farmstatus.go @@ -0,0 +1,233 @@ +// package farmstatus provides a status indicator for the entire Flamenco farm. +package farmstatus + +import ( + "context" + "errors" + "slices" + "sync" + "time" + + "github.com/rs/zerolog/log" + "projects.blender.org/studio/flamenco/internal/manager/eventbus" + "projects.blender.org/studio/flamenco/pkg/api" + "projects.blender.org/studio/flamenco/pkg/website" +) + +const ( + // pollWait determines how often the persistence layer is queried to get the + // counts & statuses of workers and jobs. 
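+	// For example: with a pollWait of 30 seconds, a poll that itself takes two + // seconds produces a fresh report roughly every 32 seconds, unless an event + // forces an earlier poll.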
+	// +	// Note that this indicates the time between polls, so between a poll +	// operation being done, and the next one starting. +	pollWait = 30 * time.Second +) + +// Service keeps track of the overall farm status. +type Service struct { +	persist  PersistenceService +	eventbus EventBus + +	mutex      sync.Mutex +	lastReport api.FarmStatusReport +	forcePoll  chan struct{} // Send anything here to force a poll, if none is running yet. +} + +// NewService returns a 'farm status' service. Run its Run() function in a +// goroutine to make it actually do something. +func NewService(persist PersistenceService, eventbus EventBus) *Service { +	service := Service{ +		persist:   persist, +		eventbus:  eventbus, +		mutex:     sync.Mutex{}, +		forcePoll: make(chan struct{}, 1), +		lastReport: api.FarmStatusReport{ +			Status: api.FarmStatusStarting, +		}, +	} + +	eventbus.AddListener(&service) +	return &service +} + +// Run the farm status polling loop. +func (s *Service) Run(ctx context.Context) { +	log.Debug().Msg("farm status: polling service running") +	defer log.Debug().Msg("farm status: polling service stopped") + +	// At startup the first poll should happen quickly. +	waitTime := 1 * time.Second + +	for { +		select { +		case <-ctx.Done(): +			return +		case <-time.After(waitTime): +			s.poll(ctx) +		case <-s.forcePoll: +			s.poll(ctx) +		} + +		// After the first poll we can go to a slower pace, as the event bus +		// then becomes the main source of poll triggers. +		waitTime = pollWait +	} +} + +func (s *Service) OnEvent(topic eventbus.EventTopic, payload interface{}) { +	forcePoll := false +	eventSubject := "" + +	switch event := payload.(type) { +	case api.EventJobUpdate: +		forcePoll = event.PreviousStatus != nil && *event.PreviousStatus != event.Status +		eventSubject = "job" +	case api.EventWorkerUpdate: +		forcePoll = event.PreviousStatus != nil && *event.PreviousStatus != event.Status +		eventSubject = "worker" +	} + +	if !forcePoll { +		return +	} + +	log.Debug(). +		Str("event", string(topic)). +		Msgf("farm status: investigating after %s status change", eventSubject) + +	// Polling queries the database, and thus can have a non-trivial duration. +	// Better to run in the Run() goroutine. +	select { +	case s.forcePoll <- struct{}{}: +	default: +		// If sending to the channel fails, there is already a struct{}{} in +		// there, and thus a poll will be triggered ASAP anyway. +	} +} + +// Report returns the last-known farm status report. +// +// It is updated every few seconds, from the Run() function. +func (s *Service) Report() api.FarmStatusReport { +	s.mutex.Lock() +	defer s.mutex.Unlock() +	return s.lastReport +} + +// updateStatusReport updates the last status report in a thread-safe way. +// It returns whether the report changed. +func (s *Service) updateStatusReport(report api.FarmStatusReport) bool { +	s.mutex.Lock() +	defer s.mutex.Unlock() + +	reportChanged := s.lastReport != report +	s.lastReport = report + +	return reportChanged +} + +func (s *Service) poll(ctx context.Context) { +	report := s.checkFarmStatus(ctx) +	if report == nil { +		// Already logged; just keep the last known report around for querying. +		return +	} + +	reportChanged := s.updateStatusReport(*report) +	if reportChanged { +		event := eventbus.NewFarmStatusEvent(s.lastReport) +		s.eventbus.BroadcastFarmStatusEvent(event) +	} +} + +// checkFarmStatus checks the farm status by querying the persistence layer. +// Instead of returning errors, this function logs them as warnings and returns nil.
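+// +// In broad strokes: no workers, or only offline/error ones, makes the farm +// 'inoperative'; an active job plus an awake worker makes it 'active'; queued +// or active work that is not actually being worked on makes it 'waiting'; no +// work with only sleeping workers makes it 'asleep'; and no work otherwise +// makes it 'idle'. See the switch statement below.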
+func (s *Service) checkFarmStatus(ctx context.Context) *api.FarmStatusReport { + log.Trace().Msg("farm status: checking the farm status") + startTime := time.Now() + + defer func() { + duration := time.Since(startTime) + log.Debug().Stringer("duration", duration).Msg("farm status: checked the farm status") + }() + + workerStatuses, err := s.persist.SummarizeWorkerStatuses(ctx) + if err != nil { + logDBError(err, "farm status: could not summarize worker statuses") + return nil + } + + // Check some worker statuses first. When there are no workers and the farm is + // inoperative, there is little use in checking jobs. At least for now. Maybe + // later we want to have some info in the reported status that indicates a + // more pressing matter (as in, inoperative AND a job is queued). + + // Check: inoperative + if len(workerStatuses) == 0 || allIn(workerStatuses, api.WorkerStatusOffline, api.WorkerStatusError) { + return &api.FarmStatusReport{ + Status: api.FarmStatusInoperative, + } + } + + jobStatuses, err := s.persist.SummarizeJobStatuses(ctx) + if err != nil { + logDBError(err, "farm status: could not summarize job statuses") + return nil + } + + anyJobActive := jobStatuses[api.JobStatusActive] > 0 + anyJobQueued := jobStatuses[api.JobStatusQueued] > 0 + isWorkAvailable := anyJobActive || anyJobQueued + + anyWorkerAwake := workerStatuses[api.WorkerStatusAwake] > 0 + anyWorkerAsleep := workerStatuses[api.WorkerStatusAsleep] > 0 + allWorkersAsleep := !anyWorkerAwake && anyWorkerAsleep + + report := api.FarmStatusReport{} + switch { + case anyJobActive && anyWorkerAwake: + // - "active" # Actively working on jobs. + report.Status = api.FarmStatusActive + case isWorkAvailable: + // - "waiting" # Work to be done, but there is no worker awake. + report.Status = api.FarmStatusWaiting + case !isWorkAvailable && allWorkersAsleep: + // - "asleep" # Farm is idle, and all workers are asleep. + report.Status = api.FarmStatusAsleep + case !isWorkAvailable: + // - "idle" # Farm could be active, but has no work to do. + report.Status = api.FarmStatusIdle + default: + log.Warn(). + Interface("workerStatuses", workerStatuses). + Interface("jobStatuses", jobStatuses). + Msgf("farm status: unexpected configuration of worker and job statuses, please report this at %s", website.BugReportURL) + report.Status = api.FarmStatusUnknown + } + + return &report +} + +func logDBError(err error, message string) { + switch { + case errors.Is(err, context.DeadlineExceeded): + log.Warn().Msg(message + " (it took too long)") + case errors.Is(err, context.Canceled): + log.Debug().Msg(message + " (Flamenco is shutting down)") + default: + log.Warn().AnErr("cause", err).Msg(message) + } +} + +func allIn[T comparable](statuses map[T]int, shouldBeIn ...T) bool { + for status, count := range statuses { + if count == 0 { + continue + } + + if !slices.Contains(shouldBeIn, status) { + return false + } + } + return true +} diff --git a/internal/manager/farmstatus/farmstatus_test.go b/internal/manager/farmstatus/farmstatus_test.go new file mode 100644 index 00000000..f6eb7e52 --- /dev/null +++ b/internal/manager/farmstatus/farmstatus_test.go @@ -0,0 +1,241 @@ +// package farmstatus provides a status indicator for the entire Flamenco farm. 
+package farmstatus + +import ( +	"context" +	"testing" + +	"github.com/golang/mock/gomock" +	"github.com/stretchr/testify/assert" +	"github.com/stretchr/testify/require" +	"projects.blender.org/studio/flamenco/internal/manager/farmstatus/mocks" +	"projects.blender.org/studio/flamenco/internal/manager/persistence" +	"projects.blender.org/studio/flamenco/pkg/api" +) + +type Fixtures struct { +	service  *Service +	persist  *mocks.MockPersistenceService +	eventbus *mocks.MockEventBus +	ctx      context.Context +} + +func TestFarmStatusStarting(t *testing.T) { +	f := fixtures(t) +	report := f.service.Report() +	assert.Equal(t, api.FarmStatusStarting, report.Status) +} + +func TestFarmStatusLoop(t *testing.T) { +	f := fixtures(t) + +	// Mock an "active" status. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{ +		api.WorkerStatusOffline: 2, +		api.WorkerStatusAsleep:  1, +		api.WorkerStatusError:   1, +		api.WorkerStatusAwake:   3, +	}) +	f.mockJobStatuses(persistence.JobStatusCount{ +		api.JobStatusActive: 1, +	}) + +	// Before polling, the status should still be 'starting'. +	report := f.service.Report() +	assert.Equal(t, api.FarmStatusStarting, report.Status) + +	// After a single poll, the report should have been updated. +	f.eventbus.EXPECT().BroadcastFarmStatusEvent(api.EventFarmStatus{Status: api.FarmStatusActive}) +	f.service.poll(f.ctx) +	report = f.service.Report() +	assert.Equal(t, api.FarmStatusActive, report.Status) +} + +func TestCheckFarmStatusInoperative(t *testing.T) { +	f := fixtures(t) + +	// "inoperative": no workers. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{}) +	report := f.service.checkFarmStatus(f.ctx) +	require.NotNil(t, report) +	assert.Equal(t, api.FarmStatusInoperative, report.Status) + +	// "inoperative": all workers offline. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{ +		api.WorkerStatusOffline: 3, +	}) +	report = f.service.checkFarmStatus(f.ctx) +	require.NotNil(t, report) +	assert.Equal(t, api.FarmStatusInoperative, report.Status) + +	// "inoperative": some workers offline, some in error. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{ +		api.WorkerStatusOffline: 2, +		api.WorkerStatusError:   1, +	}) +	report = f.service.checkFarmStatus(f.ctx) +	require.NotNil(t, report) +	assert.Equal(t, api.FarmStatusInoperative, report.Status) +} + +func TestCheckFarmStatusActive(t *testing.T) { +	f := fixtures(t) + +	// "active" # Actively working on jobs. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{ +		api.WorkerStatusOffline: 2, +		api.WorkerStatusAsleep:  1, +		api.WorkerStatusError:   1, +		api.WorkerStatusAwake:   3, +	}) +	f.mockJobStatuses(persistence.JobStatusCount{ +		api.JobStatusActive: 1, +	}) +	report := f.service.checkFarmStatus(f.ctx) +	require.NotNil(t, report) +	assert.Equal(t, api.FarmStatusActive, report.Status) +} + +func TestCheckFarmStatusWaiting(t *testing.T) { +	f := fixtures(t) + +	// "waiting": Active job, and only sleeping workers. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{ +		api.WorkerStatusAsleep: 1, +	}) +	f.mockJobStatuses(persistence.JobStatusCount{ +		api.JobStatusActive: 1, +	}) +	report := f.service.checkFarmStatus(f.ctx) +	require.NotNil(t, report) +	assert.Equal(t, api.FarmStatusWaiting, report.Status) + +	// "waiting": Queued job, and awake worker. It could pick up the job any +	// second now, but it could also have been blocklisted already.
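+	// Either way nothing is actively being worked on, so the farm counts as +	// 'waiting' rather than 'active'.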
+	f.mockWorkerStatuses(persistence.WorkerStatusCount{ +		api.WorkerStatusAsleep: 1, +		api.WorkerStatusAwake:  1, +	}) +	f.mockJobStatuses(persistence.JobStatusCount{ +		api.JobStatusQueued: 1, +	}) +	report = f.service.checkFarmStatus(f.ctx) +	require.NotNil(t, report) +	assert.Equal(t, api.FarmStatusWaiting, report.Status) +} + +func TestCheckFarmStatusIdle(t *testing.T) { +	f := fixtures(t) + +	// "idle" # Farm could be active, but has no work to do. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{ +		api.WorkerStatusOffline: 2, +		api.WorkerStatusAsleep:  1, +		api.WorkerStatusAwake:   1, +	}) +	f.mockJobStatuses(persistence.JobStatusCount{ +		api.JobStatusCompleted:       1, +		api.JobStatusCancelRequested: 1, +	}) +	report := f.service.checkFarmStatus(f.ctx) +	require.NotNil(t, report) +	assert.Equal(t, api.FarmStatusIdle, report.Status) +} + +func TestCheckFarmStatusAsleep(t *testing.T) { +	f := fixtures(t) + +	// "asleep": No worker is awake, some are asleep, no work to do. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{ +		api.WorkerStatusOffline: 2, +		api.WorkerStatusAsleep:  2, +	}) +	f.mockJobStatuses(persistence.JobStatusCount{ +		api.JobStatusCanceled:  10, +		api.JobStatusCompleted: 4, +		api.JobStatusFailed:    2, +	}) +	report := f.service.checkFarmStatus(f.ctx) +	require.NotNil(t, report) +	assert.Equal(t, api.FarmStatusAsleep, report.Status) +} + +func TestFarmStatusEvent(t *testing.T) { +	f := fixtures(t) + +	// "inoperative": no workers. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{}) +	f.eventbus.EXPECT().BroadcastFarmStatusEvent(api.EventFarmStatus{ +		Status: api.FarmStatusInoperative, +	}) +	f.service.poll(f.ctx) + +	// Re-polling should not trigger any event, as the status doesn't change. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{}) +	f.service.poll(f.ctx) + +	// "active": Actively working on jobs. +	f.mockWorkerStatuses(persistence.WorkerStatusCount{api.WorkerStatusAwake: 3}) +	f.mockJobStatuses(persistence.JobStatusCount{api.JobStatusActive: 1}) +	f.eventbus.EXPECT().BroadcastFarmStatusEvent(api.EventFarmStatus{ +		Status: api.FarmStatusActive, +	}) +	f.service.poll(f.ctx) +} + +func Test_allIn(t *testing.T) { +	type args struct { +		statuses   map[api.WorkerStatus]int +		shouldBeIn []api.WorkerStatus +	} +	tests := []struct { +		name string +		args args +		want bool +	}{ +		{"none", args{map[api.WorkerStatus]int{}, []api.WorkerStatus{api.WorkerStatusAsleep}}, true}, +		{"match-only", args{ +			map[api.WorkerStatus]int{api.WorkerStatusAsleep: 5}, +			[]api.WorkerStatus{api.WorkerStatusAsleep}, +		}, true}, +		{"match-some", args{ +			map[api.WorkerStatus]int{api.WorkerStatusAsleep: 5, api.WorkerStatusOffline: 2}, +			[]api.WorkerStatus{api.WorkerStatusAsleep}, +		}, false}, +		{"match-all", args{ +			map[api.WorkerStatus]int{api.WorkerStatusAsleep: 5, api.WorkerStatusOffline: 2}, +			[]api.WorkerStatus{api.WorkerStatusAsleep, api.WorkerStatusOffline}, +		}, true}, +	} +	for _, tt := range tests { +		t.Run(tt.name, func(t *testing.T) { +			if got := allIn(tt.args.statuses, tt.args.shouldBeIn...); got != tt.want { +				t.Errorf("allIn() = %v, want %v", got, tt.want) +			} +		}) +	} +} + +func fixtures(t *testing.T) *Fixtures { +	mockCtrl := gomock.NewController(t) + +	f := Fixtures{ +		persist:  mocks.NewMockPersistenceService(mockCtrl), +		eventbus: mocks.NewMockEventBus(mockCtrl), +		ctx:      context.Background(), +	} + +	// Calling NewService() immediately registers the service as a listener with the event bus.
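+	// The AddListener expectation must therefore be set up before NewService() is called.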
+ f.eventbus.EXPECT().AddListener(gomock.Any()) + f.service = NewService(f.persist, f.eventbus) + + return &f +} + +func (f *Fixtures) mockWorkerStatuses(workerStatuses persistence.WorkerStatusCount) { + f.persist.EXPECT().SummarizeWorkerStatuses(f.ctx).Return(workerStatuses, nil) +} + +func (f *Fixtures) mockJobStatuses(jobStatuses persistence.JobStatusCount) { + f.persist.EXPECT().SummarizeJobStatuses(f.ctx).Return(jobStatuses, nil) +} diff --git a/internal/manager/farmstatus/interfaces.go b/internal/manager/farmstatus/interfaces.go new file mode 100644 index 00000000..36798e17 --- /dev/null +++ b/internal/manager/farmstatus/interfaces.go @@ -0,0 +1,26 @@ +package farmstatus + +import ( + "context" + + "projects.blender.org/studio/flamenco/internal/manager/eventbus" + "projects.blender.org/studio/flamenco/internal/manager/persistence" + "projects.blender.org/studio/flamenco/pkg/api" +) + +// Generate mock implementations of these interfaces. +//go:generate go run github.com/golang/mock/mockgen -destination mocks/interfaces_mock.gen.go -package mocks projects.blender.org/studio/flamenco/internal/manager/farmstatus PersistenceService,EventBus + +type PersistenceService interface { + SummarizeJobStatuses(ctx context.Context) (persistence.JobStatusCount, error) + SummarizeWorkerStatuses(ctx context.Context) (persistence.WorkerStatusCount, error) +} + +var _ PersistenceService = (*persistence.DB)(nil) + +type EventBus interface { + AddListener(listener eventbus.Listener) + BroadcastFarmStatusEvent(event api.EventFarmStatus) +} + +var _ EventBus = (*eventbus.Broker)(nil) diff --git a/internal/manager/farmstatus/mocks/interfaces_mock.gen.go b/internal/manager/farmstatus/mocks/interfaces_mock.gen.go new file mode 100644 index 00000000..55099521 --- /dev/null +++ b/internal/manager/farmstatus/mocks/interfaces_mock.gen.go @@ -0,0 +1,115 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: projects.blender.org/studio/flamenco/internal/manager/farmstatus (interfaces: PersistenceService,EventBus) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + eventbus "projects.blender.org/studio/flamenco/internal/manager/eventbus" + persistence "projects.blender.org/studio/flamenco/internal/manager/persistence" + api "projects.blender.org/studio/flamenco/pkg/api" +) + +// MockPersistenceService is a mock of PersistenceService interface. +type MockPersistenceService struct { + ctrl *gomock.Controller + recorder *MockPersistenceServiceMockRecorder +} + +// MockPersistenceServiceMockRecorder is the mock recorder for MockPersistenceService. +type MockPersistenceServiceMockRecorder struct { + mock *MockPersistenceService +} + +// NewMockPersistenceService creates a new mock instance. +func NewMockPersistenceService(ctrl *gomock.Controller) *MockPersistenceService { + mock := &MockPersistenceService{ctrl: ctrl} + mock.recorder = &MockPersistenceServiceMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPersistenceService) EXPECT() *MockPersistenceServiceMockRecorder { + return m.recorder +} + +// SummarizeJobStatuses mocks base method. 
+func (m *MockPersistenceService) SummarizeJobStatuses(arg0 context.Context) (persistence.JobStatusCount, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SummarizeJobStatuses", arg0) + ret0, _ := ret[0].(persistence.JobStatusCount) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SummarizeJobStatuses indicates an expected call of SummarizeJobStatuses. +func (mr *MockPersistenceServiceMockRecorder) SummarizeJobStatuses(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SummarizeJobStatuses", reflect.TypeOf((*MockPersistenceService)(nil).SummarizeJobStatuses), arg0) +} + +// SummarizeWorkerStatuses mocks base method. +func (m *MockPersistenceService) SummarizeWorkerStatuses(arg0 context.Context) (persistence.WorkerStatusCount, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SummarizeWorkerStatuses", arg0) + ret0, _ := ret[0].(persistence.WorkerStatusCount) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SummarizeWorkerStatuses indicates an expected call of SummarizeWorkerStatuses. +func (mr *MockPersistenceServiceMockRecorder) SummarizeWorkerStatuses(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SummarizeWorkerStatuses", reflect.TypeOf((*MockPersistenceService)(nil).SummarizeWorkerStatuses), arg0) +} + +// MockEventBus is a mock of EventBus interface. +type MockEventBus struct { + ctrl *gomock.Controller + recorder *MockEventBusMockRecorder +} + +// MockEventBusMockRecorder is the mock recorder for MockEventBus. +type MockEventBusMockRecorder struct { + mock *MockEventBus +} + +// NewMockEventBus creates a new mock instance. +func NewMockEventBus(ctrl *gomock.Controller) *MockEventBus { + mock := &MockEventBus{ctrl: ctrl} + mock.recorder = &MockEventBusMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEventBus) EXPECT() *MockEventBusMockRecorder { + return m.recorder +} + +// AddListener mocks base method. +func (m *MockEventBus) AddListener(arg0 eventbus.Listener) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddListener", arg0) +} + +// AddListener indicates an expected call of AddListener. +func (mr *MockEventBusMockRecorder) AddListener(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddListener", reflect.TypeOf((*MockEventBus)(nil).AddListener), arg0) +} + +// BroadcastFarmStatusEvent mocks base method. +func (m *MockEventBus) BroadcastFarmStatusEvent(arg0 api.EventFarmStatus) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "BroadcastFarmStatusEvent", arg0) +} + +// BroadcastFarmStatusEvent indicates an expected call of BroadcastFarmStatusEvent. 
+func (mr *MockEventBusMockRecorder) BroadcastFarmStatusEvent(arg0 interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BroadcastFarmStatusEvent", reflect.TypeOf((*MockEventBus)(nil).BroadcastFarmStatusEvent), arg0)
+}
diff --git a/internal/manager/job_compilers/job_compilers_test.go b/internal/manager/job_compilers/job_compilers_test.go
index c6abee44..430d8dbd 100644
--- a/internal/manager/job_compilers/job_compilers_test.go
+++ b/internal/manager/job_compilers/job_compilers_test.go
@@ -58,7 +58,7 @@ func exampleSubmittedJob() api.SubmittedJob {
 func mockedClock(t *testing.T) clock.Clock {
 	c := clock.NewMock()
 	now, err := time.ParseInLocation("2006-01-02T15:04:05", "2006-01-02T15:04:05", time.Local)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	c.Set(now)
 	return c
 }
@@ -67,7 +67,7 @@ func TestSimpleBlenderRenderHappy(t *testing.T) {
 	c := mockedClock(t)
 
 	s, err := Load(c)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// Compiling a job should be really fast.
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
@@ -172,7 +172,7 @@ func TestSimpleBlenderRenderWindowsPaths(t *testing.T) {
 	c := mockedClock(t)
 
 	s, err := Load(c)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// Compiling a job should be really fast.
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
@@ -307,9 +307,8 @@ func TestEtag(t *testing.T) {
 	{ // Test without etag.
 		aj, err := s.Compile(ctx, sj)
-		if assert.NoError(t, err, "job without etag should always be accepted") {
-			assert.NotNil(t, aj)
-		}
+		require.NoError(t, err, "job without etag should always be accepted")
+		assert.NotNil(t, aj)
 	}
 
 	{ // Test with bad etag.
@@ -321,9 +320,8 @@ func TestEtag(t *testing.T) {
 	{ // Test with correct etag.
 		sj.TypeEtag = ptr(expectEtag)
 		aj, err := s.Compile(ctx, sj)
-		if assert.NoError(t, err, "job with correct etag should be accepted") {
-			assert.NotNil(t, aj)
-		}
+		require.NoError(t, err, "job with correct etag should be accepted")
+		assert.NotNil(t, aj)
 	}
 }
diff --git a/internal/manager/job_compilers/js_globals.go b/internal/manager/job_compilers/js_globals.go
index de9dddf9..9628a988 100644
--- a/internal/manager/job_compilers/js_globals.go
+++ b/internal/manager/job_compilers/js_globals.go
@@ -10,6 +10,7 @@ import (
 	"time"
 
 	"github.com/dop251/goja"
+	"github.com/google/shlex"
 	"github.com/rs/zerolog/log"
 )
 
@@ -33,6 +34,19 @@ func jsFormatTimestampLocal(timestamp time.Time) string {
 	return timestamp.Local().Format("2006-01-02_150405")
 }
 
+// jsShellSplit splits a string into its parts, using CLI/shell semantics.
+func jsShellSplit(vm *goja.Runtime, someCLIArgs string) []string {
+	split, err := shlex.Split(someCLIArgs)
+
+	if err != nil {
+		// Generate a JS exception by panicking with a Goja Value.
+		exception := vm.ToValue(err)
+		panic(exception)
+	}
+
+	return split
+}
+
 type ErrInvalidRange struct {
 	Range   string // The frame range that was invalid.
Message string // The error message diff --git a/internal/manager/job_compilers/js_globals_test.go b/internal/manager/job_compilers/js_globals_test.go index 9ce7eb06..4029f1ef 100644 --- a/internal/manager/job_compilers/js_globals_test.go +++ b/internal/manager/job_compilers/js_globals_test.go @@ -5,12 +5,31 @@ package job_compilers import ( "testing" + "github.com/dop251/goja" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) +func TestShellSplitHappy(t *testing.T) { + expect := []string{"--python-expr", "print(1 + 1)"} + actual := jsShellSplit(nil, "--python-expr 'print(1 + 1)'") + assert.Equal(t, expect, actual) +} + +func TestShellSplitFailure(t *testing.T) { + vm := goja.New() + + testFunc := func() { + jsShellSplit(vm, "--python-expr invalid_quoting(1 + 1)'") + } + // Testing that a goja.Value is used for the panic is a bit tricky, so just + // test that the function panics. + assert.Panics(t, testFunc) +} + func TestFrameChunkerHappyBlenderStyle(t *testing.T) { chunks, err := jsFrameChunker("1..10,20..25,40,3..8", 4) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []string{"1-4", "5-8", "9,10,20,21", "22-25", "40"}, chunks) } @@ -21,24 +40,24 @@ func TestFrameChunkerHappySmallInput(t *testing.T) { // Just one frame. chunks, err := jsFrameChunker("47", 4) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []string{"47"}, chunks) // Just one range of exactly one chunk. chunks, err = jsFrameChunker("1-3", 3) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []string{"1-3"}, chunks) } func TestFrameChunkerHappyRegularStyle(t *testing.T) { chunks, err := jsFrameChunker("1-10,20-25,40", 4) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []string{"1-4", "5-8", "9,10,20,21", "22-25", "40"}, chunks) } func TestFrameChunkerHappyExtraWhitespace(t *testing.T) { chunks, err := jsFrameChunker(" 1 .. 
10,\t20..25\n,40 ", 4) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []string{"1-4", "5-8", "9,10,20,21", "22-25", "40"}, chunks) } @@ -50,7 +69,7 @@ func TestFrameChunkerUnhappy(t *testing.T) { func TestFrameRangeExplode(t *testing.T) { frames, err := frameRangeExplode("1..10,20..25,40") - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []int{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 21, 22, 23, 24, 25, 40, diff --git a/internal/manager/job_compilers/scripts-for-unittest/simple_blender_render.js b/internal/manager/job_compilers/scripts-for-unittest/simple_blender_render.js index cbe73256..157e534b 100644 --- a/internal/manager/job_compilers/scripts-for-unittest/simple_blender_render.js +++ b/internal/manager/job_compilers/scripts-for-unittest/simple_blender_render.js @@ -2,6 +2,7 @@ const JOB_TYPE = { label: "Simple Blender Render", + description: "Render a sequence of frames, and create a preview video file", settings: [ // Settings for artists to determine: { key: "frames", type: "string", required: true, eval: "f'{C.scene.frame_start}-{C.scene.frame_end}'", diff --git a/internal/manager/job_compilers/scripts.go b/internal/manager/job_compilers/scripts.go index efba10e9..92ad4e83 100644 --- a/internal/manager/job_compilers/scripts.go +++ b/internal/manager/job_compilers/scripts.go @@ -140,6 +140,9 @@ func newGojaVM(registry *require.Registry) *goja.Runtime { mustSet("alert", jsAlert) mustSet("frameChunker", jsFrameChunker) mustSet("formatTimestampLocal", jsFormatTimestampLocal) + mustSet("shellSplit", func(cliArgs string) []string { + return jsShellSplit(vm, cliArgs) + }) // Pre-import some useful modules. registry.Enable(vm) diff --git a/internal/manager/job_compilers/scripts/simple_blender_render.js b/internal/manager/job_compilers/scripts/simple_blender_render.js index d4a18672..3c6f4123 100644 --- a/internal/manager/job_compilers/scripts/simple_blender_render.js +++ b/internal/manager/job_compilers/scripts/simple_blender_render.js @@ -2,6 +2,7 @@ const JOB_TYPE = { label: "Simple Blender Render", + description: "Render a sequence of frames, and create a preview video file", settings: [ // Settings for artists to determine: { key: "frames", type: "string", required: true, diff --git a/internal/manager/job_compilers/scripts_test.go b/internal/manager/job_compilers/scripts_test.go index aee41e76..f7f7ec7f 100644 --- a/internal/manager/job_compilers/scripts_test.go +++ b/internal/manager/job_compilers/scripts_test.go @@ -8,12 +8,13 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestLoadScriptsFrom_skip_nonjs(t *testing.T) { thisDirFS := os.DirFS(".") compilers, err := loadScriptsFrom(thisDirFS) - assert.NoError(t, err, "input without JS files should not cause errors") + require.NoError(t, err, "input without JS files should not cause errors") assert.Empty(t, compilers) } @@ -21,7 +22,7 @@ func TestLoadScriptsFrom_on_disk_js(t *testing.T) { scriptsFS := os.DirFS("scripts-for-unittest") compilers, err := loadScriptsFrom(scriptsFS) - assert.NoError(t, err) + require.NoError(t, err) expectKeys := map[string]bool{ "echo-and-sleep": true, "simple-blender-render": true, @@ -34,7 +35,7 @@ func TestLoadScriptsFrom_embedded(t *testing.T) { initEmbeddedFS() compilers, err := loadScriptsFrom(embeddedScriptsFS) - assert.NoError(t, err) + require.NoError(t, err) expectKeys := map[string]bool{ "echo-sleep-test": true, "simple-blender-render": true, @@ -48,7 +49,7 @@ func 
BenchmarkLoadScripts_fromEmbedded(b *testing.B) { for i := 0; i < b.N; i++ { compilers, err := loadScriptsFrom(embeddedScriptsFS) - assert.NoError(b, err) + require.NoError(b, err) assert.NotEmpty(b, compilers) } } @@ -59,7 +60,7 @@ func BenchmarkLoadScripts_fromDisk(b *testing.B) { onDiskFS := os.DirFS("scripts-for-unittest") for i := 0; i < b.N; i++ { compilers, err := loadScriptsFrom(onDiskFS) - assert.NoError(b, err) + require.NoError(b, err) assert.NotEmpty(b, compilers) } } diff --git a/internal/manager/job_deleter/job_deleter_test.go b/internal/manager/job_deleter/job_deleter_test.go index 770c45a8..c20d2c2d 100644 --- a/internal/manager/job_deleter/job_deleter_test.go +++ b/internal/manager/job_deleter/job_deleter_test.go @@ -9,6 +9,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/job_deleter/mocks" "projects.blender.org/studio/flamenco/internal/manager/persistence" "projects.blender.org/studio/flamenco/pkg/shaman" @@ -32,16 +33,16 @@ func TestQueueJobDeletion(t *testing.T) { job1 := &persistence.Job{UUID: "2f7d910f-08a6-4b0f-8ecb-b3946939ed1b"} mocks.persist.EXPECT().RequestJobDeletion(mocks.ctx, job1) - assert.NoError(t, s.QueueJobDeletion(mocks.ctx, job1)) + require.NoError(t, s.QueueJobDeletion(mocks.ctx, job1)) // Call twice more to overflow the queue. job2 := &persistence.Job{UUID: "e8fbe41c-ed24-46df-ba63-8d4f5524071b"} mocks.persist.EXPECT().RequestJobDeletion(mocks.ctx, job2) - assert.NoError(t, s.QueueJobDeletion(mocks.ctx, job2)) + require.NoError(t, s.QueueJobDeletion(mocks.ctx, job2)) job3 := &persistence.Job{UUID: "deeab6ba-02cd-42c0-b7bc-2367a2f04c7d"} mocks.persist.EXPECT().RequestJobDeletion(mocks.ctx, job3) - assert.NoError(t, s.QueueJobDeletion(mocks.ctx, job3)) + require.NoError(t, s.QueueJobDeletion(mocks.ctx, job3)) if assert.Len(t, s.queue, 2, "the first two job UUID should be queued") { assert.Equal(t, job1.UUID, <-s.queue) @@ -111,7 +112,7 @@ func TestDeleteJobWithoutShaman(t *testing.T) { mocks.persist.EXPECT().DeleteJob(mocks.ctx, jobUUID) mocks.persist.EXPECT().RequestIntegrityCheck() mocks.broadcaster.EXPECT().BroadcastJobUpdate(gomock.Any()) - assert.NoError(t, s.deleteJob(mocks.ctx, jobUUID)) + require.NoError(t, s.deleteJob(mocks.ctx, jobUUID)) } func TestDeleteJobWithShaman(t *testing.T) { @@ -163,7 +164,7 @@ func TestDeleteJobWithShaman(t *testing.T) { mocks.persist.EXPECT().DeleteJob(mocks.ctx, jobUUID) mocks.persist.EXPECT().RequestIntegrityCheck() mocks.broadcaster.EXPECT().BroadcastJobUpdate(gomock.Any()) - assert.NoError(t, s.deleteJob(mocks.ctx, jobUUID)) + require.NoError(t, s.deleteJob(mocks.ctx, jobUUID)) } func jobDeleterTestFixtures(t *testing.T) (*Service, func(), *JobDeleterMocks) { diff --git a/internal/manager/last_rendered/last_rendered_test.go b/internal/manager/last_rendered/last_rendered_test.go index f06154c6..7e3142a2 100644 --- a/internal/manager/last_rendered/last_rendered_test.go +++ b/internal/manager/last_rendered/last_rendered_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/local_storage" ) @@ -38,9 +39,9 @@ func TestQueueImage(t *testing.T) { defer storage.MustErase() lrp := New(storage) - assert.NoError(t, lrp.QueueImage(payload)) - assert.NoError(t, lrp.QueueImage(payload)) - assert.NoError(t, lrp.QueueImage(payload)) + require.NoError(t, lrp.QueueImage(payload)) + 
require.NoError(t, lrp.QueueImage(payload)) + require.NoError(t, lrp.QueueImage(payload)) assert.ErrorIs(t, lrp.QueueImage(payload), ErrQueueFull) } @@ -48,9 +49,7 @@ func TestProcessImage(t *testing.T) { // Load the test image. Note that this intentionally has an approximate 21:9 // ratio, whereas the thumbnail specs define a 16:9 ratio. imgBytes, err := os.ReadFile("last_rendered_test.jpg") - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) jobID := "e078438b-c9f5-43e6-9e86-52f8be91dd12" payload := Payload{ @@ -87,15 +86,11 @@ func TestProcessImage(t *testing.T) { assertImageSize := func(spec Thumbspec) { path := filepath.Join(jobdir, spec.Filename) file, err := os.Open(path) - if !assert.NoError(t, err, "thumbnail %s should be openable", spec.Filename) { - return - } + require.NoError(t, err, "thumbnail %s should be openable", spec.Filename) defer file.Close() img, format, err := image.Decode(file) - if !assert.NoErrorf(t, err, "thumbnail %s should be decodable", spec.Filename) { - return - } + require.NoErrorf(t, err, "thumbnail %s should be decodable", spec.Filename) assert.Equalf(t, "jpeg", format, "thumbnail %s not written in the expected format", spec.Filename) assert.LessOrEqualf(t, img.Bounds().Dx(), spec.MaxWidth, "thumbnail %s has wrong width", spec.Filename) diff --git a/internal/manager/local_storage/local_storage_test.go b/internal/manager/local_storage/local_storage_test.go index 2d4dd36b..36696147 100644 --- a/internal/manager/local_storage/local_storage_test.go +++ b/internal/manager/local_storage/local_storage_test.go @@ -24,16 +24,14 @@ func TestNewNextToExe(t *testing.T) { func TestNewNextToExe_noSubdir(t *testing.T) { exePath, err := os.Executable() - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) exeName := filepath.Base(exePath) // The filesystem in an empty "subdirectory" next to the executable should // contain the executable. 
si := NewNextToExe("") _, err = os.Stat(filepath.Join(si.rootPath, exeName)) - assert.NoErrorf(t, err, "should be able to stat this executable %s", exeName) + require.NoErrorf(t, err, "should be able to stat this executable %s", exeName) } func TestForJob(t *testing.T) { @@ -52,10 +50,10 @@ func TestErase(t *testing.T) { jobPath := si.ForJob("08e126ef-d773-468b-8bab-19a8213cf2ff") assert.NoDirExists(t, jobPath, "getting a path should not create it") - assert.NoError(t, os.MkdirAll(jobPath, os.ModePerm)) + require.NoError(t, os.MkdirAll(jobPath, os.ModePerm)) assert.DirExists(t, jobPath, "os.MkdirAll is borked") - assert.NoError(t, si.Erase()) + require.NoError(t, si.Erase()) assert.NoDirExists(t, si.rootPath, "Erase() should erase the root path, and everything in it") } @@ -66,13 +64,13 @@ func TestRemoveJobStorage(t *testing.T) { jobPath := si.ForJob(jobUUID) assert.NoDirExists(t, jobPath, "getting a path should not create it") - assert.NoError(t, os.MkdirAll(jobPath, os.ModePerm)) + require.NoError(t, os.MkdirAll(jobPath, os.ModePerm)) assert.DirExists(t, jobPath, "os.MkdirAll is borked") taskFile := filepath.Join(jobPath, "task-07c33f32-b345-4da9-8834-9c91532cd97e.txt") - assert.NoError(t, os.WriteFile(taskFile, []byte("dummy task log"), 0o777)) + require.NoError(t, os.WriteFile(taskFile, []byte("dummy task log"), 0o777)) - assert.NoError(t, si.RemoveJobStorage(context.Background(), jobUUID)) + require.NoError(t, si.RemoveJobStorage(context.Background(), jobUUID)) assert.NoDirExists(t, jobPath, "RemoveJobStorage() should erase the entire job-specific storage dir, and everything in it") // See if the test assumption (that job dir is in another sub-dir of the root, @@ -91,13 +89,13 @@ func TestRemoveJobStorageWithoutJobUUID(t *testing.T) { jobPath := si.ForJob("") assert.NoDirExists(t, jobPath, "getting a path should not create it") - assert.NoError(t, os.MkdirAll(jobPath, os.ModePerm)) + require.NoError(t, os.MkdirAll(jobPath, os.ModePerm)) assert.DirExists(t, jobPath, "os.MkdirAll is borked") taskFile := filepath.Join(jobPath, "task-07c33f32-b345-4da9-8834-9c91532cd97e.txt") - assert.NoError(t, os.WriteFile(taskFile, []byte("dummy task log"), 0o777)) + require.NoError(t, os.WriteFile(taskFile, []byte("dummy task log"), 0o777)) - assert.NoError(t, si.RemoveJobStorage(context.Background(), "")) + require.NoError(t, si.RemoveJobStorage(context.Background(), "")) assert.NoDirExists(t, jobPath, "RemoveJobStorage() should erase the entire job-specific storage dir, and everything in it") // See if the test assumption (that a jobless dir is directly inside the root) still holds. diff --git a/internal/manager/persistence/db.go b/internal/manager/persistence/db.go index 78e222a0..1ffe2806 100644 --- a/internal/manager/persistence/db.go +++ b/internal/manager/persistence/db.go @@ -5,14 +5,15 @@ package persistence import ( "context" + "database/sql" "fmt" "time" + "github.com/glebarez/sqlite" "github.com/rs/zerolog/log" "gorm.io/gorm" - // sqlite "projects.blender.org/studio/flamenco/pkg/gorm-modernc-sqlite" - "github.com/glebarez/sqlite" + "projects.blender.org/studio/flamenco/internal/manager/persistence/sqlc" ) // DB provides the database interface. @@ -74,6 +75,10 @@ func OpenDB(ctx context.Context, dsn string) (*DB, error) { return nil, ErrIntegrity } + // Perform another vacuum after database migration, as that may have copied a + // lot of data and then dropped another lot of data. 
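+	// (In SQLite, VACUUM rebuilds the database file, returning the pages freed
+	// by those operations to the filesystem.)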
+ db.vacuum() + closeConnOnReturn = false return db, nil } @@ -171,6 +176,25 @@ func (db *DB) Close() error { return sqldb.Close() } +// queries returns the SQLC Queries struct, connected to this database. +// It is intended that all GORM queries will be migrated to use this interface +// instead. +func (db *DB) queries() (*sqlc.Queries, error) { + sqldb, err := db.gormDB.DB() + if err != nil { + return nil, fmt.Errorf("could not get low-level database driver: %w", err) + } + return sqlc.New(sqldb), nil +} + +// now returns the result of `nowFunc()` wrapped in a sql.NullTime. +func (db *DB) now() sql.NullTime { + return sql.NullTime{ + Time: db.gormDB.NowFunc(), + Valid: true, + } +} + func (db *DB) pragmaForeignKeys(enabled bool) error { var ( value int diff --git a/internal/manager/persistence/integrity.go b/internal/manager/persistence/integrity.go index dad10501..096526b0 100644 --- a/internal/manager/persistence/integrity.go +++ b/internal/manager/persistence/integrity.go @@ -13,7 +13,7 @@ import ( var ErrIntegrity = errors.New("database integrity check failed") const ( - integrityCheckTimeout = 2 * time.Second + integrityCheckTimeout = 10 * time.Second ) type PragmaIntegrityCheckResult struct { @@ -78,6 +78,8 @@ func (db *DB) performIntegrityCheck(ctx context.Context) (ok bool) { log.Debug().Msg("database: performing integrity check") + db.ensureForeignKeysEnabled() + if !db.pragmaIntegrityCheck(checkCtx) { return false } @@ -159,3 +161,29 @@ func (db *DB) pragmaForeignKeyCheck(ctx context.Context) (ok bool) { return false } + +// ensureForeignKeysEnabled checks whether foreign keys are enabled, and if not, +// tries to enable them. +// +// This is likely caused by either GORM or its embedded SQLite creating a new +// connection to the low-level SQLite driver. Unfortunately the GORM-embedded +// SQLite doesn't have an 'on-connect' callback function to always enable +// foreign keys. +func (db *DB) ensureForeignKeysEnabled() { + fkEnabled, err := db.areForeignKeysEnabled() + + if err != nil { + log.Error().AnErr("cause", err).Msg("database: could not check whether foreign keys are enabled") + return + } + + if fkEnabled { + return + } + + log.Warn().Msg("database: foreign keys are disabled, re-enabling them") + if err := db.pragmaForeignKeys(true); err != nil { + log.Error().AnErr("cause", err).Msg("database: error re-enabling foreign keys") + return + } +} diff --git a/internal/manager/persistence/jobs.go b/internal/manager/persistence/jobs.go index 90c77ca6..8a2f94d3 100644 --- a/internal/manager/persistence/jobs.go +++ b/internal/manager/persistence/jobs.go @@ -17,6 +17,7 @@ import ( "gorm.io/gorm/clause" "projects.blender.org/studio/flamenco/internal/manager/job_compilers" + "projects.blender.org/studio/flamenco/internal/manager/persistence/sqlc" "projects.blender.org/studio/flamenco/pkg/api" ) @@ -252,19 +253,20 @@ func (db *DB) storeAuthoredJobTaks( // FetchJob fetches a single job, without fetching its tasks. func (db *DB) FetchJob(ctx context.Context, jobUUID string) (*Job, error) { - dbJob := Job{} - findResult := db.gormDB.WithContext(ctx). - Limit(1). - Preload("WorkerTag"). 
- Find(&dbJob, "uuid = ?", jobUUID) - if findResult.Error != nil { - return nil, jobError(findResult.Error, "fetching job") - } - if dbJob.ID == 0 { - return nil, ErrJobNotFound + queries, err := db.queries() + if err != nil { + return nil, err } - return &dbJob, nil + sqlcJob, err := queries.FetchJob(ctx, jobUUID) + switch { + case errors.Is(err, sql.ErrNoRows): + return nil, ErrJobNotFound + case err != nil: + return nil, jobError(err, "fetching job") + } + + return convertSqlcJob(sqlcJob) } // DeleteJob deletes a job from the database. @@ -279,24 +281,38 @@ func (db *DB) DeleteJob(ctx context.Context, jobUUID string) error { return ErrDeletingWithoutFK } - tx := db.gormDB.WithContext(ctx). - Where("uuid = ?", jobUUID). - Delete(&Job{}) - if tx.Error != nil { - return jobError(tx.Error, "deleting job") + queries, err := db.queries() + if err != nil { + return err + } + + if err := queries.DeleteJob(ctx, jobUUID); err != nil { + return jobError(err, "deleting job") } return nil } // RequestJobDeletion sets the job's "DeletionRequestedAt" field to "now". func (db *DB) RequestJobDeletion(ctx context.Context, j *Job) error { - j.DeleteRequestedAt.Time = db.gormDB.NowFunc() - j.DeleteRequestedAt.Valid = true - tx := db.gormDB.WithContext(ctx). - Model(j). - Updates(Job{DeleteRequestedAt: j.DeleteRequestedAt}) - if tx.Error != nil { - return jobError(tx.Error, "queueing job for deletion") + queries, err := db.queries() + if err != nil { + return err + } + + // Update the given job itself, so we don't have to re-fetch it from the database. + j.DeleteRequestedAt = db.now() + + params := sqlc.RequestJobDeletionParams{ + Now: j.DeleteRequestedAt, + JobID: int64(j.ID), + } + + log.Trace(). + Str("job", j.UUID). + Time("deletedAt", params.Now.Time). + Msg("database: marking job as deletion-requested") + if err := queries.RequestJobDeletion(ctx, params); err != nil { + return jobError(err, "queueing job for deletion") } return nil } @@ -304,98 +320,114 @@ func (db *DB) RequestJobDeletion(ctx context.Context, j *Job) error { // RequestJobMassDeletion sets multiple job's "DeletionRequestedAt" field to "now". // The list of affected job UUIDs is returned. func (db *DB) RequestJobMassDeletion(ctx context.Context, lastUpdatedMax time.Time) ([]string, error) { - // In order to be able to report which jobs were affected, first fetch the - // list of jobs, then update them. - var jobs []*Job - selectResult := db.gormDB.WithContext(ctx). - Model(&Job{}). - Select("uuid"). - Where("updated_at <= ?", lastUpdatedMax). - Scan(&jobs) - if selectResult.Error != nil { - return nil, jobError(selectResult.Error, "fetching jobs by last-modified timestamp") + queries, err := db.queries() + if err != nil { + return nil, err } - if len(jobs) == 0 { + // In order to be able to report which jobs were affected, first fetch the + // list of jobs, then update them. + uuids, err := queries.FetchJobUUIDsUpdatedBefore(ctx, sql.NullTime{ + Time: lastUpdatedMax, + Valid: true, + }) + switch { + case err != nil: + return nil, jobError(err, "fetching jobs by last-modified timestamp") + case len(uuids) == 0: return nil, ErrJobNotFound } - // Convert array of jobs to array of UUIDs. - uuids := make([]string, len(jobs)) - for index := range jobs { - uuids[index] = jobs[index].UUID - } - // Update the selected jobs. - deleteRequestedAt := sql.NullTime{ - Time: db.gormDB.NowFunc(), - Valid: true, + params := sqlc.RequestMassJobDeletionParams{ + Now: db.now(), + UUIDs: uuids, } - tx := db.gormDB.WithContext(ctx). - Model(Job{}). 
- Where("uuid in ?", uuids). - Updates(Job{DeleteRequestedAt: deleteRequestedAt}) - if tx.Error != nil { - return nil, jobError(tx.Error, "queueing jobs for deletion") + if err := queries.RequestMassJobDeletion(ctx, params); err != nil { + return nil, jobError(err, "marking jobs as deletion-requested") } return uuids, nil } func (db *DB) FetchJobsDeletionRequested(ctx context.Context) ([]string, error) { - var jobs []*Job - - tx := db.gormDB.WithContext(ctx). - Model(&Job{}). - Select("UUID"). - Where("delete_requested_at is not NULL"). - Order("delete_requested_at"). - Scan(&jobs) - - if tx.Error != nil { - return nil, jobError(tx.Error, "fetching jobs marked for deletion") + queries, err := db.queries() + if err != nil { + return nil, err } - uuids := make([]string, len(jobs)) - for i := range jobs { - uuids[i] = jobs[i].UUID + uuids, err := queries.FetchJobsDeletionRequested(ctx) + if err != nil { + return nil, jobError(err, "fetching jobs marked for deletion") } - return uuids, nil } func (db *DB) FetchJobsInStatus(ctx context.Context, jobStatuses ...api.JobStatus) ([]*Job, error) { - var jobs []*Job - - tx := db.gormDB.WithContext(ctx). - Model(&Job{}). - Where("status in ?", jobStatuses). - Scan(&jobs) - - if tx.Error != nil { - return nil, jobError(tx.Error, "fetching jobs in status %q", jobStatuses) + queries, err := db.queries() + if err != nil { + return nil, err } + + statuses := []string{} + for _, status := range jobStatuses { + statuses = append(statuses, string(status)) + } + + sqlcJobs, err := queries.FetchJobsInStatus(ctx, statuses) + if err != nil { + return nil, jobError(err, "fetching jobs in status %q", jobStatuses) + } + + var jobs []*Job + for index := range sqlcJobs { + job, err := convertSqlcJob(sqlcJobs[index]) + if err != nil { + return nil, jobError(err, "converting fetched jobs in status %q", jobStatuses) + } + jobs = append(jobs, job) + } + return jobs, nil } // SaveJobStatus saves the job's Status and Activity fields. func (db *DB) SaveJobStatus(ctx context.Context, j *Job) error { - tx := db.gormDB.WithContext(ctx). - Model(j). - Updates(Job{Status: j.Status, Activity: j.Activity}) - if tx.Error != nil { - return jobError(tx.Error, "saving job status") + queries, err := db.queries() + if err != nil { + return err + } + + params := sqlc.SaveJobStatusParams{ + Now: db.now(), + ID: int64(j.ID), + Status: string(j.Status), + Activity: j.Activity, + } + + err = queries.SaveJobStatus(ctx, params) + if err != nil { + return jobError(err, "saving job status") } return nil } // SaveJobPriority saves the job's Priority field. func (db *DB) SaveJobPriority(ctx context.Context, j *Job) error { - tx := db.gormDB.WithContext(ctx). - Model(j). - Updates(Job{Priority: j.Priority}) - if tx.Error != nil { - return jobError(tx.Error, "saving job priority") + queries, err := db.queries() + if err != nil { + return err + } + + params := sqlc.SaveJobPriorityParams{ + Now: db.now(), + ID: int64(j.ID), + Priority: int64(j.Priority), + } + + err = queries.SaveJobPriority(ctx, params) + if err != nil { + return jobError(err, "saving job priority") } return nil } @@ -404,12 +436,19 @@ func (db *DB) SaveJobPriority(ctx context.Context, j *Job) error { // NOTE: this function does NOT update the job's `UpdatedAt` field. This is // necessary for `cmd/shaman-checkout-id-setter` to do its work quietly. func (db *DB) SaveJobStorageInfo(ctx context.Context, j *Job) error { - tx := db.gormDB.WithContext(ctx). - Model(j). - Omit("UpdatedAt"). 
- Updates(Job{Storage: j.Storage}) - if tx.Error != nil { - return jobError(tx.Error, "saving job storage") + queries, err := db.queries() + if err != nil { + return err + } + + params := sqlc.SaveJobStorageInfoParams{ + ID: int64(j.ID), + StorageShamanCheckoutID: j.Storage.ShamanCheckoutID, + } + + err = queries.SaveJobStorageInfo(ctx, params) + if err != nil { + return jobError(err, "saving job storage") } return nil } @@ -713,3 +752,42 @@ func (db *DB) FetchTaskFailureList(ctx context.Context, t *Task) ([]*Worker, err return workers, tx.Error } + +// convertSqlcJob converts a job from the SQLC-generated model to the model +// expected by the rest of the code. This is mostly in place to aid in the GORM +// to SQLC migration. It is intended that eventually the rest of the code will +// use the same SQLC-generated model. +func convertSqlcJob(job sqlc.Job) (*Job, error) { + dbJob := Job{ + Model: Model{ + ID: uint(job.ID), + CreatedAt: job.CreatedAt, + UpdatedAt: job.UpdatedAt.Time, + }, + UUID: job.UUID, + Name: job.Name, + JobType: job.JobType, + Priority: int(job.Priority), + Status: api.JobStatus(job.Status), + Activity: job.Activity, + DeleteRequestedAt: job.DeleteRequestedAt, + Storage: JobStorageInfo{ + ShamanCheckoutID: job.StorageShamanCheckoutID, + }, + } + + if err := json.Unmarshal(job.Settings, &dbJob.Settings); err != nil { + return nil, jobError(err, fmt.Sprintf("job %s has invalid settings: %v", job.UUID, err)) + } + + if err := json.Unmarshal(job.Metadata, &dbJob.Metadata); err != nil { + return nil, jobError(err, fmt.Sprintf("job %s has invalid metadata: %v", job.UUID, err)) + } + + if job.WorkerTagID.Valid { + workerTagID := uint(job.WorkerTagID.Int64) + dbJob.WorkerTagID = &workerTagID + } + + return &dbJob, nil +} diff --git a/internal/manager/persistence/jobs_blocklist_test.go b/internal/manager/persistence/jobs_blocklist_test.go index d82a4092..436c5f18 100644 --- a/internal/manager/persistence/jobs_blocklist_test.go +++ b/internal/manager/persistence/jobs_blocklist_test.go @@ -18,11 +18,11 @@ func TestAddWorkerToJobBlocklist(t *testing.T) { { // Add a worker to the block list. err := db.AddWorkerToJobBlocklist(ctx, job, worker, "blender") - assert.NoError(t, err) + require.NoError(t, err) list := []JobBlock{} tx := db.gormDB.Model(&JobBlock{}).Scan(&list) - assert.NoError(t, tx.Error) + require.NoError(t, tx.Error) if assert.Len(t, list, 1) { entry := list[0] assert.Equal(t, entry.JobID, job.ID) @@ -34,11 +34,11 @@ func TestAddWorkerToJobBlocklist(t *testing.T) { { // Adding the same worker again should be a no-op. err := db.AddWorkerToJobBlocklist(ctx, job, worker, "blender") - assert.NoError(t, err) + require.NoError(t, err) list := []JobBlock{} tx := db.gormDB.Model(&JobBlock{}).Scan(&list) - assert.NoError(t, tx.Error) + require.NoError(t, tx.Error) assert.Len(t, list, 1, "No new entry should have been created") } } @@ -50,10 +50,10 @@ func TestFetchJobBlocklist(t *testing.T) { // Add a worker to the block list. worker := createWorker(ctx, t, db) err := db.AddWorkerToJobBlocklist(ctx, job, worker, "blender") - assert.NoError(t, err) + require.NoError(t, err) list, err := db.FetchJobBlocklist(ctx, job.UUID) - assert.NoError(t, err) + require.NoError(t, err) if assert.Len(t, list, 1) { entry := list[0] @@ -73,17 +73,17 @@ func TestClearJobBlocklist(t *testing.T) { // Add a worker and some entries to the block list. 
worker := createWorker(ctx, t, db) err := db.AddWorkerToJobBlocklist(ctx, job, worker, "blender") - assert.NoError(t, err) + require.NoError(t, err) err = db.AddWorkerToJobBlocklist(ctx, job, worker, "ffmpeg") - assert.NoError(t, err) + require.NoError(t, err) // Clear the blocklist. err = db.ClearJobBlocklist(ctx, job) - assert.NoError(t, err) + require.NoError(t, err) // Check that it is indeed empty. list, err := db.FetchJobBlocklist(ctx, job.UUID) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, list) } @@ -94,17 +94,17 @@ func TestRemoveFromJobBlocklist(t *testing.T) { // Add a worker and some entries to the block list. worker := createWorker(ctx, t, db) err := db.AddWorkerToJobBlocklist(ctx, job, worker, "blender") - assert.NoError(t, err) + require.NoError(t, err) err = db.AddWorkerToJobBlocklist(ctx, job, worker, "ffmpeg") - assert.NoError(t, err) + require.NoError(t, err) // Remove an entry. err = db.RemoveFromJobBlocklist(ctx, job.UUID, worker.UUID, "ffmpeg") - assert.NoError(t, err) + require.NoError(t, err) // Check that the other entry is still there. list, err := db.FetchJobBlocklist(ctx, job.UUID) - assert.NoError(t, err) + require.NoError(t, err) if assert.Len(t, list, 1) { entry := list[0] @@ -120,7 +120,7 @@ func TestWorkersLeftToRun(t *testing.T) { // No workers. left, err := db.WorkersLeftToRun(ctx, job, "blender") - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, left) worker1 := createWorker(ctx, t, db) @@ -146,30 +146,27 @@ func TestWorkersLeftToRun(t *testing.T) { // Three workers, no blocklist. left, err = db.WorkersLeftToRun(ctx, job, "blender") - if assert.NoError(t, err) { - assert.Equal(t, uuidMap(worker1, worker2, workerC1), left) - } + require.NoError(t, err) + assert.Equal(t, uuidMap(worker1, worker2, workerC1), left) // Two workers, one blocked. _ = db.AddWorkerToJobBlocklist(ctx, job, worker1, "blender") left, err = db.WorkersLeftToRun(ctx, job, "blender") - if assert.NoError(t, err) { - assert.Equal(t, uuidMap(worker2, workerC1), left) - } + require.NoError(t, err) + assert.Equal(t, uuidMap(worker2, workerC1), left) // All workers blocked. _ = db.AddWorkerToJobBlocklist(ctx, job, worker2, "blender") _ = db.AddWorkerToJobBlocklist(ctx, job, workerC1, "blender") left, err = db.WorkersLeftToRun(ctx, job, "blender") - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, left) // Two workers, unknown job. fakeJob := Job{Model: Model{ID: 327}} left, err = db.WorkersLeftToRun(ctx, &fakeJob, "blender") - if assert.NoError(t, err) { - assert.Equal(t, uuidMap(worker1, worker2, workerC1), left) - } + require.NoError(t, err) + assert.Equal(t, uuidMap(worker1, worker2, workerC1), left) } func TestWorkersLeftToRunWithTags(t *testing.T) { @@ -233,7 +230,7 @@ func TestWorkersLeftToRunWithTags(t *testing.T) { // All taged workers blocked. _ = db.AddWorkerToJobBlocklist(ctx, job, workerC13, "blender") left, err = db.WorkersLeftToRun(ctx, job, "blender") - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, left) } @@ -261,25 +258,21 @@ func TestCountTaskFailuresOfWorker(t *testing.T) { // Multiple failures. numBlender1, err := db.CountTaskFailuresOfWorker(ctx, dbJob, worker1, "blender") - if assert.NoError(t, err) { - assert.Equal(t, 2, numBlender1) - } + require.NoError(t, err) + assert.Equal(t, 2, numBlender1) // Single failure, but multiple tasks exist of this type. 
numBlender2, err := db.CountTaskFailuresOfWorker(ctx, dbJob, worker2, "blender") - if assert.NoError(t, err) { - assert.Equal(t, 1, numBlender2) - } + require.NoError(t, err) + assert.Equal(t, 1, numBlender2) // Single failure, only one task of this type exists. numFFMpeg1, err := db.CountTaskFailuresOfWorker(ctx, dbJob, worker1, "ffmpeg") - if assert.NoError(t, err) { - assert.Equal(t, 1, numFFMpeg1) - } + require.NoError(t, err) + assert.Equal(t, 1, numFFMpeg1) // No failure. numFFMpeg2, err := db.CountTaskFailuresOfWorker(ctx, dbJob, worker2, "ffmpeg") - if assert.NoError(t, err) { - assert.Equal(t, 0, numFFMpeg2) - } + require.NoError(t, err) + assert.Equal(t, 0, numFFMpeg2) } diff --git a/internal/manager/persistence/jobs_query.go b/internal/manager/persistence/jobs_query.go index c4431b05..fe040aa6 100644 --- a/internal/manager/persistence/jobs_query.go +++ b/internal/manager/persistence/jobs_query.go @@ -86,3 +86,33 @@ func (db *DB) QueryJobTaskSummaries(ctx context.Context, jobUUID string) ([]*Tas return result, tx.Error } + +// JobStatusCount is a mapping from job status to the number of jobs in that status. +type JobStatusCount map[api.JobStatus]int + +func (db *DB) SummarizeJobStatuses(ctx context.Context) (JobStatusCount, error) { + logger := log.Ctx(ctx) + logger.Debug().Msg("database: summarizing job statuses") + + // Query the database using a data structure that's easy to handle in GORM. + type queryResult struct { + Status api.JobStatus + StatusCount int + } + result := []*queryResult{} + tx := db.gormDB.WithContext(ctx).Model(&Job{}). + Select("status as Status", "count(id) as StatusCount"). + Group("status"). + Scan(&result) + if tx.Error != nil { + return nil, jobError(tx.Error, "summarizing job statuses") + } + + // Convert the array-of-structs to a map that's easier to handle by the caller. + statusCounts := make(JobStatusCount) + for _, singleStatusCount := range result { + statusCounts[singleStatusCount.Status] = singleStatusCount.StatusCount + } + + return statusCounts, nil +} diff --git a/internal/manager/persistence/jobs_query_test.go b/internal/manager/persistence/jobs_query_test.go index 6229b799..87983dc9 100644 --- a/internal/manager/persistence/jobs_query_test.go +++ b/internal/manager/persistence/jobs_query_test.go @@ -4,9 +4,12 @@ package persistence // SPDX-License-Identifier: GPL-3.0-or-later import ( + "context" "testing" + "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/job_compilers" "projects.blender.org/studio/flamenco/internal/uuid" @@ -26,14 +29,14 @@ func TestSimpleQuery(t *testing.T) { result, err := db.QueryJobs(ctx, api.JobsQuery{ StatusIn: &[]api.JobStatus{api.JobStatusActive, api.JobStatusCanceled}, }) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, result, 0) // Check job was returned properly on correct status. result, err = db.QueryJobs(ctx, api.JobsQuery{ StatusIn: &[]api.JobStatus{api.JobStatusUnderConstruction, api.JobStatusCanceled}, }) - assert.NoError(t, err) + require.NoError(t, err) if !assert.Len(t, result, 1) { t.FailNow() } @@ -65,7 +68,7 @@ func TestQueryMetadata(t *testing.T) { AdditionalProperties: map[string]string{ "project": "Secret Future Project", }}}) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, result, 0) // Check job was returned properly when querying for the right project. 
@@ -74,7 +77,7 @@ func TestQueryMetadata(t *testing.T) {
 			AdditionalProperties: map[string]string{
 				"project": testJob.Metadata["project"],
 			}}})
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if !assert.Len(t, result, 1) {
 		t.FailNow()
 	}
@@ -86,7 +89,7 @@
 			AdditionalProperties: map[string]string{
 				"project": otherJob.Metadata["project"],
 			}}})
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if !assert.Len(t, result, 1) {
 		t.FailNow()
 	}
@@ -97,7 +100,7 @@
 		OrderBy:  &[]string{"status"},
 		Metadata: &api.JobsQuery_Metadata{AdditionalProperties: map[string]string{}},
 	})
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if !assert.Len(t, result, 2) {
 		t.FailNow()
 	}
@@ -129,15 +132,70 @@ func TestQueryJobTaskSummaries(t *testing.T) {
 	// Sanity check for the above code, there should be 6 tasks overall, 3 per job.
 	var numTasks int64
 	tx := db.gormDB.Model(&Task{}).Count(&numTasks)
-	assert.NoError(t, tx.Error)
+	require.NoError(t, tx.Error)
 	assert.Equal(t, int64(6), numTasks)
 
 	// Get the task summaries of a particular job.
 	summaries, err := db.QueryJobTaskSummaries(ctx, job.UUID)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	assert.Len(t, summaries, len(expectTaskUUIDs))
 	for _, summary := range summaries {
 		assert.True(t, expectTaskUUIDs[summary.UUID], "%q should be in %v", summary.UUID, expectTaskUUIDs)
 	}
 }
+
+func TestSummarizeJobStatuses(t *testing.T) {
+	ctx, close, db, job1, authoredJob1 := jobTasksTestFixtures(t)
+	defer close()
+
+	// Create another job.
+	authoredJob2 := duplicateJobAndTasks(authoredJob1)
+	job2 := persistAuthoredJob(t, ctx, db, authoredJob2)
+
+	// Test the summary.
+	summary, err := db.SummarizeJobStatuses(ctx)
+	require.NoError(t, err)
+	assert.Equal(t, JobStatusCount{api.JobStatusUnderConstruction: 2}, summary)
+
+	// Change the jobs so that each has a unique status.
+	job1.Status = api.JobStatusQueued
+	require.NoError(t, db.SaveJobStatus(ctx, job1))
+	job2.Status = api.JobStatusFailed
+	require.NoError(t, db.SaveJobStatus(ctx, job2))
+
+	// Test the summary.
+	summary, err = db.SummarizeJobStatuses(ctx)
+	require.NoError(t, err)
+	assert.Equal(t, JobStatusCount{
+		api.JobStatusQueued: 1,
+		api.JobStatusFailed: 1,
+	}, summary)
+
+	// Delete all jobs.
+	require.NoError(t, db.DeleteJob(ctx, job1.UUID))
+	require.NoError(t, db.DeleteJob(ctx, job2.UUID))
+
+	// Test the summary.
+	summary, err = db.SummarizeJobStatuses(ctx)
+	require.NoError(t, err)
+	assert.Equal(t, JobStatusCount{}, summary)
+}
+
+// Check that a context timeout can be detected by inspecting the
+// returned error.
+func TestSummarizeJobStatusesTimeout(t *testing.T) {
+	ctx, close, db, _, _ := jobTasksTestFixtures(t)
+	defer close()
+
+	subCtx, subCtxCancel := context.WithTimeout(ctx, 1*time.Nanosecond)
+	defer subCtxCancel()
+
+	// Force a timeout of the context. And yes, even though a nanosecond is quite
+	// short, it is still necessary to wait.
+ time.Sleep(2 * time.Nanosecond) + + summary, err := db.SummarizeJobStatuses(subCtx) + assert.ErrorIs(t, err, context.DeadlineExceeded) + assert.Nil(t, summary) +} diff --git a/internal/manager/persistence/jobs_test.go b/internal/manager/persistence/jobs_test.go index 0e51aaad..20053085 100644 --- a/internal/manager/persistence/jobs_test.go +++ b/internal/manager/persistence/jobs_test.go @@ -24,10 +24,10 @@ func TestStoreAuthoredJob(t *testing.T) { job := createTestAuthoredJobWithTasks() err := db.StoreAuthoredJob(ctx, job) - assert.NoError(t, err) + require.NoError(t, err) fetchedJob, err := db.FetchJob(ctx, job.JobID) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, fetchedJob) // Test contents of fetched job @@ -43,10 +43,10 @@ func TestStoreAuthoredJob(t *testing.T) { // Fetch tasks of job. var dbJob Job tx := db.gormDB.Where(&Job{UUID: job.JobID}).Find(&dbJob) - assert.NoError(t, tx.Error) + require.NoError(t, tx.Error) var tasks []Task tx = db.gormDB.Where("job_id = ?", dbJob.ID).Find(&tasks) - assert.NoError(t, tx.Error) + require.NoError(t, tx.Error) if len(tasks) != 3 { t.Fatalf("expected 3 tasks, got %d", len(tasks)) @@ -108,6 +108,30 @@ func TestSaveJobStorageInfo(t *testing.T) { assert.Equal(t, startTime, updatedJob.UpdatedAt, "SaveJobStorageInfo should not touch UpdatedAt") } +func TestSaveJobPriority(t *testing.T) { + ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second) + defer cancel() + + // Create test job. + authoredJob := createTestAuthoredJobWithTasks() + err := db.StoreAuthoredJob(ctx, authoredJob) + require.NoError(t, err) + + // Set a new priority. + newPriority := 47 + dbJob, err := db.FetchJob(ctx, authoredJob.JobID) + require.NoError(t, err) + require.NotEqual(t, newPriority, dbJob.Priority, + "Initial priority should not be the same as what this test changes it to") + dbJob.Priority = newPriority + require.NoError(t, db.SaveJobPriority(ctx, dbJob)) + + // Check the result. + dbJob, err = db.FetchJob(ctx, authoredJob.JobID) + require.NoError(t, err) + assert.EqualValues(t, newPriority, dbJob.Priority) +} + func TestDeleteJob(t *testing.T) { ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second) defer cancel() @@ -170,7 +194,7 @@ func TestDeleteJobWithoutFK(t *testing.T) { // Test the deletion did not happen. _, err = db.FetchJob(ctx, authJob.JobID) - assert.NoError(t, err, "job should not have been deleted") + require.NoError(t, err, "job should not have been deleted") } func TestRequestJobDeletion(t *testing.T) { @@ -185,20 +209,20 @@ func TestRequestJobDeletion(t *testing.T) { db.gormDB.NowFunc = func() time.Time { return mockNow } err := db.RequestJobDeletion(ctx, job1) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, job1.DeleteRequested()) assert.True(t, job1.DeleteRequestedAt.Valid) assert.Equal(t, job1.DeleteRequestedAt.Time, mockNow) dbJob1, err := db.FetchJob(ctx, job1.UUID) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, job1.DeleteRequested()) assert.True(t, dbJob1.DeleteRequestedAt.Valid) assert.WithinDuration(t, mockNow, dbJob1.DeleteRequestedAt.Time, time.Second) // Other jobs shouldn't be touched. 
dbJob2, err := db.FetchJob(ctx, authoredJob2.JobID) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, dbJob2.DeleteRequested()) assert.False(t, dbJob2.DeleteRequestedAt.Valid) } @@ -228,7 +252,7 @@ func TestRequestJobMassDeletion(t *testing.T) { timeOfDeleteRequest := origGormNow() db.gormDB.NowFunc = func() time.Time { return timeOfDeleteRequest } uuids, err := db.RequestJobMassDeletion(ctx, job3.UpdatedAt) - assert.NoError(t, err) + require.NoError(t, err) db.gormDB.NowFunc = origGormNow @@ -288,10 +312,10 @@ func TestFetchJobsDeletionRequested(t *testing.T) { // Ensure different requests get different timestamps, // out of chronological order. timestamps := []time.Time{ - // timestamps for 'delete requested at' and 'updated at' - now.Add(-3 * time.Second), now.Add(-3 * time.Second), - now.Add(-1 * time.Second), now.Add(-1 * time.Second), - now.Add(-5 * time.Second), now.Add(-5 * time.Second), + // timestamps for 'delete requested at'. + now.Add(-3 * time.Second), + now.Add(-1 * time.Second), + now.Add(-5 * time.Second), } currentTimestampIndex := 0 db.gormDB.NowFunc = func() time.Time { @@ -301,14 +325,14 @@ func TestFetchJobsDeletionRequested(t *testing.T) { } err := db.RequestJobDeletion(ctx, job1) - assert.NoError(t, err) + require.NoError(t, err) err = db.RequestJobDeletion(ctx, job2) - assert.NoError(t, err) + require.NoError(t, err) err = db.RequestJobDeletion(ctx, job3) - assert.NoError(t, err) + require.NoError(t, err) actualUUIDs, err := db.FetchJobsDeletionRequested(ctx) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, actualUUIDs, 3, "3 out of 4 jobs were marked for deletion") // Expect UUIDs in chronological order of deletion requests, so that the @@ -322,11 +346,11 @@ func TestJobHasTasksInStatus(t *testing.T) { defer close() hasTasks, err := db.JobHasTasksInStatus(ctx, job, api.TaskStatusQueued) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, hasTasks, "expected freshly-created job to have queued tasks") hasTasks, err = db.JobHasTasksInStatus(ctx, job, api.TaskStatusActive) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, hasTasks, "expected freshly-created job to have no active tasks") } @@ -335,28 +359,28 @@ func TestCountTasksOfJobInStatus(t *testing.T) { defer close() numQueued, numTotal, err := db.CountTasksOfJobInStatus(ctx, job, api.TaskStatusQueued) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 3, numQueued) assert.Equal(t, 3, numTotal) // Make one task failed. 
task, err := db.FetchTask(ctx, authoredJob.Tasks[0].UUID) - assert.NoError(t, err) + require.NoError(t, err) task.Status = api.TaskStatusFailed - assert.NoError(t, db.SaveTask(ctx, task)) + require.NoError(t, db.SaveTask(ctx, task)) numQueued, numTotal, err = db.CountTasksOfJobInStatus(ctx, job, api.TaskStatusQueued) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, numQueued) assert.Equal(t, 3, numTotal) numFailed, numTotal, err := db.CountTasksOfJobInStatus(ctx, job, api.TaskStatusFailed) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 1, numFailed) assert.Equal(t, 3, numTotal) numActive, numTotal, err := db.CountTasksOfJobInStatus(ctx, job, api.TaskStatusActive) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 0, numActive) assert.Equal(t, 3, numTotal) } @@ -370,7 +394,7 @@ func TestCheckIfJobsHoldLargeNumOfTasks(t *testing.T) { defer close() numQueued, numTotal, err := db.CountTasksOfJobInStatus(ctx, job, api.TaskStatusQueued) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, numtasks, numQueued) assert.Equal(t, numtasks, numTotal) @@ -392,22 +416,22 @@ func TestFetchJobsInStatus(t *testing.T) { // Query single status jobs, err := db.FetchJobsInStatus(ctx, api.JobStatusUnderConstruction) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []*Job{job1, job2, job3}, jobs) // Query two statuses, where only one matches all jobs. jobs, err = db.FetchJobsInStatus(ctx, api.JobStatusCanceled, api.JobStatusUnderConstruction) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []*Job{job1, job2, job3}, jobs) // Update a job status, query for two of the three used statuses. job1.Status = api.JobStatusQueued - assert.NoError(t, db.SaveJobStatus(ctx, job1)) + require.NoError(t, db.SaveJobStatus(ctx, job1)) job2.Status = api.JobStatusRequeueing - assert.NoError(t, db.SaveJobStatus(ctx, job2)) + require.NoError(t, db.SaveJobStatus(ctx, job2)) jobs, err = db.FetchJobsInStatus(ctx, api.JobStatusQueued, api.JobStatusUnderConstruction) - assert.NoError(t, err) + require.NoError(t, err) if assert.Len(t, jobs, 2) { assert.Equal(t, job1.UUID, jobs[0].UUID) assert.Equal(t, job3.UUID, jobs[1].UUID) @@ -419,35 +443,33 @@ func TestFetchTasksOfJobInStatus(t *testing.T) { defer close() allTasks, err := db.FetchTasksOfJob(ctx, job) - if !assert.NoError(t, err) { - return - } + require.NoError(t, err) assert.Equal(t, job, allTasks[0].Job, "FetchTasksOfJob should set job pointer") tasks, err := db.FetchTasksOfJobInStatus(ctx, job, api.TaskStatusQueued) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, allTasks, tasks) assert.Equal(t, job, tasks[0].Job, "FetchTasksOfJobInStatus should set job pointer") // Make one task failed. task, err := db.FetchTask(ctx, authoredJob.Tasks[0].UUID) - assert.NoError(t, err) + require.NoError(t, err) task.Status = api.TaskStatusFailed - assert.NoError(t, db.SaveTask(ctx, task)) + require.NoError(t, db.SaveTask(ctx, task)) tasks, err = db.FetchTasksOfJobInStatus(ctx, job, api.TaskStatusQueued) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, []*Task{allTasks[1], allTasks[2]}, tasks) // Check the failed task. This cannot directly compare to `allTasks[0]` // because saving the task above changed some of its fields. 
tasks, err = db.FetchTasksOfJobInStatus(ctx, job, api.TaskStatusFailed) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, tasks, 1) assert.Equal(t, allTasks[0].ID, tasks[0].ID) tasks, err = db.FetchTasksOfJobInStatus(ctx, job, api.TaskStatusActive) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, tasks) } @@ -456,10 +478,10 @@ func TestTaskAssignToWorker(t *testing.T) { defer close() task, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID) - assert.NoError(t, err) + require.NoError(t, err) w := createWorker(ctx, t, db) - assert.NoError(t, db.TaskAssignToWorker(ctx, task, w)) + require.NoError(t, db.TaskAssignToWorker(ctx, task, w)) if task.Worker == nil { t.Error("task.Worker == nil") @@ -478,38 +500,111 @@ func TestFetchTasksOfWorkerInStatus(t *testing.T) { defer close() task, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID) - assert.NoError(t, err) + require.NoError(t, err) w := createWorker(ctx, t, db) - assert.NoError(t, db.TaskAssignToWorker(ctx, task, w)) + require.NoError(t, db.TaskAssignToWorker(ctx, task, w)) tasks, err := db.FetchTasksOfWorkerInStatus(ctx, w, task.Status) - assert.NoError(t, err) + require.NoError(t, err) assert.Len(t, tasks, 1, "worker should have one task in status %q", task.Status) assert.Equal(t, task.ID, tasks[0].ID) assert.Equal(t, task.UUID, tasks[0].UUID) assert.NotEqual(t, api.TaskStatusCanceled, task.Status) tasks, err = db.FetchTasksOfWorkerInStatus(ctx, w, api.TaskStatusCanceled) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, tasks, "worker should have no task in status %q", w) } +func TestFetchTasksOfWorkerInStatusOfJob(t *testing.T) { + ctx, close, db, dbJob, authoredJob := jobTasksTestFixtures(t) + defer close() + + // Create multiple Workers, to test the function doesn't return tasks from + // other Workers. + worker := createWorker(ctx, t, db, func(worker *Worker) { + worker.UUID = "43300628-5f3b-4724-ab30-9821af8bda86" + }) + otherWorker := createWorker(ctx, t, db, func(worker *Worker) { + worker.UUID = "2327350f-75ec-4b0e-bd28-31a7b045c85c" + }) + + // Create another job, to make sure the function under test doesn't return + // tasks from other jobs. + otherJob := duplicateJobAndTasks(authoredJob) + otherJob.Name = "The other job" + persistAuthoredJob(t, ctx, db, otherJob) + + // Assign a task from each job to each Worker. + // Also double-check the test precondition that all tasks have the same status. + { // Job / Worker. + task1, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID) + require.NoError(t, err) + require.NoError(t, db.TaskAssignToWorker(ctx, task1, worker)) + require.Equal(t, task1.Status, api.TaskStatusQueued) + + task2, err := db.FetchTask(ctx, authoredJob.Tasks[0].UUID) + require.NoError(t, err) + require.NoError(t, db.TaskAssignToWorker(ctx, task2, worker)) + require.Equal(t, task2.Status, api.TaskStatusQueued) + } + { // Job / Other Worker. + task, err := db.FetchTask(ctx, authoredJob.Tasks[2].UUID) + require.NoError(t, err) + require.NoError(t, db.TaskAssignToWorker(ctx, task, otherWorker)) + require.Equal(t, task.Status, api.TaskStatusQueued) + } + { // Other Job / Worker. + task, err := db.FetchTask(ctx, otherJob.Tasks[1].UUID) + require.NoError(t, err) + require.NoError(t, db.TaskAssignToWorker(ctx, task, worker)) + require.Equal(t, task.Status, api.TaskStatusQueued) + } + { // Other Job / Other Worker. 
+ task, err := db.FetchTask(ctx, otherJob.Tasks[2].UUID) + require.NoError(t, err) + require.NoError(t, db.TaskAssignToWorker(ctx, task, otherWorker)) + require.Equal(t, task.Status, api.TaskStatusQueued) + } + + { // Test active tasks, should be none. + tasks, err := db.FetchTasksOfWorkerInStatusOfJob(ctx, worker, api.TaskStatusActive, dbJob) + require.NoError(t, err) + require.Len(t, tasks, 0) + } + { // Test queued tasks, should be two. + tasks, err := db.FetchTasksOfWorkerInStatusOfJob(ctx, worker, api.TaskStatusQueued, dbJob) + require.NoError(t, err) + require.Len(t, tasks, 2) + assert.Equal(t, authoredJob.Tasks[0].UUID, tasks[0].UUID) + assert.Equal(t, authoredJob.Tasks[1].UUID, tasks[1].UUID) + } + { // Test queued tasks for worker without tasks, should be none. + worker := createWorker(ctx, t, db, func(worker *Worker) { + worker.UUID = "6534a1d4-f58e-4f2c-8925-4b2cd6caac22" + }) + tasks, err := db.FetchTasksOfWorkerInStatusOfJob(ctx, worker, api.TaskStatusQueued, dbJob) + require.NoError(t, err) + require.Len(t, tasks, 0) + } +} + func TestTaskTouchedByWorker(t *testing.T) { ctx, close, db, _, authoredJob := jobTasksTestFixtures(t) defer close() task, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, task.LastTouchedAt.IsZero()) now := db.gormDB.NowFunc() err = db.TaskTouchedByWorker(ctx, task) - assert.NoError(t, err) + require.NoError(t, err) // Test the task instance as well as the database entry. dbTask, err := db.FetchTask(ctx, task.UUID) - assert.NoError(t, err) + require.NoError(t, err) assert.WithinDuration(t, now, task.LastTouchedAt, time.Second) assert.WithinDuration(t, now, dbTask.LastTouchedAt, time.Second) } @@ -519,7 +614,7 @@ func TestAddWorkerToTaskFailedList(t *testing.T) { defer close() task, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID) - assert.NoError(t, err) + require.NoError(t, err) worker1 := createWorker(ctx, t, db) @@ -528,30 +623,30 @@ func TestAddWorkerToTaskFailedList(t *testing.T) { newWorker.ID = 0 newWorker.UUID = "89ed2b02-b51b-4cd4-b44a-4a1c8d01db85" newWorker.Name = "Worker 2" - assert.NoError(t, db.SaveWorker(ctx, &newWorker)) + require.NoError(t, db.SaveWorker(ctx, &newWorker)) worker2, err := db.FetchWorker(ctx, newWorker.UUID) - assert.NoError(t, err) + require.NoError(t, err) // First failure should be registered just fine. numFailed, err := db.AddWorkerToTaskFailedList(ctx, task, worker1) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 1, numFailed) // Calling again should be a no-op and not cause any errors. numFailed, err = db.AddWorkerToTaskFailedList(ctx, task, worker1) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 1, numFailed) // Another worker should be able to fail this task as well. numFailed, err = db.AddWorkerToTaskFailedList(ctx, task, worker2) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, 2, numFailed) // Deleting the task should also delete the failures. 
- assert.NoError(t, db.DeleteJob(ctx, authoredJob.JobID)) + require.NoError(t, db.DeleteJob(ctx, authoredJob.JobID)) var num int64 tx := db.gormDB.Model(&TaskFailure{}).Count(&num) - assert.NoError(t, tx.Error) + require.NoError(t, tx.Error) assert.Zero(t, num) } @@ -569,9 +664,9 @@ func TestClearFailureListOfTask(t *testing.T) { newWorker.ID = 0 newWorker.UUID = "89ed2b02-b51b-4cd4-b44a-4a1c8d01db85" newWorker.Name = "Worker 2" - assert.NoError(t, db.SaveWorker(ctx, &newWorker)) + require.NoError(t, db.SaveWorker(ctx, &newWorker)) worker2, err := db.FetchWorker(ctx, newWorker.UUID) - assert.NoError(t, err) + require.NoError(t, err) // Store some failures for different tasks. _, _ = db.AddWorkerToTaskFailedList(ctx, task1, worker1) @@ -579,10 +674,10 @@ func TestClearFailureListOfTask(t *testing.T) { _, _ = db.AddWorkerToTaskFailedList(ctx, task2, worker1) // Clearing should just update this one task. - assert.NoError(t, db.ClearFailureListOfTask(ctx, task1)) + require.NoError(t, db.ClearFailureListOfTask(ctx, task1)) var failures = []TaskFailure{} tx := db.gormDB.Model(&TaskFailure{}).Scan(&failures) - assert.NoError(t, tx.Error) + require.NoError(t, tx.Error) if assert.Len(t, failures, 1) { assert.Equal(t, task2.ID, failures[0].TaskID) assert.Equal(t, worker1.ID, failures[0].WorkerID) @@ -615,10 +710,10 @@ func TestClearFailureListOfJob(t *testing.T) { assert.Equal(t, 5, countTaskFailures(db)) // Clearing should be limited to the given job. - assert.NoError(t, db.ClearFailureListOfJob(ctx, dbJob1)) + require.NoError(t, db.ClearFailureListOfJob(ctx, dbJob1)) var failures = []TaskFailure{} tx := db.gormDB.Model(&TaskFailure{}).Scan(&failures) - assert.NoError(t, tx.Error) + require.NoError(t, tx.Error) if assert.Len(t, failures, 2) { assert.Equal(t, task2_1.ID, failures[0].TaskID) assert.Equal(t, worker1.ID, failures[0].WorkerID) @@ -634,7 +729,7 @@ func TestFetchTaskFailureList(t *testing.T) { // Test with non-existing task. fakeTask := Task{Model: Model{ID: 327}} failures, err := db.FetchTaskFailureList(ctx, &fakeTask) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, failures) task1_1, _ := db.FetchTask(ctx, authoredJob1.Tasks[1].UUID) @@ -642,7 +737,7 @@ func TestFetchTaskFailureList(t *testing.T) { // Test without failures. failures, err = db.FetchTaskFailureList(ctx, task1_1) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, failures) worker1 := createWorker(ctx, t, db) @@ -655,7 +750,7 @@ func TestFetchTaskFailureList(t *testing.T) { // Fetch one task's failure list. 
failures, err = db.FetchTaskFailureList(ctx, task1_1) - assert.NoError(t, err) + require.NoError(t, err) if assert.Len(t, failures, 2) { assert.Equal(t, worker1.UUID, failures[0].UUID) @@ -764,17 +859,12 @@ func createTestAuthoredJob(jobID string, tasks ...job_compilers.AuthoredTask) jo func persistAuthoredJob(t *testing.T, ctx context.Context, db *DB, authoredJob job_compilers.AuthoredJob) *Job { err := db.StoreAuthoredJob(ctx, authoredJob) - if err != nil { - t.Fatalf("error storing authored job in DB: %v", err) - } + require.NoError(t, err, "error storing authored job in DB") dbJob, err := db.FetchJob(ctx, authoredJob.JobID) - if err != nil { - t.Fatalf("error fetching job from DB: %v", err) - } - if dbJob == nil { - t.Fatalf("nil job obtained from DB but with no error!") - } + require.NoError(t, err, "error fetching job from DB") + require.NotNil(t, dbJob, "nil job obtained from DB but with no error!") + return dbJob } @@ -851,18 +941,11 @@ func createWorker(ctx context.Context, t *testing.T, db *DB, updaters ...func(*W } err := db.CreateWorker(ctx, &w) - if err != nil { - t.Fatalf("error creating worker: %v", err) - } - assert.NoError(t, err) + require.NoError(t, err, "error creating worker") fetchedWorker, err := db.FetchWorker(ctx, w.UUID) - if err != nil { - t.Fatalf("error fetching worker: %v", err) - } - if fetchedWorker == nil { - t.Fatal("fetched worker is nil, but no error returned") - } + require.NoError(t, err, "error fetching worker") + require.NotNil(t, fetchedWorker, "fetched worker is nil, but no error returned") return fetchedWorker } @@ -874,14 +957,10 @@ func createWorkerFrom(ctx context.Context, t *testing.T, db *DB, worker Worker) worker.Name += " (copy)" err := db.SaveWorker(ctx, &worker) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) dbWorker, err := db.FetchWorker(ctx, worker.UUID) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) return dbWorker } diff --git a/internal/manager/persistence/last_rendered_test.go b/internal/manager/persistence/last_rendered_test.go index c8cca7f4..adc10a5f 100644 --- a/internal/manager/persistence/last_rendered_test.go +++ b/internal/manager/persistence/last_rendered_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSetLastRendered(t *testing.T) { @@ -15,7 +16,7 @@ func TestSetLastRendered(t *testing.T) { authoredJob2 := authorTestJob("1295757b-e668-4c49-8b89-f73db8270e42", "just-a-job") job2 := persistAuthoredJob(t, ctx, db, authoredJob2) - assert.NoError(t, db.SetLastRendered(ctx, job1)) + require.NoError(t, db.SetLastRendered(ctx, job1)) { entries := []LastRendered{} db.gormDB.Model(&LastRendered{}).Scan(&entries) @@ -24,7 +25,7 @@ func TestSetLastRendered(t *testing.T) { } } - assert.NoError(t, db.SetLastRendered(ctx, job2)) + require.NoError(t, db.SetLastRendered(ctx, job2)) { entries := []LastRendered{} db.gormDB.Model(&LastRendered{}).Scan(&entries) @@ -41,18 +42,16 @@ func TestGetLastRenderedJobUUID(t *testing.T) { { // Test without any renders. lastUUID, err := db.GetLastRenderedJobUUID(ctx) - if assert.NoError(t, err, "absence of renders should not cause an error") { - assert.Empty(t, lastUUID) - } + require.NoError(t, err, "absence of renders should not cause an error") + assert.Empty(t, lastUUID) } { // Test with first render. 
- assert.NoError(t, db.SetLastRendered(ctx, job1)) + require.NoError(t, db.SetLastRendered(ctx, job1)) lastUUID, err := db.GetLastRenderedJobUUID(ctx) - if assert.NoError(t, err) { - assert.Equal(t, job1.UUID, lastUUID) - } + require.NoError(t, err) + assert.Equal(t, job1.UUID, lastUUID) } { @@ -60,10 +59,9 @@ func TestGetLastRenderedJobUUID(t *testing.T) { authoredJob2 := authorTestJob("1295757b-e668-4c49-8b89-f73db8270e42", "just-a-job") job2 := persistAuthoredJob(t, ctx, db, authoredJob2) - assert.NoError(t, db.SetLastRendered(ctx, job2)) + require.NoError(t, db.SetLastRendered(ctx, job2)) lastUUID, err := db.GetLastRenderedJobUUID(ctx) - if assert.NoError(t, err) { - assert.Equal(t, job2.UUID, lastUUID) - } + require.NoError(t, err) + assert.Equal(t, job2.UUID, lastUUID) } } diff --git a/internal/manager/persistence/migrations/0003_drop_worker_clusters.sql b/internal/manager/persistence/migrations/0003_drop_worker_clusters.sql new file mode 100644 index 00000000..4ae7b85a --- /dev/null +++ b/internal/manager/persistence/migrations/0003_drop_worker_clusters.sql @@ -0,0 +1,15 @@ +-- Drop tables that were in use in beta versions of Flamenco. These might exist +-- in developer databases, as well as databases of studios following the `main` +-- branch, such as Blender Studio. +-- +-- WARNING: this migration simply drops the tables. Their data is erased, and +-- cannot be brought back by rolling the migration back. +-- +-- +goose Up +DROP INDEX IF EXISTS `idx_worker_clusters_uuid`; +DROP TABLE IF EXISTS `worker_cluster_membership`; +DROP TABLE IF EXISTS `worker_clusters`; + +-- +goose Down +-- Do not recreate these tables, as no release of Flamenco ever used them. +-- Also their contents wouldn't be brought back anyway. diff --git a/internal/manager/persistence/migrations/0004_sqlc_compat_and_more_nonnull.sql b/internal/manager/persistence/migrations/0004_sqlc_compat_and_more_nonnull.sql new file mode 100644 index 00000000..02268f82 --- /dev/null +++ b/internal/manager/persistence/migrations/0004_sqlc_compat_and_more_nonnull.sql @@ -0,0 +1,496 @@ +-- GORM automigration wasn't smart, and thus the database had more nullable +-- columns than necessary. This migration makes columns that should never be +-- NULL actually NOT NULL. +-- +-- Since this migration recreates all tables in the database, this is now also +-- done in a way that makes the schema more compatible with sqlc (which is +-- mostly removing various quotes and backticks, and replacing char(N) with +-- varchar(N)). sqlc is the tool that'll replace GORM. 
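+--
+-- In miniature, the rebuild pattern applied below to every table is (a
+-- sketch over a hypothetical table `t`, not itself part of this migration):
+--
+--   CREATE TABLE temp_t (id integer NOT NULL, PRIMARY KEY (id));
+--   INSERT INTO temp_t SELECT id FROM t;
+--   DROP TABLE t;
+--   ALTER TABLE temp_t RENAME TO t;
+--
+-- This dance is needed because SQLite's ALTER TABLE cannot change a column's
+-- NULL-ability in place.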
+-- +-- +goose Up +CREATE TABLE temp_last_rendereds ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + job_id integer DEFAULT 0 NOT NULL, + PRIMARY KEY (id), + CONSTRAINT fk_last_rendereds_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE +); +INSERT INTO temp_last_rendereds + SELECT id, created_at, updated_at, job_id FROM last_rendereds; +DROP TABLE last_rendereds; +ALTER TABLE temp_last_rendereds RENAME TO last_rendereds; + +CREATE TABLE temp_task_dependencies ( + task_id integer NOT NULL, + dependency_id integer NOT NULL, + PRIMARY KEY (task_id, dependency_id), + CONSTRAINT fk_task_dependencies_task FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE, + CONSTRAINT fk_task_dependencies_dependencies FOREIGN KEY (dependency_id) REFERENCES tasks(id) ON DELETE CASCADE +); +INSERT INTO temp_task_dependencies SELECT task_id, dependency_id FROM task_dependencies; +DROP TABLE task_dependencies; +ALTER TABLE temp_task_dependencies RENAME TO task_dependencies; + +CREATE TABLE temp_task_failures ( + created_at datetime NOT NULL, + task_id integer NOT NULL, + worker_id integer NOT NULL, + PRIMARY KEY (task_id, worker_id), + CONSTRAINT fk_task_failures_task FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE, + CONSTRAINT fk_task_failures_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE +); +INSERT INTO temp_task_failures SELECT created_at, task_id, worker_id FROM task_failures; +DROP TABLE task_failures; +ALTER TABLE temp_task_failures RENAME TO task_failures; + +CREATE TABLE temp_worker_tag_membership ( + worker_tag_id integer NOT NULL, + worker_id integer NOT NULL, + PRIMARY KEY (worker_tag_id, worker_id), + CONSTRAINT fk_worker_tag_membership_worker_tag FOREIGN KEY (worker_tag_id) REFERENCES worker_tags(id) ON DELETE CASCADE, + CONSTRAINT fk_worker_tag_membership_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE +); +INSERT INTO temp_worker_tag_membership SELECT worker_tag_id, worker_id FROM worker_tag_membership; +DROP TABLE worker_tag_membership; +ALTER TABLE temp_worker_tag_membership RENAME TO worker_tag_membership; + +CREATE TABLE temp_worker_tags ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + uuid varchar(36) UNIQUE DEFAULT '' NOT NULL, + name varchar(64) UNIQUE DEFAULT '' NOT NULL, + description varchar(255) DEFAULT '' NOT NULL, + PRIMARY KEY (id) +); +INSERT INTO temp_worker_tags SELECT + id, + created_at, + updated_at, + uuid, + name, + description +FROM worker_tags; +DROP TABLE worker_tags; +ALTER TABLE temp_worker_tags RENAME TO worker_tags; + +CREATE TABLE temp_jobs ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + uuid varchar(36) UNIQUE DEFAULT '' NOT NULL, + name varchar(64) DEFAULT '' NOT NULL, + job_type varchar(32) DEFAULT '' NOT NULL, + priority smallint DEFAULT 0 NOT NULL, + status varchar(32) DEFAULT '' NOT NULL, + activity varchar(255) DEFAULT '' NOT NULL, + settings jsonb NOT NULL, + metadata jsonb NOT NULL, + delete_requested_at datetime, + storage_shaman_checkout_id varchar(255) DEFAULT '' NOT NULL, + worker_tag_id integer, + PRIMARY KEY (id), + CONSTRAINT fk_jobs_worker_tag FOREIGN KEY (worker_tag_id) REFERENCES worker_tags(id) ON DELETE SET NULL +); +INSERT INTO temp_jobs SELECT + id, + created_at, + updated_at, + uuid, + name, + job_type, + priority, + status, + activity, + settings, + metadata, + delete_requested_at, + storage_shaman_checkout_id, + worker_tag_id +FROM jobs; +DROP TABLE jobs; 
+ALTER TABLE temp_jobs RENAME TO jobs; + +CREATE TABLE temp_workers ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + uuid varchar(36) UNIQUE DEFAULT '' NOT NULL, + secret varchar(255) DEFAULT '' NOT NULL, + name varchar(64) DEFAULT '' NOT NULL, + address varchar(39) DEFAULT '' NOT NULL, + platform varchar(16) DEFAULT '' NOT NULL, + software varchar(32) DEFAULT '' NOT NULL, + status varchar(16) DEFAULT '' NOT NULL, + last_seen_at datetime, + status_requested varchar(16) DEFAULT '' NOT NULL, + lazy_status_request smallint DEFAULT false NOT NULL, + supported_task_types varchar(255) DEFAULT '' NOT NULL, + deleted_at datetime, + can_restart smallint DEFAULT false NOT NULL, + PRIMARY KEY (id) +); +UPDATE workers SET supported_task_types = '' where supported_task_types is NULL; +INSERT INTO temp_workers SELECT + id, + created_at, + updated_at, + uuid, + secret, + name, + address, + platform, + software, + status, + last_seen_at, + status_requested, + lazy_status_request, + supported_task_types, + deleted_at, + can_restart +FROM workers; +DROP TABLE workers; +ALTER TABLE temp_workers RENAME TO workers; + +CREATE TABLE temp_job_blocks ( + id integer NOT NULL, + created_at datetime NOT NULL, + job_id integer DEFAULT 0 NOT NULL, + worker_id integer DEFAULT 0 NOT NULL, + task_type text NOT NULL, + PRIMARY KEY (id), + CONSTRAINT fk_job_blocks_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE, + CONSTRAINT fk_job_blocks_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE +); +INSERT INTO temp_job_blocks SELECT + id, + created_at, + job_id, + worker_id, + task_type +FROM job_blocks; +DROP TABLE job_blocks; +ALTER TABLE temp_job_blocks RENAME TO job_blocks; + +CREATE TABLE temp_sleep_schedules ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + worker_id integer UNIQUE DEFAULT 0 NOT NULL, + is_active numeric DEFAULT false NOT NULL, + days_of_week text DEFAULT '' NOT NULL, + start_time text DEFAULT '' NOT NULL, + end_time text DEFAULT '' NOT NULL, + next_check datetime, + PRIMARY KEY (id), + CONSTRAINT fk_sleep_schedules_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE +); +INSERT INTO temp_sleep_schedules SELECT + id, + created_at, + updated_at, + worker_id, + is_active, + days_of_week, + start_time, + end_time, + next_check +FROM sleep_schedules; +DROP TABLE sleep_schedules; +ALTER TABLE temp_sleep_schedules RENAME TO sleep_schedules; + +CREATE TABLE temp_tasks ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + uuid varchar(36) UNIQUE DEFAULT '' NOT NULL, + name varchar(64) DEFAULT '' NOT NULL, + type varchar(32) DEFAULT '' NOT NULL, + job_id integer DEFAULT 0 NOT NULL, + priority smallint DEFAULT 50 NOT NULL, + status varchar(16) DEFAULT '' NOT NULL, + worker_id integer, + last_touched_at datetime, + commands jsonb NOT NULL, + activity varchar(255) DEFAULT '' NOT NULL, + PRIMARY KEY (id), + CONSTRAINT fk_tasks_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE, + CONSTRAINT fk_tasks_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE SET NULL +); +INSERT INTO temp_tasks SELECT + id, + created_at, + updated_at, + uuid, + name, + type, + job_id, + priority, + status, + worker_id, + last_touched_at, + commands, + activity +FROM tasks; +DROP TABLE tasks; +ALTER TABLE temp_tasks RENAME TO tasks; + +-- Recreate the indices on the new tables. 
+CREATE INDEX idx_worker_tags_uuid ON worker_tags(uuid); +CREATE INDEX idx_jobs_uuid ON jobs(uuid); +CREATE INDEX idx_workers_address ON workers(address); +CREATE INDEX idx_workers_last_seen_at ON workers(last_seen_at); +CREATE INDEX idx_workers_deleted_at ON workers(deleted_at); +CREATE INDEX idx_workers_uuid ON workers(uuid); +CREATE UNIQUE INDEX job_worker_tasktype ON job_blocks(job_id, worker_id, task_type); +CREATE INDEX idx_sleep_schedules_is_active ON sleep_schedules(is_active); +CREATE INDEX idx_sleep_schedules_worker_id ON sleep_schedules(worker_id); +CREATE INDEX idx_tasks_uuid ON tasks(uuid); +CREATE INDEX idx_tasks_last_touched_at ON tasks(last_touched_at); + +-- +goose Down + +CREATE TABLE `temp_last_rendereds` ( + `id` integer, + `created_at` datetime, + `updated_at` datetime, + `job_id` integer DEFAULT 0, + PRIMARY KEY (`id`), + CONSTRAINT `fk_last_rendereds_job` FOREIGN KEY (`job_id`) REFERENCES `jobs`(`id`) ON DELETE CASCADE +); +INSERT INTO temp_last_rendereds SELECT + id, + created_at, + updated_at, + job_id +FROM last_rendereds; +DROP TABLE last_rendereds; +ALTER TABLE temp_last_rendereds RENAME TO `last_rendereds`; + +CREATE TABLE `temp_task_dependencies` ( + `task_id` integer, + `dependency_id` integer, + PRIMARY KEY (`task_id`, `dependency_id`), + CONSTRAINT `fk_task_dependencies_task` FOREIGN KEY (`task_id`) REFERENCES `tasks`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_task_dependencies_dependencies` FOREIGN KEY (`dependency_id`) REFERENCES `tasks`(`id`) ON DELETE CASCADE +); +INSERT INTO temp_task_dependencies SELECT task_id, dependency_id FROM task_dependencies; +DROP TABLE task_dependencies; +ALTER TABLE temp_task_dependencies RENAME TO `task_dependencies`; + +CREATE TABLE `temp_task_failures` ( + `created_at` datetime, + `task_id` integer, + `worker_id` integer, + PRIMARY KEY (`task_id`, `worker_id`), + CONSTRAINT `fk_task_failures_task` FOREIGN KEY (`task_id`) REFERENCES `tasks`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_task_failures_worker` FOREIGN KEY (`worker_id`) REFERENCES `workers`(`id`) ON DELETE CASCADE +); +INSERT INTO temp_task_failures SELECT created_at, task_id, worker_id FROM task_failures; +DROP TABLE task_failures; +ALTER TABLE temp_task_failures RENAME TO `task_failures`; + +CREATE TABLE `temp_worker_tag_membership` ( + `worker_tag_id` integer, + `worker_id` integer, + PRIMARY KEY (`worker_tag_id`, `worker_id`), + CONSTRAINT `fk_worker_tag_membership_worker_tag` FOREIGN KEY (`worker_tag_id`) REFERENCES `worker_tags`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_worker_tag_membership_worker` FOREIGN KEY (`worker_id`) REFERENCES `workers`(`id`) ON DELETE CASCADE +); +INSERT INTO temp_worker_tag_membership SELECT worker_tag_id, worker_id FROM worker_tag_membership; +DROP TABLE worker_tag_membership; +ALTER TABLE temp_worker_tag_membership RENAME TO `worker_tag_membership`; + +CREATE TABLE "temp_worker_tags" ( + `id` integer, + `created_at` datetime, + `updated_at` datetime, + `uuid` char(36) UNIQUE DEFAULT "", + `name` varchar(64) UNIQUE DEFAULT "", + `description` varchar(255) DEFAULT "", + PRIMARY KEY (`id`) +); +INSERT INTO temp_worker_tags SELECT + id, + created_at, + updated_at, + uuid, + name, + description +FROM worker_tags; +DROP TABLE worker_tags; +ALTER TABLE temp_worker_tags RENAME TO `worker_tags`; + +CREATE TABLE "temp_jobs" ( + `id` integer, + `created_at` datetime, + `updated_at` datetime, + `uuid` char(36) UNIQUE DEFAULT "", + `name` varchar(64) DEFAULT "", + `job_type` varchar(32) DEFAULT "", + `priority` smallint DEFAULT 0, + `status` 
varchar(32) DEFAULT "", + `activity` varchar(255) DEFAULT "", + `settings` jsonb, + `metadata` jsonb, + `delete_requested_at` datetime, + `storage_shaman_checkout_id` varchar(255) DEFAULT "", + `worker_tag_id` integer, + PRIMARY KEY(`id`), + CONSTRAINT `fk_jobs_worker_tag` FOREIGN KEY(`worker_tag_id`) REFERENCES `worker_tags`(`id`) ON DELETE SET NULL +); +INSERT INTO temp_jobs SELECT + id, + created_at, + updated_at, + uuid, + name, + job_type, + priority, + status, + activity, + settings, + metadata, + delete_requested_at, + storage_shaman_checkout_id, + worker_tag_id +FROM jobs; +DROP TABLE jobs; +ALTER TABLE temp_jobs RENAME TO `jobs`; + +CREATE TABLE "temp_workers" ( + `id` integer, + `created_at` datetime, + `updated_at` datetime, + `deleted_at` datetime, + `uuid` char(36) UNIQUE DEFAULT "", + `secret` varchar(255) DEFAULT "", + `name` varchar(64) DEFAULT "", + `address` varchar(39) DEFAULT "", + `platform` varchar(16) DEFAULT "", + `software` varchar(32) DEFAULT "", + `status` varchar(16) DEFAULT "", + `last_seen_at` datetime, + `status_requested` varchar(16) DEFAULT "", + `lazy_status_request` smallint DEFAULT false, + `supported_task_types` varchar(255) DEFAULT "", + `can_restart` smallint DEFAULT false, + PRIMARY KEY (`id`) +); +INSERT INTO temp_workers SELECT + id, + created_at, + updated_at, + deleted_at, + uuid, + secret, + name, + address, + platform, + software, + status, + last_seen_at, + status_requested, + lazy_status_request, + supported_task_types, + can_restart +FROM workers; +DROP TABLE workers; +ALTER TABLE temp_workers RENAME TO `workers`; + +CREATE TABLE "temp_job_blocks" ( + `id` integer, + `created_at` datetime, + `job_id` integer DEFAULT 0, + `worker_id` integer DEFAULT 0, + `task_type` text, + PRIMARY KEY (`id`), + CONSTRAINT `fk_job_blocks_job` FOREIGN KEY (`job_id`) REFERENCES `jobs`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_job_blocks_worker` FOREIGN KEY (`worker_id`) REFERENCES `workers`(`id`) ON DELETE CASCADE +); +INSERT INTO temp_job_blocks SELECT + id, + created_at, + job_id, + worker_id, + task_type +FROM job_blocks; +DROP TABLE job_blocks; +ALTER TABLE temp_job_blocks RENAME TO `job_blocks`; + +CREATE TABLE "temp_sleep_schedules" ( + `id` integer, + `created_at` datetime, + `updated_at` datetime, + `worker_id` integer UNIQUE DEFAULT 0, + `is_active` numeric DEFAULT false, + `days_of_week` text DEFAULT "", + `start_time` text DEFAULT "", + `end_time` text DEFAULT "", + `next_check` datetime, + PRIMARY KEY (`id`), + CONSTRAINT `fk_sleep_schedules_worker` FOREIGN KEY (`worker_id`) REFERENCES `workers`(`id`) ON DELETE CASCADE +); +INSERT INTO temp_sleep_schedules SELECT + id, + created_at, + updated_at, + worker_id, + is_active, + days_of_week, + start_time, + end_time, + next_check +FROM sleep_schedules; +DROP TABLE sleep_schedules; +ALTER TABLE temp_sleep_schedules RENAME TO `sleep_schedules`; + +CREATE TABLE "temp_tasks" ( + `id` integer, + `created_at` datetime, + `updated_at` datetime, + `uuid` char(36) UNIQUE DEFAULT "", + `name` varchar(64) DEFAULT "", + `type` varchar(32) DEFAULT "", + `job_id` integer DEFAULT 0, + `priority` smallint DEFAULT 50, + `status` varchar(16) DEFAULT "", + `worker_id` integer, + `last_touched_at` datetime, + `commands` jsonb, + `activity` varchar(255) DEFAULT "", + PRIMARY KEY (`id`), + CONSTRAINT `fk_tasks_job` FOREIGN KEY (`job_id`) REFERENCES `jobs`(`id`) ON DELETE CASCADE, + CONSTRAINT `fk_tasks_worker` FOREIGN KEY (`worker_id`) REFERENCES `workers`(`id`) ON DELETE + SET NULL +); +INSERT INTO temp_tasks SELECT + id, + 
created_at, + updated_at, + uuid, + name, + type, + job_id, + priority, + status, + worker_id, + last_touched_at, + commands, + activity +FROM tasks; +DROP TABLE tasks; +ALTER TABLE temp_tasks RENAME TO `tasks`; + +CREATE INDEX `idx_worker_tags_uuid` ON `worker_tags`(`uuid`); +CREATE INDEX `idx_jobs_uuid` ON `jobs`(`uuid`); +CREATE INDEX `idx_workers_address` ON `workers`(`address`); +CREATE INDEX `idx_workers_last_seen_at` ON `workers`(`last_seen_at`); +CREATE INDEX `idx_workers_deleted_at` ON `workers`(`deleted_at`); +CREATE INDEX `idx_workers_uuid` ON `workers`(`uuid`); +CREATE UNIQUE INDEX `job_worker_tasktype` ON `job_blocks`(`job_id`, `worker_id`, `task_type`); +CREATE INDEX `idx_sleep_schedules_is_active` ON `sleep_schedules`(`is_active`); +CREATE INDEX `idx_sleep_schedules_worker_id` ON `sleep_schedules`(`worker_id`); +CREATE INDEX `idx_tasks_uuid` ON `tasks`(`uuid`); +CREATE INDEX `idx_tasks_last_touched_at` ON `tasks`(`last_touched_at`); diff --git a/internal/manager/persistence/migrations/README.md b/internal/manager/persistence/migrations/README.md index db9df31e..241ba8ac 100644 --- a/internal/manager/persistence/migrations/README.md +++ b/internal/manager/persistence/migrations/README.md @@ -15,7 +15,11 @@ itself. This means you can replace a table like this, without `ON DELETE` effects running. ```sql -INSERT INTO `temp_table` SELECT * FROM `actual_table`; +INSERT INTO `temp_table` SELECT field1, field2, etc FROM `actual_table`; DROP TABLE `actual_table`; ALTER TABLE `temp_table` RENAME TO `actual_table`; ``` + +Note that the `SELECT` clause lists each field specifically. This is to ensure +that they are selected in the expected order. Without this, data can get +mangled. diff --git a/internal/manager/persistence/sqlc/db.go b/internal/manager/persistence/sqlc/db.go new file mode 100644 index 00000000..8ed64d13 --- /dev/null +++ b/internal/manager/persistence/sqlc/db.go @@ -0,0 +1,31 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.25.0 + +package sqlc + +import ( + "context" + "database/sql" +) + +type DBTX interface { + ExecContext(context.Context, string, ...interface{}) (sql.Result, error) + PrepareContext(context.Context, string) (*sql.Stmt, error) + QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error) + QueryRowContext(context.Context, string, ...interface{}) *sql.Row +} + +func New(db DBTX) *Queries { + return &Queries{db: db} +} + +type Queries struct { + db DBTX +} + +func (q *Queries) WithTx(tx *sql.Tx) *Queries { + return &Queries{ + db: tx, + } +} diff --git a/internal/manager/persistence/sqlc/models.go b/internal/manager/persistence/sqlc/models.go new file mode 100644 index 00000000..d57a5b64 --- /dev/null +++ b/internal/manager/persistence/sqlc/models.go @@ -0,0 +1,115 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.25.0 + +package sqlc + +import ( + "database/sql" + "encoding/json" + "time" +) + +type Job struct { + ID int64 + CreatedAt time.Time + UpdatedAt sql.NullTime + UUID string + Name string + JobType string + Priority int64 + Status string + Activity string + Settings json.RawMessage + Metadata json.RawMessage + DeleteRequestedAt sql.NullTime + StorageShamanCheckoutID string + WorkerTagID sql.NullInt64 +} + +type JobBlock struct { + ID int64 + CreatedAt time.Time + JobID int64 + WorkerID int64 + TaskType string +} + +type LastRendered struct { + ID int64 + CreatedAt time.Time + UpdatedAt sql.NullTime + JobID int64 +} + +type SleepSchedule struct { + ID int64 + CreatedAt time.Time + UpdatedAt sql.NullTime + WorkerID int64 + IsActive float64 + DaysOfWeek string + StartTime string + EndTime string + NextCheck sql.NullTime +} + +type Task struct { + ID int64 + CreatedAt time.Time + UpdatedAt sql.NullTime + UUID string + Name string + Type string + JobID int64 + Priority int64 + Status string + WorkerID sql.NullInt64 + LastTouchedAt sql.NullTime + Commands json.RawMessage + Activity string +} + +type TaskDependency struct { + TaskID int64 + DependencyID int64 +} + +type TaskFailure struct { + CreatedAt time.Time + TaskID int64 + WorkerID int64 +} + +type Worker struct { + ID int64 + CreatedAt time.Time + UpdatedAt sql.NullTime + UUID string + Secret string + Name string + Address string + Platform string + Software string + Status string + LastSeenAt sql.NullTime + StatusRequested string + LazyStatusRequest int64 + SupportedTaskTypes string + DeletedAt sql.NullTime + CanRestart int64 +} + +type WorkerTag struct { + ID int64 + CreatedAt time.Time + UpdatedAt sql.NullTime + UUID string + Name string + Description string +} + +type WorkerTagMembership struct { + WorkerTagID int64 + WorkerID int64 +} diff --git a/internal/manager/persistence/sqlc/query_jobs.sql b/internal/manager/persistence/sqlc/query_jobs.sql new file mode 100644 index 00000000..0f606454 --- /dev/null +++ b/internal/manager/persistence/sqlc/query_jobs.sql @@ -0,0 +1,57 @@ + +-- Jobs / Tasks queries +-- + +-- name: CreateJob :exec +INSERT INTO jobs ( + created_at, + uuid, + name, + job_type, + priority, + status, + activity, + settings, + metadata, + storage_shaman_checkout_id +) +VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ); + +-- name: FetchJob :one +SELECT * FROM jobs +WHERE uuid = ? 
LIMIT 1; + +-- name: DeleteJob :exec +DELETE FROM jobs WHERE uuid = ?; + +-- name: RequestJobDeletion :exec +UPDATE jobs SET + updated_at = @now, + delete_requested_at = @now +WHERE id = sqlc.arg('job_id'); + +-- name: FetchJobUUIDsUpdatedBefore :many +SELECT uuid FROM jobs WHERE updated_at <= @updated_at_max; + +-- name: RequestMassJobDeletion :exec +UPDATE jobs SET + updated_at = @now, + delete_requested_at = @now +WHERE uuid in (sqlc.slice('uuids')); + +-- name: FetchJobsDeletionRequested :many +SELECT uuid FROM jobs + WHERE delete_requested_at is not NULL + ORDER BY delete_requested_at; + +-- name: FetchJobsInStatus :many +SELECT * FROM jobs WHERE status IN (sqlc.slice('statuses')); + +-- name: SaveJobStatus :exec +UPDATE jobs SET updated_at=@now, status=@status, activity=@activity WHERE id=@id; + +-- name: SaveJobPriority :exec +UPDATE jobs SET updated_at=@now, priority=@priority WHERE id=@id; + +-- name: SaveJobStorageInfo :exec +UPDATE jobs SET storage_shaman_checkout_id=@storage_shaman_checkout_id WHERE id=@id; diff --git a/internal/manager/persistence/sqlc/query_jobs.sql.go b/internal/manager/persistence/sqlc/query_jobs.sql.go new file mode 100644 index 00000000..26032839 --- /dev/null +++ b/internal/manager/persistence/sqlc/query_jobs.sql.go @@ -0,0 +1,300 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.25.0 +// source: query_jobs.sql + +package sqlc + +import ( + "context" + "database/sql" + "encoding/json" + "strings" + "time" +) + +const createJob = `-- name: CreateJob :exec + +INSERT INTO jobs ( + created_at, + uuid, + name, + job_type, + priority, + status, + activity, + settings, + metadata, + storage_shaman_checkout_id +) +VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? ) +` + +type CreateJobParams struct { + CreatedAt time.Time + UUID string + Name string + JobType string + Priority int64 + Status string + Activity string + Settings json.RawMessage + Metadata json.RawMessage + StorageShamanCheckoutID string +} + +// Jobs / Tasks queries +func (q *Queries) CreateJob(ctx context.Context, arg CreateJobParams) error { + _, err := q.db.ExecContext(ctx, createJob, + arg.CreatedAt, + arg.UUID, + arg.Name, + arg.JobType, + arg.Priority, + arg.Status, + arg.Activity, + arg.Settings, + arg.Metadata, + arg.StorageShamanCheckoutID, + ) + return err +} + +const deleteJob = `-- name: DeleteJob :exec +DELETE FROM jobs WHERE uuid = ? +` + +func (q *Queries) DeleteJob(ctx context.Context, uuid string) error { + _, err := q.db.ExecContext(ctx, deleteJob, uuid) + return err +} + +const fetchJob = `-- name: FetchJob :one +SELECT id, created_at, updated_at, uuid, name, job_type, priority, status, activity, settings, metadata, delete_requested_at, storage_shaman_checkout_id, worker_tag_id FROM jobs +WHERE uuid = ? 
LIMIT 1 +` + +func (q *Queries) FetchJob(ctx context.Context, uuid string) (Job, error) { + row := q.db.QueryRowContext(ctx, fetchJob, uuid) + var i Job + err := row.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.UUID, + &i.Name, + &i.JobType, + &i.Priority, + &i.Status, + &i.Activity, + &i.Settings, + &i.Metadata, + &i.DeleteRequestedAt, + &i.StorageShamanCheckoutID, + &i.WorkerTagID, + ) + return i, err +} + +const fetchJobUUIDsUpdatedBefore = `-- name: FetchJobUUIDsUpdatedBefore :many +SELECT uuid FROM jobs WHERE updated_at <= ?1 +` + +func (q *Queries) FetchJobUUIDsUpdatedBefore(ctx context.Context, updatedAtMax sql.NullTime) ([]string, error) { + rows, err := q.db.QueryContext(ctx, fetchJobUUIDsUpdatedBefore, updatedAtMax) + if err != nil { + return nil, err + } + defer rows.Close() + var items []string + for rows.Next() { + var uuid string + if err := rows.Scan(&uuid); err != nil { + return nil, err + } + items = append(items, uuid) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchJobsDeletionRequested = `-- name: FetchJobsDeletionRequested :many +SELECT uuid FROM jobs + WHERE delete_requested_at is not NULL + ORDER BY delete_requested_at +` + +func (q *Queries) FetchJobsDeletionRequested(ctx context.Context) ([]string, error) { + rows, err := q.db.QueryContext(ctx, fetchJobsDeletionRequested) + if err != nil { + return nil, err + } + defer rows.Close() + var items []string + for rows.Next() { + var uuid string + if err := rows.Scan(&uuid); err != nil { + return nil, err + } + items = append(items, uuid) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const fetchJobsInStatus = `-- name: FetchJobsInStatus :many +SELECT id, created_at, updated_at, uuid, name, job_type, priority, status, activity, settings, metadata, delete_requested_at, storage_shaman_checkout_id, worker_tag_id FROM jobs WHERE status IN (/*SLICE:statuses*/?) +` + +func (q *Queries) FetchJobsInStatus(ctx context.Context, statuses []string) ([]Job, error) { + query := fetchJobsInStatus + var queryParams []interface{} + if len(statuses) > 0 { + for _, v := range statuses { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:statuses*/?", strings.Repeat(",?", len(statuses))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:statuses*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []Job + for rows.Next() { + var i Job + if err := rows.Scan( + &i.ID, + &i.CreatedAt, + &i.UpdatedAt, + &i.UUID, + &i.Name, + &i.JobType, + &i.Priority, + &i.Status, + &i.Activity, + &i.Settings, + &i.Metadata, + &i.DeleteRequestedAt, + &i.StorageShamanCheckoutID, + &i.WorkerTagID, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const requestJobDeletion = `-- name: RequestJobDeletion :exec +UPDATE jobs SET + updated_at = ?1, + delete_requested_at = ?1 +WHERE id = ?2 +` + +type RequestJobDeletionParams struct { + Now sql.NullTime + JobID int64 +} + +func (q *Queries) RequestJobDeletion(ctx context.Context, arg RequestJobDeletionParams) error { + _, err := q.db.ExecContext(ctx, requestJobDeletion, arg.Now, arg.JobID) + return err +} + +const requestMassJobDeletion = `-- name: RequestMassJobDeletion :exec +UPDATE jobs SET + updated_at = ?1, + delete_requested_at = ?1 +WHERE uuid in (/*SLICE:uuids*/?) +` + +type RequestMassJobDeletionParams struct { + Now sql.NullTime + UUIDs []string +} + +func (q *Queries) RequestMassJobDeletion(ctx context.Context, arg RequestMassJobDeletionParams) error { + query := requestMassJobDeletion + var queryParams []interface{} + queryParams = append(queryParams, arg.Now) + if len(arg.UUIDs) > 0 { + for _, v := range arg.UUIDs { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:uuids*/?", strings.Repeat(",?", len(arg.UUIDs))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:uuids*/?", "NULL", 1) + } + _, err := q.db.ExecContext(ctx, query, queryParams...) 
+ return err +} + +const saveJobPriority = `-- name: SaveJobPriority :exec +UPDATE jobs SET updated_at=?1, priority=?2 WHERE id=?3 +` + +type SaveJobPriorityParams struct { + Now sql.NullTime + Priority int64 + ID int64 +} + +func (q *Queries) SaveJobPriority(ctx context.Context, arg SaveJobPriorityParams) error { + _, err := q.db.ExecContext(ctx, saveJobPriority, arg.Now, arg.Priority, arg.ID) + return err +} + +const saveJobStatus = `-- name: SaveJobStatus :exec +UPDATE jobs SET updated_at=?1, status=?2, activity=?3 WHERE id=?4 +` + +type SaveJobStatusParams struct { + Now sql.NullTime + Status string + Activity string + ID int64 +} + +func (q *Queries) SaveJobStatus(ctx context.Context, arg SaveJobStatusParams) error { + _, err := q.db.ExecContext(ctx, saveJobStatus, + arg.Now, + arg.Status, + arg.Activity, + arg.ID, + ) + return err +} + +const saveJobStorageInfo = `-- name: SaveJobStorageInfo :exec +UPDATE jobs SET storage_shaman_checkout_id=?1 WHERE id=?2 +` + +type SaveJobStorageInfoParams struct { + StorageShamanCheckoutID string + ID int64 +} + +func (q *Queries) SaveJobStorageInfo(ctx context.Context, arg SaveJobStorageInfoParams) error { + _, err := q.db.ExecContext(ctx, saveJobStorageInfo, arg.StorageShamanCheckoutID, arg.ID) + return err +} diff --git a/internal/manager/persistence/sqlc/schema.sql b/internal/manager/persistence/sqlc/schema.sql new file mode 100644 index 00000000..916fe101 --- /dev/null +++ b/internal/manager/persistence/sqlc/schema.sql @@ -0,0 +1,128 @@ +CREATE TABLE job_blocks ( + id integer NOT NULL, + created_at datetime NOT NULL, + job_id integer DEFAULT 0 NOT NULL, + worker_id integer DEFAULT 0 NOT NULL, + task_type text NOT NULL, + PRIMARY KEY (id), + CONSTRAINT fk_job_blocks_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE, + CONSTRAINT fk_job_blocks_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE +); +CREATE TABLE jobs ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + uuid varchar(36) UNIQUE DEFAULT '' NOT NULL, + name varchar(64) DEFAULT '' NOT NULL, + job_type varchar(32) DEFAULT '' NOT NULL, + priority smallint DEFAULT 0 NOT NULL, + status varchar(32) DEFAULT '' NOT NULL, + activity varchar(255) DEFAULT '' NOT NULL, + settings jsonb NOT NULL, + metadata jsonb NOT NULL, + delete_requested_at datetime, + storage_shaman_checkout_id varchar(255) DEFAULT '' NOT NULL, + worker_tag_id integer, + PRIMARY KEY (id), + CONSTRAINT fk_jobs_worker_tag FOREIGN KEY (worker_tag_id) REFERENCES worker_tags(id) ON DELETE SET NULL +); +CREATE TABLE last_rendereds ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + job_id integer DEFAULT 0 NOT NULL, + PRIMARY KEY (id), + CONSTRAINT fk_last_rendereds_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE +); +CREATE TABLE sleep_schedules ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + worker_id integer UNIQUE DEFAULT 0 NOT NULL, + is_active numeric DEFAULT false NOT NULL, + days_of_week text DEFAULT '' NOT NULL, + start_time text DEFAULT '' NOT NULL, + end_time text DEFAULT '' NOT NULL, + next_check datetime, + PRIMARY KEY (id), + CONSTRAINT fk_sleep_schedules_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE +); +CREATE TABLE task_dependencies ( + task_id integer NOT NULL, + dependency_id integer NOT NULL, + PRIMARY KEY (task_id, dependency_id), + CONSTRAINT fk_task_dependencies_task FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE, + CONSTRAINT 
fk_task_dependencies_dependencies FOREIGN KEY (dependency_id) REFERENCES tasks(id) ON DELETE CASCADE +); +CREATE TABLE task_failures ( + created_at datetime NOT NULL, + task_id integer NOT NULL, + worker_id integer NOT NULL, + PRIMARY KEY (task_id, worker_id), + CONSTRAINT fk_task_failures_task FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE, + CONSTRAINT fk_task_failures_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE +); +CREATE TABLE tasks ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + uuid varchar(36) UNIQUE DEFAULT '' NOT NULL, + name varchar(64) DEFAULT '' NOT NULL, + type varchar(32) DEFAULT '' NOT NULL, + job_id integer DEFAULT 0 NOT NULL, + priority smallint DEFAULT 50 NOT NULL, + status varchar(16) DEFAULT '' NOT NULL, + worker_id integer, + last_touched_at datetime, + commands jsonb NOT NULL, + activity varchar(255) DEFAULT '' NOT NULL, + PRIMARY KEY (id), + CONSTRAINT fk_tasks_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE, + CONSTRAINT fk_tasks_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE SET NULL +); +CREATE TABLE worker_tag_membership ( + worker_tag_id integer NOT NULL, + worker_id integer NOT NULL, + PRIMARY KEY (worker_tag_id, worker_id), + CONSTRAINT fk_worker_tag_membership_worker_tag FOREIGN KEY (worker_tag_id) REFERENCES worker_tags(id) ON DELETE CASCADE, + CONSTRAINT fk_worker_tag_membership_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE +); +CREATE TABLE worker_tags ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + uuid varchar(36) UNIQUE DEFAULT '' NOT NULL, + name varchar(64) UNIQUE DEFAULT '' NOT NULL, + description varchar(255) DEFAULT '' NOT NULL, + PRIMARY KEY (id) +); +CREATE TABLE workers ( + id integer NOT NULL, + created_at datetime NOT NULL, + updated_at datetime, + uuid varchar(36) UNIQUE DEFAULT '' NOT NULL, + secret varchar(255) DEFAULT '' NOT NULL, + name varchar(64) DEFAULT '' NOT NULL, + address varchar(39) DEFAULT '' NOT NULL, + platform varchar(16) DEFAULT '' NOT NULL, + software varchar(32) DEFAULT '' NOT NULL, + status varchar(16) DEFAULT '' NOT NULL, + last_seen_at datetime, + status_requested varchar(16) DEFAULT '' NOT NULL, + lazy_status_request smallint DEFAULT false NOT NULL, + supported_task_types varchar(255) DEFAULT '' NOT NULL, + deleted_at datetime, + can_restart smallint DEFAULT false NOT NULL, + PRIMARY KEY (id) +); +CREATE INDEX idx_jobs_uuid ON jobs(uuid); +CREATE INDEX idx_sleep_schedules_is_active ON sleep_schedules(is_active); +CREATE INDEX idx_sleep_schedules_worker_id ON sleep_schedules(worker_id); +CREATE INDEX idx_tasks_last_touched_at ON tasks(last_touched_at); +CREATE INDEX idx_tasks_uuid ON tasks(uuid); +CREATE INDEX idx_worker_tags_uuid ON worker_tags(uuid); +CREATE INDEX idx_workers_address ON workers(address); +CREATE INDEX idx_workers_deleted_at ON workers(deleted_at); +CREATE INDEX idx_workers_last_seen_at ON workers(last_seen_at); +CREATE INDEX idx_workers_uuid ON workers(uuid); +CREATE UNIQUE INDEX job_worker_tasktype ON job_blocks(job_id, worker_id, task_type); diff --git a/internal/manager/persistence/task_scheduler_test.go b/internal/manager/persistence/task_scheduler_test.go index 289524a1..640b867f 100644 --- a/internal/manager/persistence/task_scheduler_test.go +++ b/internal/manager/persistence/task_scheduler_test.go @@ -26,7 +26,7 @@ func TestNoTasks(t *testing.T) { task, err := db.ScheduleTask(ctx, &w) assert.Nil(t, task) - assert.NoError(t, err) 
+	require.NoError(t, err)
 }
 
 func TestOneJobOneTask(t *testing.T) {
@@ -40,7 +40,7 @@ func TestOneJobOneTask(t *testing.T) {
 	job := constructTestJob(ctx, t, db, atj)
 
 	task, err := db.ScheduleTask(ctx, &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	// Check the returned task.
 	if task == nil {
@@ -55,7 +55,7 @@ func TestOneJobOneTask(t *testing.T) {
 	// Check the task in the database.
 	now := db.gormDB.NowFunc()
 	dbTask, err := db.FetchTask(context.Background(), authTask.UUID)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if dbTask == nil {
 		t.Fatal("task cannot be fetched from database")
 	}
@@ -84,7 +84,7 @@ func TestOneJobThreeTasksByPrio(t *testing.T) {
 	job := constructTestJob(ctx, t, db, atj)
 
 	task, err := db.ScheduleTask(ctx, &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if task == nil {
 		t.Fatal("task is nil")
 	}
@@ -115,7 +115,7 @@ func TestOneJobThreeTasksByDependencies(t *testing.T) {
 	job := constructTestJob(ctx, t, db, atj)
 
 	task, err := db.ScheduleTask(ctx, &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if task == nil {
 		t.Fatal("task is nil")
 	}
@@ -155,7 +155,7 @@ func TestTwoJobsThreeTasks(t *testing.T) {
 	job2 := constructTestJob(ctx, t, db, atj2)
 
 	task, err := db.ScheduleTask(ctx, &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if task == nil {
 		t.Fatal("task is nil")
 	}
@@ -183,7 +183,7 @@ func TestSomeButNotAllDependenciesCompleted(t *testing.T) {
 	w := linuxWorker(t, db)
 
 	task, err := db.ScheduleTask(ctx, &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if task != nil {
 		t.Fatalf("there should not be any task assigned, but received %q", task.Name)
 	}
@@ -210,14 +210,14 @@ func TestAlreadyAssigned(t *testing.T) {
 	// This should make it get returned by the scheduler, even when there is
 	// another, higher-prio task to be done.
 	dbTask3, err := db.FetchTask(ctx, att3.UUID)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	dbTask3.WorkerID = &w.ID
 	dbTask3.Status = api.TaskStatusActive
 	err = db.SaveTask(ctx, dbTask3)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	task, err := db.ScheduleTask(ctx, &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if task == nil {
 		t.Fatal("task is nil")
 	}
@@ -245,14 +245,14 @@ func TestAssignedToOtherWorker(t *testing.T) {
 	// Assign the high-prio task to the other worker. Because the task is queued,
 	// it shouldn't matter which worker it's assigned to.
 	dbTask2, err := db.FetchTask(ctx, att2.UUID)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	dbTask2.WorkerID = &w2.ID
 	dbTask2.Status = api.TaskStatusQueued
 	err = db.SaveTask(ctx, dbTask2)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 
 	task, err := db.ScheduleTask(ctx, &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if task == nil {
 		t.Fatal("task is nil")
 	}
@@ -277,14 +277,14 @@ func TestPreviouslyFailed(t *testing.T) {
 
-	// Mimick that this worker already failed the first task.
+	// Mimic that this worker already failed the first task.
 	tasks, err := db.FetchTasksOfJob(ctx, job)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	numFailed, err := db.AddWorkerToTaskFailedList(ctx, tasks[0], &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	assert.Equal(t, 1, numFailed)
 
 	// This should assign the 2nd task.
 	task, err := db.ScheduleTask(ctx, &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	if task == nil {
 		t.Fatal("task is nil")
 	}
@@ -391,11 +391,11 @@ func TestBlocklisted(t *testing.T) {
-	// Mimick that this worker was already blocked for 'blender' tasks of this job.
+	// Mimic that this worker was already blocked for 'blender' tasks of this job.
err := db.AddWorkerToJobBlocklist(ctx, job, &w, "blender") - assert.NoError(t, err) + require.NoError(t, err) // This should assign the 2nd task. task, err := db.ScheduleTask(ctx, &w) - assert.NoError(t, err) + require.NoError(t, err) if task == nil { t.Fatal("task is nil") } @@ -410,21 +410,15 @@ func constructTestJob( ctx context.Context, t *testing.T, db *DB, authoredJob job_compilers.AuthoredJob, ) *Job { err := db.StoreAuthoredJob(ctx, authoredJob) - if err != nil { - t.Fatalf("storing authored job: %v", err) - } + require.NoError(t, err, "storing authored job") dbJob, err := db.FetchJob(ctx, authoredJob.JobID) - if err != nil { - t.Fatalf("fetching authored job: %v", err) - } + require.NoError(t, err, "fetching authored job") // Queue the job. dbJob.Status = api.JobStatusQueued err = db.SaveJobStatus(ctx, dbJob) - if err != nil { - t.Fatalf("queueing job: %v", err) - } + require.NoError(t, err, "queueing job") return dbJob } @@ -457,16 +451,11 @@ func authorTestTask(name, taskType string, dependencies ...*job_compilers.Author func setTaskStatus(t *testing.T, db *DB, taskUUID string, status api.TaskStatus) { ctx := context.Background() task, err := db.FetchTask(ctx, taskUUID) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) task.Status = status - err = db.SaveTask(ctx, task) - if err != nil { - t.Fatal(err) - } + require.NoError(t, db.SaveTask(ctx, task)) } func linuxWorker(t *testing.T, db *DB, updaters ...func(worker *Worker)) Worker { @@ -483,10 +472,7 @@ func linuxWorker(t *testing.T, db *DB, updaters ...func(worker *Worker)) Worker } err := db.gormDB.Save(&w).Error - if err != nil { - t.Logf("cannot save Linux worker: %v", err) - t.FailNow() - } + require.NoError(t, err, "cannot save Linux worker") return w } @@ -501,10 +487,6 @@ func windowsWorker(t *testing.T, db *DB) Worker { } err := db.gormDB.Save(&w).Error - if err != nil { - t.Logf("cannot save Windows worker: %v", err) - t.FailNow() - } - + require.NoError(t, err, "cannot save Windows worker") return w } diff --git a/internal/manager/persistence/time_of_day_test.go b/internal/manager/persistence/time_of_day_test.go index 01a46876..f4b27f33 100644 --- a/internal/manager/persistence/time_of_day_test.go +++ b/internal/manager/persistence/time_of_day_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var emptyToD = TimeOfDay{timeOfDayNoValue, timeOfDayNoValue} @@ -60,53 +61,56 @@ func TestOnDate(t *testing.T) { } func TestValue(t *testing.T) { - // Test zero -> "00:00" - tod := TimeOfDay{} - if value, err := tod.Value(); assert.NoError(t, err) { + { // Test zero -> "00:00" + tod := TimeOfDay{} + value, err := tod.Value() + require.NoError(t, err) assert.Equal(t, "00:00", value) } - // Test 22:47 -> "22:47" - tod = TimeOfDay{22, 47} - if value, err := tod.Value(); assert.NoError(t, err) { + { // Test 22:47 -> "22:47" + tod := TimeOfDay{22, 47} + value, err := tod.Value() + require.NoError(t, err) assert.Equal(t, "22:47", value) } - // Test empty -> "" - tod = emptyToD - if value, err := tod.Value(); assert.NoError(t, err) { + { // Test empty -> "" + tod := emptyToD + value, err := tod.Value() + require.NoError(t, err) assert.Equal(t, "", value) } } func TestScan(t *testing.T) { - // Test zero -> empty - tod := TimeOfDay{} - if assert.NoError(t, tod.Scan("")) { + { // Test zero -> empty + tod := TimeOfDay{} + require.NoError(t, tod.Scan("")) assert.Equal(t, emptyToD, tod) } - // Test 22:47 -> empty - tod = TimeOfDay{22, 47} - if assert.NoError(t, 
tod.Scan("")) { + { // Test 22:47 -> empty + tod := TimeOfDay{22, 47} + require.NoError(t, tod.Scan("")) assert.Equal(t, emptyToD, tod) } - // Test 22:47 -> 12:34 - tod = TimeOfDay{22, 47} - if assert.NoError(t, tod.Scan("12:34")) { + { // Test 22:47 -> 12:34 + tod := TimeOfDay{22, 47} + require.NoError(t, tod.Scan("12:34")) assert.Equal(t, TimeOfDay{12, 34}, tod) } - // Test empty -> empty - tod = emptyToD - if assert.NoError(t, tod.Scan("")) { + { // Test empty -> empty + tod := emptyToD + require.NoError(t, tod.Scan("")) assert.Equal(t, emptyToD, tod) } - // Test empty -> 12:34 - tod = emptyToD - if assert.NoError(t, tod.Scan("12:34")) { + { // Test empty -> 12:34 + tod := emptyToD + require.NoError(t, tod.Scan("12:34")) assert.Equal(t, TimeOfDay{12, 34}, tod) } } diff --git a/internal/manager/persistence/timeout_test.go b/internal/manager/persistence/timeout_test.go index b2b2f95d..1ddb2435 100644 --- a/internal/manager/persistence/timeout_test.go +++ b/internal/manager/persistence/timeout_test.go @@ -5,6 +5,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/pkg/api" ) @@ -15,9 +16,7 @@ func TestFetchTimedOutTasks(t *testing.T) { defer close() tasks, err := db.FetchTasksOfJob(ctx, job) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) now := db.gormDB.NowFunc() deadline := now.Add(-5 * time.Minute) @@ -25,23 +24,23 @@ func TestFetchTimedOutTasks(t *testing.T) { // Mark the task as last touched before the deadline, i.e. old enough for a timeout. task := tasks[0] task.LastTouchedAt = deadline.Add(-1 * time.Minute) - assert.NoError(t, db.SaveTask(ctx, task)) + require.NoError(t, db.SaveTask(ctx, task)) w := createWorker(ctx, t, db) - assert.NoError(t, db.TaskAssignToWorker(ctx, task, w)) + require.NoError(t, db.TaskAssignToWorker(ctx, task, w)) // The task should still not be returned, as it's not in 'active' state. timedout, err := db.FetchTimedOutTasks(ctx, deadline) - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, timedout) // Mark as Active: task.Status = api.TaskStatusActive - assert.NoError(t, db.SaveTask(ctx, task)) + require.NoError(t, db.SaveTask(ctx, task)) // Now it should time out: timedout, err = db.FetchTimedOutTasks(ctx, deadline) - assert.NoError(t, err) + require.NoError(t, err) if assert.Len(t, timedout, 1) { // Other fields will be different, like the 'UpdatedAt' field -- this just // tests that the expected task is returned. 
@@ -92,15 +91,13 @@ func TestFetchTimedOutWorkers(t *testing.T) { workers := []*Worker{&worker0, &worker1, &worker2, &worker3, &worker4} for _, worker := range workers { err := db.CreateWorker(ctx, worker) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) } timedout, err := db.FetchTimedOutWorkers(ctx, timeoutDeadline) - if assert.NoError(t, err) && assert.Len(t, timedout, 3) { - assert.Equal(t, worker1.UUID, timedout[0].UUID) - assert.Equal(t, worker2.UUID, timedout[1].UUID) - assert.Equal(t, worker3.UUID, timedout[2].UUID) - } + require.NoError(t, err) + require.Len(t, timedout, 3) + assert.Equal(t, worker1.UUID, timedout[0].UUID) + assert.Equal(t, worker2.UUID, timedout[1].UUID) + assert.Equal(t, worker3.UUID, timedout[2].UUID) } diff --git a/internal/manager/persistence/worker_sleep_schedule_test.go b/internal/manager/persistence/worker_sleep_schedule_test.go index fdf83f20..038a6884 100644 --- a/internal/manager/persistence/worker_sleep_schedule_test.go +++ b/internal/manager/persistence/worker_sleep_schedule_test.go @@ -26,18 +26,16 @@ func TestFetchWorkerSleepSchedule(t *testing.T) { SupportedTaskTypes: "blender,ffmpeg,file-management", } err := db.CreateWorker(ctx, &linuxWorker) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) // Not an existing Worker. fetched, err := db.FetchWorkerSleepSchedule(ctx, "2cf6153a-3d4e-49f4-a5c0-1c9fc176e155") - assert.NoError(t, err, "non-existent worker should not cause an error") + require.NoError(t, err, "non-existent worker should not cause an error") assert.Nil(t, fetched) // No sleep schedule. fetched, err = db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID) - assert.NoError(t, err, "non-existent schedule should not cause an error") + require.NoError(t, err, "non-existent schedule should not cause an error") assert.Nil(t, fetched) // Create a sleep schedule. @@ -51,12 +49,10 @@ func TestFetchWorkerSleepSchedule(t *testing.T) { EndTime: TimeOfDay{9, 0}, } tx := db.gormDB.Create(&created) - if !assert.NoError(t, tx.Error) { - t.FailNow() - } + require.NoError(t, tx.Error) fetched, err = db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID) - assert.NoError(t, err) + require.NoError(t, err) assertEqualSleepSchedule(t, linuxWorker.ID, created, *fetched) } @@ -74,9 +70,7 @@ func TestFetchSleepScheduleWorker(t *testing.T) { SupportedTaskTypes: "blender,ffmpeg,file-management", } err := db.CreateWorker(ctx, &linuxWorker) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) // Create a sleep schedule. created := SleepSchedule{ @@ -89,16 +83,14 @@ func TestFetchSleepScheduleWorker(t *testing.T) { EndTime: TimeOfDay{9, 0}, } tx := db.gormDB.Create(&created) - if !assert.NoError(t, tx.Error) { - t.FailNow() - } + require.NoError(t, tx.Error) dbSchedule, err := db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID) - assert.NoError(t, err) + require.NoError(t, err) assert.Nil(t, dbSchedule.Worker, "worker should be nil when fetching schedule") err = db.FetchSleepScheduleWorker(ctx, dbSchedule) - assert.NoError(t, err) + require.NoError(t, err) if assert.NotNil(t, dbSchedule.Worker) { // Compare a few fields. If these are good, the correct worker has been fetched. 
assert.Equal(t, linuxWorker.ID, dbSchedule.Worker.ID) @@ -125,9 +117,7 @@ func TestSetWorkerSleepSchedule(t *testing.T) { SupportedTaskTypes: "blender,ffmpeg,file-management", } err := db.CreateWorker(ctx, &linuxWorker) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) schedule := SleepSchedule{ WorkerID: linuxWorker.ID, @@ -145,13 +135,9 @@ func TestSetWorkerSleepSchedule(t *testing.T) { // Create the sleep schedule. err = db.SetWorkerSleepSchedule(ctx, linuxWorker.UUID, &schedule) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) fetched, err := db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) assertEqualSleepSchedule(t, linuxWorker.ID, schedule, *fetched) // Overwrite the schedule with one that already has a database ID. @@ -161,13 +147,9 @@ func TestSetWorkerSleepSchedule(t *testing.T) { newSchedule.StartTime = TimeOfDay{2, 0} newSchedule.EndTime = TimeOfDay{6, 0} err = db.SetWorkerSleepSchedule(ctx, linuxWorker.UUID, &newSchedule) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) fetched, err = db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) assertEqualSleepSchedule(t, linuxWorker.ID, newSchedule, *fetched) // Overwrite the schedule with a freshly constructed one. @@ -181,13 +163,9 @@ func TestSetWorkerSleepSchedule(t *testing.T) { EndTime: TimeOfDay{15, 0}, } err = db.SetWorkerSleepSchedule(ctx, linuxWorker.UUID, &newerSchedule) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) fetched, err = db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) assertEqualSleepSchedule(t, linuxWorker.ID, newerSchedule, *fetched) // Clear the sleep schedule. 
@@ -201,13 +179,9 @@ func TestSetWorkerSleepSchedule(t *testing.T) { EndTime: emptyToD, } err = db.SetWorkerSleepSchedule(ctx, linuxWorker.UUID, &emptySchedule) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) fetched, err = db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) assertEqualSleepSchedule(t, linuxWorker.ID, emptySchedule, *fetched) } @@ -236,14 +210,10 @@ func TestSetWorkerSleepScheduleNextCheck(t *testing.T) { schedule.NextCheck = future err := db.SetWorkerSleepScheduleNextCheck(ctx, &schedule) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) fetched, err := db.FetchWorkerSleepSchedule(ctx, schedule.Worker.UUID) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) assertEqualSleepSchedule(t, schedule.Worker.ID, schedule, *fetched) } @@ -322,12 +292,13 @@ func TestFetchSleepSchedulesToCheck(t *testing.T) { } toCheck, err := db.FetchSleepSchedulesToCheck(ctx) - if assert.NoError(t, err) && assert.Len(t, toCheck, 2) { - assertEqualSleepSchedule(t, schedule0.Worker.ID, schedule0, *toCheck[0]) - assert.Nil(t, toCheck[0].Worker, "the Worker should NOT be fetched") - assertEqualSleepSchedule(t, schedule2.Worker.ID, schedule1, *toCheck[1]) - assert.Nil(t, toCheck[1].Worker, "the Worker should NOT be fetched") - } + require.NoError(t, err) + require.Len(t, toCheck, 2) + + assertEqualSleepSchedule(t, schedule0.Worker.ID, schedule0, *toCheck[0]) + assert.Nil(t, toCheck[0].Worker, "the Worker should NOT be fetched") + assertEqualSleepSchedule(t, schedule2.Worker.ID, schedule1, *toCheck[1]) + assert.Nil(t, toCheck[1].Worker, "the Worker should NOT be fetched") } func assertEqualSleepSchedule(t *testing.T, workerID uint, expect, actual SleepSchedule) { diff --git a/internal/manager/persistence/worker_tag.go b/internal/manager/persistence/worker_tag.go index 6e0c1506..ed318a81 100644 --- a/internal/manager/persistence/worker_tag.go +++ b/internal/manager/persistence/worker_tag.go @@ -62,6 +62,15 @@ func (db *DB) SaveWorkerTag(ctx context.Context, tag *WorkerTag) error { // DeleteWorkerTag deletes the given tag, after unassigning all workers from it. func (db *DB) DeleteWorkerTag(ctx context.Context, uuid string) error { + // As a safety measure, refuse to delete unless foreign key constraints are active. + fkEnabled, err := db.areForeignKeysEnabled() + if err != nil { + return fmt.Errorf("checking whether foreign keys are enabled: %w", err) + } + if !fkEnabled { + return ErrDeletingWithoutFK + } + tx := db.gormDB.WithContext(ctx). Where("uuid = ?", uuid). 
 		Delete(&WorkerTag{})
diff --git a/internal/manager/persistence/worker_tag_test.go b/internal/manager/persistence/worker_tag_test.go
index f4872cd6..f754cfa6 100644
--- a/internal/manager/persistence/worker_tag_test.go
+++ b/internal/manager/persistence/worker_tag_test.go
@@ -3,6 +3,7 @@ package persistence
 // SPDX-License-Identifier: GPL-3.0-or-later
 
 import (
+	"slices"
 	"testing"
 	"time"
 
@@ -50,17 +51,7 @@ func TestFetchDeleteTags(t *testing.T) {
 	}
 
 	require.NoError(t, f.db.CreateWorkerTag(f.ctx, &secondTag))
-
-	allTags, err := f.db.FetchWorkerTags(f.ctx)
-	require.NoError(t, err)
-
-	require.Len(t, allTags, 2)
-	var allTagIDs [2]string
-	for idx := range allTags {
-		allTagIDs[idx] = allTags[idx].UUID
-	}
-	assert.Contains(t, allTagIDs, f.tag.UUID)
-	assert.Contains(t, allTagIDs, secondTag.UUID)
+	assertTagsMatch(t, f, f.tag.UUID, secondTag.UUID)
 
 	has, err = f.db.HasWorkerTags(f.ctx)
 	require.NoError(t, err)
@@ -68,11 +59,7 @@ func TestFetchDeleteTags(t *testing.T) {
 
 	// Test deleting the 2nd tag.
 	require.NoError(t, f.db.DeleteWorkerTag(f.ctx, secondTag.UUID))
-
-	allTags, err = f.db.FetchWorkerTags(f.ctx)
-	require.NoError(t, err)
-	require.Len(t, allTags, 1)
-	assert.Equal(t, f.tag.UUID, allTags[0].UUID)
+	assertTagsMatch(t, f, f.tag.UUID)
 
 	// Test deleting the 1st tag.
 	require.NoError(t, f.db.DeleteWorkerTag(f.ctx, f.tag.UUID))
@@ -81,6 +68,31 @@ func TestFetchDeleteTags(t *testing.T) {
 	assert.False(t, has, "expecting HasWorkerTags to return false")
 }
 
+func TestDeleteTagsWithoutFK(t *testing.T) {
+	f := workerTestFixtures(t, 1*time.Second)
+	defer f.done()
+
+	// Single tag was created by fixture.
+	has, err := f.db.HasWorkerTags(f.ctx)
+	require.NoError(t, err)
+	assert.True(t, has, "expecting HasWorkerTags to return true")
+
+	secondTag := WorkerTag{
+		UUID:        uuid.New(),
+		Name:        "arbeiderskaartje",
+		Description: "Worker tag in Dutch",
+	}
+	require.NoError(t, f.db.CreateWorkerTag(f.ctx, &secondTag))
+
+	// Try deleting with foreign key constraints disabled.
+	require.NoError(t, f.db.pragmaForeignKeys(false))
+	err = f.db.DeleteWorkerTag(f.ctx, f.tag.UUID)
+	require.ErrorIs(t, err, ErrDeletingWithoutFK)
+
+	// Verify that the deletion did not happen.
+	assertTagsMatch(t, f, f.tag.UUID, secondTag.UUID)
+}
+
 func TestAssignUnassignWorkerTags(t *testing.T) {
 	f := workerTestFixtures(t, 1*time.Second)
 	defer f.done()
@@ -163,3 +175,19 @@ func TestDeleteWorkerTagWithWorkersAssigned(t *testing.T) {
 	require.NoError(t, err)
 	assert.Empty(t, w.Tags)
 }
+
+func assertTagsMatch(t *testing.T, f WorkerTestFixture, expectUUIDs ...string) {
+	allTags, err := f.db.FetchWorkerTags(f.ctx)
+	require.NoError(t, err)
+
+	require.Len(t, allTags, len(expectUUIDs))
+	var actualUUIDs []string
+	for idx := range allTags {
+		actualUUIDs = append(actualUUIDs, allTags[idx].UUID)
+	}
+
+	slices.Sort(expectUUIDs)
+	slices.Sort(actualUUIDs)
+
+	assert.Equal(t, expectUUIDs, actualUUIDs)
+}
diff --git a/internal/manager/persistence/workers.go b/internal/manager/persistence/workers.go
index a8e73d4e..a9637d54 100644
--- a/internal/manager/persistence/workers.go
+++ b/internal/manager/persistence/workers.go
@@ -8,6 +8,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/rs/zerolog/log"
 	"gorm.io/gorm"
 	"projects.blender.org/studio/flamenco/pkg/api"
 )
@@ -87,6 +88,15 @@ func (db *DB) FetchWorker(ctx context.Context, uuid string) (*Worker, error) {
 }
 
 func (db *DB) DeleteWorker(ctx context.Context, uuid string) error {
+	// As a safety measure, refuse to delete unless foreign key constraints are active.
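+	// (On SQLite this check presumably reads `PRAGMA foreign_keys`; the tests
+	// disable it via pragmaForeignKeys(false), see TestDeleteTagsWithoutFK
+	// above. Without active constraints, the ON DELETE CASCADE / SET NULL
+	// clauses on referencing tables would be skipped, leaving orphaned rows.)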
+	fkEnabled, err := db.areForeignKeysEnabled()
+	if err != nil {
+		return fmt.Errorf("checking whether foreign keys are enabled: %w", err)
+	}
+	if !fkEnabled {
+		return ErrDeletingWithoutFK
+	}
+
 	tx := db.gormDB.WithContext(ctx).
 		Where("uuid = ?", uuid).
 		Delete(&Worker{})
@@ -176,3 +186,33 @@ func (db *DB) WorkerSeen(ctx context.Context, w *Worker) error {
 	}
 	return nil
 }
+
+// WorkerStatusCount is a mapping from worker status to the number of workers in that status.
+type WorkerStatusCount map[api.WorkerStatus]int
+
+func (db *DB) SummarizeWorkerStatuses(ctx context.Context) (WorkerStatusCount, error) {
+	logger := log.Ctx(ctx)
+	logger.Debug().Msg("database: summarizing worker statuses")
+
+	// Query the database using a data structure that's easy to handle in GORM.
+	type queryResult struct {
+		Status      api.WorkerStatus
+		StatusCount int
+	}
+	result := []*queryResult{}
+	tx := db.gormDB.WithContext(ctx).Model(&Worker{}).
+		Select("status as Status", "count(id) as StatusCount").
+		Group("status").
+		Scan(&result)
+	if tx.Error != nil {
+		return nil, workerError(tx.Error, "summarizing worker statuses")
+	}
+
+	// Convert the array-of-structs to a map that's easier for the caller to handle.
+	statusCounts := make(WorkerStatusCount)
+	for _, singleStatusCount := range result {
+		statusCounts[singleStatusCount.Status] = singleStatusCount.StatusCount
+	}
+
+	return statusCounts, nil
+}
diff --git a/internal/manager/persistence/workers_test.go b/internal/manager/persistence/workers_test.go
index b58db75d..8bd6ab94 100644
--- a/internal/manager/persistence/workers_test.go
+++ b/internal/manager/persistence/workers_test.go
@@ -4,6 +4,7 @@ package persistence
 // SPDX-License-Identifier: GPL-3.0-or-later

 import (
+	"context"
 	"testing"
 	"time"

@@ -35,10 +36,10 @@ func TestCreateFetchWorker(t *testing.T) {
 	}

 	err = db.CreateWorker(ctx, &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)

 	fetchedWorker, err = db.FetchWorker(ctx, w.UUID)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	assert.NotNil(t, fetchedWorker)

 	// Test contents of fetched job
@@ -68,15 +69,12 @@ func TestFetchWorkerTask(t *testing.T) {
 	}

 	err := db.CreateWorker(ctx, &w)
-	if !assert.NoError(t, err) {
-		t.FailNow()
-	}
+	require.NoError(t, err)

 	{ // Test without any task assigned.
 		task, err := db.FetchWorkerTask(ctx, &w)
-		if assert.NoError(t, err) {
-			assert.Nil(t, task)
-		}
+		require.NoError(t, err)
+		assert.Nil(t, task)
 	}

 	// Create a job with tasks.
@@ -87,52 +85,51 @@
 	constructTestJob(ctx, t, db, atj)

 	assignedTask, err := db.ScheduleTask(ctx, &w)
-	assert.NoError(t, err)
+	require.NoError(t, err)

 	{ // Assigned task should be returned.
 		foundTask, err := db.FetchWorkerTask(ctx, &w)
-		if assert.NoError(t, err) && assert.NotNil(t, foundTask) {
-			assert.Equal(t, assignedTask.UUID, foundTask.UUID)
-			assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well")
-		}
+		require.NoError(t, err)
+		require.NotNil(t, foundTask)
+		assert.Equal(t, assignedTask.UUID, foundTask.UUID)
+		assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well")
 	}

 	// Set the task to 'completed'.
 	assignedTask.Status = api.TaskStatusCompleted
-	assert.NoError(t, db.SaveTaskStatus(ctx, assignedTask))
+	require.NoError(t, db.SaveTaskStatus(ctx, assignedTask))

 	{ // Completed-but-last-assigned task should be returned.
foundTask, err := db.FetchWorkerTask(ctx, &w) - if assert.NoError(t, err) && assert.NotNil(t, foundTask) { - assert.Equal(t, assignedTask.UUID, foundTask.UUID) - assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well") - } + require.NoError(t, err) + require.NotNil(t, foundTask) + assert.Equal(t, assignedTask.UUID, foundTask.UUID) + assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well") } // Assign another task. newlyAssignedTask, err := db.ScheduleTask(ctx, &w) - if !assert.NoError(t, err) || !assert.NotNil(t, newlyAssignedTask) { - t.FailNow() - } + require.NoError(t, err) + require.NotNil(t, newlyAssignedTask) { // Newly assigned task should be returned. foundTask, err := db.FetchWorkerTask(ctx, &w) - if assert.NoError(t, err) && assert.NotNil(t, foundTask) { - assert.Equal(t, newlyAssignedTask.UUID, foundTask.UUID) - assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well") - } + require.NoError(t, err) + require.NotNil(t, foundTask) + assert.Equal(t, newlyAssignedTask.UUID, foundTask.UUID) + assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well") } // Set the new task to 'completed'. newlyAssignedTask.Status = api.TaskStatusCompleted - assert.NoError(t, db.SaveTaskStatus(ctx, newlyAssignedTask)) + require.NoError(t, db.SaveTaskStatus(ctx, newlyAssignedTask)) { // Completed-but-last-assigned task should be returned. foundTask, err := db.FetchWorkerTask(ctx, &w) - if assert.NoError(t, err) && assert.NotNil(t, foundTask) { - assert.Equal(t, newlyAssignedTask.UUID, foundTask.UUID) - assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well") - } + require.NoError(t, err) + require.NotNil(t, foundTask) + assert.Equal(t, newlyAssignedTask.UUID, foundTask.UUID) + assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well") } } @@ -152,10 +149,10 @@ func TestSaveWorker(t *testing.T) { } err := db.CreateWorker(ctx, &w) - assert.NoError(t, err) + require.NoError(t, err) fetchedWorker, err := db.FetchWorker(ctx, w.UUID) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, fetchedWorker) // Update all updatable fields of the Worker @@ -169,23 +166,23 @@ func TestSaveWorker(t *testing.T) { // Saving only the status should just do that. err = db.SaveWorkerStatus(ctx, &updatedWorker) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "7 မှ 9", updatedWorker.Name, "Saving status should not touch the name") // Check saved worker fetchedWorker, err = db.FetchWorker(ctx, w.UUID) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, fetchedWorker) assert.Equal(t, updatedWorker.Status, fetchedWorker.Status, "new status should have been saved") assert.NotEqual(t, updatedWorker.Name, fetchedWorker.Name, "non-status fields should not have been updated") // Saving the entire worker should save everything. 
err = db.SaveWorker(ctx, &updatedWorker) - assert.NoError(t, err) + require.NoError(t, err) // Check saved worker fetchedWorker, err = db.FetchWorker(ctx, w.UUID) - assert.NoError(t, err) + require.NoError(t, err) assert.NotNil(t, fetchedWorker) assert.Equal(t, updatedWorker.Status, fetchedWorker.Status, "new status should have been saved") assert.Equal(t, updatedWorker.Name, fetchedWorker.Name, "non-status fields should also have been updated") @@ -198,10 +195,8 @@ func TestFetchWorkers(t *testing.T) { // No workers workers, err := db.FetchWorkers(ctx) - if !assert.NoError(t, err) { - t.Fatal("error fetching empty list of workers, no use in continuing the test") - } - assert.Empty(t, workers) + require.NoError(t, err) + require.Empty(t, workers) linuxWorker := Worker{ UUID: uuid.New(), @@ -215,12 +210,12 @@ func TestFetchWorkers(t *testing.T) { // One worker: err = db.CreateWorker(ctx, &linuxWorker) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, time.Now().UTC().Location(), linuxWorker.CreatedAt.Location(), "Timestamps should be using UTC timezone") workers, err = db.FetchWorkers(ctx) - assert.NoError(t, err) + require.NoError(t, err) if assert.Len(t, workers, 1) { // FIXME: this fails, because the fetched timestamps have nil location instead of UTC. // assert.Equal(t, time.Now().UTC().Location(), workers[0].CreatedAt.Location(), @@ -244,10 +239,10 @@ func TestFetchWorkers(t *testing.T) { SupportedTaskTypes: "blender,ffmpeg,file-management", } err = db.CreateWorker(ctx, &windowsWorker) - assert.NoError(t, err) + require.NoError(t, err) workers, err = db.FetchWorkers(ctx) - assert.NoError(t, err) + require.NoError(t, err) if assert.Len(t, workers, 2) { assert.Equal(t, linuxWorker.UUID, workers[0].UUID) assert.Equal(t, windowsWorker.UUID, workers[1].UUID) @@ -274,11 +269,11 @@ func TestDeleteWorker(t *testing.T) { Status: api.WorkerStatusOffline, } - assert.NoError(t, db.CreateWorker(ctx, &w1)) - assert.NoError(t, db.CreateWorker(ctx, &w2)) + require.NoError(t, db.CreateWorker(ctx, &w1)) + require.NoError(t, db.CreateWorker(ctx, &w2)) // Delete the 2nd worker, just to have a test with ID != 1. - assert.NoError(t, db.DeleteWorker(ctx, w2.UUID)) + require.NoError(t, db.DeleteWorker(ctx, w2.UUID)) // The deleted worker should now no longer be found. { @@ -290,7 +285,7 @@ func TestDeleteWorker(t *testing.T) { // The other worker should still exist. { fetchedWorker, err := db.FetchWorker(ctx, w1.UUID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, w1.UUID, fetchedWorker.UUID) } @@ -300,18 +295,18 @@ func TestDeleteWorker(t *testing.T) { taskUUID := authJob.Tasks[0].UUID { task, err := db.FetchTask(ctx, taskUUID) - assert.NoError(t, err) + require.NoError(t, err) task.Worker = &w1 - assert.NoError(t, db.SaveTask(ctx, task)) + require.NoError(t, db.SaveTask(ctx, task)) } // Delete the worker. - assert.NoError(t, db.DeleteWorker(ctx, w1.UUID)) + require.NoError(t, db.DeleteWorker(ctx, w1.UUID)) // Check the task after deletion of the Worker. { fetchedTask, err := db.FetchTask(ctx, taskUUID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, taskUUID, fetchedTask.UUID) assert.Equal(t, w1.UUID, fetchedTask.Worker.UUID) assert.NotZero(t, fetchedTask.Worker.DeletedAt.Time) @@ -319,6 +314,30 @@ func TestDeleteWorker(t *testing.T) { } } +func TestDeleteWorkerNoForeignKeys(t *testing.T) { + ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second) + defer cancel() + + // Create a Worker to delete. 
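+	// (pragmaForeignKeys, used below to switch constraint enforcement off, is
+	// not part of this diff; presumably it executes "PRAGMA foreign_keys = OFF"
+	// or "... = ON" on the underlying SQLite connection.)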
+	w1 := Worker{
+		UUID:   "fd97a35b-a5bd-44b4-ac2b-64c193ca877d",
+		Name:   "Worker 1",
+		Status: api.WorkerStatusAwake,
+	}
+	require.NoError(t, db.CreateWorker(ctx, &w1))
+
+	// Try deleting with foreign key constraints disabled.
+	require.NoError(t, db.pragmaForeignKeys(false))
+	require.ErrorIs(t, db.DeleteWorker(ctx, w1.UUID), ErrDeletingWithoutFK)
+
+	// The worker should still exist.
+	{
+		fetchedWorker, err := db.FetchWorker(ctx, w1.UUID)
+		require.NoError(t, err)
+		assert.Equal(t, w1.UUID, fetchedWorker.UUID)
+	}
+}
+
 func TestDeleteWorkerWithTagAssigned(t *testing.T) {
 	f := workerTestFixtures(t, 1*time.Second)
 	defer f.done()
@@ -334,3 +353,65 @@ func TestDeleteWorkerWithTagAssigned(t *testing.T) {
 	require.NoError(t, err)
 	assert.Empty(t, tag.Workers)
 }
+
+func TestSummarizeWorkerStatuses(t *testing.T) {
+	f := workerTestFixtures(t, 1*time.Second)
+	defer f.done()
+
+	// Test the summary.
+	summary, err := f.db.SummarizeWorkerStatuses(f.ctx)
+	require.NoError(t, err)
+	assert.Equal(t, WorkerStatusCount{api.WorkerStatusAwake: 1}, summary)
+
+	// Create more workers.
+	w1 := Worker{
+		UUID:   "fd97a35b-a5bd-44b4-ac2b-64c193ca877d",
+		Name:   "Worker 1",
+		Status: api.WorkerStatusAwake,
+	}
+	w2 := Worker{
+		UUID:   "82b2d176-cb8c-4bfa-8300-41c216d766df",
+		Name:   "Worker 2",
+		Status: api.WorkerStatusOffline,
+	}
+
+	require.NoError(t, f.db.CreateWorker(f.ctx, &w1))
+	require.NoError(t, f.db.CreateWorker(f.ctx, &w2))
+
+	// Test the summary.
+	summary, err = f.db.SummarizeWorkerStatuses(f.ctx)
+	require.NoError(t, err)
+	assert.Equal(t, WorkerStatusCount{
+		api.WorkerStatusAwake:   2,
+		api.WorkerStatusOffline: 1,
+	}, summary)
+
+	// Delete all workers.
+	require.NoError(t, f.db.DeleteWorker(f.ctx, f.worker.UUID))
+	require.NoError(t, f.db.DeleteWorker(f.ctx, w1.UUID))
+	require.NoError(t, f.db.DeleteWorker(f.ctx, w2.UUID))
+
+	// Test the summary.
+	summary, err = f.db.SummarizeWorkerStatuses(f.ctx)
+	require.NoError(t, err)
+	assert.Equal(t, WorkerStatusCount{}, summary)
+}
+
+// Check that a context timeout can be detected by inspecting the
+// returned error.
+func TestSummarizeWorkerStatusesTimeout(t *testing.T) {
+	f := workerTestFixtures(t, 1*time.Second)
+	defer f.done()
+
+	subCtx, subCtxCancel := context.WithTimeout(f.ctx, 1*time.Nanosecond)
+	defer subCtxCancel()
+
+	// Force a timeout of the context. And yes, even though a nanosecond is
+	// quite short, it is still necessary to wait.
+	time.Sleep(2 * time.Nanosecond)
+
+	// Test the summary.
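+	// (With the deadline already expired, GORM's WithContext(subCtx) makes the
+	// query below fail with an error that wraps context.DeadlineExceeded, which
+	// is what the ErrorIs assertion checks for.)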
+ summary, err := f.db.SummarizeWorkerStatuses(subCtx) + assert.ErrorIs(t, err, context.DeadlineExceeded) + assert.Nil(t, summary) +} diff --git a/internal/manager/sleep_scheduler/sleep_scheduler_test.go b/internal/manager/sleep_scheduler/sleep_scheduler_test.go index 409ca11a..e02d1d94 100644 --- a/internal/manager/sleep_scheduler/sleep_scheduler_test.go +++ b/internal/manager/sleep_scheduler/sleep_scheduler_test.go @@ -10,6 +10,7 @@ import ( "github.com/benbjohnson/clock" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/persistence" "projects.blender.org/studio/flamenco/internal/manager/sleep_scheduler/mocks" @@ -24,9 +25,8 @@ func TestFetchSchedule(t *testing.T) { mocks.persist.EXPECT().FetchWorkerSleepSchedule(ctx, workerUUID).Return(&dbSched, nil) sched, err := ss.FetchSchedule(ctx, workerUUID) - if assert.NoError(t, err) { - assert.Equal(t, &dbSched, sched) - } + require.NoError(t, err) + assert.Equal(t, &dbSched, sched) } func TestSetSchedule(t *testing.T) { @@ -59,7 +59,7 @@ func TestSetSchedule(t *testing.T) { mocks.broadcaster.EXPECT().BroadcastWorkerUpdate(gomock.Any()) err := ss.SetSchedule(ctx, workerUUID, &sched) - assert.NoError(t, err) + require.NoError(t, err) } func TestSetScheduleSwappedStartEnd(t *testing.T) { @@ -92,7 +92,7 @@ func TestSetScheduleSwappedStartEnd(t *testing.T) { mocks.persist.EXPECT().SetWorkerSleepSchedule(ctx, workerUUID, &expectSavedSchedule) err := ss.SetSchedule(ctx, workerUUID, &sched) - assert.NoError(t, err) + require.NoError(t, err) } // Test that a sleep check that happens at shutdown of the Manager doesn't cause any panics. @@ -157,9 +157,7 @@ func TestApplySleepSchedule(t *testing.T) { // Actually apply the sleep schedule. err := ss.ApplySleepSchedule(ctx, &testSchedule) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) // Check the SocketIO broadcast. if sioUpdate.Id != "" { @@ -220,9 +218,7 @@ func TestApplySleepScheduleNoStatusChange(t *testing.T) { // Apply the sleep schedule. This should not trigger any persistence or broadcasts. 
err := ss.ApplySleepSchedule(ctx, &testSchedule) - if !assert.NoError(t, err) { - t.FailNow() - } + require.NoError(t, err) } // Move the clock to the middle of the sleep schedule, so the schedule always @@ -271,9 +267,8 @@ func testFixtures(t *testing.T) (*SleepScheduler, TestMocks, context.Context) { mockedClock := clock.NewMock() mockedNow, err := time.Parse(time.RFC3339, "2022-06-07T11:14:47+02:00") - if err != nil { - panic(err) - } + require.NoError(t, err) + mockedClock.Set(mockedNow) if !assert.Equal(t, time.Tuesday.String(), mockedNow.Weekday().String()) { t.Fatal("tests assume 'now' is a Tuesday") diff --git a/internal/manager/task_logs/log_rotation_test.go b/internal/manager/task_logs/log_rotation_test.go index 972e8ae1..67ce7b67 100644 --- a/internal/manager/task_logs/log_rotation_test.go +++ b/internal/manager/task_logs/log_rotation_test.go @@ -12,11 +12,12 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func setUpTest(t *testing.T) string { temppath, err := ioutil.TempDir("", "testlogs") - assert.NoError(t, err) + require.NoError(t, err) return temppath } @@ -55,7 +56,7 @@ func TestNoFiles(t *testing.T) { filepath := filepath.Join(temppath, "nonexisting.txt") err := rotateLogFile(zerolog.Nop(), filepath) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, fileExists(filepath)) } @@ -67,7 +68,7 @@ func TestOneFile(t *testing.T) { fileTouch(filepath) err := rotateLogFile(zerolog.Nop(), filepath) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, fileExists(filepath)) assert.True(t, fileExists(filepath+".1")) } @@ -77,16 +78,16 @@ func TestMultipleFilesWithHoles(t *testing.T) { defer tearDownTest(temppath) filepath := filepath.Join(temppath, "existing.txt") - assert.NoError(t, ioutil.WriteFile(filepath, []byte("thefile"), 0666)) - assert.NoError(t, ioutil.WriteFile(filepath+".1", []byte("file .1"), 0666)) - assert.NoError(t, ioutil.WriteFile(filepath+".2", []byte("file .2"), 0666)) - assert.NoError(t, ioutil.WriteFile(filepath+".3", []byte("file .3"), 0666)) - assert.NoError(t, ioutil.WriteFile(filepath+".5", []byte("file .5"), 0666)) - assert.NoError(t, ioutil.WriteFile(filepath+".7", []byte("file .7"), 0666)) + require.NoError(t, ioutil.WriteFile(filepath, []byte("thefile"), 0666)) + require.NoError(t, ioutil.WriteFile(filepath+".1", []byte("file .1"), 0666)) + require.NoError(t, ioutil.WriteFile(filepath+".2", []byte("file .2"), 0666)) + require.NoError(t, ioutil.WriteFile(filepath+".3", []byte("file .3"), 0666)) + require.NoError(t, ioutil.WriteFile(filepath+".5", []byte("file .5"), 0666)) + require.NoError(t, ioutil.WriteFile(filepath+".7", []byte("file .7"), 0666)) err := rotateLogFile(zerolog.Nop(), filepath) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, fileExists(filepath)) assert.True(t, fileExists(filepath+".1")) assert.True(t, fileExists(filepath+".2")) @@ -100,7 +101,7 @@ func TestMultipleFilesWithHoles(t *testing.T) { read := func(filename string) string { content, err := ioutil.ReadFile(filename) - assert.NoError(t, err) + require.NoError(t, err) return string(content) } diff --git a/internal/manager/task_logs/task_logs_test.go b/internal/manager/task_logs/task_logs_test.go index d1d0c74e..8fc4f512 100644 --- a/internal/manager/task_logs/task_logs_test.go +++ b/internal/manager/task_logs/task_logs_test.go @@ -19,6 +19,7 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/task_logs/mocks" ) @@ -36,14 +37,14 @@ func TestLogWriting(t *testing.T) { mocks.localStorage.EXPECT().ForJob(jobUUID).Times(numWriteCalls).Return(jobDir) err := s.Write(zerolog.Nop(), jobUUID, taskUUID, "Ovo je priča") - assert.NoError(t, err) + require.NoError(t, err) err = s.Write(zerolog.Nop(), jobUUID, taskUUID, "Ima dvije linije") - assert.NoError(t, err) + require.NoError(t, err) filename := filepath.Join(jobDir, "task-20ff9d06-53ec-4019-9e2e-1774f05f170a.txt") contents, err := ioutil.ReadFile(filename) - assert.NoError(t, err, "the log file should exist") + require.NoError(t, err, "the log file should exist") assert.Equal(t, "Ovo je priča\nIma dvije linije\n", string(contents)) } @@ -59,7 +60,7 @@ func TestLogRotation(t *testing.T) { mocks.localStorage.EXPECT().ForJob(jobUUID).Return(jobDir).AnyTimes() err := s.Write(zerolog.Nop(), jobUUID, taskUUID, "Ovo je priča") - assert.NoError(t, err) + require.NoError(t, err) s.RotateFile(zerolog.Nop(), jobUUID, taskUUID) @@ -67,7 +68,7 @@ func TestLogRotation(t *testing.T) { rotatedFilename := filename + ".1" contents, err := ioutil.ReadFile(rotatedFilename) - assert.NoError(t, err, "the rotated log file should exist") + require.NoError(t, err, "the rotated log file should exist") assert.Equal(t, "Ovo je priča\n", string(contents)) _, err = os.Stat(filename) @@ -97,16 +98,16 @@ func TestLogTailAndSize(t *testing.T) { // Test a single line. err = s.Write(zerolog.Nop(), jobID, taskID, "Just a single line") - assert.NoError(t, err) + require.NoError(t, err) contents, err = s.Tail(jobID, taskID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "Just a single line\n", string(contents)) // A short file shouldn't do any line stripping. err = s.Write(zerolog.Nop(), jobID, taskID, "And another line!") - assert.NoError(t, err) + require.NoError(t, err) contents, err = s.Tail(jobID, taskID) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, "Just a single line\nAnd another line!\n", string(contents)) bigString := "" @@ -114,18 +115,17 @@ func TestLogTailAndSize(t *testing.T) { bigString += fmt.Sprintf("This is line #%d\n", lineNum) } err = s.Write(zerolog.Nop(), jobID, taskID, bigString) - assert.NoError(t, err) + require.NoError(t, err) // Check the log size, it should be the entire bigString plus what was written before that. size, err = s.TaskLogSize(jobID, taskID) - if assert.NoError(t, err) { - expect := int64(len("Just a single line\nAnd another line!\n" + bigString)) - assert.Equal(t, expect, size) - } + require.NoError(t, err) + expect := int64(len("Just a single line\nAnd another line!\n" + bigString)) + assert.Equal(t, expect, size) // Check the tail, it should only be the few last lines of bigString. 
 	contents, err = s.Tail(jobID, taskID)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	assert.Equal(t,
 		"This is line #887\nThis is line #888\nThis is line #889\nThis is line #890\nThis is line #891\n"+
 			"This is line #892\nThis is line #893\nThis is line #894\nThis is line #895\nThis is line #896\n"+
@@ -183,7 +183,7 @@ func TestLogWritingParallel(t *testing.T) {
 			}

 			logText := strings.Repeat(string(letter), runLength)
-			assert.NoError(t, s.Write(logger, jobID, taskID, logText))
+			require.NoError(t, s.Write(logger, jobID, taskID, logText))
 		}(int32(i))
 	}
 	wg.Wait()
@@ -191,7 +191,7 @@
 	// Test that the final log contains 1000 lines of 100 characters, without
 	// any run getting interrupted by another one.
 	contents, err := os.ReadFile(s.Filepath(jobID, taskID))
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	lines := strings.Split(string(contents), "\n")
 	assert.Equal(t, numGoroutines+1, len(lines),
 		"each goroutine should have written a single line, and the file should have a newline at the end")
@@ -217,9 +217,7 @@ func taskLogsTestFixtures(t *testing.T) (*Storage, func(), *TaskLogsMocks) {
 	mockCtrl := gomock.NewController(t)

 	temppath, err := ioutil.TempDir("", "testlogs")
-	if err != nil {
-		panic(err)
-	}
+	require.NoError(t, err)

 	mocks := &TaskLogsMocks{
 		temppath: temppath,
@@ -229,9 +227,8 @@
 	}

 	mockedNow, err := time.Parse(time.RFC3339, "2022-06-09T16:52:04+02:00")
-	if err != nil {
-		panic(err)
-	}
+	require.NoError(t, err)
+
 	mocks.clock.Set(mockedNow)

 	// This should be called at the end of each unit test.
diff --git a/internal/manager/task_state_machine/task_state_machine_test.go b/internal/manager/task_state_machine/task_state_machine_test.go
index 7374696f..d9df1b3a 100644
--- a/internal/manager/task_state_machine/task_state_machine_test.go
+++ b/internal/manager/task_state_machine/task_state_machine_test.go
@@ -10,6 +10,7 @@ import (

 	"github.com/golang/mock/gomock"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"

 	"projects.blender.org/studio/flamenco/internal/manager/persistence"
 	"projects.blender.org/studio/flamenco/internal/manager/task_state_machine/mocks"
@@ -37,7 +38,7 @@ func TestTaskStatusChangeQueuedToActive(t *testing.T) {
 	mocks.expectBroadcastJobChange(task.Job, api.JobStatusQueued, api.JobStatusActive)
 	mocks.expectBroadcastTaskChange(task, api.TaskStatusQueued, api.TaskStatusActive)

-	assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusActive))
+	require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusActive))
 }

 func TestTaskStatusChangeSaveTaskAfterJobChangeFailure(t *testing.T) {
@@ -75,20 +76,20 @@ func TestTaskStatusChangeActiveToCompleted(t *testing.T) {
 	mocks.expectWriteTaskLogTimestamped(t, task, "task changed status active -> completed")
 	mocks.expectBroadcastTaskChange(task, api.TaskStatusActive, api.TaskStatusCompleted)
 	mocks.persist.EXPECT().CountTasksOfJobInStatus(ctx, task.Job, api.TaskStatusCompleted).Return(1, 3, nil) // 1 of 3 complete.
-	assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusCompleted))
+	require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusCompleted))

 	// Second task hiccup: T: active > soft-failed --> J: active > active
 	mocks.expectSaveTaskWithStatus(t, task2, api.TaskStatusSoftFailed)
 	mocks.expectWriteTaskLogTimestamped(t, task2, "task changed status active -> soft-failed")
 	mocks.expectBroadcastTaskChange(task2, api.TaskStatusActive, api.TaskStatusSoftFailed)
-	assert.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusSoftFailed))
+	require.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusSoftFailed))

 	// Second task completing: T: soft-failed > completed --> J: active > active
 	mocks.expectSaveTaskWithStatus(t, task2, api.TaskStatusCompleted)
 	mocks.expectWriteTaskLogTimestamped(t, task2, "task changed status soft-failed -> completed")
 	mocks.expectBroadcastTaskChange(task2, api.TaskStatusSoftFailed, api.TaskStatusCompleted)
 	mocks.persist.EXPECT().CountTasksOfJobInStatus(ctx, task.Job, api.TaskStatusCompleted).Return(2, 3, nil) // 2 of 3 complete.
-	assert.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusCompleted))
+	require.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusCompleted))

 	// Third task completing: T: active > completed --> J: active > completed
 	mocks.expectSaveTaskWithStatus(t, task3, api.TaskStatusCompleted)
@@ -98,7 +99,7 @@
 	mocks.expectSaveJobWithStatus(t, task.Job, api.JobStatusCompleted)
 	mocks.expectBroadcastJobChange(task.Job, api.JobStatusActive, api.JobStatusCompleted)

-	assert.NoError(t, sm.TaskStatusChange(ctx, task3, api.TaskStatusCompleted))
+	require.NoError(t, sm.TaskStatusChange(ctx, task3, api.TaskStatusCompleted))
 }

 func TestTaskStatusChangeQueuedToFailed(t *testing.T) {
@@ -114,7 +115,7 @@
 	mocks.persist.EXPECT().CountTasksOfJobInStatus(ctx, task.Job, api.TaskStatusFailed).Return(1, 100, nil) // 1 out of 100 failed.
 	mocks.expectBroadcastJobChange(task.Job, api.JobStatusQueued, api.JobStatusActive)

-	assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusFailed))
+	require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusFailed))
 }

 func TestTaskStatusChangeActiveToFailedFailJob(t *testing.T) {
@@ -144,7 +145,7 @@
 		"Manager cancelled this task because the job got status \"failed\".",
 	)

-	assert.NoError(t, sm.TaskStatusChange(ctx, task1, api.TaskStatusFailed))
+	require.NoError(t, sm.TaskStatusChange(ctx, task1, api.TaskStatusFailed))
 }

 func TestTaskStatusChangeRequeueOnCompletedJob(t *testing.T) {
@@ -168,7 +169,7 @@
 	)
 	mocks.expectSaveJobWithStatus(t, task.Job, api.JobStatusQueued)

-	assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusQueued))
+	require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusQueued))
 }

 func TestTaskStatusChangeCancelSingleTask(t *testing.T) {
@@ -186,7 +187,7 @@
 	mocks.persist.EXPECT().CountTasksOfJobInStatus(ctx, job,
 		api.TaskStatusActive, api.TaskStatusQueued, api.TaskStatusSoftFailed).
Return(1, 2, nil) - assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusCanceled)) + require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusCanceled)) // T2: queued > cancelled --> J: cancel-requested > canceled mocks.expectSaveTaskWithStatus(t, task2, api.TaskStatusCanceled) @@ -198,7 +199,7 @@ func TestTaskStatusChangeCancelSingleTask(t *testing.T) { mocks.expectSaveJobWithStatus(t, job, api.JobStatusCanceled) mocks.expectBroadcastJobChange(task.Job, api.JobStatusCancelRequested, api.JobStatusCanceled) - assert.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusCanceled)) + require.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusCanceled)) } func TestTaskStatusChangeCancelSingleTaskWithOtherFailed(t *testing.T) { @@ -222,7 +223,7 @@ func TestTaskStatusChangeCancelSingleTaskWithOtherFailed(t *testing.T) { // The paused task just stays paused, so don't expectBroadcastTaskChange(task3). - assert.NoError(t, sm.TaskStatusChange(ctx, task1, api.TaskStatusCanceled)) + require.NoError(t, sm.TaskStatusChange(ctx, task1, api.TaskStatusCanceled)) } func TestTaskStatusChangeUnknownStatus(t *testing.T) { @@ -235,7 +236,7 @@ func TestTaskStatusChangeUnknownStatus(t *testing.T) { mocks.expectWriteTaskLogTimestamped(t, task, "task changed status queued -> borked") mocks.expectBroadcastTaskChange(task, api.TaskStatusQueued, api.TaskStatus("borked")) - assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatus("borked"))) + require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatus("borked"))) } func TestJobRequeueWithSomeCompletedTasks(t *testing.T) { @@ -269,7 +270,7 @@ func TestJobRequeueWithSomeCompletedTasks(t *testing.T) { mocks.expectBroadcastJobChangeWithTaskRefresh(job, api.JobStatusActive, api.JobStatusRequeueing) mocks.expectBroadcastJobChangeWithTaskRefresh(job, api.JobStatusRequeueing, api.JobStatusQueued) - assert.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusRequeueing, "someone wrote a unittest")) + require.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusRequeueing, "someone wrote a unittest")) } func TestJobRequeueWithAllCompletedTasks(t *testing.T) { @@ -301,7 +302,7 @@ func TestJobRequeueWithAllCompletedTasks(t *testing.T) { mocks.expectBroadcastJobChangeWithTaskRefresh(job, api.JobStatusCompleted, api.JobStatusRequeueing) mocks.expectBroadcastJobChangeWithTaskRefresh(job, api.JobStatusRequeueing, api.JobStatusQueued) - assert.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusRequeueing, "someone wrote a unit test")) + require.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusRequeueing, "someone wrote a unit test")) } func TestJobCancelWithSomeCompletedTasks(t *testing.T) { @@ -332,7 +333,7 @@ func TestJobCancelWithSomeCompletedTasks(t *testing.T) { mocks.expectBroadcastJobChangeWithTaskRefresh(job, api.JobStatusActive, api.JobStatusCancelRequested) mocks.expectBroadcastJobChange(job, api.JobStatusCancelRequested, api.JobStatusCanceled) - assert.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusCancelRequested, "someone wrote a unittest")) + require.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusCancelRequested, "someone wrote a unittest")) } func TestCheckStuck(t *testing.T) { diff --git a/internal/manager/task_state_machine/worker_requeue_test.go b/internal/manager/task_state_machine/worker_requeue_test.go index 2bfb351a..2cef414b 100644 --- a/internal/manager/task_state_machine/worker_requeue_test.go +++ b/internal/manager/task_state_machine/worker_requeue_test.go @@ -6,7 +6,7 @@ import ( "testing" 
"github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/persistence" "projects.blender.org/studio/flamenco/pkg/api" ) @@ -66,5 +66,5 @@ func TestRequeueActiveTasksOfWorker(t *testing.T) { }) err := sm.RequeueActiveTasksOfWorker(ctx, &worker, "worker had to test") - assert.NoError(t, err) + require.NoError(t, err) } diff --git a/internal/manager/timeout_checker/timeout_checker_test.go b/internal/manager/timeout_checker/timeout_checker_test.go index f0a3ed9a..d24cbef2 100644 --- a/internal/manager/timeout_checker/timeout_checker_test.go +++ b/internal/manager/timeout_checker/timeout_checker_test.go @@ -11,6 +11,7 @@ import ( "github.com/benbjohnson/clock" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "projects.blender.org/studio/flamenco/internal/manager/timeout_checker/mocks" ) @@ -50,9 +51,7 @@ func timeoutCheckerTestFixtures(t *testing.T) (*TimeoutChecker, func(), *Timeout } mockedNow, err := time.Parse(time.RFC3339, "2022-06-09T12:00:00+00:00") - if err != nil { - panic(err) - } + require.NoError(t, err) mocks.clock.Set(mockedNow) ctx, cancel := context.WithCancel(context.Background()) diff --git a/internal/worker/cli_runner/cli_runner.go b/internal/worker/cli_runner/cli_runner.go index 7fe843b7..9369c3cf 100644 --- a/internal/worker/cli_runner/cli_runner.go +++ b/internal/worker/cli_runner/cli_runner.go @@ -11,6 +11,7 @@ import ( "github.com/alessio/shellescape" "github.com/rs/zerolog" + "projects.blender.org/studio/flamenco/pkg/oomscore" ) // The buffer size used to read stdout/stderr output from subprocesses, in @@ -20,11 +21,19 @@ const StdoutBufferSize = 40 * 1024 // CLIRunner is a wrapper around exec.CommandContext() to allow mocking. type CLIRunner struct { + oomScoreAdjust int + useOOMScoreAdjust bool } func NewCLIRunner() *CLIRunner { return &CLIRunner{} } +func NewCLIRunnerWithOOMScoreAdjuster(oomScoreAdjust int) *CLIRunner { + return &CLIRunner{ + oomScoreAdjust: oomScoreAdjust, + useOOMScoreAdjust: true, + } +} func (cli *CLIRunner) CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { return exec.CommandContext(ctx, name, arg...) @@ -55,7 +64,7 @@ func (cli *CLIRunner) RunWithTextOutput( return err } - if err := execCmd.Start(); err != nil { + if err := cli.startWithOOMAdjust(execCmd); err != nil { logger.Error().Err(err).Msg("error starting CLI execution") return err } @@ -171,3 +180,13 @@ func (cli *CLIRunner) logCmd( } return nil } + +// startWithOOMAdjust runs the command with its OOM score adjusted. 
+func (cli *CLIRunner) startWithOOMAdjust(execCmd *exec.Cmd) error {
+	if cli.useOOMScoreAdjust {
+		oomScoreRestore := oomscore.Adjust(cli.oomScoreAdjust)
+		defer oomScoreRestore()
+	}
+
+	return execCmd.Start()
+}
diff --git a/internal/worker/command_misc_test.go b/internal/worker/command_misc_test.go
index 3958b621..292e81df 100644
--- a/internal/worker/command_misc_test.go
+++ b/internal/worker/command_misc_test.go
@@ -64,7 +64,7 @@ loop:
 		select {
 		case <-runDone:
 			break loop
-		default:
+		case <-time.After(1 * time.Millisecond):
 			mocks.clock.Add(timeStepSize)
 		}
 	}
diff --git a/internal/worker/config.go b/internal/worker/config.go
index 0ad0d6eb..e786cbb8 100644
--- a/internal/worker/config.go
+++ b/internal/worker/config.go
@@ -58,6 +58,18 @@ type WorkerConfig struct {
 	TaskTypes []string `yaml:"task_types"`

 	RestartExitCode int `yaml:"restart_exit_code"`
+
+	// LinuxOOMScoreAdjust controls the Linux out-of-memory killer. It is used
+	// when spawning a sub-process, to adjust the likelihood that the subprocess
+	// is killed rather than Flamenco Worker itself. That way Flamenco Worker
+	// can report the failure to the Manager.
+	//
+	// If the Worker itself were OOM-killed, it would just be restarted and be
+	// handed the task it was already working on, causing an infinite OOM loop.
+	//
+	// If this value is not specified in the configuration file, Flamenco Worker
+	// will not attempt to adjust its OOM score.
+	//
+	// Example, in flamenco-worker.yaml (the value is illustrative):
+	//   oom_score_adjust: 500
+	LinuxOOMScoreAdjust *int `yaml:"oom_score_adjust"`
 }

 type WorkerCredentials struct {
diff --git a/internal/worker/mocks/client.gen.go b/internal/worker/mocks/client.gen.go
index 22f99f23..679769bb 100644
--- a/internal/worker/mocks/client.gen.go
+++ b/internal/worker/mocks/client.gen.go
@@ -596,6 +596,26 @@ func (mr *MockFlamencoClientMockRecorder) GetConfigurationWithResponse(arg0 inte
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfigurationWithResponse", reflect.TypeOf((*MockFlamencoClient)(nil).GetConfigurationWithResponse), varargs...)
 }

+// GetFarmStatusWithResponse mocks base method.
+func (m *MockFlamencoClient) GetFarmStatusWithResponse(arg0 context.Context, arg1 ...api.RequestEditorFn) (*api.GetFarmStatusResponse, error) {
+	m.ctrl.T.Helper()
+	varargs := []interface{}{arg0}
+	for _, a := range arg1 {
+		varargs = append(varargs, a)
+	}
+	ret := m.ctrl.Call(m, "GetFarmStatusWithResponse", varargs...)
+	ret0, _ := ret[0].(*api.GetFarmStatusResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetFarmStatusWithResponse indicates an expected call of GetFarmStatusWithResponse.
+func (mr *MockFlamencoClientMockRecorder) GetFarmStatusWithResponse(arg0 interface{}, arg1 ...interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	varargs := append([]interface{}{arg0}, arg1...)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFarmStatusWithResponse", reflect.TypeOf((*MockFlamencoClient)(nil).GetFarmStatusWithResponse), varargs...)
+}
+
 // GetJobTypeWithResponse mocks base method.
func (m *MockFlamencoClient) GetJobTypeWithResponse(arg0 context.Context, arg1 string, arg2 ...api.RequestEditorFn) (*api.GetJobTypeResponse, error) { m.ctrl.T.Helper() diff --git a/internal/worker/output_uploader.go b/internal/worker/output_uploader.go index d6112219..c1c4cf73 100644 --- a/internal/worker/output_uploader.go +++ b/internal/worker/output_uploader.go @@ -52,8 +52,8 @@ func (ou *OutputUploader) OutputProduced(taskID, filename string) { } func (ou *OutputUploader) Run(ctx context.Context) { - log.Info().Msg("output uploader: running") - defer log.Info().Msg("output uploader: shutting down") + log.Debug().Msg("output uploader: running") + defer log.Debug().Msg("output uploader: shutting down") wg := sync.WaitGroup{} wg.Add(1) diff --git a/internal/worker/upstream_buffer.go b/internal/worker/upstream_buffer.go index 50d7da8e..43e8d353 100644 --- a/internal/worker/upstream_buffer.go +++ b/internal/worker/upstream_buffer.go @@ -133,7 +133,7 @@ func (ub *UpstreamBufferDB) Close() error { ub.wg.Wait() // Attempt one final flush, if it's fast enough: - log.Info().Msg("upstream buffer shutting down, doing one final flush") + log.Debug().Msg("upstream buffer shutting down, doing one final flush") flushCtx, ctxCancel := context.WithTimeout(context.Background(), flushOnShutdownTimeout) defer ctxCancel() if err := ub.Flush(flushCtx); err != nil { diff --git a/magefiles/version.go b/magefiles/version.go index 666a1355..e2dd0b46 100644 --- a/magefiles/version.go +++ b/magefiles/version.go @@ -11,8 +11,8 @@ import ( // To update the version number in all the relevant places, update the VERSION // variable below and run `mage update-version`. const ( - version = "3.3-beta0" - releaseCycle = "beta" + version = "3.6-alpha0" + releaseCycle = "alpha" ) func gitHash() (string, error) { diff --git a/pkg/api/flamenco-openapi.yaml b/pkg/api/flamenco-openapi.yaml index a3f73671..22b64271 100644 --- a/pkg/api/flamenco-openapi.yaml +++ b/pkg/api/flamenco-openapi.yaml @@ -191,6 +191,20 @@ paths: application/json: schema: { $ref: "#/components/schemas/SharedStorageLocation" } + /api/v3/status: + summary: Report the status of the Flamenco farm. + get: + summary: Get the status of this Flamenco farm. + operationId: getFarmStatus + tags: [meta] + responses: + "200": + description: normal response + content: + application/json: + schema: + $ref: "#/components/schemas/FarmStatusReport" + ## Worker /api/v3/worker/register-worker: @@ -1384,6 +1398,26 @@ components: name: Your Manager git: v3.2-76-gdd34d538 + FarmStatusReport: + type: object + properties: + "status": + $ref: "#/components/schemas/FarmStatus" + required: [status] + example: + status: idle + + FarmStatus: + type: string + enum: + - "active" # Actively working on jobs. + - "idle" # Farm could be active, but has no work to do. + - "waiting" # Work has been queued, but all workers are asleep. + - "asleep" # Farm is idle, and all workers are asleep. + - "inoperative" # Cannot work: no workers, or all are offline/error. + - "unknown" # Unexpected configuration of worker and job statuses. + - "starting" # Farm is starting up. + ManagerConfiguration: type: object properties: @@ -1685,6 +1719,9 @@ components: properties: "name": { type: string } "label": { type: string } + "description": + type: string + description: The description/tooltip shown in the user interface. 
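+        # Hypothetical example of these fields for a job type (values are
+        # illustrative only):
+        #   name: simple-blender-render
+        #   label: Simple Blender Render
+        #   description: Renders a sequence of frames on the farm.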
"settings": type: array items: { $ref: "#/components/schemas/AvailableJobSetting" } @@ -1714,6 +1751,8 @@ components: type: object "description": description: The description/tooltip shown in the user interface. + "label": + description: Label for displaying this setting. If not specified, the key is used to generate a reasonable label. "default": description: The default value shown to the user when determining this setting. "eval": @@ -2387,6 +2426,9 @@ components: type: string enum: [manager-startup, manager-shutdown] + EventFarmStatus: + $ref: "#/components/schemas/FarmStatusReport" + SocketIOSubscription: type: object description: > diff --git a/pkg/api/openapi_client.gen.go b/pkg/api/openapi_client.gen.go index 01028496..a586636a 100644 --- a/pkg/api/openapi_client.gen.go +++ b/pkg/api/openapi_client.gen.go @@ -200,6 +200,9 @@ type ClientInterface interface { // ShamanFileStore request with any body ShamanFileStoreWithBody(ctx context.Context, checksum string, filesize int, params *ShamanFileStoreParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + // GetFarmStatus request + GetFarmStatus(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) + // FetchTask request FetchTask(ctx context.Context, taskId string, reqEditors ...RequestEditorFn) (*http.Response, error) @@ -779,6 +782,18 @@ func (c *Client) ShamanFileStoreWithBody(ctx context.Context, checksum string, f return c.Client.Do(req) } +func (c *Client) GetFarmStatus(ctx context.Context, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGetFarmStatusRequest(c.Server) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + func (c *Client) FetchTask(ctx context.Context, taskId string, reqEditors ...RequestEditorFn) (*http.Response, error) { req, err := NewFetchTaskRequest(c.Server, taskId) if err != nil { @@ -2273,6 +2288,33 @@ func NewShamanFileStoreRequestWithBody(server string, checksum string, filesize return req, nil } +// NewGetFarmStatusRequest generates requests for GetFarmStatus +func NewGetFarmStatusRequest(server string) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v3/status") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + return req, nil +} + // NewFetchTaskRequest generates requests for FetchTask func NewFetchTaskRequest(server string, taskId string) (*http.Request, error) { var err error @@ -3370,6 +3412,9 @@ type ClientWithResponsesInterface interface { // ShamanFileStore request with any body ShamanFileStoreWithBodyWithResponse(ctx context.Context, checksum string, filesize int, params *ShamanFileStoreParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*ShamanFileStoreResponse, error) + // GetFarmStatus request + GetFarmStatusWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetFarmStatusResponse, error) + // FetchTask request FetchTaskWithResponse(ctx context.Context, taskId string, reqEditors ...RequestEditorFn) (*FetchTaskResponse, error) @@ -4103,6 +4148,28 @@ func (r ShamanFileStoreResponse) StatusCode() int { return 0 } +type GetFarmStatusResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *FarmStatusReport +} + +// Status returns HTTPResponse.Status +func (r GetFarmStatusResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GetFarmStatusResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + type FetchTaskResponse struct { Body []byte HTTPResponse *http.Response @@ -5037,6 +5104,15 @@ func (c *ClientWithResponses) ShamanFileStoreWithBodyWithResponse(ctx context.Co return ParseShamanFileStoreResponse(rsp) } +// GetFarmStatusWithResponse request returning *GetFarmStatusResponse +func (c *ClientWithResponses) GetFarmStatusWithResponse(ctx context.Context, reqEditors ...RequestEditorFn) (*GetFarmStatusResponse, error) { + rsp, err := c.GetFarmStatus(ctx, reqEditors...) + if err != nil { + return nil, err + } + return ParseGetFarmStatusResponse(rsp) +} + // FetchTaskWithResponse request returning *FetchTaskResponse func (c *ClientWithResponses) FetchTaskWithResponse(ctx context.Context, taskId string, reqEditors ...RequestEditorFn) (*FetchTaskResponse, error) { rsp, err := c.FetchTask(ctx, taskId, reqEditors...) 
@@ -6193,6 +6269,32 @@ func ParseShamanFileStoreResponse(rsp *http.Response) (*ShamanFileStoreResponse, return response, nil } +// ParseGetFarmStatusResponse parses an HTTP response from a GetFarmStatusWithResponse call +func ParseGetFarmStatusResponse(rsp *http.Response) (*GetFarmStatusResponse, error) { + bodyBytes, err := ioutil.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GetFarmStatusResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest FarmStatusReport + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + } + + return response, nil +} + // ParseFetchTaskResponse parses an HTTP response from a FetchTaskWithResponse call func ParseFetchTaskResponse(rsp *http.Response) (*FetchTaskResponse, error) { bodyBytes, err := ioutil.ReadAll(rsp.Body) diff --git a/pkg/api/openapi_server.gen.go b/pkg/api/openapi_server.gen.go index 7c883470..5a492e4b 100644 --- a/pkg/api/openapi_server.gen.go +++ b/pkg/api/openapi_server.gen.go @@ -98,6 +98,9 @@ type ServerInterface interface { // The file's contents should be sent in the request body. // (POST /api/v3/shaman/files/{checksum}/{filesize}) ShamanFileStore(ctx echo.Context, checksum string, filesize int, params ShamanFileStoreParams) error + // Get the status of this Flamenco farm. + // (GET /api/v3/status) + GetFarmStatus(ctx echo.Context) error // Fetch a single task. // (GET /api/v3/tasks/{task_id}) FetchTask(ctx echo.Context, taskId string) error @@ -600,6 +603,15 @@ func (w *ServerInterfaceWrapper) ShamanFileStore(ctx echo.Context) error { return err } +// GetFarmStatus converts echo context to params. +func (w *ServerInterfaceWrapper) GetFarmStatus(ctx echo.Context) error { + var err error + + // Invoke the callback with all the unmarshalled arguments + err = w.Handler.GetFarmStatus(ctx) + return err +} + // FetchTask converts echo context to params. 
func (w *ServerInterfaceWrapper) FetchTask(ctx echo.Context) error { var err error @@ -1018,6 +1030,7 @@ func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL router.POST(baseURL+"/api/v3/shaman/checkout/requirements", wrapper.ShamanCheckoutRequirements) router.GET(baseURL+"/api/v3/shaman/files/:checksum/:filesize", wrapper.ShamanFileStoreCheck) router.POST(baseURL+"/api/v3/shaman/files/:checksum/:filesize", wrapper.ShamanFileStore) + router.GET(baseURL+"/api/v3/status", wrapper.GetFarmStatus) router.GET(baseURL+"/api/v3/tasks/:task_id", wrapper.FetchTask) router.GET(baseURL+"/api/v3/tasks/:task_id/log", wrapper.FetchTaskLogInfo) router.GET(baseURL+"/api/v3/tasks/:task_id/logtail", wrapper.FetchTaskLogTail) diff --git a/pkg/api/openapi_spec.gen.go b/pkg/api/openapi_spec.gen.go index 47cc6901..11a214c4 100644 --- a/pkg/api/openapi_spec.gen.go +++ b/pkg/api/openapi_spec.gen.go @@ -19,231 +19,233 @@ import ( var swaggerSpec = []string{ "H4sIAAAAAAAC/+y923LcOJYo+iuInBPhqpjMlCz5Ula/HLcvVaq2yxpL7jonWhVKJInMhEUCbAJUOtvh", - "iPmI8ydnT8R+2PO0f6Dmj3ZgLQAESTAvsiWr3NMP1VaSxGVhYd0vHweJzAspmNBqcPRxoJIFyyn886lS", - "fC5YekbVpfk7ZSopeaG5FIOjxlPCFaFEm39RRbg2f5csYfyKpWS6InrByK+yvGTleDAcFKUsWKk5g1kS", - "medUpPBvrlkO//i/SjYbHA3+Za9e3J5d2d4z/GDwaTjQq4INjga0LOnK/P1eTs3X9melSy7m9veLouSy", - "5HoVvMCFZnNWujfw18jngubxB+vHVJrqauN2DPxO8U2zI6ou+xdSVTw1D2ayzKkeHOEPw/aLn4aDkv29", - "4iVLB0d/cy8Z4Ni9+LUFW2hBKQBJuKphfV6/+Xnl9D1LtFng0yvKMzrN2M9yesq0NsvpYM4pF/OMEYXP", - "iZwRSn6WU2JGUxEEWUie4D+b4/y6YILM+RUTQ5LxnGvAsyua8dT8t2KKaGl+U4zYQcbkjchWpFJmjWTJ", - "9YIg0GByM7dHwQ7w28iWshmtMt1d19mCEfsQ10HUQi6FXQypFCvJ0qw9ZZqVORcw/4IrB5IxDh+MGZ/C", - "/7Knpcw0L+xEXNQTGXwsZzRhMChLuTZbxxHt+mc0U2zYBa5esNIsmmaZXBLzaXuhhM60eWfByHs5JQuq", - "yJQxQVQ1zbnWLB2TX2WVpYTnRbYiKcsYfpZlhH3gCgek6lKRmSxx6PdyOiRUpIaAyLzgmXmH6/G5qBF9", - "KmXGqIAdXdGsC5+TlV5IQdiHomRKcQnAnzJi3q6oZqmBkSxT3KA7BwY7aR6dX5c/m2EXNcywx2Imuwt5", - "zTQdpVRTOxAj98zL94KldTG+c/T2oAaD9ik9r/8y92i5oDo+iaHIqTTrJ8dAnmmmpMGQ1FDsIqMJW8gM", - "4ME+aAMUg0qIpmbAnIqKZoSLotJkxpk5U0UWPE2ZIN9NWUIrheAdSTHC86/xQcv5PGMpkcJxA4Ob3zfO", - "tIammfkVF5d/rrRuQSCKqi+EQWlVb9zMg0u4Z6cmUxiLTNmCXnFZdo+VPG29uuRZZlDGX6k/Z0ykrLyn", - "cGwLVn+9CJCjeqdDWM/ErGcSHgSM28Q4u4Z7CnFuTF4DtLNVcOlqeslhp4IISTIp5qwkhVSKTzOG94YL", - "pRlNga6K8MRwRfcC4N1z1M8AwuxzfC6emmtD8yKDQ7KzES1HUzYqAQIsJbOS5oyUVMzZkCwXPFmYg3U3", - "h1Za5lTzBPYwk4Z+4DAqYcJ/N600Sag5FCKvWFkiMuVu75ZEKsPG4re/xedaeNNEkxi3umSr7o09TpnQ", - "fMZZ6a+shfyQ5JXSZrmV4H+vkH9YWvve8q8oeTC3m5bzCAt7KlaEfdAlJbScV7kRDBybmBarsflQjU9l", - "zk6QQKy++54YqOLN1ZIkJaOaISpbIrIK1lDvtQbUDpSf5zlLOdUsW5GSmaEIha2mbMYFNx8MDZ7B9GbK", - "IcBEVtquiJaaJ1VGS3/Pesi4qqZO6lknLEXki1P7pefQO49wZj+/4nCLrjHCX82XPDNyUxspDY7ZlW0p", - "MJ3WoGjJTdV0ZJ4gxBHnPPl6VpUlEzpbEWkkHOrGBSQOZBw1JpOfnp7+9OL5xcvjVy8uTp6e/TRB+T3l", - "JUu0LFekoHpB/pVMzgd7/wL/Ox9MCC0Kc/3tXWSiys3+ZjxjF+Z9c9946f4JP1tZc0HVgqUX9Zu/Re5I", - "37l0RR8LgWD3wcVEwY4qcvzcXRnYdkDAx+QXSQRTRgpQuqwSXZVMke9AsFNDkvLETEVLztT3hJaMqKoo", - "ZKnbW7eLHxqZ//DAbDqTVA+GgNfbbjJAnQard8g4jAm9jj03OdjEfjM5IjRb0hXS9DGZ1PxqcoToAV9b", - "0vXuGEVwAKgV3EryXcYvGaEOaISm6UiK78dksmTT2DBLNq25IWBdTgWdM0PUkNYLqZGo21kcY3svp2My", - "QVlickQEu2IlDP2nNi5b0mhWirKheRGAA3qnmV3QrElr3GnVAMWZBkB0LFwGw8GSTTeeWRwjne5S4wlK", - "OVwZRk7nrLSMWQNFpLlh/hFFh2ka0ZZ+omoR3njgMuS4QwIUsdwqo1OWkWSBTBaWYUZGwQN/HpMz8zNX", - "yEekqA/fS8tMqKo0nMWKlF6mb05q7kdVgBRNNeuR6GBJu6nWboKtzQIx1bOjtbWIsyVQuLxgziGexSaC", - "bdAhwtRfcaUdhQKS248YXSRwWvf1Nn7W4IQ9u66niG3QXvgTqhfPFiy5fMuU1XJbarmR+Lub72gkKycK", - "6IVBuO+E1N9bOh0VlkBgjWu8KMsCRi6pQtXfYN6MixRncSQ+OrC6wGmjlgQUeRbML9SyElkaujWOCi3A", - 
"zKIrhUH8QmeyEml0TUpWZbJR4giO5BQ/aB8pAs2uyA8b7nloD2zDkb/kIq1PfCv860GYiMWkuw9D9UJB", - "giolE041kmSzmwsmrq5oObCI0S9AOLNg5zzsA1Iyo4OBiE2JQhuUNWYBvfvAkkqzTebKflugp+zBYwfj", - "ON0JPokdy4uylGV3Pz8ywUqeEGYek5KpQgrFYobVNILqP52dnRC0/hHzhhff/UDk2LDSJKtSNJPgpVhl", - "kqZEScRqD0BcbQO2WWaXxgXaKbk0euUzM9nD/UPPdbxtIaWaTinqmtNKrQx3YgQW6hZlmZcUmnJBKLn3", - "lulyNXo606y8h68uGAXzhVkeFylPqGbKGqhQQ9U8R33bHAVTXvksmS45S8fkJWiqTiyxA3IFgotBE2qE", - "Y8fL7ynL98y7ScaZALNJKomSOTOK4ZyUjCoJ1gkC4hT7gJeH04xMaXIpZzPkmN6g60TJrjU5Z0rReQz3", - "WsgF516/H8WsKyb0z3L6rjBMO6ppKKa9ZXZIzNGCkk5OZXLJ9PGbvdf/dnaGZ4iiI0oWykCxJIItzY9q", - "SCZFya64rNQFIt3EG1bYB8QxhEDbApYxzS7sQbH0gkZYwvHM6qIZA3ZjSK3/wko+znzBc6Y0zQtiSDJi", - "g0EUhwnmU6VlicLQy4zmTCTSc+nmGRmYjcyIUS4ToUDv3h0/dyLcz2CF32DAr+Wi5kC/0DzU/uK2hga4", - "NxFvIyx550PozvCayMP9GDaWbFYytbgA423kaPwF9PKjvSJqAQZh+z1QC7ubewpNwbVwCliHmoQyt80A", - "Xg0N0oHQmVJQIRhNFnDjr3ha0QzdUEuYZW5IJdhHpDQ3eOUGsebgoqQJmKl6zRK7A7HfeQNTR9DjzCOn", - "nJGMKm1XuTXOLam6wBuT9nhJ8IoaLH9vNGX7cn1HzG3Xkkx0WbGJ1S7sk4IlfMbNy6CMgQmRp/dqI7Bi", - "emjJqrlJ7nbnhV5tZbaDC+CAE3imrL8p8Eg1ka6XsL2iSr+1lso+CmcRVJY1ghrI1xZOntN5zRwd9Owy", - "42L7Vr654UAvqnwqKM+2QKtwK8dmReBliAn0OBdVl/ZffpJ+MPEZe7ZKYvKwJ4AZn7FRYl4i7AoUeWs4", - "N6ofsDS1qFCTT+VSDI1kUcKfVTEkTCcx4r6Nmc4vDpaKak1r1702NfyEqstXct53/uC1zuScJItKXFoG", - "pyWhBPialgVP9hyvI6WUOUkZ0rQU37MCkAH5EH65kjw146QgQLQITgwOmYyo+8/MehyN13aVY/Karrz4", - "k1eZ5gXIFIIpeJd90FH9wiHEWpYE/v3hjk7lGtXMNtYewzZSxhmAcYOYAeDoyBlADa4raBj6f9X04G/P", - "y7cD3HAX4rCZ72uc9HMZfzPs4Drf3BQ/i7EHT+Gs5hRhF/4ke3ERVboz2ksU8AVyRucbUJFrj4Yx+oZm", - "vHWQ9EvZln2DAW9L9r2Z5fYZtwIwbXNp8c2N13aJYF0DsYSKCyM90FKvM85wZacEzY1WWo7sV3H7jIVT", - "VHlwMibasZmu1VG7XANtO8D4i0n/uPxtaIa5NxeKsUjwhxEKnDLLVbhe874zYAQWxu3Wvpn0LN3qP5f4", - "IBh2JT/xry4Qr3b5+Bl88RZ1v5sVza9YqWwYxxZkrp+6uXGGjbsSu8NOdvtrPbP155t/zjk4lw7HB6PH", - "j0bzND18kD48/MHNfTT4f2VVOtwZQCxGqf0mBofjwxHNigXdD9YU/ky+64z9vXOB1/cdVrGTN6CxjI9r", - "4dw8QQsGL8l7j0zOqNUuF1VOhZGuVJXDZyhblCxjVDEyrXiWuqhG8ISYK0EVmYSrmqBoLIFU1Z9AmI21", - "puHXkznXE2K/AhtZ1GnSQo/6/Bug8ChjIBrDhp8xIpJm2ZvZ4Ohv6+/IqXPxmK8+DT+ukZXWGv2dNkXc", - "F0QKr0dF5VQMY4gZb80D8Ei5m7j11funtyFdw3ixMyEcf4ZQ5w59gzj36TfE4z9nMrnMuNL9HjdkUNbo", - "REsGllsIX2QpSVgJ6hNoEeiXk0Y8sRaOxCHnVk6PcD0vhC5XMX9H96WOF219vC/uZ1vdwb7dQ0RbJ1AP", - "HYb39pCQ5/Z6xGMcza+ETmWlMQDR6V1WenKSlTWj8IZY1YSGWtCciotkwZJLWen1jrpTeJm4l4PwFbeA", - "kuXyiqWEZlLMMdrXxRtsE03WXEsPaOIWms7CXwhZzRehSwTYBQ08BwVnCSNaznGLKZ/NWAkmUzhBsFma", - "rwklCwmmqoxqfsXIu7evnB8iYsMakzMJzA1CXTDi4+2rofkpoZoJqhk5H3ycUsU+7X2Uwkt7qprN+Aem", - "Pp0PYjK7+aCJlmUWpUJ2mIY/cUNwdesoYKpgpJ6jeE2Vcph6yjKWxGOnT7zXDWN/zbMpsxT9vZwqZ6Ou", - "UdigSyBEgWxuadZFTj8MjgYH+weHo/1Ho/37Z/cPj+4/OLr/8F/3D47297vCT/frTlRgluFC0IPMShaS", - "XLOwmSzBNe34as2bWpdvB/ocBSnTNKWaAvtPU4j4o9lJxJzXYLyNzZRTrktarkhuB3MIPSavzTYMdc3Y", - "hzAWyzrmcml2AUETleJiTiZ0PB0nE0PW6ztkcPWSrVpnVJQS9nE0OC1Krhl5WfL5Qhtmo1g5ZjkYYAdq", - "NS2Z+L+nNm5AlnP3hpWHT+EFcqr/9/+6YtmgB04n1kj9zOsizTMPPSs5/cDzKh8c3d/fHw5yLvCviJul", - "dQ38ID34fxqEzMQPS5cV6/nWywrOHw8MGOMGRGKOAXM/CrRTDAczyvHHglYK/vH3ilX4Gnwx8nLUAPfB", - "KobxcpWB9cjTpGZ4bo1Hfll9UEX3ajwCA58Fcd7W5Y3xT19EXGozDCe62GX1nZKWZS+bsA+BT/ioPBdh", - "7UVKcz0qBeFwyOLMW8gPWEpmPGMKma5gCVOKlqsYAW8xuKiZ+N4zx12Pn98L3PYgujlHeZsRh6kcY/KU", - "G01I4ErdJzGm7ewvVkhwzHtWytxvvU9VigH6jKpLdVrlOS1XsSSkvMjAsUUyKz1iIoqD+pg8Q3s7hjRY", - "K7OLYzQ/uUMCB6R5Po6YAq17dCuhEuyrdsFbBHH1MkL1bxXDPYdMi+dG6344HOQBUe8jk5+GA0iPuZiu", - "IIXMsisIb7WI/pu3wHDRIBieDlgS8VuXBeJaPtbU73485OGzuc9LnmmjkNfcZ+h4yavjv7yoWUk0aF7O", - "Zoo1Fxr1hteg+rhDApnakl737SiMw9xlV8GptW/FW6arUqBRFCQQEJqpo57cihuwhV10pbZ7PEDqfgTu", - 
"izwE1N/2TqEp45p3KeKFDDgkxjeXI7DFVcVgWP+yqHQql3G2Zg0Cz6SY8XlVUielNjfJ1UteKv22Ehss", - "4lyBdM9R5DcEdGY+rKOd7HykrEQQW+EzkEC8omTGlmRGDSlWQ2Jjv4UUI0jTM1pIEq4XmIwRQJ1S7eOB", - "pwxiMvJCG5Ju3tILtrIitbinyZT1BlsAH8FsrnQr3Q9WoUsq1IyV5OnJMSQyuHjYcU9IB7DYVzKhcf3g", - "uWdJwO8MNzM3DeayH483Gjjas7R3NwwPOIZ69tT+SkvuYlbbCHKhl3JJI7ztjWCjJV2RK/sxRmlDGp9U", - "GoIepbnkNmEMUhw4ZHyVDFIBcwi8MYx38tHIwZ8mVsHkJaaoOZFkAUkhynl6XC64j8x1PqIxOVvKyJrA", - "PGonTTvJAV76YXb5RUa10WZG3maDSZogLthBpiu/6D5Eg482m0isabUGtPtyi/N6WqWciWaEq7VOWQVD", - "rSMObhi1jvWtI3tt9Okwxte0KAyM4ZTdoRCzZUj80j6djGNOdmTDq78wVrythIhmedchYMvg4lpnVU5X", - "5JKxwhAl4YTCuAiVd+bpHmitCPRI9Q2PT4y4tALWaFNfqE3CXuNcWrw+9iFtIJEvGJksvauJTYjZirVg", - "h4nGeH3MJADvuTT/FeyDbgRfoUN3SCZNIEzI63enZ0ZDnkAG32SrOKsWID3U+mAUw3If5H3sovRbeq6N", - "iF9/sVox3JHhbz3p4KvlBoAmxNLNHMWG9m8X0f+WzQ3bLllqPc4dSNI0LZlSO9a7sPQ3ftPkTC9pydZc", - "w509vC5v5sKbqNVuMvZnVcywDMCBKqya4QAxHCSYeHlh43I8FHpWHzutU5ZUJdcrH/DfooDbRn6vC/k+", - "ZboqnirFlaZCo/AZy5UIhTw5NbKd08FB7jKjED9Ml1pbQ9oLSKagW2TT9mePfC1BrbuFKDxBnHvW66k4", - "xSAZa4yxrgdektOfnh48fITXXlX5kCj+D8hOna4guNkIZDbpnWR2US4Lo2s1aRk9YTZw8yL5GdR52uO5", - "RCF0cDQ4fDjdf/DkfnLweLp/eHiY3p9NHzycJfuPf3hC7x8kdP/R9H766MF+evDw0ZPHP+xPf9h/nLKH", - "+w/Sx/sHT9i+GYj/gw2O7j84eAB+Ypwtk/M5F/NwqkeH08cHyaPD6ZMHBw9m6f3D6ZPDx/uz6aP9/UdP", - "9n/YTw7p/YeP7z9OZoc0ffDg4NHhw+n9Hx4nj+gPTx7uP35ST3Xw+FPXkOAgchKltubXQHp0ipDl12Hq", - "vBvHVcfwvhXrV2mbuICGU+WVIvT5hmE35FgQLKhhffXK+VXsWBi740K6zINzvx1y/Px8gMYmp3L7gAGf", - "tkJxFaCrTawdZ6Syar4HVRZGhnrtYaWC0fHzSU9qpkWZLbVpXPtLnrHTgiUbFWscfNg8ps23qeb+Mbuu", - "eYZWutapxEoHXQM9rFu6jRigOFvQ1745vaDCej2bkQNUNQYFt4xNqaWufkR9jclZIF18PvJtEVCy5ZH4", - "o+4SOKuCUSd1UaS8llbZRQd0OC4pthz5sh4PTRn1iN4TGy0ZQyMrbJLacMzoGEBnPnbNbaxJowcbHTVm", - "NXa8Yb+w2wTwr1wvaifMVqB2SnjivJVR0A+tmDokKStsdDrQEecT+cbPZlvZMziOHv9O51RDU/i64+34", - "1ipxKeRSQORLJmmK+hgGD0XNAjjYW1wNlIlx0YvXFTxA0GjArleWuCGh4VYEhFtgb/2H3zwvzGSNczU8", - "LRCzKSmDzxxLGYZHaW0TsnndWXll5I6XPGNBBBQgmuEk9jXzm0uIqOX6MIv4tnCgvpj+PtwMWoQT+ev2", - "hXElIN+fizVYHrFJONpeYjz/XXnulyKEa4leydLTTZpbm5Uo+KzmWDQ1QrHV6YIIPWqtquS82t8/eOTt", - "wVY6q5TB/I6hWUs7YGQuFKb8PbAC1D3VdHdEM4cCC+8OllhvGP40HGQBgHa0tdyCq6R16lmtIfutNwwh", - "zTVFscNmh5xW0zW1LU+ZACu+z77DEDkFIdd7Kvh2gkmJtvKYlrbikKOSwZvm4Xs59dl45JkbEwslzZkO", - "n6PqBaZeqi590rD7O5NzhW4twZgtHlFkPOE6W7lppwyjyMGxYh6thn4jRovAvBP3rhlDCox9+E5LWE9j", - "6pnLVH0vp98D7zavm1fuKchjBKO15jkbnwvn4xNSo2lkuoK0RtBKLB+hmhSl1DKRmSvv46GFvhkEpq/f", - "Cxk901JCxo8ZuRmT0bwcsthIZSK48MbZyrct5hYbxJXAcZa//jBqrNGgZfMY9kgl6h8MZRjvnBwpi3U1", - "39ZvPRAT/TIgZqr+Kyoh9oEiQhyoJpdcpDYnYmsY+MiwLPtZTiFIO8t+9U4tW5CAqstMzvFhGBwbvn5G", - "53H3VyMDIVpoq7ZoBRWptKyxsSnBbBPr8vkhgfbB4e//H/mvf//9P37/z9//x+//8V///vv//P0/f///", - "wxx2qKYQxn3ALKD1HA32MHB3T8323supQjPO/YPDMbwEZpRKXF6gXHMY4OTJLz8aFC3U4MiIVVCc00g7", - "90f397H+3gUkaLGl8jUfITYYa/KxD5oJm8kzLqxryKzkQlba19xprA+n8Cvci+/cFg/sjFdKqdeOZytC", - "Yim6i5oTDjIuqg/B9QOv9cgelQ187kbchkiwIVbEB7xuW/Z7Q52M8Kw3xci4V2vb91aRNXU4YQ/UOuEB", - "SGvEnKiV0iyvA77tt63ycBBmmMi54Ip1xSv7ch0zTUkml6wcJVQxb7a0U7hF2RCTczzQ88GQnA+WXKRy", - "qfCPlJZLLvDfsmBiqlLzB9PJmJz6qWReUM19Ke8f5T1FJmUlgA/++ObN6eRPpKwEmYB/VWYk5UpDvB8E", - "NBguS334n6ui6xepxufiqXLyJ82I2dGwsQ9y7mJ+zgfOOGgrkqNtxoVjQ23CooR8CKrI+aApbbrxzgc1", - "7HOpjDwBYs0lI5opvZeyaTW3JQ8VYVRxKC5opREXF4rea56QVCZQVBYSXbKssbNouYC+RBTzw8X29QmH", - "JJEFDxXMSbtK3diMNvE1a7sVDs/sX3UyhyHeLCXc+sexAEkqmRL3NMmpTjC9gya6opkfqWOYP8NauSA6", - "qnbhQ8AjmaVBYF2zxnm77qSvce1Kg5yL48YCuSIyRz41rG1lUOtqVVClWsWNO+k8UaDbNGhN5yjK2dvn", - "apjV0bdB+vjxcx+aY2u5WN6N6iPVxFeJnDJiSExaZXj9zVLQaAjhCRjdJctgYwa7XPaVQUP3hV9JM/1t", - 
"KynKul+7dWAiRC4mZ8X7Vpy5uhrYqQLi25TToJ253pUkGxI+ZmOXcOHDZIIwqfFuJSW+ZLeLm0iaxJDd", - "i+nqwkUr7RK8bIMNImvdMoVth0oZkEajZWXwdEO+IkaniZVPlTf/l9bJMzbuaLc0+a/fDOSmcjUd6dnl", - "xLfN72wX8oj1IQm7jfjLtKHxiC33szFBEZLkpG06EpTw+ayKTnHvhCE0YGBvFfMZNizuXUwJavZsnLkq", - "s/jE796+CtOU69kJ14plM+/JlEuRSZpuE4FUl/zxp4g5f7D/vlP5jMwin0ig5EyP2glHMf2xnvAu5QyF", - "t/oaSUNhWkhXJ66UJqybXVqjO+Y7y0ax7rrcHoi/XezfsVzRXSKG101H35IiuZn6TmpdxTF85ksbQuC9", - "E+WkpdKoiiHmWTM32BuBYsGJQe1RFPWwc4mR7P3pge1OFhgw/CcirYmk9QKfC6hU8B3IN9JFXE8cvbXV", - "s4TUhJXURrb6cg5tqd0s6/tN5bW6MeoZF7bPhI2+hUiKe4okvpkBBpjzMH0byDV5c8XKZck1Q1mey0pB", - "IR8RVJ1weaZR8SFWfO2VnNuiap4GYH03JxW7Hghm0XAqMCGjZcZ7qk7rBgncgUpEkauO5ozqAyWDsJSE", - "gU4IyjsXGJWP40Sc/esCQT+PCqy5ZG7S2CWq97hd1RIbNOrz5jqJEsVFsMeWZHBC7LNOhaa1DpntDCr9", - "Y31+YKumsX4yZxQpheP7dcUs6PCRs3yKeLqVSN+oUtZdAGpX2wygLrcjucFRNVxLQfWbaEztp9+GkRT6", - "Ljt01LZGs1fb1BPpXppdlaM2jq73ELvR+28HxncHHoPa4m1t0faXka/ZFbGiKpaUDDilHAmpR5pl2YiK", - "lRQsjGQ+GhyOD/pgf/Q3FzBrJLdZXrC5bf8yqvt/DIaDnKskkgl6zVBzu/CPX/5mteUznKnp6IxNYZG5", - "/8hO+Vy8aR9Wo/CdtczbA3x6cgxd0oKTuKgrbqklnc9ZOar4DR1MqyRfN8Ghv1ZXZ7U3f0yOkMRPprOi", - "NaeUMVacWttXxDdtHnvbmAtPQDXSZbqdGpiBi5aJFNMwvXzj6kj5tPGUrpp6mh/bEGxQlMbkaVFknNla", - "hZgnL82HHOxWk5Su1IWcXSwZu5xAuB+80/zdvOxqMkdWCDKhIAcPRgtZleSnn45ev66ziLGRTo224ciD", - "o0Euia4IxFGAmzC9AKn7aHD/h6P9fUxasUqfTWkGvHJv7T+J1klpTtKNiaQJGylW0BKjdZdylDFoXeTq", - "5VioQ3FiukK+yNhlD5jJd+eDXKLHQVfO2fD9mLwAa2fOqFDkfMCuWLky47mqON1mkH7/gegEAO3JPHKg", - "+RgvQO4BtXm4No/1Yw+b0GyMG6x4zb3QVLM+ndomlJdhet32aT5RjTgYbKtFpS3C6iO+6JJesi5yXScf", - "afswqMZ3oUPfVsc2EjCsazigypAUcwiQ/DMcaKbsK3I2M8oIGAfa9R5rBOovbBnJ7sdKdUi2asXTJjnW", - "IcFQTNaWUY7YBtRFRv+xWh921MyftP4J1ObCtoJArmoPC0ortQZoFV5FZlxwtehrBDn8guc59Ptbc7J9", - "1pg/U8WTNYLn+DNK3y53KX27ixH9q1SZ/VIZgl+sBuw2FUR9BZ6WZlX6nNpr2Jm2L+1a62MxxS9UWMhT", - "dFZS4U1B2crGUa6ctEHnhOvAcQ9VWcC2MfauQWsmLozAIGd16XmjfhLFzd9UMDC+dKWEjkbWqM9ohk4l", - "+fHkHcHADW/lefHiry9ejOuatD+evBvBbxEhodkqeedSmprOx+SZbUJrvZmtEkfUVplHw71NuaDgZi+p", - "SGVOYEBvIrJ98bfyeG5rO9mgW5zR+Zakv6b2HglUx05gd2AQoXmims4veAq6xYPD+wfpox+SEaOP0tGD", - "h48ejZ5MZ49G7Mls/8mUPfghYdOIWuFHCET9zR0z1on+bsS10HFqfmcxu6rwUWPIpzVTo5FkO0tWs/7T", - "x+s6pOLdQSJGkjN0g/vTDtjUJ9SyIS3ZqEN5aPe4oFUsQeidYiUUkLAFcy3LOH4+JAVVainL1JdQBrXa", - "1gkx+o+zX9ZmDYN6ABjgbIav1jtdaF0MPn2CboHo8IPeGIkODCCeVp8xmltXFX6pjvb2Zi5ckMu9bnEM", - "jFkkL2mZ2zBYCJkeDAcZT5jN4vDE6dXVYWf85XI5notqLMv5nv1G7c2LbHQ43h8zMV7oHIsJcp01Vpv7", - "0tu1sn9/vD8GBUkWTNCCg0XG/IR5SHAye7Tge1eHe0m7rNAcDSW+DsVxCj3kdLP+EMiYkAICox3s7zuo", - "MgHfU6ODYgT43nvrQUO83TIAvjkfHF4T6MJgdeZTURAFnaBlVozRM80M9VmnnSZe6r9B0B8QoHqMFyIt", - "JLdVv+e2nXpnwE7lZgP5KHj3IJRnz5lZ+oD9kov0zz6p/AQzx24M3PFmjhF4v5SVqHPMQT327TPhZRvY", - "+IXWhcUNIus49e3ylkbiX5ZSzMet03/JbcS7LEkuS0aevTp2zRvRWQNxb4osKUTMgQzlthNDikKqyElB", - "AnLkqIB3/lmmqy8GjVYhlQhYXNtKWVpfH0QeYfEQiUFkWPrm5vGoUZihu9Jfmhd3iIvEMDc40hkX7O7h", - "1F9pxsHhSkNsug4ytfDUem2v6vFdE+36IDcSFUxTGgWBwGtQtpF29VWx9uTW8POfAjExO63GyGby2gZ2", - "t8M4vciIqQlbShEvMXv7s458h8LFn4aNsVY0z5pjteXiTQjSPoi30Bj2isUFj66csPY0niYJU8o3jI1U", - "U4wMScJULtzYPfDpvymYeHpy7BLVskwubXsR1wV/z0qS9kAnpKDJpTnsc9F/3IrpqhhRV9+nn+yc0isW", - "LSl0M4QnOlWUaYZgNbSbXiF6t5DyQaTTUQsZIAJ9yaa0KJyRJDUq0qzKsrp/qbaVxoxcefdIybs6pKgn", - "tRUrDlmrEzS5EbDDFZlVIsGbCIXYN6C3QYgYZvdWjurHwQbn2/vosk0/7X10TthP60hSgxk2u2wbBZwb", - "2NnyDVaFC/JZa8XZOqp2UXG6Ob5Gi49MGDiT+ydsU6/fbpCZxvO2d6eYTktrJVlnjXzvsAtTI9PbfGlN", - "Ai7R2yCnz/JG2/+O+t265TRqi/cmf/ejqk+C2h1L6wqf/42h19iA+gzkrCsDtM0H5J2qE56d0E7TdITM", - "ZE0WHJJRXxyUTTHja0ahpYthHLHkETKlqq7eNC3lUjXSwa6P8fUed8dxV1+7h/ND8g22oLoRVt9oQtY9", - 
"5J/l1OYr51x30PMmNY41CwK3WGUkPOSdNkvMiGo2vDVoTq4A2g/uH9y8jHDmKapPh2OaziFrDmTKOm2u", - "+UI0aY5jz+dsRdLKVyezDYwSmiwc8vmh4D5ISTIjmpyLWxWP4AFxJTGblABxzHp2oGakLDt3BOs6QEJd", - "KPtgsfjGcD83cwiZvZSdS4Wq/RZXC/Tar3u/kmAJ667Xg3ia/o4Xwmd7GiqKfTgWRqD85c0ZZlfaxno2", - "faFOz9MLWc0X/32h/igXCtBqw3UC7Pf7NiOBKQ1KqCy5OXFde2d55Jo1uqD1m+WZThY/ZnJKG3UqIIXs", - "ZrlIX1f/LQSaYfzKnbnuei4dGm4PFatoR7geuQj6yEE2MSuvbLfSyOdqw/G9garB2B2nzkKaA6B7ltM6", - "v5wqNcIGZrhV96/mAUKvN2Ybv90QtextKxe1fTYbyzVrvWNDN2kbs42vTVoVNoQLiWtOIZ/V3BTXyNRS", - "xEe3QhFLhmsSMmhbVxNCey7jO0OtXtPyElcagmxYS+Ouq0lScs1KTjdgPIyXm9u206DIA5y0UCdcYQED", - "wxQAVRwltFWpoJCZOXHze9489C7JhUGLUqLtccH8uz7lfUqTy3kpK5GOz8UvEuajeGcn7VaFE+JVVQh7", - "Ml+xlFQFyEpC8xJc+1KkrixIThE90WvXAQ/Wz13JirAPBUv0EKs7MF6SSd1zalInsitbe9coaRnuiUIT", - "V5i1ZdsEYvJ31wsrLnNBpyFbzuiGCIhtxxUz4bULuzZJxZzp8W1rOI3WS/0sCaAaeFZsnBhWhoCKKnxm", - "kBlEGCAFtjkRfHh3SAEIAb4EjAH8dtytbo41g35cECgmUqIkBPh2eZoR3/Y+mv/+QnO21jRkK6RsZRhy", - "A94ZO027zkuvioHP2nKIzaXwAq+BKTSj8ZDYcD5Brn+ztTOWlYmei9riNNTgFoEWtW75l/xuVASAASrb", - "JtegUgFJ3RqI9VSeofjxuiD8iBFmn7aS1bbCal9foB+nN8XA/baNOPUcSVBAxzxj8nV9dMnncyOt3i7R", - "eieQI7KUQGZA1zeJAZ0BJ0UVYEi4SLIqReVIWW0a+nwZdUDOsdgwqty2VpIfxLBrF6TfEQ/IL9I32FCd", - "Lt/frZj+vmmw9JjVr399VYy4FdMgR92uy3RaCpLrSr7ezIQfiZQEOXx993Fv2uyYH7+Zb6HPaqO//m0e", - "yI1IXPVWYgpLVRj8/Q5jToe2PsaqYN8bmStoG+99lx6OW3qS3d2kScIKKI/FhC45s0YtICt2krtGVKCb", - "sFutrUdu7nwAgl3v99fBq5u76GuRC2wpaxDMqFZzqRGeQQ0quP13CRWQRoEJqJkMX5eWd3sANEklBNNa", - "HddvWTV3uF7qwAgZj2rePeeAE6dyO1j72rY3NPV9C0j5BzcpNo/6GubF6KCNRuT9CKSYDssV9fhmQBM4", - "qWsC/cFZpNuJzentcXUItiQONtc0WbqJfN4RVZ4xopXy4KCvHJdruumW4CLh8HsfR/uVieYaZPWSQL0F", - "C4ZmvMtGBK2zI9eh56mvXfXHRs5GCbce1GwmGEN0hjUzXwtNTxvDXQdJmwuymAqeK3/YLqtZ+QYeXvL/", - "g6Bxc5O7IDHooRvZ8xm89W3wZNiLz+eLy4oIY85UWEpNdSSfOyYWUrtuKABHsyxcdQMbtpH34juOI9Fy", - "QfVoKasstf7BUSp7ccrbnH5dUP2r+ehYP/9WBD7nkeyT87BXgjXrRGwQBvkCGQpbGLpMcGfTgURoHAUi", - "EVxVaRetgbVEh2BnyuTcRsH1ymNgMrIdV+pZ6uHQsAT1C4V3f6UkkcLlBGQrNwVXQWtt631w1eqxKyIK", - "nrLSPUapLwOLEFexA86ea4a3hwVw1zDtZg/ZG4r3aU4S80KFHeNcjAaxDTVvz/kU7QEai/F3fTChfbZt", - "1hm4w5Ff7z+5eWLpV0KzktF0ZYuJW4Hhwa363vH0IARNzCGQlUxUC6J1W7lJcE0Q5XmyIFJY8/6tsZuq", - "xW5aROoZtuildadUvP5qlWdcXProAuiWjBDA+DKNRMUCpTKiS5YF1jfsA4fUwjbIsjXeE5pl/oLXkXw1", - "/UCgtrMf7IIoUeFlgsU0OjfTktG1NCNs/rct5QhP9kapSKwB5bYE5SvQkmj/xdh6q6k9NujtIUGcDw9i", - "GNYSM+/YhoXWlXKnrgz096ybI4cwsF1jMeGnkKVW9uLXjNdubCPCP8WMM+qiFT3baA/oW8y5CEjsU4mr", - "qMkOvKu0ERD8Erq3BIbd++h6mH7a+wi/8H+scaiH7QxlyVxobUsG3Lo7LRRP7QqM7tWd/PDDzrxBuXjX", - "2NFXio/M6na/zax1s+LfbvzidVpYbmmIvFOXKCxjVrfajDZdbQiYwX1ZR7w9Rv5zI+MwZlSxRMWVzbQ+", - "B9v6PmUzVhLfydX12slsxub54GD/h/OBR6w6rg6UCvDv6aoUTqSvt6e8HIdhlb51bufAMRKPZkriGErm", - "TApGWKZgnLp+eWyZgC0AwAWjWFLAgvD/GeE0o2dUjJ6bfY7ewQCDCAyDRp0xGMqSz7mgGcxpxofWPVgg", - "PZNhQXXfYpjroF+VbRHMQ6ptlTxXA0sQyuENaEs15xiTvmlvb+zCRi/twgYbY5W2kWdkopkeKV0ymjcp", - "hNfUp1yY+z3cnBj+DOdQrb7k17ArOjG0a1I82P9h0+sWHRuIaEkOxvc+jo5Q2s+NOoBhuFOml8wiuwVn", - "EA3ktXYbDjLzfdVl2aE7XnR2uAzKzsNIFyK8xC51ev2tdTewvjkW8VzsqpyRKTMf+vmnq8a9Q4li0nuF", - "jog5s4mtYAjUpRGdfMvZFBs4EHAGm0/Rz3dIM1638RDu50yWCZ9mK5Jk0jZx+Ons7IQkUggMZHfNkSQU", - "mrSE11bbVI3zYoR9oIkmiubMSpJaukZqJJWVEfLwAwVNaPEtTDXE21TXGoycAJnKdNXLSsOcdjNFrV10", - "wRJKjmBd3Ptoe9d8Wm+AhnJtW4Vd+lY4d9NAaEvuRx0nWFJVzOQdtSw3mzKtMdtFvlhz8nu248f603c9", - "pL4VJHD7WYcL0BXK4UNPQFNbYoIPF1QRAY1QyIrpu4VOYQRCpwEXRmrnDLMScO8bHGC2Ekwr7MANOd6A", - "eBpaC2+BfGfmxbuDfJp90HtFRrnYsbLOWRs43wpeBXFRVGkyY0vbMShAMmzJvhX1Cj/x47kuRGuxarug", - "gKCp0K1i1Ze3QHZau33zcQHIAr+BwADs2OXzwcAMz2Yzlmgn1kIXXhyBKrJkWdbOjjPfMmorXSyqnAqF", - 
"MdAgnIIL+YrTbvWNupS1uSNQ2N7dKAxohItV36sJ4UJpRtu5ZEF58N6SLr6Q942xdJeO4aa6dhlVn9fR", - "aNBdl0JZX3YEVTvlG05jpzRnAtY2td3nMdJ6uoiEjscwyud6T9O5OYn5dtkkdUXmbRVxTed1YsddjsAO", - "S+5DiXK4DJXAYs2q0W7Zh6mb3aFt34yhIDW+PsYazBtCtteA9cshclBNO07Gg81HUNgL/eFrvXvdhu/N", - "vwDbK6oITLEEWxOoX547boSnzaZtAeyaBi2DabZbpb9OWKHj7mR22tJ3VKBXHurkbYMsDUQb2m1CmxKb", - "jk2buNlHyDbEuvkDU7dyzV715CvUjeTVeE024TJ8rf+exSvUghP/q1+A3RD/FikddN2vQ1nQHuriWqBJ", - "h/IuiyFRsrb3JTTLrKHvUsglhGG9e3f8/O5cQh/AIdhy1+uHkkgT9eK3LejGuOnC3cJt67tqfwErvlvr", - "prumtoKRTYZwnzpRt+EwiJWx7wJv76Pt7bCD6LWVSumHvfl03k69Z4s7nkfZWL67KfE5bWlp+wgea7z5", - "icxz33QYfJgJhNyCA8XWaK0NKEvfxoULMrEtxCagXKEHsPkShlzY/kVDw8QLwjWZ8VLpMXkqVmiRwdfC", - "ViHBMM5nCGS98j26rid3flWc+tKkYA3H3TYteOn7hm0jr5CUaWiT74/Y2XW3u/nbWJWszt9tpnXbR3dT", - "QkS0QdhdMDbdETtQLwJuZw1yGL0TUjqButfQ2ZCnvwk07DT16sHBroxOjp+rhgmh9ru6HuBEzv45cTSo", - "iG4ghdBQC154C9ivu+NnxlgxUkHX4E1crtlm+Ftiec2dbdOUA4JaGn2V1yUls1CoEzL25d1EwQ2U66ti", - "xI1x0k3I4HKM26d4bcuU7+v8Ve1S16RNRoCTpbOsNfrhRtC85cbA3nmsdB3/18hv+KKXt2/u/N8G/fzW", - "WZ8kcau/VdOMgwRL+8X1jjvl7sSIueU3zCsdRaEjo9VHYlhe/aWKIJXR90ZyNlsjevG5eDObbeWCuXuw", - "tB0ugcQ2elv+Ddpltkp8BjovVaRuz70W4M9olmG0orPOaEky64ZzZTrBfKcXbHWvZGQOpVTs8OPeUxEb", - "DkXc6NW2U/Rf6pxpmlJNv4KxNWxW/4e40luj4dNKL5jQEBXv+swZbHChlH3Wgs/GSQxE1hJmsDm4MuBU", - "vD7wKMZqmwgbFYyDUxt8beSAlTrtxgdx9AqkQpL+L+42Vu2OIS7Dy3f5LzFrQqx6gNCLCiN8M+0nYZ3D", - "Sgc3bfPxE8W0ltp/oTye7iyh/oEpj6Xq9tycPRnCEhJvXFCEJoZsZCzF2oSYOGUpyqgZE+XQBXyrXNQJ", - "O5bKsHKUyYRmQOBopr40Vbtijd1UMfcSBAet4bNWHrdx4zdXH9Ya3nvDuqHcWtCupI9c/SJdPVCflumL", - "ZAV2jwf7h1+wdR+iWC9inrDSdU55zgRH0mnz9+OmcwyhsyyPJppfoSWWgXvU1YjKMrlEX4UFi916yecL", - "TYRc2gC+w9tlMO4iUQE5aejAM1I4rA4zyyBjfS6hJbvNzMALt+Olte5B6scPoLHpNgFOOYWzjDe1iUbQ", - "9V8XMyTa376FYFS7k77raGUjLnCJLjDwWlYNO1Y3+jR2S+ocD9Vs7m8xyZWlVNLmc/mx69Jqt20w+Uzm", - "1DDqqssh0auCJxB7aLsNgcBclHJeMqWG0I7INWiQJZlRnlUl28hhHF9RTKQNR50Btxsdqkezkm2+KXs5", - "XY34qKz6w0pf05U1pVTim0hKeU1Xf2GseIse529MPcPAbyvG1NnLgcQcuN4DBlVWguyRS8YK54qvA8DJ", - "m8LVPoJEOsqFIpSgqz2USb1TJuZ/70HkjkQPyl6wstaauKqj0tejtqx0UelRUcq0StYJ+oZYvoGXT9y7", - "d4I5QM2qvfcFm++aTTy03xZi/rUSkQ+2TEQG6c+m2Lq2FQ/u37/5i/aKible+OI9fwo7n6U8xX7XhspS", - "YkEwsp9gXrld6eHNr/SEriDfFNqu0dL2q3pw/+FtuBFUVRSyNAf1mqWckrNVYT1mgGIEMcoJk1OfLl13", - "MQ2jvx4cPLmdDnmufgNySiAdUmKHpJm52LZQnHVL60Uptc6YLSf3h5I8ME/bADqXSpOSJZi97kvfwX5R", - "HgiytTkAB/smmY9rRwgTCmvXYQ4FSO/2lM2X9xRJ+ZwpKH7bPmPyzGfPQ5zYyS8/Apx/PnnxI7GoZAYt", - "MipEPE5rncCjF1U+FZRnaq8o2RVnS0eWeIkF/xy1J0j9nRgEEC2vHDWvymxwNNgbBEaoNrE6bgZBddpa", - "OUzx7ACSVLqFMH6WU2cmBRnt7xUruUG/ul3nsNVOYdyoAqkigz49OW72NwxNZDLPK4HiJhTY6LT0bztw", - "IxNYbHjt10Sg1X9vd2FsxmS2Ye5KKTO3os5k4HSMlHrB9Hk/C/CJOvffQtD3XHwvp76iWTiHTdf/9Nun", - "/xMAAP//yoC45FYOAQA=", + "iPmI8ydnT8R+2PO0f6Dmj3ZgLQAESTAvsiWr3NMP1RaTxGVhYd0vHweJzAspmNBqcPRxoJIFyyn886lS", + "fC5YekbVpfk7ZSopeaG5FIOjxq+EK0KJNv+iinBt/i5ZwvgVS8l0RfSCkV9lecnK8WA4KEpZsFJzBrMk", + "Ms+pSOHfXLMc/vF/lWw2OBr8y169uD27sr1n+MHg03CgVwUbHA1oWdKV+fu9nJqv7WOlSy7m9vlFUXJZ", + "cr0KXuBCszkr3Rv4NPK5oHn8h/VjKk11tXE7Bn6n+KbZEVWX/QupKp6aH2ayzKkeHOGDYfvFT8NByf5e", + "8ZKlg6O/uZcMcOxe/NqCLbSgFIAkXNWwPq/f/Lxy+p4l2izw6RXlGZ1m7Gc5PWVam+V0MOeUi3nGiMLf", + "iZwRSn6WU2JGUxEEWUie4D+b4/y6YILM+RUTQ5LxnGvAsyua8dT8t2KKaGmeKUbsIGPyRmQrUimzRrLk", + "ekEQaDC5mdujYAf4bWRL2YxWme6u62zBiP0R10HUQi6FXQypFCvJ0qw9ZZqVORcw/4IrB5IxDh+MGZ/C", + "P9nTUmaaF3YiLuqJDD6WM5owGJSlXJut44h2/TOaKTbsAlcvWGkWTbNMLon5tL1QQmfavLNg5L2ckgVV", + "ZMqYIKqa5lxrlo7Jr7LKUsLzIluRlGUMP8sywj5whQNSdanITJY49Hs5HRIqUkNAZF7wzLzD9fhc1Ig+", + "lTJjVMCOrmjWhc/JSi+kIOxDUTKluATgTxkxb1dUs9TASJYpbtCdA4OdNI/Or8ufzbCLGmbYYzGT3YW8", + 
"ZpqOUqqpHYiRe+ble8HSuhjfOXp7UINB+5Se13+Ze7RcUB2fxFDkVJr1k2MgzzRT0mBIaih2kdGELWQG", + "8GAftAGKQSVEUzNgTkVFM8JFUWky48ycqSILnqZMkO+mLKGVQvCOpBjh+df4oOV8nrGUSOG4gcHN7xtn", + "WkPTzPyKi8s/V1q3IBBF1RfCoLSqN27mwSXcs1OTKYxFpmxBr7gsu8dKnrZeXfIsMyjjr9SfMyZSVt5T", + "OLYFq79eBMhRvdMhrGdi1jMJDwLGbWKcXcM9hTg3Jq8B2tkquHQ1veSwU0GEJJkUc1aSQirFpxnDe8OF", + "0oymQFdFeGK4onsB8O456mcAYfY5PhdPzbWheZHBIdnZiJajKRuVAAGWkllJc0ZKKuZsSJYLnizMwbqb", + "Qystc6p5AnuYSUM/cBiVMOG/m1aaJNQcCpFXrCwRmXK3d0silWFj8dvf4nMtvGmiSYxbXbJV98Yep0xo", + "PuOs9FfWQn5I8kpps9xK8L9XyD8srX1v+VeUPGR0yiJE6pV5DJOkXBUZXXX4ADmeESE1UQVLzJLsEV6y", + "lTkXuL1akjkTrKSaEUpKRpWE60Bg0jFKKbKg5TzCQZ+KFWEfdEkJLedVbuQSx6WmxWpsPlTjU5mzE6RP", + "q+++J+ZQ/dRJyczEsGhLw1YBCGpQ1+e0A+Phec5STjXLVqRkZihCAdIpm3HBzQdDg+YwvZlyCEciK21X", + "REvNkyqjpYdoDxdR1dQJXetktYh4c2q/9ALCziOc2c+vOFzia4zwV/Mlz4zY1r4TBsXtyraU105rULTE", + "tmo6Mr8gxBHlPaI+q8qSCZ2tiDQCFnXjAnoHIpYak8lPT09/evH84uXxqxcXJ0/Pfpqg+pDykiValitS", + "UL0g/0om54O9f4H/nQ8mhBaFoT6WFDBR5WZ/M56xC/O+ue68dP+Ex1bUXVC1YOlF/eZvkSvady5dyctC", + "INh9QBdQrqSKHD93Vwa2HfCPMflFEsGUEUKULqtEVyVT5DuQK9WQpDwxU9GSM/U9oSUjqioKWer21u3i", + "h0blODwwm84k1YMh4PW2mwxQpyFpOGQcxmRuJx00adXEfjM5IjRb0hWylDGZ1OxycoToAV9byvnuGDUA", + "AKiVG0vyXcYvDUGzQCM0TUdSfD8mkyWbxoZZsmnNjAHrcironBmihqzGEFLgKXYWx1ffy+mYTFCUmRwR", + "wa5YCUP/qY3LljSalaJoal4E4IDaa2YXNGvSGndaNUBxpgEQHQuXwXCwZNONZxbHSKc61XiCQhZXRo6g", + "c1ZauUADRaS5kT3UFlLnZyscMUlZ04hG+BNVi5CsACc1zK9FZxSxHBmYG0kWKEjAXs3IKFzh4zE5M48d", + "n5SixjCvETChqtKwLys2e72lOam5hFUBmgLVrEdq9Ux+e/OBm2Br00dMve5opi0OYKkgLi+Y057FJq5g", + "cC4iObziSjsyCHS9H/u6mOYsC9fb+FmD3fbsup4itkFLVU6oXjxbsOTyLVNWk2+ZHoxW0918R+taOXlD", + "LwzCfSek/t4yg+gtAKE8fslQXgeMXFKF5g2DeTMuUpzF8ZHowOoCp41aS1CuWjC/UMuvZGmI4zgqGQHH", + "jK4UBvELnclKpNE1KVmVyUaxJjiSU/ygfaQINLsiP2y456E9sA1H/pKLtD7xrfCvB2EiVqHuPo4+NqUV", + "qpRMONVI981uLpi4uqLlwCJGv5TiTJ+d87A/kJIZPRPkeEoU2tmswQ7o3QeWVJptMsn22zs9+wh+djCO", + "053gk9ixvChLWXb386NRaXhCmPmZlEwVUigWMx6nEVT/6ezshKCFk5g3vI7gByLHhl8nWZWiKQgvxSqT", + "NCVKIlZ7AOJqG7DNMrs0LtAWy6XRnZ+ZyR7uH3qu4+0nKdV0SlGfnlZqZbgTI7BQtyjLvKTQlAtCyb23", + "TJer0dOZZuU9fHXBKJhozPK4SHlCNVPWCIdauOY52hTMUTDlFeyS6ZKzdExegjbuZB87IFcgHRk0oUYC", + "dwLDPWX5nnk3yTgTYBpKJVEyZ0b5nTdUTiOzsQ94eTjNyJQml3I2Q47pjdZOXu1azHOmFJ3HcK+FXHDu", + "9ftRzLpiQr+kZX66lRm+fvMtM3zMD/GznL4rDN+PakSKaW/AHhKDHWDLIKcyuWT6+M3e6387O0M0QBEX", + "hRNlDqIkgi3NQzUkk6JkV1xW6gLxduLtT+wDoikCsS2yZUyzC3vWLL2gEa5yPLM6c8aAYxlq7b+wwpOz", + "8vCcKU3zghiqjghlcM0hk/lUaVmiPPUyozkTifSMvnnMBmYjM2KUUUWI2Lt3x8+dFPgzOCs2+Dlq0ao5", + "0C80D7XU2IctcG/CDiNveR9N6PXxGtPD/RhCl2xWMrW4ABt35Gj8HfYiqL1lagF2c/s9EBy7m3sKLea1", + "fAtYhxqPMhfWAF4NDdKB3JpSUHUYTRZANK54WtEMvXVLmMUbkLSUhgis3CDWal6UNAFrXq/5ZHcg9vu4", + "YOoIepx55JQzklGl7Sq3xrklVRd4Y9IeZxJeUYPl741Gb1+u74i57VqSiS4rNrEKiv2lttCB0giWVp7e", + "q23liumhpczmJrnbnRd6tZV1Ey6AA07gwLNuucBx10S6Xtr4iir91hp0+yicRVBZ1ghqIF8bgnlO5zV/", + "ddCzy4xL/lu5MIcDvajyqaA82wKtwq0cmxWBMyamE+BcVF3af/lJ+sHEZ+zZKomJ1J4AZnzGRol5ibAr", + "MDhY/4LRHoErqkWFFodULsXQCCcl/FkVQ8J0EiPu25gT/eJgqagZtXbda/vDT6i6fCXnfecPzv1Mzkmy", + "qMSlZXBaEkqAr2lZ8GTP8TpSSpmTlCFNS/E9K0MZkA/hyZXkqRknBRmkRXBicMhkxGLwzKzH0XhtVzkm", + "r+nKS1B5lWlegFgimIJ32QcdVVEcQqxlSRAGMdzR916jmtnG2mPYRso4AzBuEDMAHB05A6jBdQUNQ/+v", + "moEO2/Py7QA33IU4bOb7Gif9XMbfjM64zjc3xc9i7MFTOKt8RdiFP8leXESt8Iz2EgV8gZzR+QZU5Nqj", + "YYy+oSVwHST9UrZl32AD3JJ9b2a5ffaxAEzbXFp8c+O1XSJY10AsoeLCSA+01OvsO1zZKUH5o5WWI/tV", + "3MRj4RRVHpyMifZ2pmuN1i7XQNsOMP5i0j8ufxuaYe7NhWJMxNyrSjt9mKtwveZ9ZwMJjJTbrX0z6Vm6", + "1X8u8UEw7Ep+4l9dIF7t8vEz+OIt6n43K5pfsVJZv8MWZK6furlxho27ErvDTcuAM9ABdQSjYgr2xCWF", + 
"+AtDN1XGWAEmOnMlqX2vEpdCLgWuAUS6qOGuY10wc2KUBQRd2oXgtJ/a917taMHoRkbg4ygcrAz71/oE", + "goXNOTgDD8cHo8ePRvM0PXyQPjz8wZ3B0eD/lVXp7tAAQndK7Q9zcDg+HNGsWND94GzCx+S7ztjfd/cP", + "q9jJsdJYxse1+NbEZAsGr9F4D1rOqNWyF1VOhZEyVZXDZyhjlSxjVDEyrXiWuiBYcCoZ0kAVmYSrmqCK", + "IIFk159AVJY1TOLXkznXE2K/AnNj1P/UOvD6HjRA4a+OgWgMG37GAFqaZW9mg6O/rUe4U+ctM199Gn5c", + "IzOu9Z84rZK4L4gUXp+MyusYdhKzg5sfwLnnKNLWJOif3pZ2DSPOzgxh/BnCrTv0DWLtp98Qj/+cyeQy", + "40r3Oy+RUVvjGy0ZGMEh2pWlJGElqJGgTaGLUxoxzVp6EoecW/mPwvW8ELpcxVxH3Zc6Dsn14eG4n211", + "KPt2DxFtnUA9dBgN3kNCntvrEQ+JNU8JncpKY7yq0z+tFOkkTGtO4g3xssUXFzSn4iJZsORSVnq9z/MU", + "Xibu5SDcyC2gZLm8YimhmRRzDA538SHbBB8219IDmrilqrPwF0JW80XoXQJ2QQMnTMFZwoiWc9xiymcz", + "VoLpGE4QbLfma0LJQoLJLgOhhbx7+8q5dCK2vDE5k8DcIDQJI3TevhqaRwnVTFDNyPng45Qq9mnvoxRe", + "6lXVbMY/MPXpfBDTXcwHTbQssygVssM0XLMbYvFbRwFTBSP1HMVrqpTD1FOWsSQe+XLiHZgYKm5+mzJL", + "0d/LqXK2+hqFDboEQhToKJZmXeT0w+BocLB/cDjafzTav392//Do/oOj+w//df/gaH+/K/x0v+5EcWYZ", + "LgSd8axkIck1C5vJErz8jq/WvKl1+Xagz1GQMk1Tqimw/zSFCE2anUTMmg3G29hMOeW6pOWK5HYwh9Bj", + "8tpsw1DXjH0IY+esjzOXZhcQf1IpLuZkQsfTcTIxZL2+QzaAtnVGRSlhH0eD06LkmpGXJZ8vtGE2ipVj", + "loMheqBW05KJ/3tqQzBkOXdvWHn4FF4gp/p//68rlg164HRijfXPvE7WPPPQw5TTDzw32sn9/f3hIOcC", + "/4q4m1rXwA/Sg/+nQfRR/LB0WbGeb/s1p4SKxBwDpgoVaK8ZDmaU48OCVgr+8feKVfgafDHyctQA98Eq", + "hqpXZWA98jSpGc1d45FfVh9U0VMdD2bB34K0ABs9gKFkX0RciutkQ7esvlPSsuxlE/ZH4BM+itIF5HuR", + "0lyPSkH4IrI48xbyA5aSGc+YQqYrWMKUouUqRsBbDC5qLr/3zHHX4+f3gggIEN1czEGbEYeZP2PylBtN", + "SOBK3Scxpu3sUFZIcMx7Vsrcb71PVYoB+oyqS3Va5TktV7GctbzIwMFHMis9Yt6Sg/qYPEO/A0aHWGu7", + "izs1j9whgSPW/D6OmEStm3groRLszHbBW8TD9TJC9W8Vwz2HTIvnRut+OBzkAVHvI5OfhgPIprqYriDj", + "0LIrCEeujQ/WEsVFg2B4OmBJxG9dFohr+VhTv/vx6JHP5j4veaaNQl5zn6HjJa+O//KiZiXRJAc5mynW", + "XGg0KqAG1ccd8g3VlvS6b0dhSOsuuwpOrX0r3jJdlQKNwyCBgNBMHfXkVtyALeyiK7XDBAKk7kfgviBO", + "QP1t7xSaMq55lyLe2IBDYjx6OQJDYVUMhvWTRaVTuYyzNWsQeCbFjM+rkjoptblJrl7yUum3ldjgGeAK", + "pHuOIr8hoDPzYR04ZucjZSWCGBOfsAbiFSUztiQzakixGhIbqy+kGEFWp9FCknC9wGSMAOqUah9aPWUQ", + "m5IX2pB085ZesJUVqcU9TaasN+gE+Agm/6Vb6X6wCl1SoWasJE9PjiHxxIUWj3tCW4DFvpIJjesHzz1L", + "An5nuJm5aTCX/Xi80cDRnqW9u2F4wDHUs6f2V1pyF/7bRpALvZRLGuFtbwQbLemKXNmPMeAdsj6l0hA/", + "Ks0lt/mFkJLCIUGwZJA5mkMAkmG8k49GDv40sQomLzGj0YkkC0jiUc7j5UoH+CBn5ysbk7OljKwJzKN2", + "0rSTzOGlH2aXX2RUG21m5G02mNML4oIdZLryi+5DNPhos4nEmlZrQLsvtzivp1XKmWgGC1vrlFUw1Dri", + "4IZR61jfOrLXRp8OY3xNi8LAGE7ZHQoxW4ZEPe3T/zim8Ec2vPoLY8XbSohoUYA6FG4ZXFzrtMvpilwy", + "VhiiJJxQGBeh8s483QOtFYEeqb7h+YoRl1bgHm3qC7VJ2GucS4vXxz60DyTyBSOTpXe5sQmxviVMT6mz", + "hPH6mEkA3nNp/ivYB90IQkPH9pBMmkCYkNfvTs+MhjyBjMvJVvFmLUB6qPXBKIblPl7+2CU8tPRcm1yw", + "/mK1wuEjw996/sZXS7MATYilmzmKzZLYLjniLZsbtl2y1HreO5CkaVoypXYsj2Lpb/ymyZle0pKtuYY7", + "e7pdCtKFN1Gr3WTszyqwYhmAA1VYZMUBYjhIMFH2wsYneSj0rD52WqcsqUquVz53okUBtw2iXxc9f8p0", + "VTxViitNhUbhM5Z2Egp5cmpkO6eDg9xlRiF+mC61toa0F5CXQrfIfu5PxPlaglp3C1F4gjj3rNdTcYrB", + "QtYYY10PvCSnPz09ePgIr72q8iFR/B+QTTxdQZC3EchsjQSS2UW5hJau1aRl9ITZwM2L5GdQ59WP5xKF", + "0MHR4PDhdP/Bk/vJwePp/uHhYXp/Nn3wcJbsP/7hCb1/kND9R9P76aMH++nBw0dPHv+wP/1h/3HKHu4/", + "SB/vHzxh+2Yg/g82OLr/4OAB+IlxtkzO51zMw6keHU4fHySPDqdPHhw8mKX3D6dPDh/vz6aP9vcfPdn/", + "YT85pPcfPr7/OJkd0vTBg4NHhw+n9394nDyiPzx5uP/4ST3VweNPXUOCg8hJlNqap4H06BQhy6/DUgdu", + "HFdMxftWrF+lbeICGk6VV4rQ5xuGH5FjQbD+ivXVK+dXsWNhDJMLbTM/nPvtkOPn5wM0NjmV2wcM+Awg", + "iqsAXW1i7TgjlVXzPSjKMTLUaw8LW4yOn096slwtymypTePaX/KMnRYs2ahY4+DD5jFtvk0194/Zdc1v", + "aKVrnUqs0tQ10MO6pduIAYqzBX3tm9MLKqzXsxk5QFVjUHDL2Oxk6sqN1NeYnAXSxecj3xYBJVseiT/q", + "LoGzKhh1UhdFymtplV10QIfjkmLLkS/r8dCUUY/oPbHRCkM0ssImqQ3HjI4BdOZj19zGmjR6sNFRY1Zj", + 
"xxv2C7tNAP/K9aJ2wmwFaqeEJ85bGQX90IqpQ5KywkbpAx1xPpFv/Gy2lT2D4+jx73ROdbguDq8zXmAJ", + "qIMMqyKTNEV9DIOHomYBHOwtrgbK+rgozusKHiBoNGDXK0vckNBwKwLCLbC3/sNvnhcmBce5Gp4WiNmU", + "lMFnjqUMw6O0tgnZvO6svDJyx0uesSACChDNcBL7mnnmEkNquT5MyL4tHKgvpr8PN4MW4UT+un1hXAnI", + "9+diDVbTbBKOtpcYz39XnvulCOFaoley9HST5tZmJQo+qzkWTY1QbHW6IEKPWqsqOa/29w8eeXuwlc4q", + "ZTC/Y2jW0g4YmQuFKX8PrAB1TzXdHdEMqsDCu4Ml1huGPw0HWQCgHW0tt+AqaZ16VmvIfusNQ0hzTVHs", + "sFkyp9V0TWWiUybAiu+zEDFETkHI9Z4Kvp1gcqatFKelrRDlqGTwpvnxvZz6rETyzI2Jha3mTIe/o+oF", + "pl6qLn3ytPs7k3OFbi3BmK3DUWQ84TpbuWmnDKPIwbFifloN/UaMFoH5N+5dM4YUGPvwHVQA1M2pZy5j", + "972cfg+827xuXrmnIJ8TjNaa52x8LpyPT0iNppHpCtI7QSuxfIRqUpRSy0RmrlKShxb6ZhCYvtwzZDZN", + "SwmZT2bkZkxG83LIYiOVieDCG2cr37b4XmwQV03IWf76w6ix3IWWzWPYI5WoHxjKMN45SVQW62r0rd96", + "ICb6ZUDMVP1XVELsA0WEOFBNLrlIbU7E1jDwkWFZ9rOcQpB2lv3qnVq2MANVl5mc449hcGz4+hmdx91f", + "jQyEaGG02qIVFPfSssbGpgSzTazL54cE2h8Of///yH/9++//8ft//v4/fv+P//r33//n7//5+/8f5vJD", + "VYkw7gNmAa3naLCHgbt7arb3Xk4VmnHuHxyO4SUwo1Ti8gLlmsMAJ09++dGgaKEGR0asglquRtq5P7q/", + "j/USLyBRjS2Vr9EJscFYQ5F90EzYTJ5xYV1DZiUXstK+fFFjfTiFX+FefOe22GNnvFJKvXY8W8ETSwde", + "1JxwkHFRfQiuH3itR/aobOBzN+I2RIINsSI+4HXbKvEb6oWEZ70pRsa9Wtu+t4qsqcMJe6DWCQ9AWiPm", + "RK2UZnkd8G2/bVXagzDDRM4FV6wrXtmX65hpSjK5ZOUooYp5s6Wdwi3Khpic44GeD4bkfLDkIpVLhX+k", + "tFxygf+WBRNTlZo/mE7G5NRPJfOCau4rv/8o7ykyKSsBfPDHN29OJ38iZSXIBPyrMiMpVxri/SbEclnq", + "w/9c0WW/SDU+F0+Vkz9pRsyOho19kHMX83M+cMZBW8AebTMuHBuKKBYl5ENQRc4HTWnTjXc+qGGfS2Xk", + "CRBrLhnRTOm9lE2ruS1RqQijikMxSCuNuLhQ9F7zhKQygSLAkOiSZY2dRcsm9CWimAcX25d6HJJEFjxU", + "MCftgn9jM9rE1xjuFos8s3/VyRyGeLOUcOsfx0IsqWRK3NMkpzrB9A6a6IpmfqSOYf4MaxuD6KjaNSQB", + "j2SWBoF1zZL47TqhviS6K5FyLo4bC+SKyBz51LC2lUHZsFVBlWrVwu6k80SBbtPBNZ2jKGdvnysHV0ff", + "Bmn0x899aI6taWN5N6qPVBNfcHPKiCExaZXh9TdLQaMhhCdgdJcsg40Z7HLZVwYN3Rd+Jc30t62kKOt+", + "7dbDiRC5mJwVb3Ny5uqLYGMTiG9TToN25npX3W1I+JiNXcKFD5MJwqTGu5XW+JLNUW4iaRJDdi+mqwsX", + "rbRL8LINNoisdcsUth0qhkAajZaVwdMN+YoYnSZWvmSA+b+0Tp6xcUe7lQv4+r1jbipX05GeXU582/zO", + "dkGTWNuasDmNv0wb+tTYskcbExQhSU7aHjVBKaPPqmwV904YQgMG9lZRo2HD4t7FlKB20caZqzKLT/zu", + "7aswTbmenXCtWDbznky5FJmk6TYRSHXpI3+KmPMH++87lc/ILPKJBErO9KidcBTTH+sJ71LOUHirr5E0", + "FKaFdHXiSmnCutmlNbpjvrNsFFevyw6C+NvF/h3LNt0lYnjddPQtKZKbqe+k1lVew998iUcIvHeinLRU", + "GlUxxDxr5gZ7I1AsODEo44qiHja6MZK9Pz2w3ckCA4b/RKQ1kbRe4HMBlQq+A/lGuojriaO3toqYkJqw", + "ktrIVl/OoS21m2V9v6nMWDdGPePC9gWx0bcQSXFPkcQ3n8AAcx6mbwO5Jm+uWLksuWYoy3NZKShoJIKq", + "Ey7PNCo+xIrQvZJzW1zO0wCsc+ekYtezwiwaTgUmZLTMeE8Bb90ggTtQiShy1dGcUX2gZBCWkjDQCUF5", + "5wKj8nGciLN/XSDo51GBNZfMTRq7RPUet6taYoNGfd5cJ1GiuAj22JIMToj9rVOpaq1DZjuDSv9Ynx/Y", + "qmms/88ZRUrh+H5dOQw6suQsnyKebiXSN6q1dReA2tU2A6jL7UhucFQN11JQ/SYaU/vpt2Ekhb7LDh21", + "rdHs1Tb1RLqXZlflqI2j6z3EbvT+24Hx3YHHoLZ4W1u0fTLytcsiVlTFkpIBp5QjIfVIsywbUbGSgoWR", + "zEeDw/FBH+yP/uYCZo3kNssLNrftekZ1v5bBcJBzlUQyQa8Zam4X/vHL36y2fIYzNR2dsSksMvcf2Smf", + "izftw2oUALSWeXuAT0+Oof9KcBIXdcUttaTzOStHFb+hg2mVJuwmOPTX6uqs9uaPyRGS+Ml0VrTmlDLG", + "ilNr+4r4ps3P3jbmwhNQjXSZbqcGZuCiZSLFNEwv37g6Uj5tPKWrpp7mxzYEGxSlMXlaFBlntmYj5slL", + "8yEHu9UkpSt1IWcXS8YuJxDuB+80n5uXXW3qyApBJhTk4MFoIauS/PTT0evXdRYxNj6q0TYceXA0yCXR", + "FYE4CnATphcgdR8N7v9wtL+PSStW6bMpzYBX7q39J9E6Kc1JujGRNGEjxQpaYrTuUo4yBq2mXL0cC3Uo", + "0kxXyBcZu+wBM/nufJBL9Djoyjkbvh+TF2DtzBkVipwP2BUrV2Y8VxWn2xHJ7z8QnQCgPZlHDjQf44XY", + "PaA2D9fmsX7sYROajXGDFa+5F5pq1qdT24TyMkyv2z7NJ6oRB4Nttai0rwAjXdLLa1dg3GKhG5bXtHz4", + "kpJDu66gDCW0HzFHypR9Rc5mRhkB40C77mWNQP0FPiPZ/VipDslWrXjaJMc6JBiK6tpy0hHbgLrI6D9W", + "68OOmvmT1j+B2lzYBhLIVe1hQWml1gCtwqvIjAuuFn19Q4df8DyHfn9rTrbPGvNnqniyRvAcf0YJ4OUu", + 
"JYB3MaJ/lWq7XypD8IvVwt2mgqivwNPSrEqfU3sNO9P2JW5rfSym+IUKC3mKzkoqvCkoW9k4ypWTNuic", + "cB047qEqC9g2xt41aM3EhREY5KwuwW/UT6K4+ZsKBsaXrpTQ0cga9RnN0KkkP568Ixi44a08L1789cWL", + "cV2T9seTdyN4FhESmj0Ody6lqel8TJ7ZnsXWm9kqcURttX003NuUCwpu9pKKVOYEBvQmIqX4XDhK9YVs", + "Jxt0izM635L019TeI4Hq2AnsDgwiNE9U0/kFT0G3eHB4/yB99EMyYvRROnrw8NGj0ZPp7NGIPZntP5my", + "Bz8kbBpRK/wIgai/uXPIOtHfjbgWOk7N7yxmVxU+agz5tGZqNJJsZ8lq1n/6eF2HVLxLSsRIcoZucH/a", + "AZv6hFo2pCUbdSgP7R4XtIolCL1TrIQCErZgrmUZx8+HpKBKLWWZ+hLKoFbbOiFG/3H2y9qsYVAPAAOc", + "zfDVeqcLrYvBp0/QeBEdftAjJNGBAcTT6jNGc+uqwi/V0d7ezIULcrnXLY6BMYvkJS1zGwYLIdOD4SDj", + "CbNZHJ44vbo67Iy/XC7Hc1GNZTnfs9+ovXmRjQ7H+2MmxgudYzFBrrPGanNfertW9u+P98egIMmCCVpw", + "sMiYR5iHBCezRwu+d3W4l7TLCs3RUOLrUByn0I5PN+sPgYwJKSAw2sH+voMqE/A9NTooRoDvvbceNMTb", + "LQPgm/PB4TWBLgxWZz4VBVHQCVpmxRg908xQn3U6k+Kl/hsE/QEBqsd4IdJCclv1e26773cG7FRuNpCP", + "gncPQnn2nJmlD9gvuUj/7JPKTzBz7MbAHe+LGYH3S1mJOscc1GPfiRRetoGNX2hdWNwgso5T33lwaST+", + "ZSnFfNw6/ZfcRrzLkuSyZOTZq2PXBxOdNRD3psiSQsQcyFBuOzGkKKSKnBQkIEeOCnjnn2W6+mLQaBVS", + "iYDFdQCVpfX1QeQRFg+RGESGpW9uHo8ahRm6K/2leXGHuEgMc4MjnXHB7h5O/ZVmHByuNMSm6yBTC0+t", + "1/aqHt81Pa8PciNRwTSlURAIvAZlG2lXXxVrT24NP/8pEBOz02qMbCavbWB3O4zTi4yYmrClFPESs7c/", + "68h3KFz8adgYa0XzrDlWWy7ehCDtg3gLPXavWFzw6MoJa0/jaZIwpXzv3Ug1xciQJEzlwo3dA5/+m4KJ", + "pyfHLlEty+TStheBSHNBsz0rSdoDnZCCJpfmsM9F/3ErpqtiRF19n36yc0qvWLSk0M0QnuhUUaYZgtXQ", + "bnqF6N1CygeRjk8tZIAI9CWb0qJwRpLUqEizKsvqPq7aVhozcuXdIyXv6pCintRWrDhkrU7Q5EbADldk", + "VokEbyIUYt+A3gYhYpjdWzmqHwcbnG/vo8s2/bT30TlhP60jSQ1m2GxYbhRwbmBnyzdYFS7IZ60VZ+uo", + "2kXF6eb4Gi0+MmHgTO6fsE29frtBZhrP296dYjotrZVknTXyvcMuTI1Mb/OlNQm4RG+DnD7LG23/O+p3", + "65bTqC3em/zdj6o+CWp3LK0rfP43hl5jA+ozkLOuDNA2H5B3qk54dkI7TdMRMpM1WXBIRn1xUDbFjK8Z", + "hZYuhnHEkkfIlKq6etO0lEvVSAe7PsbXe9wdx1197R7OD8k32ILqRlh9owlZ95B/llObr5xz3UHPm9Q4", + "1iwI3GKVkfCQd9osMSOq2fDWoEm7Amg/uH9w8zLCmaeoPh2OaTqHrDmQKeu0ueYL0aQ5jr2vsxVJK1+d", + "zDYwSmiycMjnh4L7ICXJjGhyLm5VPIIfiCuJ2aQEiGPWswM1I2XZuSNY1wES6kLZB4vFN4b7uZlDyOyl", + "7FwqVO23uFqg137d+5UES1h3vR7E0/R3vBA+29NQUezDsTAC5S9vzjC70jbWs+kLdXqeXshqvvjvC/VH", + "uVCAVhuuE2C/37cZCUxpUEJlyc2J69o7yyPXrNEFrd8sz3Sy+DGTU9qoUwEpZDfLReI947YSaIbxK3fm", + "uuu5dGi4PVSsoh3heuQi6CMH2cSsvLLdSiOfqw3H9waqBmN3nDoLaQ6A7llO6/xyqtQIG5jhVt2/mgcI", + "vd6Ybfx2Q9Syt61c1PbZbCzXrPWODd2kbcw2vjZpVdgQLiSuOYV8VnNTXCNTSxEf3QpFLBmuScigbV1N", + "CO25jO8MtXpNy0tcaQiyYS2Nu64mSck1KzndgPEwXm5u206DIg9w0kKdcIUFDAxTAFRxlNBWpYJCZubE", + "zfO8eehdkguDFqVE2+OC+Xd9yvuUJpfzUlYiHZ+LXyTMR/HOTtqtCifEq6oQ9mS+YimpCpCVhOYluPal", + "SF1ZkJwieqLXrgMerJ+7khVhHwqW6CFWd2C8JJO659SkTmRXtvauUdIy3BOFJq4wa8u2CcTk764XVlzm", + "gk5DtpzRDREQ244rZsJrF3Ztkoo50+Pb1nAarZf6WRJANfCs2DgxrAwBFVX4zCAziDBACmxzIvjw7pAC", + "EAJ8CRgD+O24W90cawb9uCBQTKRESQjw7fI0I77tfTT//YXmbK1pyFZI2cow5Aa8M3aadp2XXhUDf2vL", + "ITaXwgu8BqbQjMZDYsP5BLn+zdbOWFYmei5qi9NQg1sEWtS65V/yu1ERAAaobJtcg0oFJHVrINZTeYbi", + "x+uC8CNGmH3aSlbbCqt9fYF+nN4UA/fbNuLUcyRBAR3zjMnX9dEln8+NtHq7ROudQI7IUgKZAV3fJAZ0", + "BpwUVYAh4SLJqhSVI2W1aejzZdQBOcdiw6hy21pJfhDDrl2Qfkc8IL9I32BDdbp8f7di+vumwdJjVr/+", + "9VUx4lZMgxx1uy7TaSlIriv5ejMTfiRSEuTw9d3HvWmzY378Zr6FPquN/vq3eSA3InHVW4kpLFVh8Pc7", + "jDkd2voYq4J9b2SuoG289116OG7pSXZ3kyYJK6A8FhO65MwatYCs2EnuGlGBbsJutbYeubnzAQh2vd9f", + "B69u7qKvRS6wpaxBMKNazaVGeAY1qOD23yVUQBoFJqBmMnxdWt7tAdAklRBMa3Vcv2XV3OF6qQMjZDyq", + "efecA06cyu1g7Wvb3tDU9y0g5R/cpNg86muYF6ODNhqR9yOQYjosV9TjmwFN4KSuCfQHZ5FuJzant8fV", + "IdiSONhc02TpJvJ5R1R5xohWyoODvnJcrummW4KLhMPvfRztVyaaa5DVSwL1FiwYmvEuGxG0zo5ch56n", + "vnbVHxs5GyXcelCzmWAM0RnWzHwtND1tDHcdJG0uyGIqeK78YbusZuUbeHjJ/w+Cxs1N7oLEoIduZM9n", + 
"8Na3wZNhLz6fLy4rIow5U2EpNdWRfO6YWEjtuqEAHM2ycNUNbNhG3ovvOI5EywXVo6WsstT6B0ep7MUp", + "b3P6dUH1r+ajY/38WxH4nEeyT87DXgnWrBOxQRjkC2QobGHoMsGdTQcSoXEUiERwVaVdtAbWEh2CnSmT", + "cxsF1yuPgcnIdlypZ6mHQ8MS1C8U3v2VkkQKlxOQrdwUXAWtta33wVWrx66IKHjKSvcYpb4MLEJcxQ44", + "e64Z3h4WwF3DtJs9ZG8o3qc5ScwLFXaMczEaxDbUvD3nU7QHaCzG3/XBhPbZtlln4A5Hfr3/5OaJpV8J", + "zUpG05UtJm4Fhge36nvH04MQNDGHQFYyUS2I1m3lJsE1QZTnyYJIYc37t8Zuqha7aRGpZ9iil9adUvH6", + "q1WecXHpowugWzJCAOPLNBIVC5TKiC5ZFljfsA8cUgvbIMvWeE9olvkLXkfy1fQDgdrOfrALokSFlwkW", + "0+jcTEtG19KMsPnftpQjPNkbpSKxBpTbEpSvQEui/Rdj662m9tigt4cEcT48iGFYS8y8YxsWWlfKnboy", + "0N+zbo4cwsB2jcWEn0KWWtmLXzNeu7GNCP8UM86oi1b0bKM9oG8x5yIgsU8lrqImO/Cu0kZA8Evo3hIY", + "du+j62H6ae8jPOH/WONQD9sZypK50NqWDLh1d1oontoVGN2rO/nhh515g3LxrrGjrxQfmdXtfptZ62bF", + "v934xeu0sNzSEHmnLlFYxqxutRltutoQMIP7so54e4z850bGYcyoYomKK5tpfQ629X3KZqwkvpOr67WT", + "2YzN88HB/g/nA49YdVwdKBXg39NVKZxIX29PeTkOwyp969zOgWMkHs2UxDGUzJkUjLBMwTh1/fLYMgFb", + "AIALRrGkgAXh/zPCaUbPqBg9N/scvYMBBhEYBo06YzCUJZ9zQTOY04wPrXuwQHomw4LqvsUw10G/Ktsi", + "mIdU2yp5rgaWIJTDG9CWas4xJn3T3t7YhY1e2oUNNsYqbSPPyEQzPVK6ZDRvUgivqU+5MPd7uDkx/BnO", + "oVp9ya9hV3RiaNekeLD/w6bXLTo2ENGSHIzvfRwdobSfG3UAw3CnTC+ZRXYLziAayGvtNhxk5vuqy7JD", + "d7zo7HAZlJ2HkS5EeIld6vT6W+tuYH1zLOK52FU5I1NmPvTzT1eNe4cSxaT3Ch0Rc2YTW8EQqEsjOvmW", + "syk2cCDgDDafop/vkGa8buNHuJ8zWSZ8mq1IkknbxOGns7MTkkghMJDdNUeSUGjSEl5bbVM1zosR9oEm", + "miiaMytJaukaqZFUVkbIww8UNKHFtzDVEG9TXWswcgJkKtNVLysNc9rNFLV20QVLQ3L0jpO+AL+XtMxP", + "6zYsNyQY1bO8BdH7+hWwQucBV3WE3oyW+YYkfZy6MwprDxLAD6yzex9t759P6w34UO5uq7BV30robhpY", + "bcuCqOMJS9KKmbyjlvlmU6s1Zs/IF2tOfs92TFl/+q4H17eCBG4/63ABumo5fOgJCGtLnPDhgioioJEM", + "WTF9t9ApjODoNDDDSPecYVYH7n2DA9FW0mmFbbghxxsQT0Nr5i2Q78y8eHeQT7MPeq/IKBc7ViY6awPn", + "W8GrIK6MKk1mbGk7LgVIhi3tt6Je4Sd+PNfFaS1WbRdUETRlulWs+vIW3E5rvG8+rgJZ4DcQWIEdz3w+", + "Hbgx2GzGEu3UAuhijCNQRZYsy9rZheZbRm2lkEWVU6EwhhyEe3DBX3HarV5SlwI3dwQaA7gbhQGhcLHq", + "ezUhXCjNaDsXLyiv3lsSxxdCvzkp3Mq5bqprC+FeYG40OK9LyayXw1E1Vr5hN3aacyZ0bUsD+DxQWk8X", + "0XDwGEb5XO9pOjcnMd8uG6euaL2tIUPTeZ0Yc5cj2MOWBVDiHS5DJbDYtWq0q/Zh/mZ36BsxYygoLVAf", + "Yw3mDSHva8D65RA5qEYeJ+PB5iMo7IX+8LXevW7D9+ZfgO0VVQSmWMKuCdQvzx03wtNmI7cAdk2DoME0", + "2+3TXyescHJ3MmNt6UAqMKoB6gxugywNRBvabUKbF5vOTpu42UfINsQK+gNTt3LNXvXke9SN+NV4TTbm", + "Mnyt/57FK/xCEMRXvwC7If4tUjpzmYJQILQnu7ggaHKivMtnSJSs7aUJzTJrKL0UcglhbO/eHT+/O5fQ", + "B8AIttz1+qEk0kS9+G0LulluunC3cNv6rtpfwAvi1rrprqmtYGSTSdynTtRtOFxibQC6wNv7aHtj7CB6", + "baVS+mFvPh26Uy/b4o7nUTYW8m5KfE5bWto+jMcab34i89w3bQYfcAIhy+CAsjVuawPK0rfB4YJMbAu2", + "CShX6EFtvoQhK7b/09Aw8YJwTWa8VHpMnooVWmTwtbDVSjCM87kCWa98j7PryZ1fFae+NClYw3G3Tate", + "+r5r28grJGWaQp26ZT3NDjd/G6uS1fm7zchu++huSoiINli7C8amO2IH6kXA7axBDqN3QkonUPcaOhvy", + "9DeBhp2maD042JXRyfFz1TAh1H5r10OdyNk/J44GFeUNpBAaasELbwH7dXf8zBgrRirouryJyzXbNH9L", + "LK+5s22amoA3v9GXel1SNwuFOiFjX95NFNxAub4qRtwYJ92EDC5Hu32K17ZM+b7YX9UudU3aZAQ4WTrL", + "WqOfcATNW24M7D3IyhH+vU5+wxe9vH1z5/826Ie4zvokiVv9rZpmHCRY2i+ud9wpdyfGzi2/YV7pKAod", + "Ga0+EsPy6i9VBKmMvjeSs9ka0YvPxZvZbCsXzN2Dpe0QCiS20Rv0b9ButFUiNdB5qSJ1e/O1AH9Gswyj", + "PZ11RkuSWTecK3MK5ju9YKt7JSNzKEVjhx/3norYcCjiRq+2naL/UudM05Rq+hWMrWGz/z/Eld4aDZ9W", + "esGEhqwC16fPYIMLRe2zFnw2TmIgt5Ywg81hlgGn4vWBRzFW20TiqGAcnNrgayMHrNRpNz6Io1cgFZL0", + "f3G3sWp3DHEZcq6pPysx60SseoDQiwojfDPtJ2Gdw0oHN23z8RPFtJbaf6E8nu4sof6BKY+l6vbcnD0Z", + "whISb1xQhCaGbGQsxdqOmHhmKcqoGRPl0AV8q1zUCU+WyrBylMmEZkDgaKa+NFW7Yo3dVDH3EgQHreGz", + "Vh63ceM3V1/XGt57w7qhXF3Q7qWPXP0iXT1Vn9bqi4wFdo8H+4dfsPUholgvYp6w0nWeec4ER9Jp6x/E", + "TecYQmdZHk00v0JLLAP3qKuxlWVyib4KCxa79ZLPF5oIubQBfIe3y2DcRaICcvrQgWekcFgdZuZBxv9c", + 
"Qkt7m9mCF27HS2vdg9SPH0Bj020CnHIKZxlvChSNoOu/LmZItL99C8Godid919HKRlzgEl1g4LWsGnas", + "bvRp7JbUOR6q4bFzmOTKeipp8+H82HVputs2mHwmc2oYddXlkOhVwROIPbTdmkBgLko5L5lSQ2jn5Bpc", + "yJLMKM+qkm3kMI6vKCbShqPOgNuNDtW3Wck235S9nK5GfFRW/WGlr+nKmlIq8U0kpbymq78wVrxFj/M3", + "pp5h4LcVY+rs70BiDlzvAYMqK0H2yCVjhXPF1wHg5E3hakdBIiLlQhFK0NUeyqTeKRPzv/cgckeiB2Uv", + "WFlrTVzVUenrUVtWuqj0qChlWiXrBH1DLN/Ayyfu3TvBHKDm1977gs13zcYe2m8LMf9aidwHWyZyg/Rn", + "U5Rd248H9+/f/EV7xcRcL3zxoz+FneNSnmK/cENlKbEgGNlPMC/frvTw5ld6QleQrwtt62hp+309uP/w", + "NtwIqioKWZqDes1STsnZqrAeM0AxghjlhMmpTzevu8CG0V8PDp7cTodBV/8COSWQDimxw9TMXGxbaM+6", + "pfWilFpnzJbj+0NJHpjnbgCdS6VJyRLM/velA2G/KA8E2e4cgIN9p8zHtSOECYW1/zCHAqR3e8rmy3uK", + "pHzOFBQPbp8xeearD0Cc2MkvPwKcfz558SOxqGQGLTIqRDxOa53AoxdVPhWUZ2qvKNkVZ0tHlniJBRMd", + "tSdI/Z0YBBAtrxw1r8pscDTYGwRGqDaxOm4GQXXagjlM8ewAklS6hUR+llNnJgUZ7e8VK7lBv7rd6bDV", + "jmLcqKKpIoM+PTlu9ocMTWQyzyuB4iYUKGkvfdx24EYmsNjw2q+JPD05HvZ3Z8ZmVmYb5q6UMnMr6kwG", + "TsdIqRwsP+BnAT5R106wEPQ9K9/Lqa8IF85hyx18+u3T/wkAAP//PbRANsURAQA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/pkg/api/openapi_types.gen.go b/pkg/api/openapi_types.gen.go index 5590d81b..80b49695 100644 --- a/pkg/api/openapi_types.gen.go +++ b/pkg/api/openapi_types.gen.go @@ -55,6 +55,23 @@ const ( BlenderPathSourcePathEnvvar BlenderPathSource = "path_envvar" ) +// Defines values for FarmStatus. +const ( + FarmStatusActive FarmStatus = "active" + + FarmStatusAsleep FarmStatus = "asleep" + + FarmStatusIdle FarmStatus = "idle" + + FarmStatusInoperative FarmStatus = "inoperative" + + FarmStatusStarting FarmStatus = "starting" + + FarmStatusUnknown FarmStatus = "unknown" + + FarmStatusWaiting FarmStatus = "waiting" +) + // Defines values for JobStatus. const ( JobStatusActive JobStatus = "active" @@ -198,6 +215,9 @@ type AvailableJobSetting struct { // Identifier for the setting, must be unique within the job type. Key string `json:"key"` + // Label for displaying this setting. If not specified, the key is used to generate a reasonable label. + Label *interface{} `json:"label,omitempty"` + // Any extra arguments to the bpy.props.SomeProperty() call used to create this property. Propargs *map[string]interface{} `json:"propargs,omitempty"` @@ -225,6 +245,9 @@ type AvailableJobSettingVisibility string // Job type supported by this Manager, and its parameters. type AvailableJobType struct { + // The description/tooltip shown in the user interface. + Description *string `json:"description,omitempty"` + // Hash of the job type. If the job settings or the label change, this etag will change. This is used on job submission to ensure that the submitted job settings are up to date. Etag string `json:"etag"` Label string `json:"label"` @@ -273,6 +296,9 @@ type Error struct { Message string `json:"message"` } +// EventFarmStatus defines model for EventFarmStatus. +type EventFarmStatus FarmStatusReport + // Subset of a Job, sent over SocketIO/MQTT when a job changes. For new jobs, `previous_status` will be excluded. type EventJobUpdate struct { // If job deletion was requested, this is the timestamp at which that request was stored on Flamenco Manager. @@ -373,6 +399,14 @@ type EventWorkerUpdate struct { Version string `json:"version"` } +// FarmStatus defines model for FarmStatus. +type FarmStatus string + +// FarmStatusReport defines model for FarmStatusReport. +type FarmStatusReport struct { + Status FarmStatus `json:"status"` +} + // FlamencoVersion defines model for FlamencoVersion. 
type FlamencoVersion struct {
	Git string `json:"git"`
diff --git a/pkg/oomscore/oomscore.go b/pkg/oomscore/oomscore.go
new file mode 100644
index 00000000..30ac61f4
--- /dev/null
+++ b/pkg/oomscore/oomscore.go
@@ -0,0 +1,86 @@
+// Package oomscore provides functions to adjust the Linux out-of-memory
+// (OOM) score, i.e. the number that determines how likely it is
+// that a process is killed in an out-of-memory situation.
+//
+// It is available only on Linux. On other platforms ErrNotImplemented will be returned.
+package oomscore
+
+import (
+	"errors"
+
+	"github.com/rs/zerolog/log"
+)
+
+var ErrNotImplemented = errors.New("OOM score functionality not implemented on this platform")
+
+// Available returns whether the functionality in this package is available for
+// the current platform.
+func Available() bool {
+	return available
+}
+
+// GetOOMScore returns the current process' OOM score.
+func GetOOMScore() (int, error) {
+	return getOOMScore()
+}
+
+// GetOOMScoreAdj returns the current process' OOM score adjustment.
+func GetOOMScoreAdj() (int, error) {
+	return getOOMScoreAdj()
+}
+
+// SetOOMScoreAdj sets the current process' OOM score adjustment.
+func SetOOMScoreAdj(score int) error {
+	return setOOMScoreAdj(score)
+}
+
+type ScoreRestoreFunc func()
+
+var emptyRestoreFunc ScoreRestoreFunc = func() {}
+
+// Adjust temporarily sets the OOM score adjustment.
+// The returned function MUST be called to restore the original value.
+// Any problems changing the score are logged, but not otherwise returned.
+func Adjust(score int) (restoreFunc ScoreRestoreFunc) {
+	restoreFunc = emptyRestoreFunc
+
+	if !Available() {
+		return
+	}
+
+	origScore, err := getOOMScoreAdj()
+	if err != nil {
+		log.Error().
+			AnErr("cause", err).
+			Msg("could not get the current process' oom_score_adj value")
+		return
+	}
+
+	log.Trace().
+		Int("oom_score_adj", score).
+		Msg("setting oom_score_adj")
+
+	err = setOOMScoreAdj(score)
+	if err != nil {
+		log.Error().
+			Int("oom_score_adj", score).
+			AnErr("cause", err).
+			Msg("could not set the current process' oom_score_adj value")
+		return
+	}
+
+	return func() {
+		log.Trace().
+			Int("oom_score_adj", origScore).
+			Msg("restoring oom_score_adj")
+
+		err := setOOMScoreAdj(origScore)
+		if err != nil {
+			log.Error().
+				Int("oom_score_adj", origScore).
+				AnErr("cause", err).
+				Msg("could not restore the current process' oom_score_adj value")
+			return
+		}
+	}
+}
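A minimal usage sketch for the Adjust/restore API above. This is not part of the patch: runWithOOMScoreAdj, the Blender command line, and the score value 500 are illustrative assumptions; the actual Worker integration lives elsewhere in this release.

package main

import (
	"os/exec"

	"github.com/rs/zerolog/log"

	"projects.blender.org/studio/flamenco/pkg/oomscore"
)

// runWithOOMScoreAdj starts cmd with a temporarily raised OOM score
// adjustment. On Linux a child process inherits oom_score_adj from its
// parent, so the adjustment only needs to be in effect while the process
// is being started; the Worker then restores its own original value.
func runWithOOMScoreAdj(cmd *exec.Cmd, score int) error {
	restore := oomscore.Adjust(score) // returns a no-op function on non-Linux platforms
	err := cmd.Start()
	restore() // MUST be called, as documented on Adjust
	if err != nil {
		return err
	}
	return cmd.Wait()
}

func main() {
	cmd := exec.Command("blender", "--background")
	if err := runWithOOMScoreAdj(cmd, 500); err != nil {
		log.Error().Err(err).Msg("subprocess failed")
	}
}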
diff --git a/pkg/oomscore/oomscore_linux.go b/pkg/oomscore/oomscore_linux.go
new file mode 100644
index 00000000..225925fd
--- /dev/null
+++ b/pkg/oomscore/oomscore_linux.go
@@ -0,0 +1,67 @@
+//go:build linux
+
+package oomscore
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+const (
+	available = true
+)
+
+// getOOMScore returns the current process' OOM score.
+func getOOMScore() (int, error) {
+	return readInt("oom_score")
+}
+
+// getOOMScoreAdj returns the current process' OOM score adjustment.
+func getOOMScoreAdj() (int, error) {
+	return readInt("oom_score_adj")
+}
+
+// setOOMScoreAdj sets the current process' OOM score adjustment.
+func setOOMScoreAdj(newScore int) error {
+	return writeInt(newScore, "oom_score_adj")
+}
+
+// readInt reads an integer from /proc/{pid}/{filename}
+func readInt(filename string) (int, error) {
+	fullPath := procPidPath(filename)
+
+	file, err := os.Open(fullPath)
+	if err != nil {
+		return 0, fmt.Errorf("opening %s: %w", fullPath, err)
+	}
+	defer file.Close()
+
+	var valueInFile int
+	n, err := fmt.Fscan(file, &valueInFile)
+	if err != nil {
+		return 0, fmt.Errorf("reading %s: %w", fullPath, err)
+	}
+	if n < 1 {
+		return 0, fmt.Errorf("reading %s: did not find a number", fullPath)
+	}
+
+	return valueInFile, nil
+}
+
+// writeInt writes an integer to /proc/{pid}/{filename}
+func writeInt(value int, filename string) error {
+	fullPath := procPidPath(filename)
+	contents := fmt.Sprint(value)
+	err := os.WriteFile(fullPath, []byte(contents), os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("writing %s: %w", fullPath, err)
+	}
+	return nil
+}
+
+// procPidPath returns "/proc/{pid}/{filename}".
+func procPidPath(filename string) string {
+	pid := os.Getpid()
+	return filepath.Join("/proc", fmt.Sprint(pid), filename)
+}
diff --git a/pkg/oomscore/oomscore_nonlinux.go b/pkg/oomscore/oomscore_nonlinux.go
new file mode 100644
index 00000000..be9d56a8
--- /dev/null
+++ b/pkg/oomscore/oomscore_nonlinux.go
@@ -0,0 +1,19 @@
+//go:build !linux
+
+package oomscore
+
+const (
+	available = false
+)
+
+func getOOMScore() (int, error) {
+	return 0, ErrNotImplemented
+}
+
+func getOOMScoreAdj() (int, error) {
+	return 0, ErrNotImplemented
+}
+
+func setOOMScoreAdj(int) error {
+	return ErrNotImplemented
+}
diff --git a/pkg/website/urls.go b/pkg/website/urls.go
index 1b0df789..bccd4d2c 100644
--- a/pkg/website/urls.go
+++ b/pkg/website/urls.go
@@ -5,7 +5,9 @@ package website
 const (
 	DocVariablesURL           = "https://flamenco.blender.org/usage/variables/blender/"
 	WorkerCredsUnknownHelpURL = "https://flamenco.blender.org/faq/#what-does-unknown-worker-is-trying-to-communicate-mean"
+	CannotFindManagerHelpURL  = "https://flamenco.blender.org/faq/#my-worker-cannot-find-my-manager-what-do-i-do"
 	BugReportURL              = "https://flamenco.blender.org/get-involved"
 	ShamanRequirementsURL     = "https://flamenco.blender.org/usage/shared-storage/shaman/#requirements"
 	WorkerConfigURL           = "https://flamenco.blender.org/usage/worker-configuration/"
+	OOMScoreAdjURL            = WorkerConfigURL
 )
diff --git a/sqlc.yaml b/sqlc.yaml
new file mode 100644
index 00000000..887ce133
--- /dev/null
+++ b/sqlc.yaml
@@ -0,0 +1,16 @@
+version: "2"
+sql:
+  - engine: "sqlite"
+    schema: "internal/manager/persistence/sqlc/schema.sql"
+    queries: "internal/manager/persistence/sqlc/query_jobs.sql"
+    gen:
+      go:
+        out: "internal/manager/persistence/sqlc"
+        overrides:
+          - db_type: "jsonb"
+            go_type:
+              import: "encoding/json"
+              type: "RawMessage"
+        rename:
+          uuid: "UUID"
+          uuids: "UUIDs"
diff --git a/web/app/package.json b/web/app/package.json
index be3bcd85..0d1d6bab 100644
--- a/web/app/package.json
+++ b/web/app/package.json
@@ -12,7 +12,7 @@
     }
   ],
   "scripts": {
-    "dev": "vite --port 8081 --base /app/",
+    "dev": "vite --port 8081 --base /app/ --mode development",
     "build": "vite build",
     "preview": "vite preview --port 5050",
    "lint": "eslint . --ext .vue,.js,.jsx,.cjs,.mjs --fix --ignore-path .gitignore"
diff --git a/web/app/src/App.vue b/web/app/src/App.vue
index 3e9327ce..0e716118 100644
--- a/web/app/src/App.vue
+++ b/web/app/src/App.vue
@@ -1,5 +1,5 @@
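For context on the new sqlc.yaml above: running `sqlc generate` turns annotated SQL in query_jobs.sql into type-safe Go in the configured `out` package. A minimal sketch of the resulting calling surface, assuming a hypothetical query annotated `-- name: FetchJob :one`; the real query names live in query_jobs.sql and are not shown in this diff.

package main

import (
	"context"
	"database/sql"
	"log"

	_ "modernc.org/sqlite" // SQLite driver choice is an assumption, not part of this patch

	"projects.blender.org/studio/flamenco/internal/manager/persistence/sqlc"
)

func main() {
	db, err := sql.Open("sqlite", "flamenco-manager.sqlite")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sqlc generates New(DBTX); both *sql.DB and *sql.Tx satisfy DBTX.
	queries := sqlc.New(db)

	// Hypothetical generated method for `-- name: FetchJob :one`.
	job, err := queries.FetchJob(context.Background(), "00000000-0000-0000-0000-000000000000")
	if err != nil {
		log.Fatal(err)
	}

	// The `rename` mapping in sqlc.yaml makes a `uuid` column surface as
	// job.UUID rather than the default job.Uuid.
	log.Printf("found job %s", job.UUID)
}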