Sync branch magefile with main #104308

Merged
Sybren A. Stüvel merged 85 commits from abelli/flamenco:magefile into magefile 2024-05-13 16:26:32 +02:00
145 changed files with 5998 additions and 1397 deletions

View File

@ -4,11 +4,26 @@ This file contains the history of changes to Flamenco. Only changes that might
be interesting for users are listed here, such as new features and fixes for
bugs in actually-released versions.
## 3.5 - in development
## 3.6 - in development
- Add MQTT support. Flamenco Manager can now send internal events to an MQTT broker.
- Add `label` to job settings, to have full control over how they are presented in Blender's job submission GUI. If a job setting does not define a label, its `key` is used to generate one (like Flamenco 3.5 and older).
- Add `shellSplit(someString)` function to the job compiler scripts. This splits a string into an array of strings using shell/CLI semantics.
## 3.5 - released 2024-04-16
- Add MQTT support ([docs](https://flamenco.blender.org/usage/manager-configuration/mqtt/)). Flamenco Manager can now send internal events to an MQTT broker.
- Simplify the preview video filename when a complex set of frames is rendered ([#104285](https://projects.blender.org/studio/flamenco/issues/104285)). Instead of `video-1, 4, 10.mp4` it is now simply `video-1-10.mp4`.
- Make the `blendfile` parameter of a `blender-render` command optional. This makes it possible to pass, for example, a Python file that loads/constructs the blend file, instead of loading one straight from disk.
- Show the farm status in the web frontend. This shows whether the farm is actively working on a job, idle, asleep (all workers are sleeping and no work is queued), waiting (all workers are sleeping, and work is queued), or inoperable (no workers, or all workers are offline). This status is also broadcast as event via the event bus, and thus available via SocketIO and MQTT.
- Fix an issue where the columns in the web interface wouldn't correctly resize when the shown information changed.
- Add-on: replace the different 'refresh' buttons (for Manager info & storage location, job types, and worker tags) with a single button that just refreshes everything in one go. The information obtained from Flamenco Manager is now stored in a JSON file on disk, making it independent from Blender auto-saving the user preferences.
- Ensure the web frontend connects to the backend correctly when served over HTTPS ([#104296](https://projects.blender.org/studio/flamenco/pulls/104296)).
- For Workers running on Linux, it is now possible to configure the "OOM score adjustment" for sub-processes. This makes it possible for the out-of-memory killer to target Blender, and not Flamenco Worker itself.
- Security updates of some dependencies:
- [Incorrect forwarding of sensitive headers and cookies on HTTP redirect in net/http](https://pkg.go.dev/vuln/GO-2024-2600)
- [Memory exhaustion in multipart form parsing in net/textproto and net/http](https://pkg.go.dev/vuln/GO-2024-2599)
- [Verify panics on certificates with an unknown public key algorithm in crypto/x509](https://pkg.go.dev/vuln/GO-2024-2598)
- [HTTP/2 CONTINUATION flood in net/http](https://pkg.go.dev/vuln/GO-2024-2687)
## 3.4 - released 2024-01-12

View File

@ -4,7 +4,7 @@ PKG := projects.blender.org/studio/flamenco
# To update the version number in all the relevant places, update the VERSION
# variable below and run `make update-version`.
VERSION := 3.5-alpha1
VERSION := 3.6-alpha0
# "alpha", "beta", or "release".
RELEASE_CYCLE := alpha
@ -240,7 +240,7 @@ swagger-ui:
test:
# Ensure the web-static directory exists, so that `web/web_app.go` can embed something.
mkdir -p ${WEB_STATIC}
go test -short ./...
go test -short -failfast ./...
clean:
@go clean -i -x

View File

@ -5,14 +5,14 @@
bl_info = {
"name": "Flamenco 3",
"author": "Sybren A. Stüvel",
"version": (3, 5),
"version": (3, 6),
"blender": (3, 1, 0),
"description": "Flamenco client for Blender.",
"location": "Output Properties > Flamenco",
"doc_url": "https://flamenco.blender.org/",
"category": "System",
"support": "COMMUNITY",
"warning": "This is version 3.5-alpha1 of the add-on, which is not a stable release",
"warning": "This is version 3.6-alpha0 of the add-on, which is not a stable release",
}
from pathlib import Path
@ -27,6 +27,7 @@ if __is_first_load:
preferences,
projects,
worker_tags,
manager_info,
)
else:
import importlib
@ -38,6 +39,7 @@ else:
preferences = importlib.reload(preferences)
projects = importlib.reload(projects)
worker_tags = importlib.reload(worker_tags)
manager_info = importlib.reload(manager_info)
import bpy
@ -160,6 +162,9 @@ def register() -> None:
gui.register()
job_types.register()
# Once everything is registered, load the cached manager info from JSON.
manager_info.load_into_cache()
def unregister() -> None:
discard_global_flamenco_data(None)

View File

@ -286,12 +286,12 @@ class Transferrer(submodules.transfer.FileTransferer): # type: ignore
return None
self.log.debug(" %s: %s", file_spec.status, file_spec.path)
match file_spec.status.value:
case "unknown":
status = file_spec.status.value
if status == "unknown":
to_upload.appendleft(file_spec)
case "uploading":
elif status == "uploading":
to_upload.append(file_spec)
case _:
else:
msg = "Unknown status in response from Shaman: %r" % file_spec
self.log.error(msg)
self.error_set(msg)
@ -375,21 +375,22 @@ class Transferrer(submodules.transfer.FileTransferer): # type: ignore
x_shaman_original_filename=file_spec.path,
)
except ApiException as ex:
match ex.status:
case 425: # Too Early, i.e. defer uploading this file.
if ex.status == 425:
# Too Early, i.e. defer uploading this file.
self.log.info(
" %s: someone else is uploading this file, deferring",
file_spec.path,
)
defer(file_spec)
continue
case 417: # Expectation Failed; mismatch of checksum or file size.
elif ex.status == 417:
# Expectation Failed; mismatch of checksum or file size.
msg = "Error from Shaman uploading %s, code %d: %s" % (
file_spec.path,
ex.status,
ex.body,
)
case _: # Unknown error
else: # Unknown error
msg = "API exception\nHeaders: %s\nBody: %s\n" % (
ex.headers,
ex.body,
@ -453,15 +454,11 @@ class Transferrer(submodules.transfer.FileTransferer): # type: ignore
checkoutRequest
)
except ApiException as ex:
match ex.status:
case 424: # Files were missing
if ex.status == 424: # Files were missing
msg = "We did not upload some files, checkout aborted"
case 409: # Checkout already exists
msg = (
"There is already an existing checkout at %s"
% self.checkout_path
)
case _: # Unknown error
elif ex.status == 409: # Checkout already exists
msg = "There is already an existing checkout at %s" % self.checkout_path
else: # Unknown error
msg = "API exception\nHeaders: %s\nBody: %s\n" % (
ex.headers,
ex.body,

View File

@ -3,13 +3,12 @@
# <pep8 compliant>
import logging
import dataclasses
import platform
from typing import TYPE_CHECKING, Optional
from typing import TYPE_CHECKING
from urllib3.exceptions import HTTPError, MaxRetryError
import bpy
from flamenco import manager_info, job_types
_flamenco_client = None
_log = logging.getLogger(__name__)
@ -27,23 +26,6 @@ else:
_SharedStorageLocation = object
@dataclasses.dataclass(frozen=True)
class ManagerInfo:
version: Optional[_FlamencoVersion] = None
storage: Optional[_SharedStorageLocation] = None
error: str = ""
@classmethod
def with_error(cls, error: str) -> "ManagerInfo":
return cls(error=error)
@classmethod
def with_info(
cls, version: _FlamencoVersion, storage: _SharedStorageLocation
) -> "ManagerInfo":
return cls(version=version, storage=storage)
def flamenco_api_client(manager_url: str) -> _ApiClient:
"""Returns an API client for communicating with a Manager."""
global _flamenco_client
@ -87,12 +69,12 @@ def discard_flamenco_data():
_flamenco_client = None
def ping_manager_with_report(
def ping_manager(
window_manager: bpy.types.WindowManager,
scene: bpy.types.Scene,
api_client: _ApiClient,
prefs: _FlamencoPreferences,
) -> tuple[str, str]:
"""Ping the Manager, update preferences, and return a report as string.
"""Fetch Manager info, and update the scene for it.
:returns: tuple (report, level). The report will be something like "<name>
version <version> found", or an error message. The level will be
@ -100,55 +82,49 @@ def ping_manager_with_report(
`Operator.report()`.
"""
info = ping_manager(window_manager, api_client, prefs)
if info.error:
return info.error, "ERROR"
assert info.version is not None
report = "%s version %s found" % (info.version.name, info.version.version)
return report, "INFO"
def ping_manager(
window_manager: bpy.types.WindowManager,
api_client: _ApiClient,
prefs: _FlamencoPreferences,
) -> ManagerInfo:
"""Fetch Manager config & version, and update cached preferences."""
window_manager.flamenco_status_ping = "..."
# Do a late import, so that the API is only imported when actually used.
from flamenco.manager import ApiException
from flamenco.manager.apis import MetaApi
from flamenco.manager.models import FlamencoVersion, SharedStorageLocation
# Remember the old values, as they may have disappeared from the Manager.
old_job_type_name = getattr(scene, "flamenco_job_type", "")
old_tag_name = getattr(scene, "flamenco_worker_tag", "")
meta_api = MetaApi(api_client)
error = ""
try:
version: FlamencoVersion = meta_api.get_version()
storage: SharedStorageLocation = meta_api.get_shared_storage(
"users", platform.system().lower()
)
except ApiException as ex:
error = "Manager cannot be reached: %s" % ex
except MaxRetryError as ex:
# This is the common error, when for example the port number is
# incorrect and nothing is listening. The exception text is not included
# because it's very long and confusing.
error = "Manager cannot be reached"
except HTTPError as ex:
error = "Manager cannot be reached: %s" % ex
if error:
window_manager.flamenco_status_ping = error
return ManagerInfo.with_error(error)
# Store whether this Manager supports the Shaman API.
prefs.is_shaman_enabled = storage.shaman_enabled
prefs.job_storage = storage.location
report = "%s version %s found" % (version.name, version.version)
info = manager_info.fetch(api_client)
except manager_info.FetchError as ex:
report = str(ex)
window_manager.flamenco_status_ping = report
return report, "ERROR"
return ManagerInfo.with_info(version, storage)
manager_info.save(info)
report = "%s version %s found" % (
info.flamenco_version.name,
info.flamenco_version.version,
)
report_level = "INFO"
job_types.refresh_scene_properties(scene, info.job_types)
# Try to restore the old values.
#
# Since you cannot un-set an enum property, and 'empty string' is not a
# valid value either, when the old choice is no longer available we remove
# the underlying ID property.
if old_job_type_name:
try:
scene.flamenco_job_type = old_job_type_name
except TypeError: # Thrown when the old enum value no longer exists.
del scene["flamenco_job_type"]
report = f"Job type {old_job_type_name!r} no longer available, choose another one"
report_level = "WARNING"
if old_tag_name:
try:
scene.flamenco_worker_tag = old_tag_name
except TypeError: # Thrown when the old enum value no longer exists.
del scene["flamenco_worker_tag"]
report = f"Tag {old_tag_name!r} no longer available, choose another one"
report_level = "WARNING"
window_manager.flamenco_status_ping = report
return report, report_level

View File

@ -43,23 +43,19 @@ class FLAMENCO_PT_job_submission(bpy.types.Panel):
col.prop(context.scene, "flamenco_job_name", text="Job Name")
col.prop(context.scene, "flamenco_job_priority", text="Priority")
# Worker tag:
row = col.row(align=True)
row.prop(context.scene, "flamenco_worker_tag", text="Tag")
row.operator("flamenco.fetch_worker_tags", text="", icon="FILE_REFRESH")
layout.separator()
col = layout.column()
# Refreshables:
col = layout.column(align=True)
col.operator(
"flamenco.ping_manager", text="Refresh from Manager", icon="FILE_REFRESH"
)
if not job_types.are_job_types_available():
col.operator("flamenco.fetch_job_types", icon="FILE_REFRESH")
return
col.prop(context.scene, "flamenco_worker_tag", text="Tag")
row = col.row(align=True)
row.prop(context.scene, "flamenco_job_type", text="")
row.operator("flamenco.fetch_job_types", text="", icon="FILE_REFRESH")
self.draw_job_settings(context, layout.column(align=True))
# Job properties:
job_col = layout.column(align=True)
job_col.prop(context.scene, "flamenco_job_type", text="Job Type")
self.draw_job_settings(context, job_col)
layout.separator()

View File

@ -8,7 +8,7 @@ import bpy
from .job_types_propgroup import JobTypePropertyGroup
from .bat.submodules import bpathlib
from . import preferences
from . import manager_info
if TYPE_CHECKING:
from .manager import ApiClient as _ApiClient
@ -133,8 +133,11 @@ def is_file_inside_job_storage(context: bpy.types.Context, blendfile: Path) -> b
blendfile = bpathlib.make_absolute(blendfile)
prefs = preferences.get(context)
job_storage = bpathlib.make_absolute(Path(prefs.job_storage))
info = manager_info.load_cached()
if not info:
raise RuntimeError("Flamenco Manager info unknown, please refresh.")
job_storage = bpathlib.make_absolute(Path(info.shared_storage.location))
log.info("Checking whether the file is already inside the job storage")
log.info(" file : %s", blendfile)

View File

@ -1,14 +1,10 @@
# SPDX-License-Identifier: GPL-3.0-or-later
import json
import logging
from typing import TYPE_CHECKING, Optional, Union
import bpy
from . import job_types_propgroup
_log = logging.getLogger(__name__)
from . import job_types_propgroup, manager_info
if TYPE_CHECKING:
from flamenco.manager import ApiClient as _ApiClient
@ -29,34 +25,34 @@ else:
_available_job_types: Optional[list[_AvailableJobType]] = None
# Enum property value that indicates 'no job type selected'. This is used
# because an empty string seems to be handled by Blender as 'nothing', which
# never seems to match an enum item even when there is one with "" as its 'key'.
_JOB_TYPE_NOT_SELECTED = "-"
_JOB_TYPE_NOT_SELECTED_ENUM_ITEM = (
_JOB_TYPE_NOT_SELECTED,
"Select a Job Type",
"",
0,
0,
)
# Items for a bpy.props.EnumProperty()
_job_type_enum_items: list[
Union[tuple[str, str, str], tuple[str, str, str, int, int]]
] = []
] = [_JOB_TYPE_NOT_SELECTED_ENUM_ITEM]
_selected_job_type_propgroup: Optional[
type[job_types_propgroup.JobTypePropertyGroup]
] = None
def fetch_available_job_types(api_client: _ApiClient, scene: bpy.types.Scene) -> None:
from flamenco.manager import ApiClient
from flamenco.manager.api import jobs_api
from flamenco.manager.model.available_job_types import AvailableJobTypes
assert isinstance(api_client, ApiClient)
job_api_instance = jobs_api.JobsApi(api_client)
response: AvailableJobTypes = job_api_instance.get_job_types()
def refresh_scene_properties(
scene: bpy.types.Scene, available_job_types: _AvailableJobTypes
) -> None:
_clear_available_job_types(scene)
# Store the response JSON on the scene. This is used when the blend file is
# loaded (and thus the _available_job_types global variable is still empty)
# to generate the PropertyGroup of the selected job type.
scene.flamenco_available_job_types_json = json.dumps(response.to_dict())
_store_available_job_types(response)
_store_available_job_types(available_job_types)
update_job_type_properties(scene)
def setting_is_visible(setting: _AvailableJobSetting) -> bool:
@ -120,36 +116,10 @@ def _store_available_job_types(available_job_types: _AvailableJobTypes) -> None:
else:
# Convert from API response type to list suitable for an EnumProperty.
_job_type_enum_items = [
(job_type.name, job_type.label, "") for job_type in job_types
(job_type.name, job_type.label, getattr(job_type, "description", ""))
for job_type in job_types
]
_job_type_enum_items.insert(0, ("", "Select a Job Type", "", 0, 0))
def _available_job_types_from_json(job_types_json: str) -> None:
"""Convert JSON to AvailableJobTypes object, and update global variables for it."""
from flamenco.manager.models import AvailableJobTypes
from flamenco.manager.configuration import Configuration
from flamenco.manager.model_utils import validate_and_convert_types
json_dict = json.loads(job_types_json)
dummy_cfg = Configuration()
try:
job_types = validate_and_convert_types(
json_dict, (AvailableJobTypes,), ["job_types"], True, True, dummy_cfg
)
except TypeError:
_log.warn(
"Flamenco: could not restore cached job types, refresh them from Flamenco Manager"
)
_store_available_job_types(AvailableJobTypes(job_types=[]))
return
assert isinstance(
job_types, AvailableJobTypes
), "expected AvailableJobTypes, got %s" % type(job_types)
_store_available_job_types(job_types)
_job_type_enum_items.insert(0, _JOB_TYPE_NOT_SELECTED_ENUM_ITEM)
def are_job_types_available() -> bool:
@ -199,7 +169,7 @@ def _clear_available_job_types(scene: bpy.types.Scene) -> None:
_clear_job_type_propgroup()
_available_job_types = None
_job_type_enum_items.clear()
_job_type_enum_items = []
scene.flamenco_available_job_types_json = ""
@ -238,26 +208,27 @@ def _get_job_types_enum_items(dummy1, dummy2):
@bpy.app.handlers.persistent
def restore_available_job_types(dummy1, dummy2):
def restore_available_job_types(_filepath, _none):
scene = bpy.context.scene
job_types_json = getattr(scene, "flamenco_available_job_types_json", "")
if not job_types_json:
info = manager_info.load_cached()
if info is None:
_clear_available_job_types(scene)
return
_available_job_types_from_json(job_types_json)
update_job_type_properties(scene)
refresh_scene_properties(scene, info.job_types)
def discard_flamenco_data():
if _available_job_types:
_available_job_types.clear()
if _job_type_enum_items:
_job_type_enum_items.clear()
global _available_job_types
global _job_type_enum_items
_available_job_types = None
_job_type_enum_items = []
def register() -> None:
bpy.types.Scene.flamenco_job_type = bpy.props.EnumProperty(
name="Job Type",
default=0,
items=_get_job_types_enum_items,
update=_update_job_type,
)

View File

@ -304,8 +304,8 @@ def _create_property(job_type: _AvailableJobType, setting: _AvailableJobSetting)
if not setting.get("editable", True):
prop_kwargs["get"] = _create_prop_getter(job_type, setting)
prop_name = _job_setting_key_to_label(setting.key)
prop = prop_type(name=prop_name, **prop_kwargs)
prop_label = _job_setting_label(setting)
prop = prop_type(name=prop_label, **prop_kwargs)
return prop
@ -316,10 +316,10 @@ def _create_autoeval_property(
assert isinstance(setting, AvailableJobSetting)
setting_name = _job_setting_key_to_label(setting.key)
setting_label = _job_setting_label(setting)
prop_descr = (
"Automatically determine the value for %r when the job gets submitted"
% setting_name
% setting_label
)
prop = bpy.props.BoolProperty(
@ -379,13 +379,13 @@ def _job_type_to_class_name(job_type_name: str) -> str:
return job_type_name.title().replace("-", "")
def _job_setting_key_to_label(setting_key: str) -> str:
"""Change 'some_setting_key' to 'Some Setting Key'.
def _job_setting_label(setting: _AvailableJobSetting) -> str:
"""Return a suitable label for this job setting."""
>>> _job_setting_key_to_label('some_setting_key')
'Some Setting Key'
"""
return setting_key.title().replace("_", " ")
label = setting.get("label", default="")
if label:
return label
return setting.key.title().replace("_", " ")
def _set_if_available(

View File

@ -10,7 +10,7 @@
"""
__version__ = "3.5-alpha1"
__version__ = "3.6-alpha0"
# import ApiClient
from flamenco.manager.api_client import ApiClient

View File

@ -24,6 +24,7 @@ from flamenco.manager.model_utils import ( # noqa: F401
from flamenco.manager.model.blender_path_check_result import BlenderPathCheckResult
from flamenco.manager.model.blender_path_find_result import BlenderPathFindResult
from flamenco.manager.model.error import Error
from flamenco.manager.model.farm_status_report import FarmStatusReport
from flamenco.manager.model.flamenco_version import FlamencoVersion
from flamenco.manager.model.manager_configuration import ManagerConfiguration
from flamenco.manager.model.manager_variable_audience import ManagerVariableAudience
@ -268,6 +269,48 @@ class MetaApi(object):
},
api_client=api_client
)
self.get_farm_status_endpoint = _Endpoint(
settings={
'response_type': (FarmStatusReport,),
'auth': [],
'endpoint_path': '/api/v3/status',
'operation_id': 'get_farm_status',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_shared_storage_endpoint = _Endpoint(
settings={
'response_type': (SharedStorageLocation,),
@ -831,6 +874,78 @@ class MetaApi(object):
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_configuration_file_endpoint.call_with_http_info(**kwargs)
def get_farm_status(
self,
**kwargs
):
"""Get the status of this Flamenco farm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_farm_status(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
FarmStatusReport
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.get_farm_status_endpoint.call_with_http_info(**kwargs)
def get_shared_storage(
self,
audience,

View File

@ -76,7 +76,7 @@ class ApiClient(object):
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'Flamenco/3.5-alpha1 (Blender add-on)'
self.user_agent = 'Flamenco/3.6-alpha0 (Blender add-on)'
def __enter__(self):
return self

View File

@ -404,7 +404,7 @@ conf = flamenco.manager.Configuration(
"OS: {env}\n"\
"Python Version: {pyversion}\n"\
"Version of the API: 1.0.0\n"\
"SDK Package Version: 3.5-alpha1".\
"SDK Package Version: 3.6-alpha0".\
format(env=sys.platform, pyversion=sys.version)
def get_host_settings(self):

View File

@ -11,6 +11,7 @@ Name | Type | Description | Notes
**choices** | **[str]** | When given, limit the valid values to these choices. Only usable with string type. | [optional]
**propargs** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}** | Any extra arguments to the bpy.props.SomeProperty() call used to create this property. | [optional]
**description** | **bool, date, datetime, dict, float, int, list, str, none_type** | The description/tooltip shown in the user interface. | [optional]
**label** | **bool, date, datetime, dict, float, int, list, str, none_type** | Label for displaying this setting. If not specified, the key is used to generate a reasonable label. | [optional]
**default** | **bool, date, datetime, dict, float, int, list, str, none_type** | The default value shown to the user when determining this setting. | [optional]
**eval** | **str** | Python expression to be evaluated in order to determine the default value for this setting. | [optional]
**eval_info** | [**AvailableJobSettingEvalInfo**](AvailableJobSettingEvalInfo.md) | | [optional]

View File

@ -9,6 +9,7 @@ Name | Type | Description | Notes
**label** | **str** | |
**settings** | [**[AvailableJobSetting]**](AvailableJobSetting.md) | |
**etag** | **str** | Hash of the job type. If the job settings or the label change, this etag will change. This is used on job submission to ensure that the submitted job settings are up to date. |
**description** | **str** | The description/tooltip shown in the user interface. | [optional]
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)

View File

@ -0,0 +1,11 @@
# EventFarmStatus
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**value** | **FarmStatusReport** | |
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)

View File

@ -0,0 +1,11 @@
# FarmStatus
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**value** | **str** | | must be one of ["active", "idle", "waiting", "asleep", "inoperative", "unknown", "starting", ]
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)

View File

@ -0,0 +1,12 @@
# FarmStatusReport
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**status** | [**FarmStatus**](FarmStatus.md) | |
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)

View File

@ -9,6 +9,7 @@ Method | HTTP request | Description
[**find_blender_exe_path**](MetaApi.md#find_blender_exe_path) | **GET** /api/v3/configuration/check/blender | Find one or more CLI commands for use as way to start Blender
[**get_configuration**](MetaApi.md#get_configuration) | **GET** /api/v3/configuration | Get the configuration of this Manager.
[**get_configuration_file**](MetaApi.md#get_configuration_file) | **GET** /api/v3/configuration/file | Retrieve the configuration of Flamenco Manager.
[**get_farm_status**](MetaApi.md#get_farm_status) | **GET** /api/v3/status | Get the status of this Flamenco farm.
[**get_shared_storage**](MetaApi.md#get_shared_storage) | **GET** /api/v3/configuration/shared-storage/{audience}/{platform} | Get the shared storage location of this Manager, adjusted for the given audience and platform.
[**get_variables**](MetaApi.md#get_variables) | **GET** /api/v3/configuration/variables/{audience}/{platform} | Get the variables of this Manager. Used by the Blender add-on to recognise two-way variables, and for the web interface to do variable replacement based on the browser&#39;s platform.
[**get_version**](MetaApi.md#get_version) | **GET** /api/v3/version | Get the Flamenco version of this Manager
@ -341,6 +342,67 @@ No authorization required
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
# **get_farm_status**
> FarmStatusReport get_farm_status()
Get the status of this Flamenco farm.
### Example
```python
import time
import flamenco.manager
from flamenco.manager.api import meta_api
from flamenco.manager.model.farm_status_report import FarmStatusReport
from pprint import pprint
# Defining the host is optional and defaults to http://localhost
# See configuration.py for a list of all supported configuration parameters.
configuration = flamenco.manager.Configuration(
host = "http://localhost"
)
# Enter a context with an instance of the API client
with flamenco.manager.ApiClient() as api_client:
# Create an instance of the API class
api_instance = meta_api.MetaApi(api_client)
# example, this endpoint has no required or optional parameters
try:
# Get the status of this Flamenco farm.
api_response = api_instance.get_farm_status()
pprint(api_response)
except flamenco.manager.ApiException as e:
print("Exception when calling MetaApi->get_farm_status: %s\n" % e)
```
### Parameters
This endpoint does not need any parameter.
### Return type
[**FarmStatusReport**](FarmStatusReport.md)
### Authorization
No authorization required
### HTTP request headers
- **Content-Type**: Not defined
- **Accept**: application/json
### HTTP response details
| Status code | Description | Response headers |
|-------------|-------------|------------------|
**200** | normal response | - |
[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md)
# **get_shared_storage**
> SharedStorageLocation get_shared_storage(audience, platform)

View File

@ -99,6 +99,7 @@ class AvailableJobSetting(ModelNormal):
'choices': ([str],), # noqa: E501
'propargs': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501
'description': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501
'label': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501
'default': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501
'eval': (str,), # noqa: E501
'eval_info': (AvailableJobSettingEvalInfo,), # noqa: E501
@ -119,6 +120,7 @@ class AvailableJobSetting(ModelNormal):
'choices': 'choices', # noqa: E501
'propargs': 'propargs', # noqa: E501
'description': 'description', # noqa: E501
'label': 'label', # noqa: E501
'default': 'default', # noqa: E501
'eval': 'eval', # noqa: E501
'eval_info': 'evalInfo', # noqa: E501
@ -176,6 +178,7 @@ class AvailableJobSetting(ModelNormal):
choices ([str]): When given, limit the valid values to these choices. Only usable with string type.. [optional] # noqa: E501
propargs ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Any extra arguments to the bpy.props.SomeProperty() call used to create this property.. [optional] # noqa: E501
description (bool, date, datetime, dict, float, int, list, str, none_type): The description/tooltip shown in the user interface.. [optional] # noqa: E501
label (bool, date, datetime, dict, float, int, list, str, none_type): Label for displaying this setting. If not specified, the key is used to generate a reasonable label.. [optional] # noqa: E501
default (bool, date, datetime, dict, float, int, list, str, none_type): The default value shown to the user when determining this setting.. [optional] # noqa: E501
eval (str): Python expression to be evaluated in order to determine the default value for this setting.. [optional] # noqa: E501
eval_info (AvailableJobSettingEvalInfo): [optional] # noqa: E501
@ -273,6 +276,7 @@ class AvailableJobSetting(ModelNormal):
choices ([str]): When given, limit the valid values to these choices. Only usable with string type.. [optional] # noqa: E501
propargs ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): Any extra arguments to the bpy.props.SomeProperty() call used to create this property.. [optional] # noqa: E501
description (bool, date, datetime, dict, float, int, list, str, none_type): The description/tooltip shown in the user interface.. [optional] # noqa: E501
label (bool, date, datetime, dict, float, int, list, str, none_type): Label for displaying this setting. If not specified, the key is used to generate a reasonable label.. [optional] # noqa: E501
default (bool, date, datetime, dict, float, int, list, str, none_type): The default value shown to the user when determining this setting.. [optional] # noqa: E501
eval (str): Python expression to be evaluated in order to determine the default value for this setting.. [optional] # noqa: E501
eval_info (AvailableJobSettingEvalInfo): [optional] # noqa: E501

View File

@ -91,6 +91,7 @@ class AvailableJobType(ModelNormal):
'label': (str,), # noqa: E501
'settings': ([AvailableJobSetting],), # noqa: E501
'etag': (str,), # noqa: E501
'description': (str,), # noqa: E501
}
@cached_property
@ -103,6 +104,7 @@ class AvailableJobType(ModelNormal):
'label': 'label', # noqa: E501
'settings': 'settings', # noqa: E501
'etag': 'etag', # noqa: E501
'description': 'description', # noqa: E501
}
read_only_vars = {
@ -152,6 +154,7 @@ class AvailableJobType(ModelNormal):
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
description (str): The description/tooltip shown in the user interface.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
@ -243,6 +246,7 @@ class AvailableJobType(ModelNormal):
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
description (str): The description/tooltip shown in the user interface.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)

View File

@ -0,0 +1,278 @@
"""
Flamenco manager
Render Farm manager API # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from flamenco.manager.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from flamenco.manager.exceptions import ApiAttributeError
class EventFarmStatus(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
    }

    validations = {
    }

    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # Bugfix: FarmStatusReport was referenced here without ever being
        # imported in this module, so accessing this property raised
        # NameError. Import it lazily (inside the method) to avoid circular
        # imports between generated model modules, mirroring the
        # lazy_import() pattern used in farm_status_report.py.
        from flamenco.manager.model.farm_status_report import FarmStatusReport
        return {
            'value': (FarmStatusReport,),
        }

    @cached_property
    def discriminator():
        return None


    attribute_map = {}

    read_only_vars = set()

    _composed_schemas = None

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """EventFarmStatus - a model defined in OpenAPI

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (FarmStatusReport):  # noqa: E501

        Keyword Args:
            value (FarmStatusReport):  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())

        # 'value' may be passed positionally or as a keyword, but is required.
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """EventFarmStatus - a model defined in OpenAPI

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (FarmStatusReport):  # noqa: E501

        Keyword Args:
            value (FarmStatusReport):  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())

        # Bypass __init__; deserialization configures the instance manually.
        self = super(OpenApiModel, cls).__new__(cls)

        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        return self

View File

@ -0,0 +1,287 @@
"""
Flamenco manager
Render Farm manager API # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from flamenco.manager.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from flamenco.manager.exceptions import ApiAttributeError
class FarmStatus(ModelSimple):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Allowed enum values for the farm status string, as defined in the
    # OpenAPI document's FarmStatus schema.
    allowed_values = {
        ('value',): {
            'ACTIVE': "active",
            'IDLE': "idle",
            'WAITING': "waiting",
            'ASLEEP': "asleep",
            'INOPERATIVE': "inoperative",
            'UNKNOWN': "unknown",
            'STARTING': "starting",
        },
    }

    validations = {
    }

    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'value': (str,),
        }

    @cached_property
    def discriminator():
        return None


    attribute_map = {}

    read_only_vars = set()

    _composed_schemas = None

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """FarmStatus - a model defined in OpenAPI

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (str): must be one of ["active", "idle", "waiting", "asleep", "inoperative", "unknown", "starting", ]  # noqa: E501

        Keyword Args:
            value (str): must be one of ["active", "idle", "waiting", "asleep", "inoperative", "unknown", "starting", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())

        # 'value' may be passed positionally or as a keyword, but is required.
        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """FarmStatus - a model defined in OpenAPI

        Note that value can be passed either in args or in kwargs, but not in both.

        Args:
            args[0] (str): must be one of ["active", "idle", "waiting", "asleep", "inoperative", "unknown", "starting", ]  # noqa: E501

        Keyword Args:
            value (str): must be one of ["active", "idle", "waiting", "asleep", "inoperative", "unknown", "starting", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # required up here when default value is not given
        _path_to_item = kwargs.pop('_path_to_item', ())

        # Bypass __init__; deserialization configures the instance manually.
        self = super(OpenApiModel, cls).__new__(cls)

        if 'value' in kwargs:
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            raise ApiTypeError(
                "value is required, but not passed in args or kwargs and doesn't have default",
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.value = value
        if kwargs:
            raise ApiTypeError(
                "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
                    kwargs,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        return self

View File

@ -0,0 +1,267 @@
"""
Flamenco manager
Render Farm manager API # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from flamenco.manager.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from flamenco.manager.exceptions import ApiAttributeError
def lazy_import():
    # Import FarmStatus lazily (and publish it via globals()) to break the
    # circular import between the generated model modules; openapi_types()
    # and additional_properties_type() call this before using FarmStatus.
    from flamenco.manager.model.farm_status import FarmStatus
    globals()['FarmStatus'] = FarmStatus
class FarmStatusReport(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'status': (FarmStatus,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Maps the python attribute name to the JSON key in the OpenAPI document.
    attribute_map = {
        'status': 'status',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, status, *args, **kwargs):  # noqa: E501
        """FarmStatusReport - a model defined in OpenAPI

        Args:
            status (FarmStatus):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__; deserialization configures the instance manually.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.status = status
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, status, *args, **kwargs):  # noqa: E501
        """FarmStatusReport - a model defined in OpenAPI

        Args:
            status (FarmStatus):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.status = status
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, direct construction rejects
            # assignments to read-only attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")

View File

@ -22,6 +22,7 @@ from flamenco.manager.model.blender_path_find_result import BlenderPathFindResul
from flamenco.manager.model.blender_path_source import BlenderPathSource
from flamenco.manager.model.command import Command
from flamenco.manager.model.error import Error
from flamenco.manager.model.event_farm_status import EventFarmStatus
from flamenco.manager.model.event_job_update import EventJobUpdate
from flamenco.manager.model.event_last_rendered_update import EventLastRenderedUpdate
from flamenco.manager.model.event_life_cycle import EventLifeCycle
@ -29,6 +30,8 @@ from flamenco.manager.model.event_task_log_update import EventTaskLogUpdate
from flamenco.manager.model.event_task_update import EventTaskUpdate
from flamenco.manager.model.event_worker_tag_update import EventWorkerTagUpdate
from flamenco.manager.model.event_worker_update import EventWorkerUpdate
from flamenco.manager.model.farm_status import FarmStatus
from flamenco.manager.model.farm_status_report import FarmStatusReport
from flamenco.manager.model.flamenco_version import FlamencoVersion
from flamenco.manager.model.job import Job
from flamenco.manager.model.job_all_of import JobAllOf

View File

@ -4,7 +4,7 @@ Render Farm manager API
The `flamenco.manager` package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project:
- API version: 1.0.0
- Package version: 3.5-alpha1
- Package version: 3.6-alpha0
- Build package: org.openapitools.codegen.languages.PythonClientCodegen
For more information, please visit [https://flamenco.io/](https://flamenco.io/)
@ -101,6 +101,7 @@ Class | Method | HTTP request | Description
*MetaApi* | [**find_blender_exe_path**](flamenco/manager/docs/MetaApi.md#find_blender_exe_path) | **GET** /api/v3/configuration/check/blender | Find one or more CLI commands for use as way to start Blender
*MetaApi* | [**get_configuration**](flamenco/manager/docs/MetaApi.md#get_configuration) | **GET** /api/v3/configuration | Get the configuration of this Manager.
*MetaApi* | [**get_configuration_file**](flamenco/manager/docs/MetaApi.md#get_configuration_file) | **GET** /api/v3/configuration/file | Retrieve the configuration of Flamenco Manager.
*MetaApi* | [**get_farm_status**](flamenco/manager/docs/MetaApi.md#get_farm_status) | **GET** /api/v3/status | Get the status of this Flamenco farm.
*MetaApi* | [**get_shared_storage**](flamenco/manager/docs/MetaApi.md#get_shared_storage) | **GET** /api/v3/configuration/shared-storage/{audience}/{platform} | Get the shared storage location of this Manager, adjusted for the given audience and platform.
*MetaApi* | [**get_variables**](flamenco/manager/docs/MetaApi.md#get_variables) | **GET** /api/v3/configuration/variables/{audience}/{platform} | Get the variables of this Manager. Used by the Blender add-on to recognise two-way variables, and for the web interface to do variable replacement based on the browser&#39;s platform.
*MetaApi* | [**get_version**](flamenco/manager/docs/MetaApi.md#get_version) | **GET** /api/v3/version | Get the Flamenco version of this Manager
@ -147,6 +148,7 @@ Class | Method | HTTP request | Description
- [BlenderPathSource](flamenco/manager/docs/BlenderPathSource.md)
- [Command](flamenco/manager/docs/Command.md)
- [Error](flamenco/manager/docs/Error.md)
- [EventFarmStatus](flamenco/manager/docs/EventFarmStatus.md)
- [EventJobUpdate](flamenco/manager/docs/EventJobUpdate.md)
- [EventLastRenderedUpdate](flamenco/manager/docs/EventLastRenderedUpdate.md)
- [EventLifeCycle](flamenco/manager/docs/EventLifeCycle.md)
@ -154,6 +156,8 @@ Class | Method | HTTP request | Description
- [EventTaskUpdate](flamenco/manager/docs/EventTaskUpdate.md)
- [EventWorkerTagUpdate](flamenco/manager/docs/EventWorkerTagUpdate.md)
- [EventWorkerUpdate](flamenco/manager/docs/EventWorkerUpdate.md)
- [FarmStatus](flamenco/manager/docs/FarmStatus.md)
- [FarmStatusReport](flamenco/manager/docs/FarmStatusReport.md)
- [FlamencoVersion](flamenco/manager/docs/FlamencoVersion.md)
- [Job](flamenco/manager/docs/Job.md)
- [JobAllOf](flamenco/manager/docs/JobAllOf.md)

View File

@ -0,0 +1,210 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# <pep8 compliant>
import dataclasses
import json
import platform
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from urllib3.exceptions import HTTPError, MaxRetryError
import bpy
if TYPE_CHECKING:
from flamenco.manager import ApiClient as _ApiClient
from flamenco.manager.models import (
AvailableJobTypes as _AvailableJobTypes,
FlamencoVersion as _FlamencoVersion,
SharedStorageLocation as _SharedStorageLocation,
WorkerTagList as _WorkerTagList,
)
else:
_ApiClient = object
_AvailableJobTypes = object
_FlamencoVersion = object
_SharedStorageLocation = object
_WorkerTagList = object
@dataclasses.dataclass
class ManagerInfo:
    """Cached information obtained from a Flamenco Manager.

    This is the root object of what is stored on disk, every time someone
    presses a 'refresh' button to update worker tags, job types, etc.
    """

    # Populated by fetch() from the Manager's meta/jobs/worker-management
    # APIs (get_version, get_shared_storage, get_job_types,
    # fetch_worker_tags respectively). The annotations are the TYPE_CHECKING
    # aliases above, which are plain `object` at runtime.
    flamenco_version: _FlamencoVersion
    shared_storage: _SharedStorageLocation
    job_types: _AvailableJobTypes
    worker_tags: _WorkerTagList

    @staticmethod
    def type_info() -> dict[str, type]:
        """Map each dataclass field name to its actual API model class.

        NOTE(review): presumably used to (de)serialise the on-disk cache,
        since the runtime annotations are not real types — confirm against
        the cache load/save code.
        """
        # Do a late import, so that the API is only imported when actually used.
        from flamenco.manager.models import (
            AvailableJobTypes,
            FlamencoVersion,
            SharedStorageLocation,
            WorkerTagList,
        )

        # These types cannot be obtained by introspecting the ManagerInfo class, as
        # at runtime that doesn't use real type annotations.
        return {
            "flamenco_version": FlamencoVersion,
            "shared_storage": SharedStorageLocation,
            "job_types": AvailableJobTypes,
            "worker_tags": WorkerTagList,
        }
class FetchError(RuntimeError):
    """Raised when the manager info could not be fetched from the Manager."""
class LoadError(RuntimeError):
    """Raised when the manager info could not be loaded from disk cache."""
# In-memory cache, so the info only has to be fetched/loaded once per session.
# Refreshed by fetch() and load_into_cache().
_cached_manager_info: Optional[ManagerInfo] = None
def fetch(api_client: _ApiClient) -> ManagerInfo:
    """Query the Manager for its info, store it in the in-memory cache, and return it.

    :raises FetchError: when the Manager cannot be reached.
    """
    global _cached_manager_info

    # Do a late import, so that the API is only imported when actually used.
    from flamenco.manager import ApiException
    from flamenco.manager.apis import MetaApi, JobsApi, WorkerMgtApi
    from flamenco.manager.models import (
        AvailableJobTypes,
        FlamencoVersion,
        SharedStorageLocation,
        WorkerTagList,
    )

    meta_api = MetaApi(api_client)
    jobs_api = JobsApi(api_client)
    worker_mgt_api = WorkerMgtApi(api_client)

    try:
        version: FlamencoVersion = meta_api.get_version()
        storage: SharedStorageLocation = meta_api.get_shared_storage(
            "users", platform.system().lower()
        )
        available_job_types: AvailableJobTypes = jobs_api.get_job_types()
        tags: WorkerTagList = worker_mgt_api.fetch_worker_tags()
    except ApiException as ex:
        raise FetchError(f"Manager cannot be reached: {ex}") from ex
    except MaxRetryError as ex:
        # This is the common error, when for example the port number is
        # incorrect and nothing is listening. The exception text is not included
        # because it's very long and confusing.
        raise FetchError("Manager cannot be reached") from ex
    except HTTPError as ex:
        # NOTE: MaxRetryError subclasses HTTPError, so it must be caught first.
        raise FetchError(f"Manager cannot be reached: {ex}") from ex

    _cached_manager_info = ManagerInfo(
        flamenco_version=version,
        shared_storage=storage,
        job_types=available_job_types,
        worker_tags=tags,
    )
    return _cached_manager_info
class Encoder(json.JSONEncoder):
    """JSON encoder that knows how to serialise ManagerInfo and OpenAPI models."""

    def default(self, o):
        from flamenco.manager.model_utils import OpenApiModel

        if isinstance(o, ManagerInfo):
            # dataclasses.asdict() creates a copy of the OpenAPI models,
            # in a way that just doesn't work, hence this workaround.
            return {
                field.name: getattr(o, field.name)
                for field in dataclasses.fields(o)
            }
        if isinstance(o, OpenApiModel):
            return o.to_dict()
        return super().default(o)
def _to_json(info: ManagerInfo) -> str:
    """Serialise the Manager info as indented JSON text."""
    return json.dumps(info, cls=Encoder, indent=" ")
def _from_json(contents: Union[str, bytes]) -> ManagerInfo:
    """Reconstruct a ManagerInfo instance from its JSON representation.

    Each field is converted back into its proper OpenAPI model type.
    """
    # Do a late import, so that the API is only imported when actually used.
    from flamenco.manager.configuration import Configuration
    from flamenco.manager.model_utils import validate_and_convert_types

    json_dict = json.loads(contents)
    dummy_cfg = Configuration()

    api_models = {
        name: validate_and_convert_types(
            json_dict[name],
            (api_type,),
            [name],
            True,
            True,
            dummy_cfg,
        )
        for name, api_type in ManagerInfo.type_info().items()
    }
    return ManagerInfo(**api_models)
def _json_filepath() -> Path:
    """Return the location of the on-disk cache file."""
    # This is the '~/.config/blender/{version}' path.
    config_dir = Path(bpy.utils.resource_path(type="USER")) / "config"
    return config_dir / "flamenco-manager-info.json"
def save(info: ManagerInfo) -> None:
    """Write the Manager info to the on-disk cache, creating directories as needed."""
    json_path = _json_filepath()
    json_path.parent.mkdir(parents=True, exist_ok=True)
    json_path.write_text(_to_json(info), encoding="utf8")
def load() -> ManagerInfo:
    """Read the Manager info from the on-disk cache.

    :raises FileNotFoundError: when the cache file does not exist.
    :raises LoadError: when the file cannot be read or parsed.
    """
    json_path = _json_filepath()
    if not json_path.exists():
        raise FileNotFoundError(f"{json_path.name} not found in {json_path.parent}")

    try:
        contents = json_path.read_text(encoding="utf8")
    except OSError as ex:
        raise LoadError(f"Could not read {json_path}: {ex}") from ex

    try:
        return _from_json(contents)
    except json.JSONDecodeError as ex:
        raise LoadError(f"Could not decode JSON in {json_path}") from ex
def load_into_cache() -> Optional[ManagerInfo]:
    """Refresh the in-memory cache from the on-disk cache.

    :return: the loaded info, or None when it could not be loaded.
    """
    global _cached_manager_info

    # Invalidate first, so a failed load never leaves stale info behind.
    _cached_manager_info = None
    try:
        info = load()
    except FileNotFoundError:
        return None
    except LoadError as ex:
        print(f"Could not load Flamenco Manager info from disk: {ex}")
        return None

    _cached_manager_info = info
    return info
def load_cached() -> Optional[ManagerInfo]:
    """Return the in-memory cached info, falling back to the on-disk cache."""
    global _cached_manager_info
    if _cached_manager_info is None:
        return load_into_cache()
    return _cached_manager_info

View File

@ -10,7 +10,7 @@ from urllib3.exceptions import HTTPError, MaxRetryError
import bpy
from . import job_types, job_submission, preferences, worker_tags
from . import job_types, job_submission, preferences, manager_info
from .job_types_propgroup import JobTypePropertyGroup
from .bat.submodules import bpathlib
@ -51,80 +51,6 @@ class FlamencoOpMixin:
return api_client
class FLAMENCO_OT_fetch_job_types(FlamencoOpMixin, bpy.types.Operator):
    bl_idname = "flamenco.fetch_job_types"
    bl_label = "Fetch Job Types"
    bl_description = "Query Flamenco Manager to obtain the available job types"

    def execute(self, context: bpy.types.Context) -> set[str]:
        """Fetch the job types from the Manager and re-select the previous one.

        :return: {"FINISHED"} on success, {"CANCELLED"} when the Manager
            could not be reached.
        """
        api_client = self.get_api_client(context)

        from flamenco.manager import ApiException

        scene = context.scene
        # Remember the current selection so it can be restored after the refresh.
        old_job_type_name = getattr(scene, "flamenco_job_type", "")

        try:
            job_types.fetch_available_job_types(api_client, scene)
        except ApiException as ex:
            self.report({"ERROR"}, "Error getting job types: %s" % ex)
            return {"CANCELLED"}
        except MaxRetryError as ex:
            # This is the common error, when for example the port number is
            # incorrect and nothing is listening.
            self.report({"ERROR"}, "Unable to reach Manager")
            return {"CANCELLED"}

        if old_job_type_name:
            try:
                scene.flamenco_job_type = old_job_type_name
            except TypeError:  # Thrown when the old job type no longer exists.
                # You cannot un-set an enum property, and 'empty string' is not
                # a valid value either, so better to just remove the underlying
                # ID property.
                del scene["flamenco_job_type"]
                self.report(
                    {"WARNING"},
                    "Job type %r no longer available, choose another one"
                    % old_job_type_name,
                )

        job_types.update_job_type_properties(scene)
        return {"FINISHED"}
class FLAMENCO_OT_fetch_worker_tags(FlamencoOpMixin, bpy.types.Operator):
    bl_idname = "flamenco.fetch_worker_tags"
    bl_label = "Fetch Worker Tags"
    bl_description = "Query Flamenco Manager to obtain the available worker tags"

    def execute(self, context: bpy.types.Context) -> set[str]:
        """Fetch the worker tags from the Manager and re-select the previous one.

        :return: {"FINISHED"} on success, {"CANCELLED"} when the Manager
            could not be reached.
        """
        api_client = self.get_api_client(context)

        from flamenco.manager import ApiException

        scene = context.scene
        # Remember the current selection so it can be restored after the refresh.
        old_tag = getattr(scene, "flamenco_worker_tag", "")

        try:
            worker_tags.refresh(context, api_client)
        except ApiException as ex:
            # Fixed copy-paste error: this operator fetches worker tags,
            # but the message used to say "job types".
            self.report({"ERROR"}, "Error getting worker tags: %s" % ex)
            return {"CANCELLED"}
        except MaxRetryError:
            # This is the common error, when for example the port number is
            # incorrect and nothing is listening.
            self.report({"ERROR"}, "Unable to reach Manager")
            return {"CANCELLED"}

        if old_tag:
            # TODO: handle cases where the old tag no longer exists.
            scene.flamenco_worker_tag = old_tag

        return {"FINISHED"}
class FLAMENCO_OT_ping_manager(FlamencoOpMixin, bpy.types.Operator):
bl_idname = "flamenco.ping_manager"
bl_label = "Flamenco: Ping Manager"
@ -132,13 +58,13 @@ class FLAMENCO_OT_ping_manager(FlamencoOpMixin, bpy.types.Operator):
bl_options = {"REGISTER"} # No UNDO.
def execute(self, context: bpy.types.Context) -> set[str]:
from . import comms, preferences
from . import comms
api_client = self.get_api_client(context)
prefs = preferences.get(context)
report, level = comms.ping_manager_with_report(
context.window_manager, api_client, prefs
report, level = comms.ping_manager(
context.window_manager,
context.scene,
api_client,
)
self.report({level}, report)
@ -259,29 +185,31 @@ class FLAMENCO_OT_submit_job(FlamencoOpMixin, bpy.types.Operator):
:return: an error string when something went wrong.
"""
from . import comms, preferences
from . import comms
# Get the manager's info. This is cached in the preferences, so
# regardless of whether this function actually responds to version
# mismatches, it has to be called to also refresh the shared storage
# location.
# Get the manager's info. This is cached to disk, so regardless of
# whether this function actually responds to version mismatches, it has
# to be called to also refresh the shared storage location.
api_client = self.get_api_client(context)
prefs = preferences.get(context)
mgrinfo = comms.ping_manager(context.window_manager, api_client, prefs)
if mgrinfo.error:
return mgrinfo.error
report, report_level = comms.ping_manager(
context.window_manager,
context.scene,
api_client,
)
if report_level != "INFO":
return report
# Check the Manager's version.
if not self.ignore_version_mismatch:
my_version = comms.flamenco_client_version()
assert mgrinfo.version is not None
mgrinfo = manager_info.load_cached()
# Safe to assume, as otherwise the ping_manager() call would not have succeeded.
assert mgrinfo is not None
my_version = comms.flamenco_client_version()
mgrversion = mgrinfo.flamenco_version.shortversion
try:
mgrversion = mgrinfo.version.shortversion
except AttributeError:
# shortversion was introduced in Manager version 3.0-beta2, which
# may not be running here yet.
mgrversion = mgrinfo.version.version
if mgrversion != my_version:
context.window_manager.flamenco_version_mismatch = True
return (
@ -299,6 +227,23 @@ class FLAMENCO_OT_submit_job(FlamencoOpMixin, bpy.types.Operator):
# Empty error message indicates 'ok'.
return ""
def _manager_info(
self, context: bpy.types.Context
) -> Optional[manager_info.ManagerInfo]:
"""Load the manager info.
If it cannot be loaded, returns None after emitting an error message and
calling self._quit(context).
"""
manager = manager_info.load_cached()
if not manager:
self.report(
{"ERROR"}, "No information known about Flamenco Manager, refresh first."
)
self._quit(context)
return None
return manager
def _save_blendfile(self, context):
"""Save to a different file, specifically for Flamenco.
@ -368,8 +313,11 @@ class FLAMENCO_OT_submit_job(FlamencoOpMixin, bpy.types.Operator):
self._quit(context)
return {"CANCELLED"}
prefs = preferences.get(context)
if prefs.is_shaman_enabled:
manager = self._manager_info(context)
if not manager:
return {"CANCELLED"}
if manager.shared_storage.shaman_enabled:
# self.blendfile_on_farm will be set when BAT created the checkout,
# see _on_bat_pack_msg() below.
self.blendfile_on_farm = None
@ -414,11 +362,14 @@ class FLAMENCO_OT_submit_job(FlamencoOpMixin, bpy.types.Operator):
raise FileNotFoundError()
# Determine where the blend file will be stored.
manager = self._manager_info(context)
if not manager:
raise FileNotFoundError("Manager info not known")
unique_dir = "%s-%s" % (
datetime.datetime.now().isoformat("-").replace(":", ""),
self.job_name,
)
pack_target_dir = Path(prefs.job_storage) / unique_dir
pack_target_dir = Path(manager.shared_storage.location) / unique_dir
# TODO: this should take the blendfile location relative to the project path into account.
pack_target_file = pack_target_dir / blendfile.name
@ -690,8 +641,6 @@ class FLAMENCO3_OT_explore_file_path(bpy.types.Operator):
classes = (
FLAMENCO_OT_fetch_job_types,
FLAMENCO_OT_fetch_worker_tags,
FLAMENCO_OT_ping_manager,
FLAMENCO_OT_eval_setting,
FLAMENCO_OT_submit_job,

View File

@ -5,7 +5,7 @@ from pathlib import Path
import bpy
from . import projects
from . import projects, manager_info
def discard_flamenco_client(context):
@ -16,9 +16,7 @@ def discard_flamenco_client(context):
context.window_manager.flamenco_status_ping = ""
def _refresh_the_planet(
prefs: "FlamencoPreferences", context: bpy.types.Context
) -> None:
def _refresh_the_planet(context: bpy.types.Context) -> None:
"""Refresh all GUI areas."""
for win in context.window_manager.windows:
for area in win.screen.areas:
@ -35,7 +33,8 @@ def _manager_url_updated(prefs, context):
# Warning, be careful what of the context to access here. Accessing /
# changing too much can cause crashes, infinite loops, etc.
comms.ping_manager_with_report(context.window_manager, api_client, prefs)
comms.ping_manager(context.window_manager, context.scene, api_client)
_refresh_the_planet(context)
_project_finder_enum_items = [
@ -66,22 +65,6 @@ class FlamencoPreferences(bpy.types.AddonPreferences):
items=_project_finder_enum_items,
)
is_shaman_enabled: bpy.props.BoolProperty( # type: ignore
name="Shaman Enabled",
description="Whether this Manager has the Shaman protocol enabled",
default=False,
update=_refresh_the_planet,
)
# Property that should be editable from Python. It's not exposed to the GUI.
job_storage: bpy.props.StringProperty( # type: ignore
name="Job Storage Directory",
subtype="DIR_PATH",
default="",
options={"HIDDEN"},
description="Directory where blend files are stored when submitting them to Flamenco. This value is determined by Flamenco Manager",
)
# Property that gets its value from the above _job_storage, and cannot be
# set. This makes it read-only in the GUI.
job_storage_for_gui: bpy.props.StringProperty( # type: ignore
@ -90,14 +73,7 @@ class FlamencoPreferences(bpy.types.AddonPreferences):
default="",
options={"SKIP_SAVE"},
description="Directory where blend files are stored when submitting them to Flamenco. This value is determined by Flamenco Manager",
get=lambda prefs: prefs.job_storage,
)
worker_tags: bpy.props.CollectionProperty( # type: ignore
type=WorkerTag,
name="Worker Tags",
description="Cache for the worker tags available on the configured Manager",
options={"HIDDEN"},
get=lambda prefs: prefs._job_storage(),
)
def draw(self, context: bpy.types.Context) -> None:
@ -116,7 +92,9 @@ class FlamencoPreferences(bpy.types.AddonPreferences):
split.label(text="")
split.label(text=label)
if not self.job_storage:
manager = manager_info.load_cached()
if not manager:
text_row(col, "Press the refresh button before using Flamenco")
if context.window_manager.flamenco_status_ping:
@ -126,7 +104,7 @@ class FlamencoPreferences(bpy.types.AddonPreferences):
text_row(aligned, "Press the refresh button to check the connection")
text_row(aligned, "and update the job storage location")
if self.is_shaman_enabled:
if manager and manager.shared_storage.shaman_enabled:
text_row(col, "Shaman enabled")
col.prop(self, "job_storage_for_gui", text="Job Storage")
@ -152,6 +130,12 @@ class FlamencoPreferences(bpy.types.AddonPreferences):
blendfile = Path(bpy.data.filepath)
return projects.for_blendfile(blendfile, self.project_finder)
def _job_storage(self) -> str:
info = manager_info.load_cached()
if not info:
return "Unknown, refresh first."
return str(info.shared_storage.location)
def get(context: bpy.types.Context) -> FlamencoPreferences:
"""Return the add-on preferences."""

View File

@ -2,7 +2,7 @@
# <pep8 compliant>
from pathlib import Path
from typing import Callable, TypeAlias
from typing import Callable
import dataclasses
from .bat.submodules import bpathlib
@ -45,7 +45,7 @@ def _finder_subversion(blendfile: Path) -> Path:
def _search_path_marker(blendfile: Path, marker_path: str) -> Path:
"""Go up the directory hierarchy until a file or directory 'marker_path' is found."""
blendfile_dir = bpathlib.make_absolute(blendfile).parent
blendfile_dir: Path = bpathlib.make_absolute(blendfile).parent
directory = blendfile_dir
while True:
@ -64,7 +64,7 @@ def _search_path_marker(blendfile: Path, marker_path: str) -> Path:
return blendfile_dir
Finder: TypeAlias = Callable[[Path], Path]
Finder = Callable[[Path], Path]
@dataclasses.dataclass

View File

@ -1,57 +1,35 @@
# SPDX-License-Identifier: GPL-3.0-or-later
from typing import TYPE_CHECKING, Union
from typing import Union
import bpy
from . import preferences
if TYPE_CHECKING:
from flamenco.manager import ApiClient as _ApiClient
else:
_ApiClient = object
from . import manager_info
_enum_items: list[Union[tuple[str, str, str], tuple[str, str, str, int, int]]] = []
def refresh(context: bpy.types.Context, api_client: _ApiClient) -> None:
"""Fetch the available worker tags from the Manager."""
from flamenco.manager import ApiClient
from flamenco.manager.api import worker_mgt_api
from flamenco.manager.model.worker_tag_list import WorkerTagList
assert isinstance(api_client, ApiClient)
api = worker_mgt_api.WorkerMgtApi(api_client)
response: WorkerTagList = api.fetch_worker_tags()
# Store on the preferences, so a cached version persists until the next refresh.
prefs = preferences.get(context)
prefs.worker_tags.clear()
for tag in response.tags:
rna_tag = prefs.worker_tags.add()
rna_tag.id = tag.id
rna_tag.name = tag.name
rna_tag.description = getattr(tag, "description", "")
# Preferences have changed, so make sure that Blender saves them (assuming
# auto-save here).
context.preferences.is_dirty = True
def _get_enum_items(self, context):
global _enum_items
prefs = preferences.get(context)
manager = manager_info.load_cached()
if manager is None:
_enum_items = [
(
"-",
"-tags unknown-",
"Refresh to load the available Worker tags from the Manager",
),
]
return _enum_items
_enum_items = [
("-", "All", "No specific tag assigned, any worker can handle this job"),
]
_enum_items.extend(
(tag.id, tag.name, tag.description)
for tag in prefs.worker_tags
)
for tag in manager.worker_tags.tags:
_enum_items.append((tag.id, tag.name, getattr(tag, "description", "")))
return _enum_items
@ -70,9 +48,3 @@ def unregister() -> None:
delattr(ob, attr)
except AttributeError:
pass
if __name__ == "__main__":
import doctest
print(doctest.testmod())

View File

@ -27,6 +27,7 @@ import (
"projects.blender.org/studio/flamenco/internal/manager/api_impl/dummy"
"projects.blender.org/studio/flamenco/internal/manager/config"
"projects.blender.org/studio/flamenco/internal/manager/eventbus"
"projects.blender.org/studio/flamenco/internal/manager/farmstatus"
"projects.blender.org/studio/flamenco/internal/manager/job_compilers"
"projects.blender.org/studio/flamenco/internal/manager/job_deleter"
"projects.blender.org/studio/flamenco/internal/manager/last_rendered"
@ -55,6 +56,10 @@ const (
developmentWebInterfacePort = 8081
webappEntryPoint = "index.html"
// dbOpenTimeout is the time the persistence layer gets to open the database.
// This includes database migrations, which can take some time to perform.
dbOpenTimeout = 1 * time.Minute
)
type shutdownFunc func()
@ -174,10 +179,12 @@ func runFlamencoManager() bool {
shamanServer := buildShamanServer(configService, isFirstRun)
jobDeleter := job_deleter.NewService(persist, localStorage, eventBroker, shamanServer)
farmStatus := farmstatus.NewService(persist, eventBroker)
flamenco := api_impl.NewFlamenco(
compiler, persist, eventBroker, logStorage, configService,
taskStateMachine, shamanServer, timeService, lastRender,
localStorage, sleepScheduler, jobDeleter)
localStorage, sleepScheduler, jobDeleter, farmStatus)
e := buildWebService(flamenco, persist, ssdp, socketio, urls, localStorage)
@ -278,6 +285,13 @@ func runFlamencoManager() bool {
jobDeleter.Run(mainCtx)
}()
// Run the Farm Status service.
wg.Add(1)
go func() {
defer wg.Done()
farmStatus.Run(mainCtx)
}()
// Log the URLs last, hopefully that makes them more visible / encouraging to go to for users.
go func() {
time.Sleep(100 * time.Millisecond)
@ -369,7 +383,7 @@ func openDB(configService config.Service) *persistence.DB {
log.Fatal().Msg("configure the database in flamenco-manager.yaml")
}
dbCtx, dbCtxCancel := context.WithTimeout(context.Background(), 5*time.Second)
dbCtx, dbCtxCancel := context.WithTimeout(context.Background(), dbOpenTimeout)
defer dbCtxCancel()
persist, err := persistence.OpenDB(dbCtx, dsn)
if err != nil {

View File

@ -38,7 +38,7 @@ func findBlender() {
result, err := find_blender.Find(ctx)
switch {
case errors.Is(err, fs.ErrNotExist), errors.Is(err, exec.ErrNotFound):
log.Warn().Msg("Blender could not be found. " + helpMsg)
log.Info().Msg("Blender could not be found. " + helpMsg)
case err != nil:
log.Warn().AnErr("cause", err).Msg("There was an error finding Blender on this system. " + helpMsg)
default:

View File

@ -23,7 +23,9 @@ import (
"projects.blender.org/studio/flamenco/internal/appinfo"
"projects.blender.org/studio/flamenco/internal/worker"
"projects.blender.org/studio/flamenco/internal/worker/cli_runner"
"projects.blender.org/studio/flamenco/pkg/oomscore"
"projects.blender.org/studio/flamenco/pkg/sysinfo"
"projects.blender.org/studio/flamenco/pkg/website"
)
var (
@ -113,6 +115,10 @@ func main() {
findBlender()
findFFmpeg()
// Create the CLI runner before the auto-discovery, to make any configuration
// problems clear before waiting for the Manager to respond.
cliRunner := createCLIRunner(&configWrangler)
// Give the auto-discovery some time to find a Manager.
discoverTimeout := 10 * time.Minute
discoverCtx, discoverCancel := context.WithTimeout(context.Background(), discoverTimeout)
@ -148,7 +154,6 @@ func main() {
return
}
cliRunner := cli_runner.NewCLIRunner()
listener = worker.NewListener(client, buffer)
cmdRunner := worker.NewCommandExecutor(cliRunner, listener, timeService)
taskRunner := worker.NewTaskExecutor(cmdRunner, listener)
@ -296,8 +301,34 @@ func upstreamBufferOrDie(client worker.FlamencoClient, timeService clock.Clock)
func logFatalManagerDiscoveryError(err error, discoverTimeout time.Duration) {
if errors.Is(err, context.DeadlineExceeded) {
log.Fatal().Str("timeout", discoverTimeout.String()).Msg("could not discover Manager in time")
log.Fatal().Stringer("timeout", discoverTimeout).
Msgf("could not discover Manager in time, see %s", website.CannotFindManagerHelpURL)
} else {
log.Fatal().Err(err).Msg("auto-discovery error")
log.Fatal().Err(err).
Msgf("auto-discovery error, see %s", website.CannotFindManagerHelpURL)
}
}
// createCLIRunner constructs the CLI runner used to execute external
// commands, applying the configured `oom_score_adjust` when the worker
// configuration requests it. Configuration errors are fatal.
func createCLIRunner(configWrangler *worker.FileConfigWrangler) *cli_runner.CLIRunner {
	config, err := configWrangler.WorkerConfig()
	if err != nil {
		log.Fatal().Err(err).Msg("error loading worker configuration")
	}

	// No adjustment configured: plain runner.
	if config.LinuxOOMScoreAdjust == nil {
		log.Debug().Msg("executables will be run without OOM score adjustment")
		return cli_runner.NewCLIRunner()
	}

	// OOM score adjustment is a Linux-only feature; warn instead of failing
	// so the same config can be shared across platforms.
	if !oomscore.Available() {
		log.Warn().
			Msgf("config: oom_score_adjust configured, but that is only available on Linux, not this platform. See %s for more information.",
				website.OOMScoreAdjURL)
		return cli_runner.NewCLIRunner()
	}

	adjustment := *config.LinuxOOMScoreAdjust
	log.Info().Int("oom_score_adjust", adjustment).Msg("executables will be run with OOM score adjustment")
	return cli_runner.NewCLIRunnerWithOOMScoreAdjuster(adjustment)
}

View File

@ -0,0 +1,189 @@
package main
// SPDX-License-Identifier: GPL-3.0-or-later
import (
"context"
"database/sql"
"flag"
"fmt"
"os"
"os/signal"
"regexp"
"strings"
"syscall"
"time"
"github.com/mattn/go-colorable"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"gopkg.in/yaml.v2"
_ "modernc.org/sqlite"
)
var (
	// Tables and/or indices to skip when writing the schema.
	// Anything that is *not* to be seen by sqlc should be listed here.
	skips = map[SQLiteSchema]bool{
		// Goose manages its own versioning table. SQLC should ignore its existence.
		{Type: "table", Name: "goose_db_version"}: true,
	}

	// tableNameDequoter matches `CREATE TABLE "name"` statements, capturing
	// the quoted name ($1) and its unquoted content ($2), so the quotes can
	// be stripped — presumably because sqlc expects unquoted table names.
	tableNameDequoter = regexp.MustCompile("^(?:CREATE TABLE )(\"([^\"]+)\")")
)
// SQLiteSchema models a single row of SQLite's `sqlite_schema` table,
// matching the column order returned by `select * from sqlite_schema`.
type SQLiteSchema struct {
	Type      string
	Name      string
	TableName string
	RootPage  int
	SQL       sql.NullString // NULL for some auto-created indices.
}
// saveSchema writes the schema of the `flamenco-manager.sqlite` database (in
// the current working directory) to sqlOutPath as a series of SQL statements,
// for consumption by sqlc. SQLite-internal objects, entries listed in
// `skips`, and entries without SQL are not written.
func saveSchema(ctx context.Context, sqlOutPath string) error {
	db, err := sql.Open("sqlite", "flamenco-manager.sqlite")
	if err != nil {
		return err
	}
	defer db.Close()

	rows, err := db.QueryContext(ctx, "select * from sqlite_schema order by type desc, name asc")
	if err != nil {
		return err
	}
	defer rows.Close()

	sqlBuilder := strings.Builder{}
	for rows.Next() {
		var data SQLiteSchema
		if err := rows.Scan(
			&data.Type,
			&data.Name,
			&data.TableName,
			&data.RootPage,
			&data.SQL,
		); err != nil {
			return err
		}

		if strings.HasPrefix(data.Name, "sqlite_") {
			continue
		}
		if skips[SQLiteSchema{Type: data.Type, Name: data.Name}] {
			continue
		}
		if !data.SQL.Valid {
			continue
		}

		// Strip the quotes from quoted table names.
		// Renamed from `sql` to avoid shadowing the database/sql package.
		createSQL := tableNameDequoter.ReplaceAllString(data.SQL.String, "CREATE TABLE $2")
		sqlBuilder.WriteString(createSQL)
		sqlBuilder.WriteString(";\n")
	}
	// Iteration can stop early on an error (e.g. context cancellation);
	// without this check a partial schema would be written silently.
	if err := rows.Err(); err != nil {
		return err
	}

	sqlBytes := []byte(sqlBuilder.String())
	if err := os.WriteFile(sqlOutPath, sqlBytes, os.ModePerm); err != nil {
		return fmt.Errorf("writing to %s: %w", sqlOutPath, err)
	}

	log.Info().Str("path", sqlOutPath).Msg("schema written to file")
	return nil
}
// SqlcConfig models the minimal subset of the sqlc.yaml we need to parse.
type SqlcConfig struct {
	// Version is expected to be "2"; anything else is rejected.
	Version string `yaml:"version"`
	// SQL is expected to contain exactly one item, whose Schema is the
	// path the schema will be written to.
	SQL []struct {
		Schema string `yaml:"schema"`
	} `yaml:"sql"`
}
// main exports the current SQLite schema to the path configured in sqlc.yaml.
func main() {
	// Log to a colourised console.
	consoleWriter := zerolog.ConsoleWriter{Out: colorable.NewColorableStdout(), TimeFormat: time.RFC3339}
	log.Logger = log.Output(consoleWriter)

	parseCliArgs()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	installSignalHandler(cancel)

	schemaPath := schemaPathFromSqlcYAML()
	if err := saveSchema(ctx, schemaPath); err != nil {
		log.Fatal().Err(err).Msg("couldn't export schema")
	}
}
// installSignalHandler spawns a goroutine that handles incoming POSIX signals.
// Each received SIGINT/SIGTERM triggers the given cancel function.
func installSignalHandler(cancelFunc context.CancelFunc) {
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, os.Interrupt, syscall.SIGTERM)

	go func() {
		for signum := range signals {
			log.Info().Str("signal", signum.String()).Msg("signal received, shutting down")
			cancelFunc()
		}
	}()
}
// parseCliArgs parses the command line flags and sets the global log level.
func parseCliArgs() {
	var quiet, debug, trace bool
	flag.BoolVar(&quiet, "quiet", false, "Only log warning-level and worse.")
	flag.BoolVar(&debug, "debug", false, "Enable debug-level logging.")
	flag.BoolVar(&trace, "trace", false, "Enable trace-level logging.")
	flag.Parse()

	// The most verbose flag wins when more than one is given.
	logLevel := zerolog.InfoLevel
	switch {
	case trace:
		logLevel = zerolog.TraceLevel
	case debug:
		logLevel = zerolog.DebugLevel
	case quiet:
		logLevel = zerolog.WarnLevel
	}
	zerolog.SetGlobalLevel(logLevel)
}
func schemaPathFromSqlcYAML() string {
var sqlcConfig SqlcConfig
{
sqlcConfigBytes, err := os.ReadFile("sqlc.yaml")
if err != nil {
log.Fatal().Err(err).Msg("cannot read sqlc.yaml")
}
if err := yaml.Unmarshal(sqlcConfigBytes, &sqlcConfig); err != nil {
log.Fatal().Err(err).Msg("cannot parse sqlc.yaml")
}
}
if sqlcConfig.Version != "2" {
log.Fatal().
Str("version", sqlcConfig.Version).
Str("expected", "2").
Msg("unexpected version in sqlc.yaml")
}
if len(sqlcConfig.SQL) != 1 {
log.Fatal().
Int("sql items", len(sqlcConfig.SQL)).
Msg("sqlc.yaml should contain a single item in the 'sql' list")
}
schema := sqlcConfig.SQL[0].Schema
if schema == "" {
log.Fatal().Msg("sqlc.yaml should have a 'schema' key in the 'sql' item")
}
return schema
}

23
go.mod
View File

@ -1,6 +1,6 @@
module projects.blender.org/studio/flamenco
go 1.22
go 1.22.2
require (
github.com/adrg/xdg v0.4.0
@ -30,12 +30,12 @@ require (
github.com/tc-hib/go-winres v0.3.1
github.com/zcalusic/sysinfo v1.0.1
github.com/ziflex/lecho/v3 v3.1.0
golang.org/x/crypto v0.18.0
golang.org/x/crypto v0.22.0
golang.org/x/image v0.10.0
golang.org/x/net v0.20.0
golang.org/x/sync v0.6.0
golang.org/x/sys v0.16.0
golang.org/x/vuln v1.0.4
golang.org/x/net v0.24.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.19.0
golang.org/x/vuln v1.1.0
gopkg.in/yaml.v2 v2.4.0
gorm.io/gorm v1.25.5
honnef.co/go/tools v0.4.2
@ -58,6 +58,7 @@ require (
github.com/gorilla/websocket v1.5.0 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/labstack/gommon v0.4.0 // indirect
github.com/mailru/easyjson v0.7.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
@ -70,12 +71,18 @@ require (
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.1 // indirect
golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/tools v0.17.0 // indirect
golang.org/x/tools v0.20.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/uint128 v1.3.0 // indirect
modernc.org/cc/v3 v3.41.0 // indirect
modernc.org/ccgo/v3 v3.16.15 // indirect
modernc.org/libc v1.37.6 // indirect
modernc.org/mathutil v1.6.0 // indirect
modernc.org/memory v1.7.2 // indirect
modernc.org/opt v0.1.3 // indirect
modernc.org/strutil v1.2.0 // indirect
modernc.org/token v1.1.0 // indirect
)

38
go.sum
View File

@ -152,6 +152,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@ -221,8 +223,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a h1:Jw5wfR+h9mnIYH+OtGT2im5wV1YGGDora5vTv/aa5bE=
golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@ -235,8 +237,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@ -247,16 +249,16 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -286,8 +288,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -313,10 +315,10 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/vuln v1.0.4 h1:SP0mPeg2PmGCu03V+61EcQiOjmpri2XijexKdzv8Z1I=
golang.org/x/vuln v1.0.4/go.mod h1:NbJdUQhX8jY++FtuhrXs2Eyx0yePo9pF7nPlIjo9aaQ=
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
golang.org/x/vuln v1.1.0 h1:ECEdI+aEtjpF90eqEcDL5Q11DWSZAw5PJQWlp0+gWqc=
golang.org/x/vuln v1.1.0/go.mod h1:HT/Ar8fE34tbxWG2s7PYjVl+iIE4Er36/940Z+K540Y=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -349,6 +351,10 @@ modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI=
modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw=
modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
@ -361,5 +367,9 @@ modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY=
modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY=
modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE=

View File

@ -28,6 +28,7 @@ type Flamenco struct {
localStorage LocalStorage
sleepScheduler WorkerSleepScheduler
jobDeleter JobDeleter
farmstatus FarmStatusService
// The task scheduler can be locked to prevent multiple Workers from getting
// the same task. It is also used for certain other queries, like
@ -55,6 +56,7 @@ func NewFlamenco(
localStorage LocalStorage,
wss WorkerSleepScheduler,
jd JobDeleter,
farmstatus FarmStatusService,
) *Flamenco {
return &Flamenco{
jobCompiler: jc,
@ -69,6 +71,7 @@ func NewFlamenco(
localStorage: localStorage,
sleepScheduler: wss,
jobDeleter: jd,
farmstatus: farmstatus,
done: make(chan struct{}),
}

View File

@ -15,6 +15,7 @@ import (
"projects.blender.org/studio/flamenco/internal/manager/config"
"projects.blender.org/studio/flamenco/internal/manager/eventbus"
"projects.blender.org/studio/flamenco/internal/manager/farmstatus"
"projects.blender.org/studio/flamenco/internal/manager/job_compilers"
"projects.blender.org/studio/flamenco/internal/manager/job_deleter"
"projects.blender.org/studio/flamenco/internal/manager/last_rendered"
@ -26,7 +27,7 @@ import (
)
// Generate mock implementations of these interfaces.
//go:generate go run github.com/golang/mock/mockgen -destination mocks/api_impl_mock.gen.go -package mocks projects.blender.org/studio/flamenco/internal/manager/api_impl PersistenceService,ChangeBroadcaster,JobCompiler,LogStorage,ConfigService,TaskStateMachine,Shaman,LastRendered,LocalStorage,WorkerSleepScheduler,JobDeleter
//go:generate go run github.com/golang/mock/mockgen -destination mocks/api_impl_mock.gen.go -package mocks projects.blender.org/studio/flamenco/internal/manager/api_impl PersistenceService,ChangeBroadcaster,JobCompiler,LogStorage,ConfigService,TaskStateMachine,Shaman,LastRendered,LocalStorage,WorkerSleepScheduler,JobDeleter,FarmStatusService
type PersistenceService interface {
StoreAuthoredJob(ctx context.Context, authoredJob job_compilers.AuthoredJob) error
@ -239,3 +240,9 @@ type JobDeleter interface {
}
var _ JobDeleter = (*job_deleter.Service)(nil)
type FarmStatusService interface {
Report() api.FarmStatusReport
}
var _ FarmStatusService = (*farmstatus.Service)(nil)

View File

@ -8,7 +8,7 @@ import (
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/persistence"
"projects.blender.org/studio/flamenco/pkg/api"
)
@ -52,7 +52,7 @@ func TestQueryJobs(t *testing.T) {
Return([]*persistence.Job{&activeJob, &deletionQueuedJob}, nil)
err := mf.flamenco.QueryJobs(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
expectedJobs := api.JobsQueryResult{
Jobs: []api.Job{
@ -160,7 +160,7 @@ func TestFetchTask(t *testing.T) {
Return([]*persistence.Worker{&taskWorker}, nil)
err := mf.flamenco.FetchTask(echoCtx, taskUUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echoCtx, http.StatusOK, expectAPITask)
}

View File

@ -88,7 +88,7 @@ func TestSubmitJobWithoutSettings(t *testing.T) {
echoCtx := mf.prepareMockedJSONRequest(submittedJob)
requestWorkerStore(echoCtx, &worker)
err := mf.flamenco.SubmitJob(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
}
func TestSubmitJobWithSettings(t *testing.T) {
@ -177,7 +177,7 @@ func TestSubmitJobWithSettings(t *testing.T) {
echoCtx := mf.prepareMockedJSONRequest(submittedJob)
requestWorkerStore(echoCtx, &worker)
err := mf.flamenco.SubmitJob(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
}
func TestSubmitJobWithEtag(t *testing.T) {
@ -202,7 +202,7 @@ func TestSubmitJobWithEtag(t *testing.T) {
{
echoCtx := mf.prepareMockedJSONRequest(submittedJob)
err := mf.flamenco.SubmitJob(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echoCtx,
http.StatusPreconditionFailed, "rejecting job because its settings are outdated, refresh the job type")
}
@ -240,7 +240,7 @@ func TestSubmitJobWithEtag(t *testing.T) {
submittedJob.TypeEtag = ptr("correct etag")
echoCtx := mf.prepareMockedJSONRequest(submittedJob)
err := mf.flamenco.SubmitJob(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
}
}
@ -318,7 +318,7 @@ func TestSubmitJobWithShamanCheckoutID(t *testing.T) {
echoCtx := mf.prepareMockedJSONRequest(submittedJob)
requestWorkerStore(echoCtx, &worker)
err := mf.flamenco.SubmitJob(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
}
func TestSubmitJobWithWorkerTag(t *testing.T) {
@ -437,7 +437,33 @@ func TestGetJobTypeHappy(t *testing.T) {
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.GetJobType(echoCtx, "test-job-type")
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echoCtx, http.StatusOK, jt)
}
func TestGetJobTypeWithDescriptionHappy(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mf := newMockedFlamenco(mockCtrl)
// Get an existing job type with a description.
description := "This is a test job type"
jt := api.AvailableJobType{
Description: &description,
Etag: "some etag",
Name: "test-job-type",
Label: "Test Job Type",
Settings: []api.AvailableJobSetting{
{Key: "setting", Type: api.AvailableJobSettingTypeString},
},
}
mf.jobCompiler.EXPECT().GetJobType("test-job-type").
Return(jt, nil)
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.GetJobType(echoCtx, "test-job-type")
require.NoError(t, err)
assertResponseJSON(t, echoCtx, http.StatusOK, jt)
}
@ -453,7 +479,7 @@ func TestGetJobTypeUnknown(t *testing.T) {
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.GetJobType(echoCtx, "nonexistent-type")
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echoCtx, http.StatusNotFound, api.Error{
Code: http.StatusNotFound,
Message: "no such job type known",
@ -482,7 +508,7 @@ func TestSubmitJobCheckWithEtag(t *testing.T) {
{
echoCtx := mf.prepareMockedJSONRequest(submittedJob)
err := mf.flamenco.SubmitJobCheck(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echoCtx,
http.StatusPreconditionFailed, "rejecting job because its settings are outdated, refresh the job type")
}
@ -502,7 +528,7 @@ func TestSubmitJobCheckWithEtag(t *testing.T) {
submittedJob.TypeEtag = ptr("correct etag")
echoCtx := mf.prepareMockedJSONRequest(submittedJob)
err := mf.flamenco.SubmitJobCheck(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
}
}
@ -516,7 +542,7 @@ func TestGetJobTypeError(t *testing.T) {
Return(api.AvailableJobType{}, errors.New("didn't expect this"))
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.GetJobType(echoCtx, "error")
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echoCtx, http.StatusInternalServerError, "error getting job type")
}
@ -537,7 +563,7 @@ func TestSetJobStatus_nonexistentJob(t *testing.T) {
// Do the call.
echoCtx := mf.prepareMockedJSONRequest(statusUpdate)
err := mf.flamenco.SetJobStatus(echoCtx, jobID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echoCtx, http.StatusNotFound, "no such job")
}
@ -571,7 +597,7 @@ func TestSetJobStatus_happy(t *testing.T) {
// Do the call.
echoCtx := mf.prepareMockedJSONRequest(statusUpdate)
err := mf.flamenco.SetJobStatus(echoCtx, jobID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
@ -592,7 +618,7 @@ func TestSetJobPrio_nonexistentJob(t *testing.T) {
// Do the call.
echoCtx := mf.prepareMockedJSONRequest(prioUpdate)
err := mf.flamenco.SetJobStatus(echoCtx, jobID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echoCtx, http.StatusNotFound, "no such job")
}
@ -634,7 +660,7 @@ func TestSetJobPrio(t *testing.T) {
mf.broadcaster.EXPECT().BroadcastJobUpdate(expectUpdate)
err := mf.flamenco.SetJobPriority(echoCtx, jobID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
@ -668,7 +694,7 @@ func TestSetJobStatusFailedToRequeueing(t *testing.T) {
// Do the call.
err := mf.flamenco.SetJobStatus(echoCtx, jobID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
@ -714,7 +740,7 @@ func TestSetTaskStatusQueued(t *testing.T) {
// Do the call.
err := mf.flamenco.SetTaskStatus(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
@ -748,7 +774,7 @@ func TestFetchTaskLogTail(t *testing.T) {
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.FetchTaskLogTail(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
// Check that a 204 No Content is also returned when the task log file on disk exists, but is empty.
@ -758,7 +784,7 @@ func TestFetchTaskLogTail(t *testing.T) {
echoCtx = mf.prepareMockedRequest(nil)
err = mf.flamenco.FetchTaskLogTail(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
@ -794,7 +820,7 @@ func TestFetchTaskLogInfo(t *testing.T) {
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.FetchTaskLogInfo(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
// Check that a 204 No Content is also returned when the task log file on disk exists, but is empty.
@ -803,7 +829,7 @@ func TestFetchTaskLogInfo(t *testing.T) {
echoCtx = mf.prepareMockedRequest(nil)
err = mf.flamenco.FetchTaskLogInfo(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
// Check that otherwise we actually get the info.
@ -813,7 +839,7 @@ func TestFetchTaskLogInfo(t *testing.T) {
echoCtx = mf.prepareMockedRequest(nil)
err = mf.flamenco.FetchTaskLogInfo(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echoCtx, http.StatusOK, api.TaskLogInfo{
JobId: jobID,
TaskId: taskID,
@ -842,7 +868,7 @@ func TestFetchJobLastRenderedInfo(t *testing.T) {
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.FetchJobLastRenderedInfo(echoCtx, jobID)
assert.NoError(t, err)
require.NoError(t, err)
expectBody := api.JobLastRenderedImageInfo{
Base: "/job-files/relative/path",
@ -857,7 +883,7 @@ func TestFetchJobLastRenderedInfo(t *testing.T) {
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.FetchJobLastRenderedInfo(echoCtx, jobID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
}
@ -876,7 +902,7 @@ func TestFetchGlobalLastRenderedInfo(t *testing.T) {
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.FetchGlobalLastRenderedInfo(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
@ -893,7 +919,7 @@ func TestFetchGlobalLastRenderedInfo(t *testing.T) {
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.FetchGlobalLastRenderedInfo(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
expectBody := api.JobLastRenderedImageInfo{
Base: "/job-files/relative/path",
@ -927,7 +953,7 @@ func TestDeleteJob(t *testing.T) {
// Do the call.
err := mf.flamenco.DeleteJob(echoCtx, jobID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}

View File

@ -321,6 +321,10 @@ func (f *Flamenco) SaveSetupAssistantConfig(e echo.Context) error {
return e.NoContent(http.StatusNoContent)
}
func (f *Flamenco) GetFarmStatus(e echo.Context) error {
return e.JSON(http.StatusOK, f.farmstatus.Report())
}
func flamencoManagerDir() (string, error) {
exename, err := os.Executable()
if err != nil {

View File

@ -43,7 +43,7 @@ func TestGetVariables(t *testing.T) {
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.GetVariables(echoCtx, api.ManagerVariableAudienceWorkers, "linux")
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echoCtx, http.StatusOK, api.ManagerVariables{
AdditionalProperties: map[string]api.ManagerVariable{
"blender": {Value: "/usr/local/blender", IsTwoway: false},
@ -61,7 +61,7 @@ func TestGetVariables(t *testing.T) {
echoCtx := mf.prepareMockedRequest(nil)
err := mf.flamenco.GetVariables(echoCtx, api.ManagerVariableAudienceUsers, "troll")
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echoCtx, http.StatusOK, api.ManagerVariables{})
}
}
@ -208,9 +208,7 @@ func TestCheckSharedStoragePath(t *testing.T) {
echoCtx := mf.prepareMockedJSONRequest(
api.PathCheckInput{Path: path})
err := mf.flamenco.CheckSharedStoragePath(echoCtx)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
return echoCtx
}
@ -230,9 +228,8 @@ func TestCheckSharedStoragePath(t *testing.T) {
Cause: "Directory checked successfully",
})
files, err := filepath.Glob(filepath.Join(mf.tempdir, "*"))
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Empty(t, files, "After a query, there should not be any leftovers")
}
// Test inaccessible path.
// For some reason, this doesn't work on Windows, and creating a file in
@ -253,12 +250,9 @@ func TestCheckSharedStoragePath(t *testing.T) {
parentPath := filepath.Join(mf.tempdir, "deep")
testPath := filepath.Join(parentPath, "nesting")
if err := os.Mkdir(parentPath, fs.ModePerm); !assert.NoError(t, err) {
t.FailNow()
}
if err := os.Mkdir(testPath, fs.FileMode(0)); !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, os.Mkdir(parentPath, fs.ModePerm))
require.NoError(t, os.Mkdir(testPath, fs.FileMode(0)))
echoCtx := doTest(testPath)
result := api.PathCheckResult{}
getResponseJSON(t, echoCtx, http.StatusOK, &result)
@ -295,9 +289,7 @@ func TestSaveSetupAssistantConfig(t *testing.T) {
// Call the API.
echoCtx := mf.prepareMockedJSONRequest(body)
err := mf.flamenco.SaveSetupAssistantConfig(echoCtx)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
return savedConfig
@ -378,9 +370,7 @@ func metaTestFixtures(t *testing.T) (mockedFlamenco, func()) {
mf := newMockedFlamenco(mockCtrl)
tempdir, err := os.MkdirTemp("", "test-temp-dir")
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
mf.tempdir = tempdir
finish := func() {

View File

@ -1,5 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: projects.blender.org/studio/flamenco/internal/manager/api_impl (interfaces: PersistenceService,ChangeBroadcaster,JobCompiler,LogStorage,ConfigService,TaskStateMachine,Shaman,LastRendered,LocalStorage,WorkerSleepScheduler,JobDeleter)
// Source: projects.blender.org/studio/flamenco/internal/manager/api_impl (interfaces: PersistenceService,ChangeBroadcaster,JobCompiler,LogStorage,ConfigService,TaskStateMachine,Shaman,LastRendered,LocalStorage,WorkerSleepScheduler,JobDeleter,FarmStatusService)
// Package mocks is a generated GoMock package.
package mocks
@ -1413,3 +1413,40 @@ func (mr *MockJobDeleterMockRecorder) WhatWouldBeDeleted(arg0 interface{}) *gomo
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WhatWouldBeDeleted", reflect.TypeOf((*MockJobDeleter)(nil).WhatWouldBeDeleted), arg0)
}
// MockFarmStatusService is a mock of FarmStatusService interface.
type MockFarmStatusService struct {
ctrl *gomock.Controller
recorder *MockFarmStatusServiceMockRecorder
}
// MockFarmStatusServiceMockRecorder is the mock recorder for MockFarmStatusService.
type MockFarmStatusServiceMockRecorder struct {
mock *MockFarmStatusService
}
// NewMockFarmStatusService creates a new mock instance.
func NewMockFarmStatusService(ctrl *gomock.Controller) *MockFarmStatusService {
mock := &MockFarmStatusService{ctrl: ctrl}
mock.recorder = &MockFarmStatusServiceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockFarmStatusService) EXPECT() *MockFarmStatusServiceMockRecorder {
return m.recorder
}
// Report mocks base method.
func (m *MockFarmStatusService) Report() api.FarmStatusReport {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Report")
ret0, _ := ret[0].(api.FarmStatusReport)
return ret0
}
// Report indicates an expected call of Report.
func (mr *MockFarmStatusServiceMockRecorder) Report() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Report", reflect.TypeOf((*MockFarmStatusService)(nil).Report))
}

View File

@ -16,6 +16,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/labstack/echo/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/api_impl/mocks"
"projects.blender.org/studio/flamenco/internal/manager/config"
@ -37,6 +38,7 @@ type mockedFlamenco struct {
localStorage *mocks.MockLocalStorage
sleepScheduler *mocks.MockWorkerSleepScheduler
jobDeleter *mocks.MockJobDeleter
farmstatus *mocks.MockFarmStatusService
// Place for some tests to store a temporary directory.
tempdir string
@ -54,6 +56,7 @@ func newMockedFlamenco(mockCtrl *gomock.Controller) mockedFlamenco {
localStore := mocks.NewMockLocalStorage(mockCtrl)
wss := mocks.NewMockWorkerSleepScheduler(mockCtrl)
jd := mocks.NewMockJobDeleter(mockCtrl)
fs := mocks.NewMockFarmStatusService(mockCtrl)
clock := clock.NewMock()
mockedNow, err := time.Parse(time.RFC3339, "2022-06-09T11:14:41+02:00")
@ -62,7 +65,7 @@ func newMockedFlamenco(mockCtrl *gomock.Controller) mockedFlamenco {
}
clock.Set(mockedNow)
f := NewFlamenco(jc, ps, cb, logStore, cs, sm, sha, clock, lr, localStore, wss, jd)
f := NewFlamenco(jc, ps, cb, logStore, cs, sm, sha, clock, lr, localStore, wss, jd, fs)
return mockedFlamenco{
flamenco: f,
@ -78,6 +81,7 @@ func newMockedFlamenco(mockCtrl *gomock.Controller) mockedFlamenco {
localStorage: localStore,
sleepScheduler: wss,
jobDeleter: jd,
farmstatus: fs,
}
}
@ -179,14 +183,10 @@ func getResponseJSON(t *testing.T, echoCtx echo.Context, expectStatusCode int, a
}
actualJSON, err := io.ReadAll(resp.Body)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
err = json.Unmarshal(actualJSON, actualPayloadPtr)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
}
// assertResponseJSON asserts that a recorded response is JSON with the given HTTP status code.
@ -201,14 +201,10 @@ func assertResponseJSON(t *testing.T, echoCtx echo.Context, expectStatusCode int
}
expectJSON, err := json.Marshal(expectBody)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
actualJSON, err := io.ReadAll(resp.Body)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
assert.JSONEq(t, string(expectJSON), string(actualJSON))
}

View File

@ -33,7 +33,7 @@ func TestFetchWorkers(t *testing.T) {
echo := mf.prepareMockedRequest(nil)
err := mf.flamenco.FetchWorkers(echo)
assert.NoError(t, err)
require.NoError(t, err)
// Check the response
workers := api.WorkerList{
@ -74,7 +74,7 @@ func TestFetchWorker(t *testing.T) {
Return(nil, fmt.Errorf("wrapped: %w", persistence.ErrWorkerNotFound))
echo := mf.prepareMockedRequest(nil)
err := mf.flamenco.FetchWorker(echo, workerUUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echo, http.StatusNotFound, fmt.Sprintf("worker %q not found", workerUUID))
// Test database error fetching worker.
@ -82,7 +82,7 @@ func TestFetchWorker(t *testing.T) {
Return(nil, errors.New("some unknown error"))
echo = mf.prepareMockedRequest(nil)
err = mf.flamenco.FetchWorker(echo, workerUUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echo, http.StatusInternalServerError, "error fetching worker: some unknown error")
// Test with worker that does NOT have a status change requested, and DOES have an assigned task.
@ -97,7 +97,7 @@ func TestFetchWorker(t *testing.T) {
echo = mf.prepareMockedRequest(nil)
err = mf.flamenco.FetchWorker(echo, workerUUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echo, http.StatusOK, api.Worker{
WorkerSummary: api.WorkerSummary{
Id: workerUUID,
@ -126,7 +126,7 @@ func TestFetchWorker(t *testing.T) {
echo = mf.prepareMockedRequest(nil)
err = mf.flamenco.FetchWorker(echo, worker.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echo, http.StatusOK, api.Worker{
WorkerSummary: api.WorkerSummary{
Id: workerUUID,
@ -155,7 +155,7 @@ func TestDeleteWorker(t *testing.T) {
Return(nil, fmt.Errorf("wrapped: %w", persistence.ErrWorkerNotFound))
echo := mf.prepareMockedRequest(nil)
err := mf.flamenco.DeleteWorker(echo, workerUUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echo, http.StatusNotFound, fmt.Sprintf("worker %q not found", workerUUID))
// Test with existing worker.
@ -176,7 +176,7 @@ func TestDeleteWorker(t *testing.T) {
echo = mf.prepareMockedRequest(nil)
err = mf.flamenco.DeleteWorker(echo, workerUUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echo)
}
@ -214,7 +214,7 @@ func TestRequestWorkerStatusChange(t *testing.T) {
IsLazy: true,
})
err := mf.flamenco.RequestWorkerStatusChange(echo, workerUUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echo)
}
@ -258,7 +258,7 @@ func TestRequestWorkerStatusChangeRevert(t *testing.T) {
IsLazy: true,
})
err := mf.flamenco.RequestWorkerStatusChange(echo, workerUUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echo)
}

View File

@ -8,6 +8,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/config"
"projects.blender.org/studio/flamenco/internal/manager/persistence"
@ -77,7 +78,7 @@ func TestTaskUpdate(t *testing.T) {
err := mf.flamenco.TaskUpdate(echoCtx, taskID)
// Check the saved task.
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, mockTask.UUID, statusChangedtask.UUID)
assert.Equal(t, mockTask.UUID, actUpdatedTask.UUID)
assert.Equal(t, mockTask.UUID, touchedTask.UUID)
@ -148,7 +149,7 @@ func TestTaskUpdateFailed(t *testing.T) {
echoCtx := mf.prepareMockedJSONRequest(taskUpdate)
requestWorkerStore(echoCtx, &worker)
err := mf.flamenco.TaskUpdate(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
@ -164,7 +165,7 @@ func TestTaskUpdateFailed(t *testing.T) {
echoCtx := mf.prepareMockedJSONRequest(taskUpdate)
requestWorkerStore(echoCtx, &worker)
err := mf.flamenco.TaskUpdate(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
}
@ -248,7 +249,7 @@ func TestBlockingAfterFailure(t *testing.T) {
echoCtx := mf.prepareMockedJSONRequest(taskUpdate)
requestWorkerStore(echoCtx, &worker)
err := mf.flamenco.TaskUpdate(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
@ -279,7 +280,7 @@ func TestBlockingAfterFailure(t *testing.T) {
echoCtx := mf.prepareMockedJSONRequest(taskUpdate)
requestWorkerStore(echoCtx, &worker)
err := mf.flamenco.TaskUpdate(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
@ -314,7 +315,7 @@ func TestBlockingAfterFailure(t *testing.T) {
echoCtx := mf.prepareMockedJSONRequest(taskUpdate)
requestWorkerStore(echoCtx, &worker)
err := mf.flamenco.TaskUpdate(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}
}
@ -381,6 +382,6 @@ func TestJobFailureAfterWorkerTaskFailure(t *testing.T) {
echoCtx := mf.prepareMockedJSONRequest(taskUpdate)
requestWorkerStore(echoCtx, &worker)
err := mf.flamenco.TaskUpdate(echoCtx, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echoCtx)
}

View File

@ -12,6 +12,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/labstack/echo/v4"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/config"
"projects.blender.org/studio/flamenco/internal/manager/last_rendered"
@ -61,7 +62,7 @@ func TestTaskScheduleHappy(t *testing.T) {
mf.broadcaster.EXPECT().BroadcastWorkerUpdate(gomock.Any())
err := mf.flamenco.ScheduleTask(echo)
assert.NoError(t, err)
require.NoError(t, err)
// Check the response
assignedTask := api.AssignedTask{
@ -98,7 +99,7 @@ func TestTaskScheduleNoTaskAvailable(t *testing.T) {
mf.persistence.EXPECT().WorkerSeen(bgCtx, &worker)
err := mf.flamenco.ScheduleTask(echo)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echo)
}
@ -119,7 +120,7 @@ func TestTaskScheduleNonActiveStatus(t *testing.T) {
mf.persistence.EXPECT().WorkerSeen(bgCtx, &worker)
err := mf.flamenco.ScheduleTask(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
resp := getRecordedResponse(echoCtx)
assert.Equal(t, http.StatusConflict, resp.StatusCode)
@ -142,7 +143,7 @@ func TestTaskScheduleOtherStatusRequested(t *testing.T) {
mf.persistence.EXPECT().WorkerSeen(bgCtx, &worker)
err := mf.flamenco.ScheduleTask(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
expectBody := api.WorkerStateChange{StatusRequested: api.WorkerStatusAsleep}
assertResponseJSON(t, echoCtx, http.StatusLocked, expectBody)
@ -169,7 +170,7 @@ func TestTaskScheduleOtherStatusRequestedAndBadState(t *testing.T) {
mf.persistence.EXPECT().WorkerSeen(bgCtx, &worker)
err := mf.flamenco.ScheduleTask(echoCtx)
assert.NoError(t, err)
require.NoError(t, err)
expectBody := api.WorkerStateChange{StatusRequested: api.WorkerStatusAwake}
assertResponseJSON(t, echoCtx, http.StatusLocked, expectBody)
@ -206,7 +207,7 @@ func TestWorkerSignOn(t *testing.T) {
})
requestWorkerStore(echo, &worker)
err := mf.flamenco.SignOn(echo)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echo, http.StatusOK, api.WorkerStateChange{
StatusRequested: api.WorkerStatusAsleep,
@ -253,7 +254,7 @@ func TestWorkerSignoffTaskRequeue(t *testing.T) {
})
err := mf.flamenco.SignOff(echo)
assert.NoError(t, err)
require.NoError(t, err)
resp := getRecordedResponse(echo)
assert.Equal(t, http.StatusNoContent, resp.StatusCode)
@ -292,7 +293,7 @@ func TestWorkerRememberPreviousStatus(t *testing.T) {
echo := mf.prepareMockedRequest(nil)
requestWorkerStore(echo, &worker)
err := mf.flamenco.SignOff(echo)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echo)
assert.Equal(t, api.WorkerStatusAwake, worker.StatusRequested)
@ -329,7 +330,7 @@ func TestWorkerDontRememberPreviousStatus(t *testing.T) {
echo := mf.prepareMockedRequest(nil)
requestWorkerStore(echo, &worker)
err := mf.flamenco.SignOff(echo)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echo)
}
@ -347,10 +348,9 @@ func TestWorkerState(t *testing.T) {
echo := mf.prepareMockedRequest(nil)
requestWorkerStore(echo, &worker)
err := mf.flamenco.WorkerState(echo)
if assert.NoError(t, err) {
require.NoError(t, err)
assertResponseNoContent(t, echo)
}
}
// State change requested.
{
@ -361,12 +361,11 @@ func TestWorkerState(t *testing.T) {
requestWorkerStore(echo, &worker)
err := mf.flamenco.WorkerState(echo)
if assert.NoError(t, err) {
require.NoError(t, err)
assertResponseJSON(t, echo, http.StatusOK, api.WorkerStateChange{
StatusRequested: requestStatus,
})
}
}
}
func TestWorkerStateChanged(t *testing.T) {
@ -402,7 +401,7 @@ func TestWorkerStateChanged(t *testing.T) {
})
requestWorkerStore(echo, &worker)
err := mf.flamenco.WorkerStateChanged(echo)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echo)
}
@ -445,7 +444,7 @@ func TestWorkerStateChangedAfterChangeRequest(t *testing.T) {
})
requestWorkerStore(echo, &worker)
err := mf.flamenco.WorkerStateChanged(echo)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echo)
}
@ -475,7 +474,7 @@ func TestWorkerStateChangedAfterChangeRequest(t *testing.T) {
})
requestWorkerStore(echo, &worker)
err := mf.flamenco.WorkerStateChanged(echo)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoContent(t, echo)
}
}
@ -514,7 +513,7 @@ func TestMayWorkerRun(t *testing.T) {
{
echo := prepareRequest()
err := mf.flamenco.MayWorkerRun(echo, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echo, http.StatusOK, api.MayKeepRunning{
MayKeepRunning: false,
Reason: "task not assigned to this worker",
@ -529,7 +528,7 @@ func TestMayWorkerRun(t *testing.T) {
echo := prepareRequest()
task.WorkerID = &worker.ID
err := mf.flamenco.MayWorkerRun(echo, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echo, http.StatusOK, api.MayKeepRunning{
MayKeepRunning: true,
})
@ -541,7 +540,7 @@ func TestMayWorkerRun(t *testing.T) {
task.WorkerID = &worker.ID
task.Status = api.TaskStatusCanceled
err := mf.flamenco.MayWorkerRun(echo, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echo, http.StatusOK, api.MayKeepRunning{
MayKeepRunning: false,
Reason: "task is in non-runnable status \"canceled\"",
@ -555,7 +554,7 @@ func TestMayWorkerRun(t *testing.T) {
task.WorkerID = &worker.ID
task.Status = api.TaskStatusActive
err := mf.flamenco.MayWorkerRun(echo, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echo, http.StatusOK, api.MayKeepRunning{
MayKeepRunning: false,
Reason: "worker status change requested",
@ -573,7 +572,7 @@ func TestMayWorkerRun(t *testing.T) {
task.WorkerID = &worker.ID
task.Status = api.TaskStatusActive
err := mf.flamenco.MayWorkerRun(echo, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseJSON(t, echo, http.StatusOK, api.MayKeepRunning{
MayKeepRunning: true,
})
@ -618,7 +617,7 @@ func TestTaskOutputProduced(t *testing.T) {
echo := prepareRequest(nil)
err := mf.flamenco.TaskOutputProduced(echo, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echo, http.StatusLengthRequired, "Content-Length header required")
}
@ -633,7 +632,7 @@ func TestTaskOutputProduced(t *testing.T) {
echo := prepareRequest(bytes.NewReader(bodyBytes))
err := mf.flamenco.TaskOutputProduced(echo, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echo, http.StatusRequestEntityTooLarge,
"image too large; should be max %v bytes", last_rendered.MaxImageSizeBytes)
}
@ -648,7 +647,7 @@ func TestTaskOutputProduced(t *testing.T) {
mf.lastRender.EXPECT().QueueImage(gomock.Any()).Return(last_rendered.ErrMimeTypeUnsupported)
err := mf.flamenco.TaskOutputProduced(echo, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echo, http.StatusUnsupportedMediaType, `unsupported mime type "image/openexr"`)
}
@ -661,7 +660,7 @@ func TestTaskOutputProduced(t *testing.T) {
mf.lastRender.EXPECT().QueueImage(gomock.Any()).Return(last_rendered.ErrQueueFull)
err := mf.flamenco.TaskOutputProduced(echo, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseAPIError(t, echo, http.StatusTooManyRequests, "image processing queue is full")
}
@ -687,7 +686,7 @@ func TestTaskOutputProduced(t *testing.T) {
})
err := mf.flamenco.TaskOutputProduced(echo, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertResponseNoBody(t, echo, http.StatusAccepted)
if assert.NotNil(t, actualPayload) {

View File

@ -4,6 +4,7 @@ import (
"runtime"
"time"
"projects.blender.org/studio/flamenco/internal/manager/eventbus"
shaman_config "projects.blender.org/studio/flamenco/pkg/shaman/config"
)
@ -18,9 +19,8 @@ var defaultConfig = Conf{
ManagerName: "Flamenco",
Listen: ":8080",
// ListenHTTPS: ":8433",
DatabaseDSN: "flamenco-manager.sqlite",
DBIntegrityCheck: 1 * time.Hour,
DBIntegrityCheck: 10 * time.Minute,
SSDPDiscovery: true,
LocalManagerStoragePath: "./flamenco-manager-storage",
SharedStoragePath: "", // Empty string means "first run", and should trigger the config setup assistant.
@ -38,25 +38,15 @@ var defaultConfig = Conf{
TaskTimeout: 10 * time.Minute,
WorkerTimeout: 1 * time.Minute,
// // Days are assumed to be 24 hours long. This is not exactly accurate, but should
// // be accurate enough for this type of cleanup.
// TaskCleanupMaxAge: 14 * 24 * time.Hour,
BlocklistThreshold: 3,
TaskFailAfterSoftFailCount: 3,
// WorkerCleanupStatus: []string{string(api.WorkerStatusOffline)},
// TestTasks: TestTasks{
// BlenderRender: BlenderRenderConfig{
// JobStorage: "{job_storage}/test-jobs",
// RenderOutput: "{render}/test-renders",
// },
// },
// JWT: jwtauth.Config{
// DownloadKeysInterval: 1 * time.Hour,
// },
MQTT: MQTTConfig{
Client: eventbus.MQTTClientConfig{
ClientID: eventbus.MQTTDefaultClientID,
TopicPrefix: eventbus.MQTTDefaultTopicPrefix,
},
},
},
Variables: map[string]Variable{

View File

@ -10,17 +10,25 @@ type (
EventTopic string
)
// Listener is the interface for internal components that want to respond to events.
// OnEvent is called synchronously from the broadcasting goroutine, so
// implementations should return quickly or hand work off to another goroutine.
type Listener interface {
	OnEvent(topic EventTopic, payload interface{})
}
// Forwarder is the interface for components that forward events to external systems.
// Implementations in this package forward to SocketIO and MQTT.
type Forwarder interface {
	Broadcast(topic EventTopic, payload interface{})
}
// Broker distributes events to registered listeners (internal components) and
// forwarders (external systems).
type Broker struct {
	listeners  []Listener
	forwarders []Forwarder
	mutex      sync.Mutex // Guards both slices above.
}
func NewBroker() *Broker {
return &Broker{
listeners: []Listener{},
forwarders: []Forwarder{},
mutex: sync.Mutex{},
}
@ -32,10 +40,20 @@ func (b *Broker) AddForwarder(forwarder Forwarder) {
b.forwarders = append(b.forwarders, forwarder)
}
// AddListener registers an internal component to receive all broadcast events.
func (b *Broker) AddListener(listener Listener) {
	b.mutex.Lock()
	defer b.mutex.Unlock()
	b.listeners = append(b.listeners, listener)
}
func (b *Broker) broadcast(topic EventTopic, payload interface{}) {
b.mutex.Lock()
defer b.mutex.Unlock()
for _, listener := range b.listeners {
listener.OnEvent(topic, payload)
}
for _, forwarder := range b.forwarders {
forwarder.Broadcast(topic, payload)
}

View File

@ -0,0 +1,17 @@
package eventbus
// SPDX-License-Identifier: GPL-3.0-or-later
import (
"github.com/rs/zerolog/log"
"projects.blender.org/studio/flamenco/pkg/api"
)
// NewFarmStatusEvent constructs a FarmStatus event from the given report.
// The event is a straight type conversion of the report itself.
func NewFarmStatusEvent(farmstatus api.FarmStatusReport) api.EventFarmStatus {
	return api.EventFarmStatus(farmstatus)
}
// BroadcastFarmStatusEvent sends the given farm status event to all registered
// listeners and forwarders, on the farm status topic.
func (b *Broker) BroadcastFarmStatusEvent(event api.EventFarmStatus) {
	log.Debug().Interface("event", event).Msg("eventbus: broadcasting FarmStatus event")
	b.broadcast(TopicFarmStatus, event)
}

View File

@ -13,10 +13,14 @@ import (
"github.com/eclipse/paho.golang/paho"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"projects.blender.org/studio/flamenco/pkg/api"
)
const (
defaultClientID = "flamenco"
MQTTDefaultTopicPrefix = "flamenco"
MQTTDefaultClientID = "flamenco"
keepAlive = 30 // seconds
connectRetryDelay = 10 * time.Second
@ -61,7 +65,7 @@ func NewMQTTForwarder(config MQTTClientConfig) *MQTTForwarder {
return nil
}
if config.ClientID == "" {
config.ClientID = defaultClientID
config.ClientID = MQTTDefaultClientID
}
brokerURL, err := url.Parse(config.BrokerURL)
@ -150,6 +154,11 @@ func (m *MQTTForwarder) queueRunner(queueRunnerCtx context.Context) {
}
func (m *MQTTForwarder) Broadcast(topic EventTopic, payload interface{}) {
if _, ok := payload.(api.EventTaskLogUpdate); ok {
// Task log updates aren't sent through MQTT, as that can generate a lot of traffic.
return
}
fullTopic := m.topicPrefix + string(topic)
asJSON, err := json.Marshal(payload)

View File

@ -14,6 +14,7 @@ import (
"github.com/rs/zerolog/log"
"projects.blender.org/studio/flamenco/internal/uuid"
"projects.blender.org/studio/flamenco/pkg/api"
"projects.blender.org/studio/flamenco/pkg/website"
)
type SocketIOEventType string
@ -23,6 +24,8 @@ const (
)
var socketIOEventTypes = map[string]string{
reflect.TypeOf(api.EventLifeCycle{}).Name(): "/lifecycle",
reflect.TypeOf(api.EventFarmStatus{}).Name(): "/status",
reflect.TypeOf(api.EventJobUpdate{}).Name(): "/jobs",
reflect.TypeOf(api.EventTaskUpdate{}).Name(): "/task",
reflect.TypeOf(api.EventLastRenderedUpdate{}).Name(): "/last-rendered",
@ -59,7 +62,17 @@ func (s *SocketIOForwarder) Broadcast(topic EventTopic, payload interface{}) {
// SocketIO has a concept of 'event types'. MQTT doesn't have this, and thus the Flamenco event
// system doesn't rely on it. We use the payload type name as event type.
payloadType := reflect.TypeOf(payload).Name()
eventType := socketIOEventTypes[payloadType]
eventType, ok := socketIOEventTypes[payloadType]
if !ok {
log.Error().
Str("topic", string(topic)).
Str("payloadType", payloadType).
Interface("event", payload).
Msgf("socketIO: payload type does not have an event type, please copy-paste this message into a bug report at %s", website.BugReportURL)
return
}
log.Debug().
Str("topic", string(topic)).
Str("eventType", eventType).
@ -80,6 +93,10 @@ func (s *SocketIOForwarder) registerSIOEventHandlers() {
_ = sio.On(gosocketio.OnConnection, func(c *gosocketio.Channel) {
logger := sioLogger(c)
logger.Debug().Msg("socketIO: connected")
// All SocketIO connections get these events, regardless of their subscription.
_ = c.Join(string(TopicLifeCycle))
_ = c.Join(string(TopicFarmStatus))
})
// socket disconnection

View File

@ -6,7 +6,9 @@ import "fmt"
const (
// Topics on which events are published.
// NOTE: when adding here, also add to socketIOEventTypes in socketio.go.
TopicLifeCycle EventTopic = "/lifecycle" // sends api.EventLifeCycle
TopicFarmStatus EventTopic = "/status" // sends api.EventFarmStatus
TopicJobUpdate EventTopic = "/jobs" // sends api.EventJobUpdate
TopicLastRenderedImage EventTopic = "/last-rendered" // sends api.EventLastRenderedUpdate
TopicTaskUpdate EventTopic = "/task" // sends api.EventTaskUpdate

View File

@ -0,0 +1,233 @@
// package farmstatus provides a status indicator for the entire Flamenco farm.
package farmstatus
import (
"context"
"errors"
"slices"
"sync"
"time"
"github.com/rs/zerolog/log"
"projects.blender.org/studio/flamenco/internal/manager/eventbus"
"projects.blender.org/studio/flamenco/pkg/api"
"projects.blender.org/studio/flamenco/pkg/website"
)
const (
// pollWait determines how often the persistence layer is queried to get the
// counts & statuses of workers and jobs.
//
// Note that this indicates the time between polls, so between a poll
// operation being done, and the next one starting.
pollWait = 30 * time.Second
)
// Service keeps track of the overall farm status.
type Service struct {
	persist  PersistenceService
	eventbus EventBus

	mutex      sync.Mutex
	lastReport api.FarmStatusReport // Last-computed status; guarded by mutex.

	forcePoll chan struct{} // Send anything here to force a poll, if none is running yet.
}
// NewService returns a 'farm status' service. Run its Run() function in a
// goroutine to make it actually do something.
func NewService(persist PersistenceService, eventbus EventBus) *Service {
	service := Service{
		persist:  persist,
		eventbus: eventbus,
		mutex:    sync.Mutex{},
		// Buffer of 1 so that requesting a poll never blocks the sender; see OnEvent().
		forcePoll: make(chan struct{}, 1),
		lastReport: api.FarmStatusReport{
			// Report 'starting' until the first real poll has happened.
			Status: api.FarmStatusStarting,
		},
	}

	// Register as listener so that job/worker status change events trigger a re-poll.
	eventbus.AddListener(&service)

	return &service
}
// Run the farm status polling loop. It blocks until ctx is done, polling the
// database periodically and whenever a poll is forced via OnEvent().
func (s *Service) Run(ctx context.Context) {
	log.Debug().Msg("farm status: polling service running")
	defer log.Debug().Msg("farm status: polling service stopped")

	// At startup the first poll should happen quickly.
	waitTime := 1 * time.Second

	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(waitTime):
			s.poll(ctx)
		case <-s.forcePoll:
			// OnEvent() pushed to this channel because a job/worker changed status.
			s.poll(ctx)
		}

		// After the first poll we can go to a slower pace, as mostly the event bus
		// is the main source of poll triggers.
		waitTime = pollWait
	}
}
// OnEvent implements the eventbus.Listener interface. It requests a re-poll of
// the farm status when a job or worker actually transitioned between statuses.
func (s *Service) OnEvent(topic eventbus.EventTopic, payload interface{}) {
	forcePoll := false
	eventSubject := ""

	switch event := payload.(type) {
	case api.EventJobUpdate:
		// Only an actual status transition can change the farm status; other
		// updates on the same topic are ignored.
		forcePoll = event.PreviousStatus != nil && *event.PreviousStatus != event.Status
		eventSubject = "job"
	case api.EventWorkerUpdate:
		forcePoll = event.PreviousStatus != nil && *event.PreviousStatus != event.Status
		eventSubject = "worker"
	}

	if !forcePoll {
		return
	}

	log.Debug().
		Str("event", string(topic)).
		Msgf("farm status: investigating after %s status change", eventSubject)

	// Polling queries the database, and thus can have a non-trivial duration.
	// Better to run in the Run() goroutine.
	select {
	case s.forcePoll <- struct{}{}:
	default:
		// If sending to the channel fails, there is already a struct{}{} in
		// there, and thus a poll will be triggered ASAP anyway.
	}
}
// Report returns the last-known farm status report.
//
// It is updated every few seconds, from the Run() function. Until the first
// poll completes, this returns the 'starting' status set by NewService().
func (s *Service) Report() api.FarmStatusReport {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	return s.lastReport
}
// updateStatusReport stores the given report as the last-known farm status,
// in a thread-safe way. The returned boolean indicates whether the stored
// report actually changed.
func (s *Service) updateStatusReport(report api.FarmStatusReport) bool {
	s.mutex.Lock()
	defer s.mutex.Unlock()

	if s.lastReport == report {
		// Nothing changed; no need to store (an identical value) or notify.
		return false
	}
	s.lastReport = report
	return true
}
// poll refreshes the farm status report and, when the status actually
// changed, broadcasts a FarmStatus event on the event bus.
func (s *Service) poll(ctx context.Context) {
	report := s.checkFarmStatus(ctx)
	if report == nil {
		// Already logged, just keep the last known report around for querying.
		return
	}

	reportChanged := s.updateStatusReport(*report)
	if reportChanged {
		// Use the local report rather than re-reading s.lastReport: that field
		// is guarded by s.mutex, which is no longer held here. The values are
		// identical (updateStatusReport just stored *report), and this keeps
		// all access to s.lastReport behind the mutex.
		event := eventbus.NewFarmStatusEvent(*report)
		s.eventbus.BroadcastFarmStatusEvent(event)
	}
}
// checkFarmStatus checks the farm status by querying the persistence layer.
// This function does not return an error, but instead logs them as warnings and returns nil.
func (s *Service) checkFarmStatus(ctx context.Context) *api.FarmStatusReport {
	log.Trace().Msg("farm status: checking the farm status")

	// Time the database queries; this runs periodically, so a slow database
	// would otherwise degrade the Manager silently.
	startTime := time.Now()
	defer func() {
		duration := time.Since(startTime)
		log.Debug().Stringer("duration", duration).Msg("farm status: checked the farm status")
	}()

	workerStatuses, err := s.persist.SummarizeWorkerStatuses(ctx)
	if err != nil {
		logDBError(err, "farm status: could not summarize worker statuses")
		return nil
	}

	// Check some worker statuses first. When there are no workers and the farm is
	// inoperative, there is little use in checking jobs. At least for now. Maybe
	// later we want to have some info in the reported status that indicates a
	// more pressing matter (as in, inoperative AND a job is queued).

	// Check: inoperative
	if len(workerStatuses) == 0 || allIn(workerStatuses, api.WorkerStatusOffline, api.WorkerStatusError) {
		return &api.FarmStatusReport{
			Status: api.FarmStatusInoperative,
		}
	}

	jobStatuses, err := s.persist.SummarizeJobStatuses(ctx)
	if err != nil {
		logDBError(err, "farm status: could not summarize job statuses")
		return nil
	}

	// Derived booleans that drive the status decision below.
	anyJobActive := jobStatuses[api.JobStatusActive] > 0
	anyJobQueued := jobStatuses[api.JobStatusQueued] > 0
	isWorkAvailable := anyJobActive || anyJobQueued

	anyWorkerAwake := workerStatuses[api.WorkerStatusAwake] > 0
	anyWorkerAsleep := workerStatuses[api.WorkerStatusAsleep] > 0
	allWorkersAsleep := !anyWorkerAwake && anyWorkerAsleep

	// NOTE: the cases below are order-dependent; e.g. "waiting" only means
	// "work available but not active on an awake worker" because the "active"
	// case is checked first.
	report := api.FarmStatusReport{}
	switch {
	case anyJobActive && anyWorkerAwake:
		// - "active" # Actively working on jobs.
		report.Status = api.FarmStatusActive
	case isWorkAvailable:
		// - "waiting" # Work to be done, but there is no worker awake.
		report.Status = api.FarmStatusWaiting
	case !isWorkAvailable && allWorkersAsleep:
		// - "asleep" # Farm is idle, and all workers are asleep.
		report.Status = api.FarmStatusAsleep
	case !isWorkAvailable:
		// - "idle" # Farm could be active, but has no work to do.
		report.Status = api.FarmStatusIdle
	default:
		// Should be unreachable given the cases above; log enough to diagnose.
		log.Warn().
			Interface("workerStatuses", workerStatuses).
			Interface("jobStatuses", jobStatuses).
			Msgf("farm status: unexpected configuration of worker and job statuses, please report this at %s", website.BugReportURL)
		report.Status = api.FarmStatusUnknown
	}

	return &report
}
// logDBError logs a database-related error at a severity appropriate for its
// cause: timeouts are warnings, shutdown-time cancellations are debug-level,
// and anything else is a warning with the error attached.
func logDBError(err error, message string) {
	if errors.Is(err, context.DeadlineExceeded) {
		// The query took longer than its context allowed.
		log.Warn().Msg(message + " (it took too long)")
		return
	}
	if errors.Is(err, context.Canceled) {
		// Cancellation is expected during shutdown; don't alarm anyone.
		log.Debug().Msg(message + " (Flamenco is shutting down)")
		return
	}
	log.Warn().AnErr("cause", err).Msg(message)
}
func allIn[T comparable](statuses map[T]int, shouldBeIn ...T) bool {
for status, count := range statuses {
if count == 0 {
continue
}
if !slices.Contains(shouldBeIn, status) {
return false
}
}
return true
}

View File

@ -0,0 +1,241 @@
// package farmstatus provides a status indicator for the entire Flamenco farm.
package farmstatus
import (
"context"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/farmstatus/mocks"
"projects.blender.org/studio/flamenco/internal/manager/persistence"
"projects.blender.org/studio/flamenco/pkg/api"
)
// Fixtures bundles the service under test with its mocked dependencies.
type Fixtures struct {
	service  *Service
	persist  *mocks.MockPersistenceService
	eventbus *mocks.MockEventBus
	ctx      context.Context
}
// TestFarmStatusStarting: before any poll has run, the service must report 'starting'.
func TestFarmStatusStarting(t *testing.T) {
	f := fixtures(t)

	report := f.service.Report()
	assert.Equal(t, api.FarmStatusStarting, report.Status)
}
// TestFarmStatusLoop: a single poll() updates the report and broadcasts an event.
func TestFarmStatusLoop(t *testing.T) {
	f := fixtures(t)

	// Mock an "active" status.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{
		api.WorkerStatusOffline: 2,
		api.WorkerStatusAsleep:  1,
		api.WorkerStatusError:   1,
		api.WorkerStatusAwake:   3,
	})
	f.mockJobStatuses(persistence.JobStatusCount{
		api.JobStatusActive: 1,
	})

	// Before polling, the status should still be 'starting'.
	report := f.service.Report()
	assert.Equal(t, api.FarmStatusStarting, report.Status)

	// After a single poll, the report should have been updated.
	f.eventbus.EXPECT().BroadcastFarmStatusEvent(api.EventFarmStatus{Status: api.FarmStatusActive})
	f.service.poll(f.ctx)
	report = f.service.Report()
	assert.Equal(t, api.FarmStatusActive, report.Status)
}
// TestCheckFarmStatusInoperative: no workers, or only offline/error workers,
// makes the farm 'inoperative'.
func TestCheckFarmStatusInoperative(t *testing.T) {
	f := fixtures(t)

	// "inoperative": no workers.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{})
	report := f.service.checkFarmStatus(f.ctx)
	require.NotNil(t, report)
	assert.Equal(t, api.FarmStatusInoperative, report.Status)

	// "inoperative": all workers offline.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{
		api.WorkerStatusOffline: 3,
	})
	report = f.service.checkFarmStatus(f.ctx)
	require.NotNil(t, report)
	assert.Equal(t, api.FarmStatusInoperative, report.Status)

	// "inoperative": some workers offline, some in error,
	f.mockWorkerStatuses(persistence.WorkerStatusCount{
		api.WorkerStatusOffline: 2,
		api.WorkerStatusError:   1,
	})
	report = f.service.checkFarmStatus(f.ctx)
	require.NotNil(t, report)
	assert.Equal(t, api.FarmStatusInoperative, report.Status)
}
// TestCheckFarmStatusActive: an active job plus at least one awake worker
// makes the farm 'active'.
func TestCheckFarmStatusActive(t *testing.T) {
	f := fixtures(t)

	// "active" # Actively working on jobs.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{
		api.WorkerStatusOffline: 2,
		api.WorkerStatusAsleep:  1,
		api.WorkerStatusError:   1,
		api.WorkerStatusAwake:   3,
	})
	f.mockJobStatuses(persistence.JobStatusCount{
		api.JobStatusActive: 1,
	})
	report := f.service.checkFarmStatus(f.ctx)
	require.NotNil(t, report)
	assert.Equal(t, api.FarmStatusActive, report.Status)
}
// TestCheckFarmStatusWaiting: work is available but nothing is actively
// rendering on an awake worker.
func TestCheckFarmStatusWaiting(t *testing.T) {
	f := fixtures(t)

	// "waiting": Active job, and only sleeping workers.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{
		api.WorkerStatusAsleep: 1,
	})
	f.mockJobStatuses(persistence.JobStatusCount{
		api.JobStatusActive: 1,
	})
	report := f.service.checkFarmStatus(f.ctx)
	require.NotNil(t, report)
	assert.Equal(t, api.FarmStatusWaiting, report.Status)

	// "waiting": Queued job, and awake worker. It could pick up the job any
	// second now, but it could also have been blocklisted already.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{
		api.WorkerStatusAsleep: 1,
		api.WorkerStatusAwake:  1,
	})
	f.mockJobStatuses(persistence.JobStatusCount{
		api.JobStatusQueued: 1,
	})
	report = f.service.checkFarmStatus(f.ctx)
	require.NotNil(t, report)
	assert.Equal(t, api.FarmStatusWaiting, report.Status)
}
// TestCheckFarmStatusIdle: an awake worker with no runnable work means 'idle'.
func TestCheckFarmStatusIdle(t *testing.T) {
	f := fixtures(t)

	// "idle" # Farm could be active, but has no work to do.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{
		api.WorkerStatusOffline: 2,
		api.WorkerStatusAsleep:  1,
		api.WorkerStatusAwake:   1,
	})
	// Neither completed nor cancel-requested jobs count as available work.
	f.mockJobStatuses(persistence.JobStatusCount{
		api.JobStatusCompleted:       1,
		api.JobStatusCancelRequested: 1,
	})
	report := f.service.checkFarmStatus(f.ctx)
	require.NotNil(t, report)
	assert.Equal(t, api.FarmStatusIdle, report.Status)
}
// TestCheckFarmStatusAsleep: no awake workers, at least one asleep, and no
// runnable work means 'asleep'.
func TestCheckFarmStatusAsleep(t *testing.T) {
	f := fixtures(t)

	// "asleep": No worker is awake, some are asleep, no work to do.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{
		api.WorkerStatusOffline: 2,
		api.WorkerStatusAsleep:  2,
	})
	// Only terminal job statuses; none of these count as available work.
	f.mockJobStatuses(persistence.JobStatusCount{
		api.JobStatusCanceled:  10,
		api.JobStatusCompleted: 4,
		api.JobStatusFailed:    2,
	})
	report := f.service.checkFarmStatus(f.ctx)
	require.NotNil(t, report)
	assert.Equal(t, api.FarmStatusAsleep, report.Status)
}
// TestFarmStatusEvent: events are only broadcast when the status changes,
// not on every poll.
func TestFarmStatusEvent(t *testing.T) {
	f := fixtures(t)

	// "inoperative": no workers.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{})
	f.eventbus.EXPECT().BroadcastFarmStatusEvent(api.EventFarmStatus{
		Status: api.FarmStatusInoperative,
	})
	f.service.poll(f.ctx)

	// Re-polling should not trigger any event, as the status doesn't change.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{})
	f.service.poll(f.ctx)

	// "active": Actively working on jobs.
	f.mockWorkerStatuses(persistence.WorkerStatusCount{api.WorkerStatusAwake: 3})
	f.mockJobStatuses(persistence.JobStatusCount{api.JobStatusActive: 1})
	f.eventbus.EXPECT().BroadcastFarmStatusEvent(api.EventFarmStatus{
		Status: api.FarmStatusActive,
	})
	f.service.poll(f.ctx)
}
// Test_allIn exercises the allIn() helper: empty map, exact match, partial
// match, and full multi-status match.
func Test_allIn(t *testing.T) {
	type args struct {
		statuses   map[api.WorkerStatus]int
		shouldBeIn []api.WorkerStatus
	}
	tests := []struct {
		name string
		args args
		want bool
	}{
		// An empty status map is trivially "all in".
		{"none", args{map[api.WorkerStatus]int{}, []api.WorkerStatus{api.WorkerStatusAsleep}}, true},
		{"match-only", args{
			map[api.WorkerStatus]int{api.WorkerStatusAsleep: 5},
			[]api.WorkerStatus{api.WorkerStatusAsleep},
		}, true},
		// A non-zero count for an unlisted status must fail the check.
		{"match-some", args{
			map[api.WorkerStatus]int{api.WorkerStatusAsleep: 5, api.WorkerStatusOffline: 2},
			[]api.WorkerStatus{api.WorkerStatusAsleep},
		}, false},
		{"match-all", args{
			map[api.WorkerStatus]int{api.WorkerStatusAsleep: 5, api.WorkerStatusOffline: 2},
			[]api.WorkerStatus{api.WorkerStatusAsleep, api.WorkerStatusOffline},
		}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := allIn(tt.args.statuses, tt.args.shouldBeIn...); got != tt.want {
				t.Errorf("allIn() = %v, want %v", got, tt.want)
			}
		})
	}
}
// fixtures constructs a farm status Service wired to gomock-backed mocks of
// its persistence and event bus dependencies.
func fixtures(t *testing.T) *Fixtures {
	mockCtrl := gomock.NewController(t)
	f := Fixtures{
		persist:  mocks.NewMockPersistenceService(mockCtrl),
		eventbus: mocks.NewMockEventBus(mockCtrl),
		ctx:      context.Background(),
	}

	// Calling NewService() immediately registers as a listener with the event bus.
	f.eventbus.EXPECT().AddListener(gomock.Any())
	f.service = NewService(f.persist, f.eventbus)

	return &f
}
// mockWorkerStatuses makes the next SummarizeWorkerStatuses() call return the given counts.
func (f *Fixtures) mockWorkerStatuses(workerStatuses persistence.WorkerStatusCount) {
	f.persist.EXPECT().SummarizeWorkerStatuses(f.ctx).Return(workerStatuses, nil)
}
// mockJobStatuses makes the next SummarizeJobStatuses() call return the given counts.
func (f *Fixtures) mockJobStatuses(jobStatuses persistence.JobStatusCount) {
	f.persist.EXPECT().SummarizeJobStatuses(f.ctx).Return(jobStatuses, nil)
}

View File

@ -0,0 +1,26 @@
package farmstatus
import (
"context"
"projects.blender.org/studio/flamenco/internal/manager/eventbus"
"projects.blender.org/studio/flamenco/internal/manager/persistence"
"projects.blender.org/studio/flamenco/pkg/api"
)
// Generate mock implementations of these interfaces.
//go:generate go run github.com/golang/mock/mockgen -destination mocks/interfaces_mock.gen.go -package mocks projects.blender.org/studio/flamenco/internal/manager/farmstatus PersistenceService,EventBus
// PersistenceService is the subset of the persistence layer used by the farm
// status service: counting jobs and workers per status.
type PersistenceService interface {
	SummarizeJobStatuses(ctx context.Context) (persistence.JobStatusCount, error)
	SummarizeWorkerStatuses(ctx context.Context) (persistence.WorkerStatusCount, error)
}

// Compile-time check: the real database implements PersistenceService.
var _ PersistenceService = (*persistence.DB)(nil)

// EventBus is the subset of the event bus used by the farm status service:
// registering itself as a listener, and broadcasting farm status events.
type EventBus interface {
	AddListener(listener eventbus.Listener)
	BroadcastFarmStatusEvent(event api.EventFarmStatus)
}

// Compile-time check: the real event broker implements EventBus.
var _ EventBus = (*eventbus.Broker)(nil)

View File

@ -0,0 +1,115 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: projects.blender.org/studio/flamenco/internal/manager/farmstatus (interfaces: PersistenceService,EventBus)
// Package mocks is a generated GoMock package.
package mocks
import (
context "context"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
eventbus "projects.blender.org/studio/flamenco/internal/manager/eventbus"
persistence "projects.blender.org/studio/flamenco/internal/manager/persistence"
api "projects.blender.org/studio/flamenco/pkg/api"
)
// MockPersistenceService is a mock of PersistenceService interface.
type MockPersistenceService struct {
ctrl *gomock.Controller
recorder *MockPersistenceServiceMockRecorder
}
// MockPersistenceServiceMockRecorder is the mock recorder for MockPersistenceService.
type MockPersistenceServiceMockRecorder struct {
mock *MockPersistenceService
}
// NewMockPersistenceService creates a new mock instance.
func NewMockPersistenceService(ctrl *gomock.Controller) *MockPersistenceService {
mock := &MockPersistenceService{ctrl: ctrl}
mock.recorder = &MockPersistenceServiceMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockPersistenceService) EXPECT() *MockPersistenceServiceMockRecorder {
return m.recorder
}
// SummarizeJobStatuses mocks base method.
func (m *MockPersistenceService) SummarizeJobStatuses(arg0 context.Context) (persistence.JobStatusCount, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SummarizeJobStatuses", arg0)
ret0, _ := ret[0].(persistence.JobStatusCount)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SummarizeJobStatuses indicates an expected call of SummarizeJobStatuses.
func (mr *MockPersistenceServiceMockRecorder) SummarizeJobStatuses(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SummarizeJobStatuses", reflect.TypeOf((*MockPersistenceService)(nil).SummarizeJobStatuses), arg0)
}
// SummarizeWorkerStatuses mocks base method.
func (m *MockPersistenceService) SummarizeWorkerStatuses(arg0 context.Context) (persistence.WorkerStatusCount, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SummarizeWorkerStatuses", arg0)
ret0, _ := ret[0].(persistence.WorkerStatusCount)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// SummarizeWorkerStatuses indicates an expected call of SummarizeWorkerStatuses.
func (mr *MockPersistenceServiceMockRecorder) SummarizeWorkerStatuses(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SummarizeWorkerStatuses", reflect.TypeOf((*MockPersistenceService)(nil).SummarizeWorkerStatuses), arg0)
}
// MockEventBus is a mock of EventBus interface.
type MockEventBus struct {
ctrl *gomock.Controller
recorder *MockEventBusMockRecorder
}
// MockEventBusMockRecorder is the mock recorder for MockEventBus.
type MockEventBusMockRecorder struct {
mock *MockEventBus
}
// NewMockEventBus creates a new mock instance.
func NewMockEventBus(ctrl *gomock.Controller) *MockEventBus {
mock := &MockEventBus{ctrl: ctrl}
mock.recorder = &MockEventBusMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockEventBus) EXPECT() *MockEventBusMockRecorder {
return m.recorder
}
// AddListener mocks base method.
func (m *MockEventBus) AddListener(arg0 eventbus.Listener) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "AddListener", arg0)
}
// AddListener indicates an expected call of AddListener.
func (mr *MockEventBusMockRecorder) AddListener(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddListener", reflect.TypeOf((*MockEventBus)(nil).AddListener), arg0)
}
// BroadcastFarmStatusEvent mocks base method.
func (m *MockEventBus) BroadcastFarmStatusEvent(arg0 api.EventFarmStatus) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "BroadcastFarmStatusEvent", arg0)
}
// BroadcastFarmStatusEvent indicates an expected call of BroadcastFarmStatusEvent.
func (mr *MockEventBusMockRecorder) BroadcastFarmStatusEvent(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BroadcastFarmStatusEvent", reflect.TypeOf((*MockEventBus)(nil).BroadcastFarmStatusEvent), arg0)
}

View File

@ -58,7 +58,7 @@ func exampleSubmittedJob() api.SubmittedJob {
func mockedClock(t *testing.T) clock.Clock {
c := clock.NewMock()
now, err := time.ParseInLocation("2006-01-02T15:04:05", "2006-01-02T15:04:05", time.Local)
assert.NoError(t, err)
require.NoError(t, err)
c.Set(now)
return c
}
@ -67,7 +67,7 @@ func TestSimpleBlenderRenderHappy(t *testing.T) {
c := mockedClock(t)
s, err := Load(c)
assert.NoError(t, err)
require.NoError(t, err)
// Compiling a job should be really fast.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
@ -172,7 +172,7 @@ func TestSimpleBlenderRenderWindowsPaths(t *testing.T) {
c := mockedClock(t)
s, err := Load(c)
assert.NoError(t, err)
require.NoError(t, err)
// Compiling a job should be really fast.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
@ -307,10 +307,9 @@ func TestEtag(t *testing.T) {
{ // Test without etag.
aj, err := s.Compile(ctx, sj)
if assert.NoError(t, err, "job without etag should always be accepted") {
require.NoError(t, err, "job without etag should always be accepted")
assert.NotNil(t, aj)
}
}
{ // Test with bad etag.
sj.TypeEtag = ptr("this is not the right etag")
@ -321,10 +320,9 @@ func TestEtag(t *testing.T) {
{ // Test with correct etag.
sj.TypeEtag = ptr(expectEtag)
aj, err := s.Compile(ctx, sj)
if assert.NoError(t, err, "job with correct etag should be accepted") {
require.NoError(t, err, "job with correct etag should be accepted")
assert.NotNil(t, aj)
}
}
}
func TestComplexFrameRange(t *testing.T) {

View File

@ -10,6 +10,7 @@ import (
"time"
"github.com/dop251/goja"
"github.com/google/shlex"
"github.com/rs/zerolog/log"
)
@ -33,6 +34,19 @@ func jsFormatTimestampLocal(timestamp time.Time) string {
return timestamp.Local().Format("2006-01-02_150405")
}
// jsShellSplit splits a string into its parts, using CLI/shell semantics.
//
// On malformed input (such as an unterminated quote) it does not return an
// error; instead it panics with a goja.Value, which goja turns into a
// catchable JavaScript exception in the calling job compiler script.
func jsShellSplit(vm *goja.Runtime, someCLIArgs string) []string {
	split, err := shlex.Split(someCLIArgs)
	if err != nil {
		// Generate a JS exception by panicing with a Goja Value.
		exception := vm.ToValue(err)
		panic(exception)
	}
	return split
}
type ErrInvalidRange struct {
Range string // The frame range that was invalid.
Message string // The error message

View File

@ -5,12 +5,31 @@ package job_compilers
import (
"testing"
"github.com/dop251/goja"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestShellSplitHappy checks that shell-style quoting is honoured when
// splitting a command line into separate arguments.
func TestShellSplitHappy(t *testing.T) {
	parts := jsShellSplit(nil, "--python-expr 'print(1 + 1)'")
	assert.Equal(t, []string{"--python-expr", "print(1 + 1)"}, parts)
}
// TestShellSplitFailure checks that invalid quoting causes a panic, which the
// Goja VM will translate into a JavaScript exception.
func TestShellSplitFailure(t *testing.T) {
	vm := goja.New()

	// Testing that a goja.Value is used for the panic is a bit tricky, so just
	// test that the function panics.
	assert.Panics(t, func() {
		jsShellSplit(vm, "--python-expr invalid_quoting(1 + 1)'")
	})
}
// TestFrameChunkerHappyBlenderStyle checks chunking of a Blender-style frame
// range ("1..10"), including an overlapping sub-range ("3..8").
func TestFrameChunkerHappyBlenderStyle(t *testing.T) {
	chunks, err := jsFrameChunker("1..10,20..25,40,3..8", 4)
	// Diff residue removed: the stale pre-change assert.NoError duplicated this line.
	require.NoError(t, err)
	assert.Equal(t, []string{"1-4", "5-8", "9,10,20,21", "22-25", "40"}, chunks)
}
@ -21,24 +40,24 @@ func TestFrameChunkerHappySmallInput(t *testing.T) {
// Just one frame.
chunks, err := jsFrameChunker("47", 4)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, []string{"47"}, chunks)
// Just one range of exactly one chunk.
chunks, err = jsFrameChunker("1-3", 3)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, []string{"1-3"}, chunks)
}
// TestFrameChunkerHappyRegularStyle checks chunking of dash-separated frame
// ranges ("1-10").
func TestFrameChunkerHappyRegularStyle(t *testing.T) {
	chunks, err := jsFrameChunker("1-10,20-25,40", 4)
	// Diff residue removed: the stale pre-change assert.NoError duplicated this line.
	require.NoError(t, err)
	assert.Equal(t, []string{"1-4", "5-8", "9,10,20,21", "22-25", "40"}, chunks)
}
// TestFrameChunkerHappyExtraWhitespace checks that spaces, tabs, and newlines
// inside the frame-range string are tolerated.
func TestFrameChunkerHappyExtraWhitespace(t *testing.T) {
	chunks, err := jsFrameChunker("  1 .. 10,\t20..25\n,40  ", 4)
	// Diff residue removed: the stale pre-change assert.NoError duplicated this line.
	require.NoError(t, err)
	assert.Equal(t, []string{"1-4", "5-8", "9,10,20,21", "22-25", "40"}, chunks)
}
@ -50,7 +69,7 @@ func TestFrameChunkerUnhappy(t *testing.T) {
func TestFrameRangeExplode(t *testing.T) {
frames, err := frameRangeExplode("1..10,20..25,40")
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, []int{
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
20, 21, 22, 23, 24, 25, 40,

View File

@ -2,6 +2,7 @@
const JOB_TYPE = {
label: "Simple Blender Render",
description: "Render a sequence of frames, and create a preview video file",
settings: [
// Settings for artists to determine:
{ key: "frames", type: "string", required: true, eval: "f'{C.scene.frame_start}-{C.scene.frame_end}'",

View File

@ -140,6 +140,9 @@ func newGojaVM(registry *require.Registry) *goja.Runtime {
mustSet("alert", jsAlert)
mustSet("frameChunker", jsFrameChunker)
mustSet("formatTimestampLocal", jsFormatTimestampLocal)
mustSet("shellSplit", func(cliArgs string) []string {
return jsShellSplit(vm, cliArgs)
})
// Pre-import some useful modules.
registry.Enable(vm)

View File

@ -2,6 +2,7 @@
const JOB_TYPE = {
label: "Simple Blender Render",
description: "Render a sequence of frames, and create a preview video file",
settings: [
// Settings for artists to determine:
{ key: "frames", type: "string", required: true,

View File

@ -8,12 +8,13 @@ import (
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestLoadScriptsFrom_skip_nonjs ensures that a directory without any .js
// files loads without error and yields no job compilers.
func TestLoadScriptsFrom_skip_nonjs(t *testing.T) {
	thisDirFS := os.DirFS(".")
	compilers, err := loadScriptsFrom(thisDirFS)
	// Diff residue removed: the stale pre-change assert.NoError duplicated this line.
	require.NoError(t, err, "input without JS files should not cause errors")
	assert.Empty(t, compilers)
}
@ -21,7 +22,7 @@ func TestLoadScriptsFrom_on_disk_js(t *testing.T) {
scriptsFS := os.DirFS("scripts-for-unittest")
compilers, err := loadScriptsFrom(scriptsFS)
assert.NoError(t, err)
require.NoError(t, err)
expectKeys := map[string]bool{
"echo-and-sleep": true,
"simple-blender-render": true,
@ -34,7 +35,7 @@ func TestLoadScriptsFrom_embedded(t *testing.T) {
initEmbeddedFS()
compilers, err := loadScriptsFrom(embeddedScriptsFS)
assert.NoError(t, err)
require.NoError(t, err)
expectKeys := map[string]bool{
"echo-sleep-test": true,
"simple-blender-render": true,
@ -48,7 +49,7 @@ func BenchmarkLoadScripts_fromEmbedded(b *testing.B) {
for i := 0; i < b.N; i++ {
compilers, err := loadScriptsFrom(embeddedScriptsFS)
assert.NoError(b, err)
require.NoError(b, err)
assert.NotEmpty(b, compilers)
}
}
@ -59,7 +60,7 @@ func BenchmarkLoadScripts_fromDisk(b *testing.B) {
onDiskFS := os.DirFS("scripts-for-unittest")
for i := 0; i < b.N; i++ {
compilers, err := loadScriptsFrom(onDiskFS)
assert.NoError(b, err)
require.NoError(b, err)
assert.NotEmpty(b, compilers)
}
}

View File

@ -9,6 +9,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/job_deleter/mocks"
"projects.blender.org/studio/flamenco/internal/manager/persistence"
"projects.blender.org/studio/flamenco/pkg/shaman"
@ -32,16 +33,16 @@ func TestQueueJobDeletion(t *testing.T) {
job1 := &persistence.Job{UUID: "2f7d910f-08a6-4b0f-8ecb-b3946939ed1b"}
mocks.persist.EXPECT().RequestJobDeletion(mocks.ctx, job1)
assert.NoError(t, s.QueueJobDeletion(mocks.ctx, job1))
require.NoError(t, s.QueueJobDeletion(mocks.ctx, job1))
// Call twice more to overflow the queue.
job2 := &persistence.Job{UUID: "e8fbe41c-ed24-46df-ba63-8d4f5524071b"}
mocks.persist.EXPECT().RequestJobDeletion(mocks.ctx, job2)
assert.NoError(t, s.QueueJobDeletion(mocks.ctx, job2))
require.NoError(t, s.QueueJobDeletion(mocks.ctx, job2))
job3 := &persistence.Job{UUID: "deeab6ba-02cd-42c0-b7bc-2367a2f04c7d"}
mocks.persist.EXPECT().RequestJobDeletion(mocks.ctx, job3)
assert.NoError(t, s.QueueJobDeletion(mocks.ctx, job3))
require.NoError(t, s.QueueJobDeletion(mocks.ctx, job3))
if assert.Len(t, s.queue, 2, "the first two job UUID should be queued") {
assert.Equal(t, job1.UUID, <-s.queue)
@ -111,7 +112,7 @@ func TestDeleteJobWithoutShaman(t *testing.T) {
mocks.persist.EXPECT().DeleteJob(mocks.ctx, jobUUID)
mocks.persist.EXPECT().RequestIntegrityCheck()
mocks.broadcaster.EXPECT().BroadcastJobUpdate(gomock.Any())
assert.NoError(t, s.deleteJob(mocks.ctx, jobUUID))
require.NoError(t, s.deleteJob(mocks.ctx, jobUUID))
}
func TestDeleteJobWithShaman(t *testing.T) {
@ -163,7 +164,7 @@ func TestDeleteJobWithShaman(t *testing.T) {
mocks.persist.EXPECT().DeleteJob(mocks.ctx, jobUUID)
mocks.persist.EXPECT().RequestIntegrityCheck()
mocks.broadcaster.EXPECT().BroadcastJobUpdate(gomock.Any())
assert.NoError(t, s.deleteJob(mocks.ctx, jobUUID))
require.NoError(t, s.deleteJob(mocks.ctx, jobUUID))
}
func jobDeleterTestFixtures(t *testing.T) (*Service, func(), *JobDeleterMocks) {

View File

@ -10,6 +10,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/local_storage"
)
@ -38,9 +39,9 @@ func TestQueueImage(t *testing.T) {
defer storage.MustErase()
lrp := New(storage)
assert.NoError(t, lrp.QueueImage(payload))
assert.NoError(t, lrp.QueueImage(payload))
assert.NoError(t, lrp.QueueImage(payload))
require.NoError(t, lrp.QueueImage(payload))
require.NoError(t, lrp.QueueImage(payload))
require.NoError(t, lrp.QueueImage(payload))
assert.ErrorIs(t, lrp.QueueImage(payload), ErrQueueFull)
}
@ -48,9 +49,7 @@ func TestProcessImage(t *testing.T) {
// Load the test image. Note that this intentionally has an approximate 21:9
// ratio, whereas the thumbnail specs define a 16:9 ratio.
imgBytes, err := os.ReadFile("last_rendered_test.jpg")
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
jobID := "e078438b-c9f5-43e6-9e86-52f8be91dd12"
payload := Payload{
@ -87,15 +86,11 @@ func TestProcessImage(t *testing.T) {
assertImageSize := func(spec Thumbspec) {
path := filepath.Join(jobdir, spec.Filename)
file, err := os.Open(path)
if !assert.NoError(t, err, "thumbnail %s should be openable", spec.Filename) {
return
}
require.NoError(t, err, "thumbnail %s should be openable", spec.Filename)
defer file.Close()
img, format, err := image.Decode(file)
if !assert.NoErrorf(t, err, "thumbnail %s should be decodable", spec.Filename) {
return
}
require.NoErrorf(t, err, "thumbnail %s should be decodable", spec.Filename)
assert.Equalf(t, "jpeg", format, "thumbnail %s not written in the expected format", spec.Filename)
assert.LessOrEqualf(t, img.Bounds().Dx(), spec.MaxWidth, "thumbnail %s has wrong width", spec.Filename)

View File

@ -24,16 +24,14 @@ func TestNewNextToExe(t *testing.T) {
func TestNewNextToExe_noSubdir(t *testing.T) {
exePath, err := os.Executable()
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
exeName := filepath.Base(exePath)
// The filesystem in an empty "subdirectory" next to the executable should
// contain the executable.
si := NewNextToExe("")
_, err = os.Stat(filepath.Join(si.rootPath, exeName))
assert.NoErrorf(t, err, "should be able to stat this executable %s", exeName)
require.NoErrorf(t, err, "should be able to stat this executable %s", exeName)
}
func TestForJob(t *testing.T) {
@ -52,10 +50,10 @@ func TestErase(t *testing.T) {
jobPath := si.ForJob("08e126ef-d773-468b-8bab-19a8213cf2ff")
assert.NoDirExists(t, jobPath, "getting a path should not create it")
assert.NoError(t, os.MkdirAll(jobPath, os.ModePerm))
require.NoError(t, os.MkdirAll(jobPath, os.ModePerm))
assert.DirExists(t, jobPath, "os.MkdirAll is borked")
assert.NoError(t, si.Erase())
require.NoError(t, si.Erase())
assert.NoDirExists(t, si.rootPath, "Erase() should erase the root path, and everything in it")
}
@ -66,13 +64,13 @@ func TestRemoveJobStorage(t *testing.T) {
jobPath := si.ForJob(jobUUID)
assert.NoDirExists(t, jobPath, "getting a path should not create it")
assert.NoError(t, os.MkdirAll(jobPath, os.ModePerm))
require.NoError(t, os.MkdirAll(jobPath, os.ModePerm))
assert.DirExists(t, jobPath, "os.MkdirAll is borked")
taskFile := filepath.Join(jobPath, "task-07c33f32-b345-4da9-8834-9c91532cd97e.txt")
assert.NoError(t, os.WriteFile(taskFile, []byte("dummy task log"), 0o777))
require.NoError(t, os.WriteFile(taskFile, []byte("dummy task log"), 0o777))
assert.NoError(t, si.RemoveJobStorage(context.Background(), jobUUID))
require.NoError(t, si.RemoveJobStorage(context.Background(), jobUUID))
assert.NoDirExists(t, jobPath, "RemoveJobStorage() should erase the entire job-specific storage dir, and everything in it")
// See if the test assumption (that job dir is in another sub-dir of the root,
@ -91,13 +89,13 @@ func TestRemoveJobStorageWithoutJobUUID(t *testing.T) {
jobPath := si.ForJob("")
assert.NoDirExists(t, jobPath, "getting a path should not create it")
assert.NoError(t, os.MkdirAll(jobPath, os.ModePerm))
require.NoError(t, os.MkdirAll(jobPath, os.ModePerm))
assert.DirExists(t, jobPath, "os.MkdirAll is borked")
taskFile := filepath.Join(jobPath, "task-07c33f32-b345-4da9-8834-9c91532cd97e.txt")
assert.NoError(t, os.WriteFile(taskFile, []byte("dummy task log"), 0o777))
require.NoError(t, os.WriteFile(taskFile, []byte("dummy task log"), 0o777))
assert.NoError(t, si.RemoveJobStorage(context.Background(), ""))
require.NoError(t, si.RemoveJobStorage(context.Background(), ""))
assert.NoDirExists(t, jobPath, "RemoveJobStorage() should erase the entire job-specific storage dir, and everything in it")
// See if the test assumption (that a jobless dir is directly inside the root) still holds.

View File

@ -5,14 +5,15 @@ package persistence
import (
"context"
"database/sql"
"fmt"
"time"
"github.com/glebarez/sqlite"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
// sqlite "projects.blender.org/studio/flamenco/pkg/gorm-modernc-sqlite"
"github.com/glebarez/sqlite"
"projects.blender.org/studio/flamenco/internal/manager/persistence/sqlc"
)
// DB provides the database interface.
@ -74,6 +75,10 @@ func OpenDB(ctx context.Context, dsn string) (*DB, error) {
return nil, ErrIntegrity
}
// Perform another vacuum after database migration, as that may have copied a
// lot of data and then dropped another lot of data.
db.vacuum()
closeConnOnReturn = false
return db, nil
}
@ -171,6 +176,25 @@ func (db *DB) Close() error {
return sqldb.Close()
}
// queries returns the SQLC Queries struct, connected to this database.
// It is intended that all GORM queries will be migrated to use this interface
// instead.
func (db *DB) queries() (*sqlc.Queries, error) {
	lowLevelDB, err := db.gormDB.DB()
	if err != nil {
		return nil, fmt.Errorf("could not get low-level database driver: %w", err)
	}
	queries := sqlc.New(lowLevelDB)
	return queries, nil
}
// now returns the result of `nowFunc()` wrapped in a sql.NullTime.
func (db *DB) now() sql.NullTime {
	timestamp := db.gormDB.NowFunc()
	return sql.NullTime{Time: timestamp, Valid: true}
}
func (db *DB) pragmaForeignKeys(enabled bool) error {
var (
value int

View File

@ -13,7 +13,7 @@ import (
var ErrIntegrity = errors.New("database integrity check failed")
const (
integrityCheckTimeout = 2 * time.Second
integrityCheckTimeout = 10 * time.Second
)
type PragmaIntegrityCheckResult struct {
@ -78,6 +78,8 @@ func (db *DB) performIntegrityCheck(ctx context.Context) (ok bool) {
log.Debug().Msg("database: performing integrity check")
db.ensureForeignKeysEnabled()
if !db.pragmaIntegrityCheck(checkCtx) {
return false
}
@ -159,3 +161,29 @@ func (db *DB) pragmaForeignKeyCheck(ctx context.Context) (ok bool) {
return false
}
// ensureForeignKeysEnabled checks whether foreign keys are enabled, and if not,
// tries to enable them.
//
// This is likely caused by either GORM or its embedded SQLite creating a new
// connection to the low-level SQLite driver. Unfortunately the GORM-embedded
// SQLite doesn't have an 'on-connect' callback function to always enable
// foreign keys.
func (db *DB) ensureForeignKeysEnabled() {
	enabled, err := db.areForeignKeysEnabled()
	switch {
	case err != nil:
		log.Error().AnErr("cause", err).Msg("database: could not check whether foreign keys are enabled")
	case enabled:
		// Nothing to do, foreign keys are already on.
	default:
		log.Warn().Msg("database: foreign keys are disabled, re-enabling them")
		if err := db.pragmaForeignKeys(true); err != nil {
			log.Error().AnErr("cause", err).Msg("database: error re-enabling foreign keys")
		}
	}
}

View File

@ -17,6 +17,7 @@ import (
"gorm.io/gorm/clause"
"projects.blender.org/studio/flamenco/internal/manager/job_compilers"
"projects.blender.org/studio/flamenco/internal/manager/persistence/sqlc"
"projects.blender.org/studio/flamenco/pkg/api"
)
@ -252,19 +253,20 @@ func (db *DB) storeAuthoredJobTaks(
// FetchJob fetches a single job, without fetching its tasks.
func (db *DB) FetchJob(ctx context.Context, jobUUID string) (*Job, error) {
dbJob := Job{}
findResult := db.gormDB.WithContext(ctx).
Limit(1).
Preload("WorkerTag").
Find(&dbJob, "uuid = ?", jobUUID)
if findResult.Error != nil {
return nil, jobError(findResult.Error, "fetching job")
}
if dbJob.ID == 0 {
return nil, ErrJobNotFound
queries, err := db.queries()
if err != nil {
return nil, err
}
return &dbJob, nil
sqlcJob, err := queries.FetchJob(ctx, jobUUID)
switch {
case errors.Is(err, sql.ErrNoRows):
return nil, ErrJobNotFound
case err != nil:
return nil, jobError(err, "fetching job")
}
return convertSqlcJob(sqlcJob)
}
// DeleteJob deletes a job from the database.
@ -279,24 +281,38 @@ func (db *DB) DeleteJob(ctx context.Context, jobUUID string) error {
return ErrDeletingWithoutFK
}
tx := db.gormDB.WithContext(ctx).
Where("uuid = ?", jobUUID).
Delete(&Job{})
if tx.Error != nil {
return jobError(tx.Error, "deleting job")
queries, err := db.queries()
if err != nil {
return err
}
if err := queries.DeleteJob(ctx, jobUUID); err != nil {
return jobError(err, "deleting job")
}
return nil
}
// RequestJobDeletion sets the job's "DeletionRequestedAt" field to "now".
func (db *DB) RequestJobDeletion(ctx context.Context, j *Job) error {
j.DeleteRequestedAt.Time = db.gormDB.NowFunc()
j.DeleteRequestedAt.Valid = true
tx := db.gormDB.WithContext(ctx).
Model(j).
Updates(Job{DeleteRequestedAt: j.DeleteRequestedAt})
if tx.Error != nil {
return jobError(tx.Error, "queueing job for deletion")
queries, err := db.queries()
if err != nil {
return err
}
// Update the given job itself, so we don't have to re-fetch it from the database.
j.DeleteRequestedAt = db.now()
params := sqlc.RequestJobDeletionParams{
Now: j.DeleteRequestedAt,
JobID: int64(j.ID),
}
log.Trace().
Str("job", j.UUID).
Time("deletedAt", params.Now.Time).
Msg("database: marking job as deletion-requested")
if err := queries.RequestJobDeletion(ctx, params); err != nil {
return jobError(err, "queueing job for deletion")
}
return nil
}
@ -304,98 +320,114 @@ func (db *DB) RequestJobDeletion(ctx context.Context, j *Job) error {
// RequestJobMassDeletion sets multiple job's "DeletionRequestedAt" field to "now".
// The list of affected job UUIDs is returned.
func (db *DB) RequestJobMassDeletion(ctx context.Context, lastUpdatedMax time.Time) ([]string, error) {
// In order to be able to report which jobs were affected, first fetch the
// list of jobs, then update them.
var jobs []*Job
selectResult := db.gormDB.WithContext(ctx).
Model(&Job{}).
Select("uuid").
Where("updated_at <= ?", lastUpdatedMax).
Scan(&jobs)
if selectResult.Error != nil {
return nil, jobError(selectResult.Error, "fetching jobs by last-modified timestamp")
queries, err := db.queries()
if err != nil {
return nil, err
}
if len(jobs) == 0 {
// In order to be able to report which jobs were affected, first fetch the
// list of jobs, then update them.
uuids, err := queries.FetchJobUUIDsUpdatedBefore(ctx, sql.NullTime{
Time: lastUpdatedMax,
Valid: true,
})
switch {
case err != nil:
return nil, jobError(err, "fetching jobs by last-modified timestamp")
case len(uuids) == 0:
return nil, ErrJobNotFound
}
// Convert array of jobs to array of UUIDs.
uuids := make([]string, len(jobs))
for index := range jobs {
uuids[index] = jobs[index].UUID
}
// Update the selected jobs.
deleteRequestedAt := sql.NullTime{
Time: db.gormDB.NowFunc(),
Valid: true,
params := sqlc.RequestMassJobDeletionParams{
Now: db.now(),
UUIDs: uuids,
}
tx := db.gormDB.WithContext(ctx).
Model(Job{}).
Where("uuid in ?", uuids).
Updates(Job{DeleteRequestedAt: deleteRequestedAt})
if tx.Error != nil {
return nil, jobError(tx.Error, "queueing jobs for deletion")
if err := queries.RequestMassJobDeletion(ctx, params); err != nil {
return nil, jobError(err, "marking jobs as deletion-requested")
}
return uuids, nil
}
func (db *DB) FetchJobsDeletionRequested(ctx context.Context) ([]string, error) {
var jobs []*Job
tx := db.gormDB.WithContext(ctx).
Model(&Job{}).
Select("UUID").
Where("delete_requested_at is not NULL").
Order("delete_requested_at").
Scan(&jobs)
if tx.Error != nil {
return nil, jobError(tx.Error, "fetching jobs marked for deletion")
queries, err := db.queries()
if err != nil {
return nil, err
}
uuids := make([]string, len(jobs))
for i := range jobs {
uuids[i] = jobs[i].UUID
uuids, err := queries.FetchJobsDeletionRequested(ctx)
if err != nil {
return nil, jobError(err, "fetching jobs marked for deletion")
}
return uuids, nil
}
func (db *DB) FetchJobsInStatus(ctx context.Context, jobStatuses ...api.JobStatus) ([]*Job, error) {
var jobs []*Job
tx := db.gormDB.WithContext(ctx).
Model(&Job{}).
Where("status in ?", jobStatuses).
Scan(&jobs)
if tx.Error != nil {
return nil, jobError(tx.Error, "fetching jobs in status %q", jobStatuses)
queries, err := db.queries()
if err != nil {
return nil, err
}
statuses := []string{}
for _, status := range jobStatuses {
statuses = append(statuses, string(status))
}
sqlcJobs, err := queries.FetchJobsInStatus(ctx, statuses)
if err != nil {
return nil, jobError(err, "fetching jobs in status %q", jobStatuses)
}
var jobs []*Job
for index := range sqlcJobs {
job, err := convertSqlcJob(sqlcJobs[index])
if err != nil {
return nil, jobError(err, "converting fetched jobs in status %q", jobStatuses)
}
jobs = append(jobs, job)
}
return jobs, nil
}
// SaveJobStatus saves the job's Status and Activity fields.
func (db *DB) SaveJobStatus(ctx context.Context, j *Job) error {
tx := db.gormDB.WithContext(ctx).
Model(j).
Updates(Job{Status: j.Status, Activity: j.Activity})
if tx.Error != nil {
return jobError(tx.Error, "saving job status")
queries, err := db.queries()
if err != nil {
return err
}
params := sqlc.SaveJobStatusParams{
Now: db.now(),
ID: int64(j.ID),
Status: string(j.Status),
Activity: j.Activity,
}
err = queries.SaveJobStatus(ctx, params)
if err != nil {
return jobError(err, "saving job status")
}
return nil
}
// SaveJobPriority saves the job's Priority field.
func (db *DB) SaveJobPriority(ctx context.Context, j *Job) error {
tx := db.gormDB.WithContext(ctx).
Model(j).
Updates(Job{Priority: j.Priority})
if tx.Error != nil {
return jobError(tx.Error, "saving job priority")
queries, err := db.queries()
if err != nil {
return err
}
params := sqlc.SaveJobPriorityParams{
Now: db.now(),
ID: int64(j.ID),
Priority: int64(j.Priority),
}
err = queries.SaveJobPriority(ctx, params)
if err != nil {
return jobError(err, "saving job priority")
}
return nil
}
@ -404,12 +436,19 @@ func (db *DB) SaveJobPriority(ctx context.Context, j *Job) error {
// NOTE: this function does NOT update the job's `UpdatedAt` field. This is
// necessary for `cmd/shaman-checkout-id-setter` to do its work quietly.
func (db *DB) SaveJobStorageInfo(ctx context.Context, j *Job) error {
tx := db.gormDB.WithContext(ctx).
Model(j).
Omit("UpdatedAt").
Updates(Job{Storage: j.Storage})
if tx.Error != nil {
return jobError(tx.Error, "saving job storage")
queries, err := db.queries()
if err != nil {
return err
}
params := sqlc.SaveJobStorageInfoParams{
ID: int64(j.ID),
StorageShamanCheckoutID: j.Storage.ShamanCheckoutID,
}
err = queries.SaveJobStorageInfo(ctx, params)
if err != nil {
return jobError(err, "saving job storage")
}
return nil
}
@ -713,3 +752,42 @@ func (db *DB) FetchTaskFailureList(ctx context.Context, t *Task) ([]*Worker, err
return workers, tx.Error
}
// convertSqlcJob converts a job from the SQLC-generated model to the model
// expected by the rest of the code. This is mostly in place to aid in the GORM
// to SQLC migration. It is intended that eventually the rest of the code will
// use the same SQLC-generated model.
func convertSqlcJob(job sqlc.Job) (*Job, error) {
	dbJob := Job{
		Model: Model{
			ID:        uint(job.ID),
			CreatedAt: job.CreatedAt,
			UpdatedAt: job.UpdatedAt.Time,
		},
		UUID:              job.UUID,
		Name:              job.Name,
		JobType:           job.JobType,
		Priority:          int(job.Priority),
		Status:            api.JobStatus(job.Status),
		Activity:          job.Activity,
		DeleteRequestedAt: job.DeleteRequestedAt,
		Storage: JobStorageInfo{
			ShamanCheckoutID: job.StorageShamanCheckoutID,
		},
	}

	// Pass the UUID as a format argument instead of pre-formatting with
	// fmt.Sprintf(…, err): jobError already wraps `err`, so also embedding it
	// via %v would duplicate the error text in the message.
	if err := json.Unmarshal(job.Settings, &dbJob.Settings); err != nil {
		return nil, jobError(err, "job %s has invalid settings", job.UUID)
	}
	if err := json.Unmarshal(job.Metadata, &dbJob.Metadata); err != nil {
		return nil, jobError(err, "job %s has invalid metadata", job.UUID)
	}

	if job.WorkerTagID.Valid {
		workerTagID := uint(job.WorkerTagID.Int64)
		dbJob.WorkerTagID = &workerTagID
	}

	return &dbJob, nil
}

View File

@ -18,11 +18,11 @@ func TestAddWorkerToJobBlocklist(t *testing.T) {
{
// Add a worker to the block list.
err := db.AddWorkerToJobBlocklist(ctx, job, worker, "blender")
assert.NoError(t, err)
require.NoError(t, err)
list := []JobBlock{}
tx := db.gormDB.Model(&JobBlock{}).Scan(&list)
assert.NoError(t, tx.Error)
require.NoError(t, tx.Error)
if assert.Len(t, list, 1) {
entry := list[0]
assert.Equal(t, entry.JobID, job.ID)
@ -34,11 +34,11 @@ func TestAddWorkerToJobBlocklist(t *testing.T) {
{
// Adding the same worker again should be a no-op.
err := db.AddWorkerToJobBlocklist(ctx, job, worker, "blender")
assert.NoError(t, err)
require.NoError(t, err)
list := []JobBlock{}
tx := db.gormDB.Model(&JobBlock{}).Scan(&list)
assert.NoError(t, tx.Error)
require.NoError(t, tx.Error)
assert.Len(t, list, 1, "No new entry should have been created")
}
}
@ -50,10 +50,10 @@ func TestFetchJobBlocklist(t *testing.T) {
// Add a worker to the block list.
worker := createWorker(ctx, t, db)
err := db.AddWorkerToJobBlocklist(ctx, job, worker, "blender")
assert.NoError(t, err)
require.NoError(t, err)
list, err := db.FetchJobBlocklist(ctx, job.UUID)
assert.NoError(t, err)
require.NoError(t, err)
if assert.Len(t, list, 1) {
entry := list[0]
@ -73,17 +73,17 @@ func TestClearJobBlocklist(t *testing.T) {
// Add a worker and some entries to the block list.
worker := createWorker(ctx, t, db)
err := db.AddWorkerToJobBlocklist(ctx, job, worker, "blender")
assert.NoError(t, err)
require.NoError(t, err)
err = db.AddWorkerToJobBlocklist(ctx, job, worker, "ffmpeg")
assert.NoError(t, err)
require.NoError(t, err)
// Clear the blocklist.
err = db.ClearJobBlocklist(ctx, job)
assert.NoError(t, err)
require.NoError(t, err)
// Check that it is indeed empty.
list, err := db.FetchJobBlocklist(ctx, job.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Empty(t, list)
}
@ -94,17 +94,17 @@ func TestRemoveFromJobBlocklist(t *testing.T) {
// Add a worker and some entries to the block list.
worker := createWorker(ctx, t, db)
err := db.AddWorkerToJobBlocklist(ctx, job, worker, "blender")
assert.NoError(t, err)
require.NoError(t, err)
err = db.AddWorkerToJobBlocklist(ctx, job, worker, "ffmpeg")
assert.NoError(t, err)
require.NoError(t, err)
// Remove an entry.
err = db.RemoveFromJobBlocklist(ctx, job.UUID, worker.UUID, "ffmpeg")
assert.NoError(t, err)
require.NoError(t, err)
// Check that the other entry is still there.
list, err := db.FetchJobBlocklist(ctx, job.UUID)
assert.NoError(t, err)
require.NoError(t, err)
if assert.Len(t, list, 1) {
entry := list[0]
@ -120,7 +120,7 @@ func TestWorkersLeftToRun(t *testing.T) {
// No workers.
left, err := db.WorkersLeftToRun(ctx, job, "blender")
assert.NoError(t, err)
require.NoError(t, err)
assert.Empty(t, left)
worker1 := createWorker(ctx, t, db)
@ -146,30 +146,27 @@ func TestWorkersLeftToRun(t *testing.T) {
// Three workers, no blocklist.
left, err = db.WorkersLeftToRun(ctx, job, "blender")
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Equal(t, uuidMap(worker1, worker2, workerC1), left)
}
// Two workers, one blocked.
_ = db.AddWorkerToJobBlocklist(ctx, job, worker1, "blender")
left, err = db.WorkersLeftToRun(ctx, job, "blender")
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Equal(t, uuidMap(worker2, workerC1), left)
}
// All workers blocked.
_ = db.AddWorkerToJobBlocklist(ctx, job, worker2, "blender")
_ = db.AddWorkerToJobBlocklist(ctx, job, workerC1, "blender")
left, err = db.WorkersLeftToRun(ctx, job, "blender")
assert.NoError(t, err)
require.NoError(t, err)
assert.Empty(t, left)
// Two workers, unknown job.
fakeJob := Job{Model: Model{ID: 327}}
left, err = db.WorkersLeftToRun(ctx, &fakeJob, "blender")
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Equal(t, uuidMap(worker1, worker2, workerC1), left)
}
}
func TestWorkersLeftToRunWithTags(t *testing.T) {
@ -233,7 +230,7 @@ func TestWorkersLeftToRunWithTags(t *testing.T) {
// All taged workers blocked.
_ = db.AddWorkerToJobBlocklist(ctx, job, workerC13, "blender")
left, err = db.WorkersLeftToRun(ctx, job, "blender")
assert.NoError(t, err)
require.NoError(t, err)
assert.Empty(t, left)
}
@ -261,25 +258,21 @@ func TestCountTaskFailuresOfWorker(t *testing.T) {
// Multiple failures.
numBlender1, err := db.CountTaskFailuresOfWorker(ctx, dbJob, worker1, "blender")
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Equal(t, 2, numBlender1)
}
// Single failure, but multiple tasks exist of this type.
numBlender2, err := db.CountTaskFailuresOfWorker(ctx, dbJob, worker2, "blender")
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Equal(t, 1, numBlender2)
}
// Single failure, only one task of this type exists.
numFFMpeg1, err := db.CountTaskFailuresOfWorker(ctx, dbJob, worker1, "ffmpeg")
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Equal(t, 1, numFFMpeg1)
}
// No failure.
numFFMpeg2, err := db.CountTaskFailuresOfWorker(ctx, dbJob, worker2, "ffmpeg")
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Equal(t, 0, numFFMpeg2)
}
}

View File

@ -86,3 +86,33 @@ func (db *DB) QueryJobTaskSummaries(ctx context.Context, jobUUID string) ([]*Tas
return result, tx.Error
}
// JobStatusCount is a mapping from job status to the number of jobs in that status.
type JobStatusCount map[api.JobStatus]int
// SummarizeJobStatuses counts the jobs per status, returning a mapping from
// job status to the number of jobs in that status.
func (db *DB) SummarizeJobStatuses(ctx context.Context) (JobStatusCount, error) {
	logger := log.Ctx(ctx)
	logger.Debug().Msg("database: summarizing job statuses")

	// Query the database using a data structure that's easy to handle in GORM.
	// The field names must match the column aliases in the SELECT below.
	type countRow struct {
		Status      api.JobStatus
		StatusCount int
	}
	var rows []*countRow
	tx := db.gormDB.WithContext(ctx).Model(&Job{}).
		Select("status as Status", "count(id) as StatusCount").
		Group("status").
		Scan(&rows)
	if tx.Error != nil {
		return nil, jobError(tx.Error, "summarizing job statuses")
	}

	// Convert the array-of-structs to a map that's easier to handle by the caller.
	summary := make(JobStatusCount)
	for _, row := range rows {
		summary[row.Status] = row.StatusCount
	}

	return summary, nil
}

View File

@ -4,9 +4,12 @@ package persistence
// SPDX-License-Identifier: GPL-3.0-or-later
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/job_compilers"
"projects.blender.org/studio/flamenco/internal/uuid"
@ -26,14 +29,14 @@ func TestSimpleQuery(t *testing.T) {
result, err := db.QueryJobs(ctx, api.JobsQuery{
StatusIn: &[]api.JobStatus{api.JobStatusActive, api.JobStatusCanceled},
})
assert.NoError(t, err)
require.NoError(t, err)
assert.Len(t, result, 0)
// Check job was returned properly on correct status.
result, err = db.QueryJobs(ctx, api.JobsQuery{
StatusIn: &[]api.JobStatus{api.JobStatusUnderConstruction, api.JobStatusCanceled},
})
assert.NoError(t, err)
require.NoError(t, err)
if !assert.Len(t, result, 1) {
t.FailNow()
}
@ -65,7 +68,7 @@ func TestQueryMetadata(t *testing.T) {
AdditionalProperties: map[string]string{
"project": "Secret Future Project",
}}})
assert.NoError(t, err)
require.NoError(t, err)
assert.Len(t, result, 0)
// Check job was returned properly when querying for the right project.
@ -74,7 +77,7 @@ func TestQueryMetadata(t *testing.T) {
AdditionalProperties: map[string]string{
"project": testJob.Metadata["project"],
}}})
assert.NoError(t, err)
require.NoError(t, err)
if !assert.Len(t, result, 1) {
t.FailNow()
}
@ -86,7 +89,7 @@ func TestQueryMetadata(t *testing.T) {
AdditionalProperties: map[string]string{
"project": otherJob.Metadata["project"],
}}})
assert.NoError(t, err)
require.NoError(t, err)
if !assert.Len(t, result, 1) {
t.FailNow()
}
@ -97,7 +100,7 @@ func TestQueryMetadata(t *testing.T) {
OrderBy: &[]string{"status"},
Metadata: &api.JobsQuery_Metadata{AdditionalProperties: map[string]string{}},
})
assert.NoError(t, err)
require.NoError(t, err)
if !assert.Len(t, result, 2) {
t.FailNow()
}
@ -129,15 +132,70 @@ func TestQueryJobTaskSummaries(t *testing.T) {
// Sanity check for the above code, there should be 6 tasks overall, 3 per job.
var numTasks int64
tx := db.gormDB.Model(&Task{}).Count(&numTasks)
assert.NoError(t, tx.Error)
require.NoError(t, tx.Error)
assert.Equal(t, int64(6), numTasks)
// Get the task summaries of a particular job.
summaries, err := db.QueryJobTaskSummaries(ctx, job.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Len(t, summaries, len(expectTaskUUIDs))
for _, summary := range summaries {
assert.True(t, expectTaskUUIDs[summary.UUID], "%q should be in %v", summary.UUID, expectTaskUUIDs)
}
}
// TestSummarizeJobStatuses exercises the per-status job count summary through
// job creation, status changes, and job deletion.
func TestSummarizeJobStatuses(t *testing.T) {
	ctx, close, db, job1, authoredJob1 := jobTasksTestFixtures(t)
	defer close()

	// Create a second job so there is more than one row to aggregate.
	authoredJob2 := duplicateJobAndTasks(authoredJob1)
	job2 := persistAuthoredJob(t, ctx, db, authoredJob2)

	// Both jobs start out under construction.
	got, err := db.SummarizeJobStatuses(ctx)
	require.NoError(t, err)
	assert.Equal(t, JobStatusCount{api.JobStatusUnderConstruction: 2}, got)

	// Give each job its own unique status.
	job1.Status = api.JobStatusQueued
	require.NoError(t, db.SaveJobStatus(ctx, job1))
	job2.Status = api.JobStatusFailed
	require.NoError(t, db.SaveJobStatus(ctx, job2))

	got, err = db.SummarizeJobStatuses(ctx)
	require.NoError(t, err)
	assert.Equal(t, JobStatusCount{
		api.JobStatusQueued: 1,
		api.JobStatusFailed: 1,
	}, got)

	// After deleting all jobs the summary should be empty.
	require.NoError(t, db.DeleteJob(ctx, job1.UUID))
	require.NoError(t, db.DeleteJob(ctx, job2.UUID))

	got, err = db.SummarizeJobStatuses(ctx)
	require.NoError(t, err)
	assert.Equal(t, JobStatusCount{}, got)
}
// TestSummarizeJobStatusesTimeout checks that a context timeout can be
// detected by inspecting the error returned by SummarizeJobStatuses.
func TestSummarizeJobStatusesTimeout(t *testing.T) {
	// NOTE: renamed the fixture's close function to `finish` so it no longer
	// shadows the Go builtin `close`.
	ctx, finish, db, _, _ := jobTasksTestFixtures(t)
	defer finish()

	subCtx, subCtxCancel := context.WithTimeout(ctx, 1*time.Nanosecond)
	defer subCtxCancel()

	// Force expiry of the context. Even though a nanosecond is quite short,
	// it is still necessary to wait for the deadline to actually pass.
	time.Sleep(2 * time.Nanosecond)

	// The deadline error should be surfaced to the caller, with no summary.
	summary, err := db.SummarizeJobStatuses(subCtx)
	assert.ErrorIs(t, err, context.DeadlineExceeded)
	assert.Nil(t, summary)
}

View File

@ -24,10 +24,10 @@ func TestStoreAuthoredJob(t *testing.T) {
job := createTestAuthoredJobWithTasks()
err := db.StoreAuthoredJob(ctx, job)
assert.NoError(t, err)
require.NoError(t, err)
fetchedJob, err := db.FetchJob(ctx, job.JobID)
assert.NoError(t, err)
require.NoError(t, err)
assert.NotNil(t, fetchedJob)
// Test contents of fetched job
@ -43,10 +43,10 @@ func TestStoreAuthoredJob(t *testing.T) {
// Fetch tasks of job.
var dbJob Job
tx := db.gormDB.Where(&Job{UUID: job.JobID}).Find(&dbJob)
assert.NoError(t, tx.Error)
require.NoError(t, tx.Error)
var tasks []Task
tx = db.gormDB.Where("job_id = ?", dbJob.ID).Find(&tasks)
assert.NoError(t, tx.Error)
require.NoError(t, tx.Error)
if len(tasks) != 3 {
t.Fatalf("expected 3 tasks, got %d", len(tasks))
@ -108,6 +108,30 @@ func TestSaveJobStorageInfo(t *testing.T) {
assert.Equal(t, startTime, updatedJob.UpdatedAt, "SaveJobStorageInfo should not touch UpdatedAt")
}
// TestSaveJobPriority checks that SaveJobPriority persists a changed job
// priority to the database.
func TestSaveJobPriority(t *testing.T) {
	ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
	defer cancel()

	// Store a job to work on.
	testJob := createTestAuthoredJobWithTasks()
	require.NoError(t, db.StoreAuthoredJob(ctx, testJob))

	const wantPriority = 47

	// Fetch the job and give it a different priority.
	fetchedJob, err := db.FetchJob(ctx, testJob.JobID)
	require.NoError(t, err)
	require.NotEqual(t, wantPriority, fetchedJob.Priority,
		"Initial priority should not be the same as what this test changes it to")

	fetchedJob.Priority = wantPriority
	require.NoError(t, db.SaveJobPriority(ctx, fetchedJob))

	// Re-fetch the job to confirm the new priority was actually persisted.
	fetchedJob, err = db.FetchJob(ctx, testJob.JobID)
	require.NoError(t, err)
	assert.EqualValues(t, wantPriority, fetchedJob.Priority)
}
func TestDeleteJob(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
defer cancel()
@ -170,7 +194,7 @@ func TestDeleteJobWithoutFK(t *testing.T) {
// Test the deletion did not happen.
_, err = db.FetchJob(ctx, authJob.JobID)
assert.NoError(t, err, "job should not have been deleted")
require.NoError(t, err, "job should not have been deleted")
}
func TestRequestJobDeletion(t *testing.T) {
@ -185,20 +209,20 @@ func TestRequestJobDeletion(t *testing.T) {
db.gormDB.NowFunc = func() time.Time { return mockNow }
err := db.RequestJobDeletion(ctx, job1)
assert.NoError(t, err)
require.NoError(t, err)
assert.True(t, job1.DeleteRequested())
assert.True(t, job1.DeleteRequestedAt.Valid)
assert.Equal(t, job1.DeleteRequestedAt.Time, mockNow)
dbJob1, err := db.FetchJob(ctx, job1.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.True(t, job1.DeleteRequested())
assert.True(t, dbJob1.DeleteRequestedAt.Valid)
assert.WithinDuration(t, mockNow, dbJob1.DeleteRequestedAt.Time, time.Second)
// Other jobs shouldn't be touched.
dbJob2, err := db.FetchJob(ctx, authoredJob2.JobID)
assert.NoError(t, err)
require.NoError(t, err)
assert.False(t, dbJob2.DeleteRequested())
assert.False(t, dbJob2.DeleteRequestedAt.Valid)
}
@ -228,7 +252,7 @@ func TestRequestJobMassDeletion(t *testing.T) {
timeOfDeleteRequest := origGormNow()
db.gormDB.NowFunc = func() time.Time { return timeOfDeleteRequest }
uuids, err := db.RequestJobMassDeletion(ctx, job3.UpdatedAt)
assert.NoError(t, err)
require.NoError(t, err)
db.gormDB.NowFunc = origGormNow
@ -288,10 +312,10 @@ func TestFetchJobsDeletionRequested(t *testing.T) {
// Ensure different requests get different timestamps,
// out of chronological order.
timestamps := []time.Time{
// timestamps for 'delete requested at' and 'updated at'
now.Add(-3 * time.Second), now.Add(-3 * time.Second),
now.Add(-1 * time.Second), now.Add(-1 * time.Second),
now.Add(-5 * time.Second), now.Add(-5 * time.Second),
// timestamps for 'delete requested at'.
now.Add(-3 * time.Second),
now.Add(-1 * time.Second),
now.Add(-5 * time.Second),
}
currentTimestampIndex := 0
db.gormDB.NowFunc = func() time.Time {
@ -301,14 +325,14 @@ func TestFetchJobsDeletionRequested(t *testing.T) {
}
err := db.RequestJobDeletion(ctx, job1)
assert.NoError(t, err)
require.NoError(t, err)
err = db.RequestJobDeletion(ctx, job2)
assert.NoError(t, err)
require.NoError(t, err)
err = db.RequestJobDeletion(ctx, job3)
assert.NoError(t, err)
require.NoError(t, err)
actualUUIDs, err := db.FetchJobsDeletionRequested(ctx)
assert.NoError(t, err)
require.NoError(t, err)
assert.Len(t, actualUUIDs, 3, "3 out of 4 jobs were marked for deletion")
// Expect UUIDs in chronological order of deletion requests, so that the
@ -322,11 +346,11 @@ func TestJobHasTasksInStatus(t *testing.T) {
defer close()
hasTasks, err := db.JobHasTasksInStatus(ctx, job, api.TaskStatusQueued)
assert.NoError(t, err)
require.NoError(t, err)
assert.True(t, hasTasks, "expected freshly-created job to have queued tasks")
hasTasks, err = db.JobHasTasksInStatus(ctx, job, api.TaskStatusActive)
assert.NoError(t, err)
require.NoError(t, err)
assert.False(t, hasTasks, "expected freshly-created job to have no active tasks")
}
@ -335,28 +359,28 @@ func TestCountTasksOfJobInStatus(t *testing.T) {
defer close()
numQueued, numTotal, err := db.CountTasksOfJobInStatus(ctx, job, api.TaskStatusQueued)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 3, numQueued)
assert.Equal(t, 3, numTotal)
// Make one task failed.
task, err := db.FetchTask(ctx, authoredJob.Tasks[0].UUID)
assert.NoError(t, err)
require.NoError(t, err)
task.Status = api.TaskStatusFailed
assert.NoError(t, db.SaveTask(ctx, task))
require.NoError(t, db.SaveTask(ctx, task))
numQueued, numTotal, err = db.CountTasksOfJobInStatus(ctx, job, api.TaskStatusQueued)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 2, numQueued)
assert.Equal(t, 3, numTotal)
numFailed, numTotal, err := db.CountTasksOfJobInStatus(ctx, job, api.TaskStatusFailed)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 1, numFailed)
assert.Equal(t, 3, numTotal)
numActive, numTotal, err := db.CountTasksOfJobInStatus(ctx, job, api.TaskStatusActive)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 0, numActive)
assert.Equal(t, 3, numTotal)
}
@ -370,7 +394,7 @@ func TestCheckIfJobsHoldLargeNumOfTasks(t *testing.T) {
defer close()
numQueued, numTotal, err := db.CountTasksOfJobInStatus(ctx, job, api.TaskStatusQueued)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, numtasks, numQueued)
assert.Equal(t, numtasks, numTotal)
@ -392,22 +416,22 @@ func TestFetchJobsInStatus(t *testing.T) {
// Query single status
jobs, err := db.FetchJobsInStatus(ctx, api.JobStatusUnderConstruction)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, []*Job{job1, job2, job3}, jobs)
// Query two statuses, where only one matches all jobs.
jobs, err = db.FetchJobsInStatus(ctx, api.JobStatusCanceled, api.JobStatusUnderConstruction)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, []*Job{job1, job2, job3}, jobs)
// Update a job status, query for two of the three used statuses.
job1.Status = api.JobStatusQueued
assert.NoError(t, db.SaveJobStatus(ctx, job1))
require.NoError(t, db.SaveJobStatus(ctx, job1))
job2.Status = api.JobStatusRequeueing
assert.NoError(t, db.SaveJobStatus(ctx, job2))
require.NoError(t, db.SaveJobStatus(ctx, job2))
jobs, err = db.FetchJobsInStatus(ctx, api.JobStatusQueued, api.JobStatusUnderConstruction)
assert.NoError(t, err)
require.NoError(t, err)
if assert.Len(t, jobs, 2) {
assert.Equal(t, job1.UUID, jobs[0].UUID)
assert.Equal(t, job3.UUID, jobs[1].UUID)
@ -419,35 +443,33 @@ func TestFetchTasksOfJobInStatus(t *testing.T) {
defer close()
allTasks, err := db.FetchTasksOfJob(ctx, job)
if !assert.NoError(t, err) {
return
}
require.NoError(t, err)
assert.Equal(t, job, allTasks[0].Job, "FetchTasksOfJob should set job pointer")
tasks, err := db.FetchTasksOfJobInStatus(ctx, job, api.TaskStatusQueued)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, allTasks, tasks)
assert.Equal(t, job, tasks[0].Job, "FetchTasksOfJobInStatus should set job pointer")
// Make one task failed.
task, err := db.FetchTask(ctx, authoredJob.Tasks[0].UUID)
assert.NoError(t, err)
require.NoError(t, err)
task.Status = api.TaskStatusFailed
assert.NoError(t, db.SaveTask(ctx, task))
require.NoError(t, db.SaveTask(ctx, task))
tasks, err = db.FetchTasksOfJobInStatus(ctx, job, api.TaskStatusQueued)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, []*Task{allTasks[1], allTasks[2]}, tasks)
// Check the failed task. This cannot directly compare to `allTasks[0]`
// because saving the task above changed some of its fields.
tasks, err = db.FetchTasksOfJobInStatus(ctx, job, api.TaskStatusFailed)
assert.NoError(t, err)
require.NoError(t, err)
assert.Len(t, tasks, 1)
assert.Equal(t, allTasks[0].ID, tasks[0].ID)
tasks, err = db.FetchTasksOfJobInStatus(ctx, job, api.TaskStatusActive)
assert.NoError(t, err)
require.NoError(t, err)
assert.Empty(t, tasks)
}
@ -456,10 +478,10 @@ func TestTaskAssignToWorker(t *testing.T) {
defer close()
task, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID)
assert.NoError(t, err)
require.NoError(t, err)
w := createWorker(ctx, t, db)
assert.NoError(t, db.TaskAssignToWorker(ctx, task, w))
require.NoError(t, db.TaskAssignToWorker(ctx, task, w))
if task.Worker == nil {
t.Error("task.Worker == nil")
@ -478,38 +500,111 @@ func TestFetchTasksOfWorkerInStatus(t *testing.T) {
defer close()
task, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID)
assert.NoError(t, err)
require.NoError(t, err)
w := createWorker(ctx, t, db)
assert.NoError(t, db.TaskAssignToWorker(ctx, task, w))
require.NoError(t, db.TaskAssignToWorker(ctx, task, w))
tasks, err := db.FetchTasksOfWorkerInStatus(ctx, w, task.Status)
assert.NoError(t, err)
require.NoError(t, err)
assert.Len(t, tasks, 1, "worker should have one task in status %q", task.Status)
assert.Equal(t, task.ID, tasks[0].ID)
assert.Equal(t, task.UUID, tasks[0].UUID)
assert.NotEqual(t, api.TaskStatusCanceled, task.Status)
tasks, err = db.FetchTasksOfWorkerInStatus(ctx, w, api.TaskStatusCanceled)
assert.NoError(t, err)
require.NoError(t, err)
assert.Empty(t, tasks, "worker should have no task in status %q", w)
}
// TestFetchTasksOfWorkerInStatusOfJob checks that the function only returns
// tasks matching all three criteria: assigned to the given Worker, in the
// given status, and belonging to the given job.
func TestFetchTasksOfWorkerInStatusOfJob(t *testing.T) {
	// NOTE: renamed the fixture's close function to `finish` so it no longer
	// shadows the Go builtin `close`.
	ctx, finish, db, dbJob, authoredJob := jobTasksTestFixtures(t)
	defer finish()

	// Create multiple Workers, to test the function doesn't return tasks from
	// other Workers.
	worker := createWorker(ctx, t, db, func(worker *Worker) {
		worker.UUID = "43300628-5f3b-4724-ab30-9821af8bda86"
	})
	otherWorker := createWorker(ctx, t, db, func(worker *Worker) {
		worker.UUID = "2327350f-75ec-4b0e-bd28-31a7b045c85c"
	})

	// Create another job, to make sure the function under test doesn't return
	// tasks from other jobs.
	otherJob := duplicateJobAndTasks(authoredJob)
	otherJob.Name = "The other job"
	persistAuthoredJob(t, ctx, db, otherJob)

	// Assign a task from each job to each Worker.
	// Also double-check the test precondition that all tasks have the same
	// status. NOTE: testify's Equal takes (expected, actual), so the status
	// constant goes first; the original had these reversed, which produces
	// confusing failure messages.
	{ // Job / Worker gets two tasks, so the happy flow below returns multiple.
		task1, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID)
		require.NoError(t, err)
		require.NoError(t, db.TaskAssignToWorker(ctx, task1, worker))
		require.Equal(t, api.TaskStatusQueued, task1.Status)

		task2, err := db.FetchTask(ctx, authoredJob.Tasks[0].UUID)
		require.NoError(t, err)
		require.NoError(t, db.TaskAssignToWorker(ctx, task2, worker))
		require.Equal(t, api.TaskStatusQueued, task2.Status)
	}
	{ // Job / Other Worker.
		task, err := db.FetchTask(ctx, authoredJob.Tasks[2].UUID)
		require.NoError(t, err)
		require.NoError(t, db.TaskAssignToWorker(ctx, task, otherWorker))
		require.Equal(t, api.TaskStatusQueued, task.Status)
	}
	{ // Other Job / Worker.
		task, err := db.FetchTask(ctx, otherJob.Tasks[1].UUID)
		require.NoError(t, err)
		require.NoError(t, db.TaskAssignToWorker(ctx, task, worker))
		require.Equal(t, api.TaskStatusQueued, task.Status)
	}
	{ // Other Job / Other Worker.
		task, err := db.FetchTask(ctx, otherJob.Tasks[2].UUID)
		require.NoError(t, err)
		require.NoError(t, db.TaskAssignToWorker(ctx, task, otherWorker))
		require.Equal(t, api.TaskStatusQueued, task.Status)
	}

	{ // Test active tasks, should be none.
		tasks, err := db.FetchTasksOfWorkerInStatusOfJob(ctx, worker, api.TaskStatusActive, dbJob)
		require.NoError(t, err)
		require.Empty(t, tasks)
	}
	{ // Test queued tasks, should be the two assigned above.
		tasks, err := db.FetchTasksOfWorkerInStatusOfJob(ctx, worker, api.TaskStatusQueued, dbJob)
		require.NoError(t, err)
		require.Len(t, tasks, 2)
		assert.Equal(t, authoredJob.Tasks[0].UUID, tasks[0].UUID)
		assert.Equal(t, authoredJob.Tasks[1].UUID, tasks[1].UUID)
	}
	{ // Test queued tasks for a worker without any tasks, should be none.
		worker := createWorker(ctx, t, db, func(worker *Worker) {
			worker.UUID = "6534a1d4-f58e-4f2c-8925-4b2cd6caac22"
		})
		tasks, err := db.FetchTasksOfWorkerInStatusOfJob(ctx, worker, api.TaskStatusQueued, dbJob)
		require.NoError(t, err)
		require.Empty(t, tasks)
	}
}
func TestTaskTouchedByWorker(t *testing.T) {
ctx, close, db, _, authoredJob := jobTasksTestFixtures(t)
defer close()
task, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.True(t, task.LastTouchedAt.IsZero())
now := db.gormDB.NowFunc()
err = db.TaskTouchedByWorker(ctx, task)
assert.NoError(t, err)
require.NoError(t, err)
// Test the task instance as well as the database entry.
dbTask, err := db.FetchTask(ctx, task.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.WithinDuration(t, now, task.LastTouchedAt, time.Second)
assert.WithinDuration(t, now, dbTask.LastTouchedAt, time.Second)
}
@ -519,7 +614,7 @@ func TestAddWorkerToTaskFailedList(t *testing.T) {
defer close()
task, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID)
assert.NoError(t, err)
require.NoError(t, err)
worker1 := createWorker(ctx, t, db)
@ -528,30 +623,30 @@ func TestAddWorkerToTaskFailedList(t *testing.T) {
newWorker.ID = 0
newWorker.UUID = "89ed2b02-b51b-4cd4-b44a-4a1c8d01db85"
newWorker.Name = "Worker 2"
assert.NoError(t, db.SaveWorker(ctx, &newWorker))
require.NoError(t, db.SaveWorker(ctx, &newWorker))
worker2, err := db.FetchWorker(ctx, newWorker.UUID)
assert.NoError(t, err)
require.NoError(t, err)
// First failure should be registered just fine.
numFailed, err := db.AddWorkerToTaskFailedList(ctx, task, worker1)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 1, numFailed)
// Calling again should be a no-op and not cause any errors.
numFailed, err = db.AddWorkerToTaskFailedList(ctx, task, worker1)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 1, numFailed)
// Another worker should be able to fail this task as well.
numFailed, err = db.AddWorkerToTaskFailedList(ctx, task, worker2)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 2, numFailed)
// Deleting the task should also delete the failures.
assert.NoError(t, db.DeleteJob(ctx, authoredJob.JobID))
require.NoError(t, db.DeleteJob(ctx, authoredJob.JobID))
var num int64
tx := db.gormDB.Model(&TaskFailure{}).Count(&num)
assert.NoError(t, tx.Error)
require.NoError(t, tx.Error)
assert.Zero(t, num)
}
@ -569,9 +664,9 @@ func TestClearFailureListOfTask(t *testing.T) {
newWorker.ID = 0
newWorker.UUID = "89ed2b02-b51b-4cd4-b44a-4a1c8d01db85"
newWorker.Name = "Worker 2"
assert.NoError(t, db.SaveWorker(ctx, &newWorker))
require.NoError(t, db.SaveWorker(ctx, &newWorker))
worker2, err := db.FetchWorker(ctx, newWorker.UUID)
assert.NoError(t, err)
require.NoError(t, err)
// Store some failures for different tasks.
_, _ = db.AddWorkerToTaskFailedList(ctx, task1, worker1)
@ -579,10 +674,10 @@ func TestClearFailureListOfTask(t *testing.T) {
_, _ = db.AddWorkerToTaskFailedList(ctx, task2, worker1)
// Clearing should just update this one task.
assert.NoError(t, db.ClearFailureListOfTask(ctx, task1))
require.NoError(t, db.ClearFailureListOfTask(ctx, task1))
var failures = []TaskFailure{}
tx := db.gormDB.Model(&TaskFailure{}).Scan(&failures)
assert.NoError(t, tx.Error)
require.NoError(t, tx.Error)
if assert.Len(t, failures, 1) {
assert.Equal(t, task2.ID, failures[0].TaskID)
assert.Equal(t, worker1.ID, failures[0].WorkerID)
@ -615,10 +710,10 @@ func TestClearFailureListOfJob(t *testing.T) {
assert.Equal(t, 5, countTaskFailures(db))
// Clearing should be limited to the given job.
assert.NoError(t, db.ClearFailureListOfJob(ctx, dbJob1))
require.NoError(t, db.ClearFailureListOfJob(ctx, dbJob1))
var failures = []TaskFailure{}
tx := db.gormDB.Model(&TaskFailure{}).Scan(&failures)
assert.NoError(t, tx.Error)
require.NoError(t, tx.Error)
if assert.Len(t, failures, 2) {
assert.Equal(t, task2_1.ID, failures[0].TaskID)
assert.Equal(t, worker1.ID, failures[0].WorkerID)
@ -634,7 +729,7 @@ func TestFetchTaskFailureList(t *testing.T) {
// Test with non-existing task.
fakeTask := Task{Model: Model{ID: 327}}
failures, err := db.FetchTaskFailureList(ctx, &fakeTask)
assert.NoError(t, err)
require.NoError(t, err)
assert.Empty(t, failures)
task1_1, _ := db.FetchTask(ctx, authoredJob1.Tasks[1].UUID)
@ -642,7 +737,7 @@ func TestFetchTaskFailureList(t *testing.T) {
// Test without failures.
failures, err = db.FetchTaskFailureList(ctx, task1_1)
assert.NoError(t, err)
require.NoError(t, err)
assert.Empty(t, failures)
worker1 := createWorker(ctx, t, db)
@ -655,7 +750,7 @@ func TestFetchTaskFailureList(t *testing.T) {
// Fetch one task's failure list.
failures, err = db.FetchTaskFailureList(ctx, task1_1)
assert.NoError(t, err)
require.NoError(t, err)
if assert.Len(t, failures, 2) {
assert.Equal(t, worker1.UUID, failures[0].UUID)
@ -764,17 +859,12 @@ func createTestAuthoredJob(jobID string, tasks ...job_compilers.AuthoredTask) jo
func persistAuthoredJob(t *testing.T, ctx context.Context, db *DB, authoredJob job_compilers.AuthoredJob) *Job {
err := db.StoreAuthoredJob(ctx, authoredJob)
if err != nil {
t.Fatalf("error storing authored job in DB: %v", err)
}
require.NoError(t, err, "error storing authored job in DB")
dbJob, err := db.FetchJob(ctx, authoredJob.JobID)
if err != nil {
t.Fatalf("error fetching job from DB: %v", err)
}
if dbJob == nil {
t.Fatalf("nil job obtained from DB but with no error!")
}
require.NoError(t, err, "error fetching job from DB")
require.NotNil(t, dbJob, "nil job obtained from DB but with no error!")
return dbJob
}
@ -851,18 +941,11 @@ func createWorker(ctx context.Context, t *testing.T, db *DB, updaters ...func(*W
}
err := db.CreateWorker(ctx, &w)
if err != nil {
t.Fatalf("error creating worker: %v", err)
}
assert.NoError(t, err)
require.NoError(t, err, "error creating worker")
fetchedWorker, err := db.FetchWorker(ctx, w.UUID)
if err != nil {
t.Fatalf("error fetching worker: %v", err)
}
if fetchedWorker == nil {
t.Fatal("fetched worker is nil, but no error returned")
}
require.NoError(t, err, "error fetching worker")
require.NotNil(t, fetchedWorker, "fetched worker is nil, but no error returned")
return fetchedWorker
}
@ -874,14 +957,10 @@ func createWorkerFrom(ctx context.Context, t *testing.T, db *DB, worker Worker)
worker.Name += " (copy)"
err := db.SaveWorker(ctx, &worker)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
dbWorker, err := db.FetchWorker(ctx, worker.UUID)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
return dbWorker
}

View File

@ -6,6 +6,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSetLastRendered(t *testing.T) {
@ -15,7 +16,7 @@ func TestSetLastRendered(t *testing.T) {
authoredJob2 := authorTestJob("1295757b-e668-4c49-8b89-f73db8270e42", "just-a-job")
job2 := persistAuthoredJob(t, ctx, db, authoredJob2)
assert.NoError(t, db.SetLastRendered(ctx, job1))
require.NoError(t, db.SetLastRendered(ctx, job1))
{
entries := []LastRendered{}
db.gormDB.Model(&LastRendered{}).Scan(&entries)
@ -24,7 +25,7 @@ func TestSetLastRendered(t *testing.T) {
}
}
assert.NoError(t, db.SetLastRendered(ctx, job2))
require.NoError(t, db.SetLastRendered(ctx, job2))
{
entries := []LastRendered{}
db.gormDB.Model(&LastRendered{}).Scan(&entries)
@ -41,29 +42,26 @@ func TestGetLastRenderedJobUUID(t *testing.T) {
{
// Test without any renders.
lastUUID, err := db.GetLastRenderedJobUUID(ctx)
if assert.NoError(t, err, "absence of renders should not cause an error") {
require.NoError(t, err, "absence of renders should not cause an error")
assert.Empty(t, lastUUID)
}
}
{
// Test with first render.
assert.NoError(t, db.SetLastRendered(ctx, job1))
require.NoError(t, db.SetLastRendered(ctx, job1))
lastUUID, err := db.GetLastRenderedJobUUID(ctx)
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Equal(t, job1.UUID, lastUUID)
}
}
{
// Test with 2nd or subsequent render.
authoredJob2 := authorTestJob("1295757b-e668-4c49-8b89-f73db8270e42", "just-a-job")
job2 := persistAuthoredJob(t, ctx, db, authoredJob2)
assert.NoError(t, db.SetLastRendered(ctx, job2))
require.NoError(t, db.SetLastRendered(ctx, job2))
lastUUID, err := db.GetLastRenderedJobUUID(ctx)
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Equal(t, job2.UUID, lastUUID)
}
}
}

View File

@ -0,0 +1,15 @@
-- Drop tables that were in use in beta versions of Flamenco. These might exist
-- in developer databases, as well as databases of studios following the `main`
-- branch, such as Blender Studio.
--
-- WARNING: this migration simply drops the tables. Their data is erased, and
-- cannot be brought back by rolling the migration back.
--
-- +goose Up
-- NOTE(review): the membership (join) table is dropped before the clusters
-- table it presumably references via foreign key — keep this order so the
-- drops succeed when foreign-key enforcement is active. IF EXISTS makes each
-- statement a no-op on databases that never had these beta tables.
DROP INDEX IF EXISTS `idx_worker_clusters_uuid`;
DROP TABLE IF EXISTS `worker_cluster_membership`;
DROP TABLE IF EXISTS `worker_clusters`;

-- +goose Down
-- Do not recreate these tables, as no release of Flamenco ever used them.
-- Also their contents wouldn't be brought back anyway.

View File

@ -0,0 +1,496 @@
-- GORM automigration wasn't smart, and thus the database had more nullable
-- columns than necessary. This migration makes columns that should never be
-- NULL actually NOT NULL.
--
-- Since this migration recreates all tables in the database, this is now also
-- done in a way that makes the schema more compatible with sqlc (which is
-- mostly removing various quotes and backticks, and replacing char(N) with
-- varchar(N)). sqlc is the tool that'll replace GORM.
--
-- +goose Up
CREATE TABLE temp_last_rendereds (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
job_id integer DEFAULT 0 NOT NULL,
PRIMARY KEY (id),
CONSTRAINT fk_last_rendereds_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE
);
INSERT INTO temp_last_rendereds
SELECT id, created_at, updated_at, job_id FROM last_rendereds;
DROP TABLE last_rendereds;
ALTER TABLE temp_last_rendereds RENAME TO last_rendereds;
CREATE TABLE temp_task_dependencies (
task_id integer NOT NULL,
dependency_id integer NOT NULL,
PRIMARY KEY (task_id, dependency_id),
CONSTRAINT fk_task_dependencies_task FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE,
CONSTRAINT fk_task_dependencies_dependencies FOREIGN KEY (dependency_id) REFERENCES tasks(id) ON DELETE CASCADE
);
INSERT INTO temp_task_dependencies SELECT task_id, dependency_id FROM task_dependencies;
DROP TABLE task_dependencies;
ALTER TABLE temp_task_dependencies RENAME TO task_dependencies;
CREATE TABLE temp_task_failures (
created_at datetime NOT NULL,
task_id integer NOT NULL,
worker_id integer NOT NULL,
PRIMARY KEY (task_id, worker_id),
CONSTRAINT fk_task_failures_task FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE,
CONSTRAINT fk_task_failures_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE
);
INSERT INTO temp_task_failures SELECT created_at, task_id, worker_id FROM task_failures;
DROP TABLE task_failures;
ALTER TABLE temp_task_failures RENAME TO task_failures;
CREATE TABLE temp_worker_tag_membership (
worker_tag_id integer NOT NULL,
worker_id integer NOT NULL,
PRIMARY KEY (worker_tag_id, worker_id),
CONSTRAINT fk_worker_tag_membership_worker_tag FOREIGN KEY (worker_tag_id) REFERENCES worker_tags(id) ON DELETE CASCADE,
CONSTRAINT fk_worker_tag_membership_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE
);
INSERT INTO temp_worker_tag_membership SELECT worker_tag_id, worker_id FROM worker_tag_membership;
DROP TABLE worker_tag_membership;
ALTER TABLE temp_worker_tag_membership RENAME TO worker_tag_membership;
CREATE TABLE temp_worker_tags (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
uuid varchar(36) UNIQUE DEFAULT '' NOT NULL,
name varchar(64) UNIQUE DEFAULT '' NOT NULL,
description varchar(255) DEFAULT '' NOT NULL,
PRIMARY KEY (id)
);
INSERT INTO temp_worker_tags SELECT
id,
created_at,
updated_at,
uuid,
name,
description
FROM worker_tags;
DROP TABLE worker_tags;
ALTER TABLE temp_worker_tags RENAME TO worker_tags;
CREATE TABLE temp_jobs (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
uuid varchar(36) UNIQUE DEFAULT '' NOT NULL,
name varchar(64) DEFAULT '' NOT NULL,
job_type varchar(32) DEFAULT '' NOT NULL,
priority smallint DEFAULT 0 NOT NULL,
status varchar(32) DEFAULT '' NOT NULL,
activity varchar(255) DEFAULT '' NOT NULL,
settings jsonb NOT NULL,
metadata jsonb NOT NULL,
delete_requested_at datetime,
storage_shaman_checkout_id varchar(255) DEFAULT '' NOT NULL,
worker_tag_id integer,
PRIMARY KEY (id),
CONSTRAINT fk_jobs_worker_tag FOREIGN KEY (worker_tag_id) REFERENCES worker_tags(id) ON DELETE SET NULL
);
INSERT INTO temp_jobs SELECT
id,
created_at,
updated_at,
uuid,
name,
job_type,
priority,
status,
activity,
settings,
metadata,
delete_requested_at,
storage_shaman_checkout_id,
worker_tag_id
FROM jobs;
DROP TABLE jobs;
ALTER TABLE temp_jobs RENAME TO jobs;
CREATE TABLE temp_workers (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
uuid varchar(36) UNIQUE DEFAULT '' NOT NULL,
secret varchar(255) DEFAULT '' NOT NULL,
name varchar(64) DEFAULT '' NOT NULL,
address varchar(39) DEFAULT '' NOT NULL,
platform varchar(16) DEFAULT '' NOT NULL,
software varchar(32) DEFAULT '' NOT NULL,
status varchar(16) DEFAULT '' NOT NULL,
last_seen_at datetime,
status_requested varchar(16) DEFAULT '' NOT NULL,
lazy_status_request smallint DEFAULT false NOT NULL,
supported_task_types varchar(255) DEFAULT '' NOT NULL,
deleted_at datetime,
can_restart smallint DEFAULT false NOT NULL,
PRIMARY KEY (id)
);
UPDATE workers SET supported_task_types = '' where supported_task_types is NULL;
INSERT INTO temp_workers SELECT
id,
created_at,
updated_at,
uuid,
secret,
name,
address,
platform,
software,
status,
last_seen_at,
status_requested,
lazy_status_request,
supported_task_types,
deleted_at,
can_restart
FROM workers;
DROP TABLE workers;
ALTER TABLE temp_workers RENAME TO workers;
CREATE TABLE temp_job_blocks (
id integer NOT NULL,
created_at datetime NOT NULL,
job_id integer DEFAULT 0 NOT NULL,
worker_id integer DEFAULT 0 NOT NULL,
task_type text NOT NULL,
PRIMARY KEY (id),
CONSTRAINT fk_job_blocks_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE,
CONSTRAINT fk_job_blocks_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE
);
INSERT INTO temp_job_blocks SELECT
id,
created_at,
job_id,
worker_id,
task_type
FROM job_blocks;
DROP TABLE job_blocks;
ALTER TABLE temp_job_blocks RENAME TO job_blocks;
CREATE TABLE temp_sleep_schedules (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
worker_id integer UNIQUE DEFAULT 0 NOT NULL,
is_active numeric DEFAULT false NOT NULL,
days_of_week text DEFAULT '' NOT NULL,
start_time text DEFAULT '' NOT NULL,
end_time text DEFAULT '' NOT NULL,
next_check datetime,
PRIMARY KEY (id),
CONSTRAINT fk_sleep_schedules_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE
);
INSERT INTO temp_sleep_schedules SELECT
id,
created_at,
updated_at,
worker_id,
is_active,
days_of_week,
start_time,
end_time,
next_check
FROM sleep_schedules;
DROP TABLE sleep_schedules;
ALTER TABLE temp_sleep_schedules RENAME TO sleep_schedules;
-- Recreate tasks with NOT NULL constraints added. worker_id stays nullable
-- on purpose: its FK is ON DELETE SET NULL.
CREATE TABLE temp_tasks (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
uuid varchar(36) UNIQUE DEFAULT '' NOT NULL,
name varchar(64) DEFAULT '' NOT NULL,
type varchar(32) DEFAULT '' NOT NULL,
job_id integer DEFAULT 0 NOT NULL,
priority smallint DEFAULT 50 NOT NULL,
status varchar(16) DEFAULT '' NOT NULL,
worker_id integer,
last_touched_at datetime,
commands jsonb NOT NULL,
activity varchar(255) DEFAULT '' NOT NULL,
PRIMARY KEY (id),
CONSTRAINT fk_tasks_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE,
CONSTRAINT fk_tasks_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE SET NULL
);
-- Explicit column list keeps the copied values aligned with the new table.
INSERT INTO temp_tasks SELECT
id,
created_at,
updated_at,
uuid,
name,
type,
job_id,
priority,
status,
worker_id,
last_touched_at,
commands,
activity
FROM tasks;
DROP TABLE tasks;
ALTER TABLE temp_tasks RENAME TO tasks;
-- Recreate the indices on the new tables.
-- (Dropping a table also drops its indices, so every index has to be
-- re-created after the drop/rename swaps above.)
CREATE INDEX idx_worker_tags_uuid ON worker_tags(uuid);
CREATE INDEX idx_jobs_uuid ON jobs(uuid);
CREATE INDEX idx_workers_address ON workers(address);
CREATE INDEX idx_workers_last_seen_at ON workers(last_seen_at);
CREATE INDEX idx_workers_deleted_at ON workers(deleted_at);
CREATE INDEX idx_workers_uuid ON workers(uuid);
CREATE UNIQUE INDEX job_worker_tasktype ON job_blocks(job_id, worker_id, task_type);
CREATE INDEX idx_sleep_schedules_is_active ON sleep_schedules(is_active);
CREATE INDEX idx_sleep_schedules_worker_id ON sleep_schedules(worker_id);
CREATE INDEX idx_tasks_uuid ON tasks(uuid);
CREATE INDEX idx_tasks_last_touched_at ON tasks(last_touched_at);
-- +goose Down
-- Revert to the previous schema: the same create/copy/drop/rename dance as
-- the Up migration, but the recreated tables no longer declare NOT NULL.
CREATE TABLE `temp_last_rendereds` (
`id` integer,
`created_at` datetime,
`updated_at` datetime,
`job_id` integer DEFAULT 0,
PRIMARY KEY (`id`),
CONSTRAINT `fk_last_rendereds_job` FOREIGN KEY (`job_id`) REFERENCES `jobs`(`id`) ON DELETE CASCADE
);
INSERT INTO temp_last_rendereds SELECT
id,
created_at,
updated_at,
job_id
FROM last_rendereds;
DROP TABLE last_rendereds;
ALTER TABLE temp_last_rendereds RENAME TO `last_rendereds`;
-- Revert task_dependencies (pure join table, composite primary key).
CREATE TABLE `temp_task_dependencies` (
`task_id` integer,
`dependency_id` integer,
PRIMARY KEY (`task_id`, `dependency_id`),
CONSTRAINT `fk_task_dependencies_task` FOREIGN KEY (`task_id`) REFERENCES `tasks`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_task_dependencies_dependencies` FOREIGN KEY (`dependency_id`) REFERENCES `tasks`(`id`) ON DELETE CASCADE
);
INSERT INTO temp_task_dependencies SELECT task_id, dependency_id FROM task_dependencies;
DROP TABLE task_dependencies;
ALTER TABLE temp_task_dependencies RENAME TO `task_dependencies`;
-- Revert task_failures.
CREATE TABLE `temp_task_failures` (
`created_at` datetime,
`task_id` integer,
`worker_id` integer,
PRIMARY KEY (`task_id`, `worker_id`),
CONSTRAINT `fk_task_failures_task` FOREIGN KEY (`task_id`) REFERENCES `tasks`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_task_failures_worker` FOREIGN KEY (`worker_id`) REFERENCES `workers`(`id`) ON DELETE CASCADE
);
INSERT INTO temp_task_failures SELECT created_at, task_id, worker_id FROM task_failures;
DROP TABLE task_failures;
ALTER TABLE temp_task_failures RENAME TO `task_failures`;
-- Revert worker_tag_membership.
CREATE TABLE `temp_worker_tag_membership` (
`worker_tag_id` integer,
`worker_id` integer,
PRIMARY KEY (`worker_tag_id`, `worker_id`),
CONSTRAINT `fk_worker_tag_membership_worker_tag` FOREIGN KEY (`worker_tag_id`) REFERENCES `worker_tags`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_worker_tag_membership_worker` FOREIGN KEY (`worker_id`) REFERENCES `workers`(`id`) ON DELETE CASCADE
);
INSERT INTO temp_worker_tag_membership SELECT worker_tag_id, worker_id FROM worker_tag_membership;
DROP TABLE worker_tag_membership;
ALTER TABLE temp_worker_tag_membership RENAME TO `worker_tag_membership`;
-- Revert worker_tags. NOTE(review): the double-quoted "" defaults rely on
-- SQLite's legacy acceptance of double-quoted string literals; kept as-is to
-- match the pre-migration (gorm-generated) schema exactly.
CREATE TABLE "temp_worker_tags" (
`id` integer,
`created_at` datetime,
`updated_at` datetime,
`uuid` char(36) UNIQUE DEFAULT "",
`name` varchar(64) UNIQUE DEFAULT "",
`description` varchar(255) DEFAULT "",
PRIMARY KEY (`id`)
);
INSERT INTO temp_worker_tags SELECT
id,
created_at,
updated_at,
uuid,
name,
description
FROM worker_tags;
DROP TABLE worker_tags;
ALTER TABLE temp_worker_tags RENAME TO `worker_tags`;
-- Revert jobs.
CREATE TABLE "temp_jobs" (
`id` integer,
`created_at` datetime,
`updated_at` datetime,
`uuid` char(36) UNIQUE DEFAULT "",
`name` varchar(64) DEFAULT "",
`job_type` varchar(32) DEFAULT "",
`priority` smallint DEFAULT 0,
`status` varchar(32) DEFAULT "",
`activity` varchar(255) DEFAULT "",
`settings` jsonb,
`metadata` jsonb,
`delete_requested_at` datetime,
`storage_shaman_checkout_id` varchar(255) DEFAULT "",
`worker_tag_id` integer,
PRIMARY KEY(`id`),
CONSTRAINT `fk_jobs_worker_tag` FOREIGN KEY(`worker_tag_id`) REFERENCES `worker_tags`(`id`) ON DELETE SET NULL
);
INSERT INTO temp_jobs SELECT
id,
created_at,
updated_at,
uuid,
name,
job_type,
priority,
status,
activity,
settings,
metadata,
delete_requested_at,
storage_shaman_checkout_id,
worker_tag_id
FROM jobs;
DROP TABLE jobs;
ALTER TABLE temp_jobs RENAME TO `jobs`;
-- Revert workers. Note that this table puts deleted_at right after
-- updated_at (the pre-migration layout), unlike the Up table which has it
-- near the end; the explicit column list in the INSERT below copies each
-- named column into the right position.
CREATE TABLE "temp_workers" (
`id` integer,
`created_at` datetime,
`updated_at` datetime,
`deleted_at` datetime,
`uuid` char(36) UNIQUE DEFAULT "",
`secret` varchar(255) DEFAULT "",
`name` varchar(64) DEFAULT "",
`address` varchar(39) DEFAULT "",
`platform` varchar(16) DEFAULT "",
`software` varchar(32) DEFAULT "",
`status` varchar(16) DEFAULT "",
`last_seen_at` datetime,
`status_requested` varchar(16) DEFAULT "",
`lazy_status_request` smallint DEFAULT false,
`supported_task_types` varchar(255) DEFAULT "",
`can_restart` smallint DEFAULT false,
PRIMARY KEY (`id`)
);
INSERT INTO temp_workers SELECT
id,
created_at,
updated_at,
deleted_at,
uuid,
secret,
name,
address,
platform,
software,
status,
last_seen_at,
status_requested,
lazy_status_request,
supported_task_types,
can_restart
FROM workers;
DROP TABLE workers;
ALTER TABLE temp_workers RENAME TO `workers`;
-- Revert job_blocks.
CREATE TABLE "temp_job_blocks" (
`id` integer,
`created_at` datetime,
`job_id` integer DEFAULT 0,
`worker_id` integer DEFAULT 0,
`task_type` text,
PRIMARY KEY (`id`),
CONSTRAINT `fk_job_blocks_job` FOREIGN KEY (`job_id`) REFERENCES `jobs`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_job_blocks_worker` FOREIGN KEY (`worker_id`) REFERENCES `workers`(`id`) ON DELETE CASCADE
);
INSERT INTO temp_job_blocks SELECT
id,
created_at,
job_id,
worker_id,
task_type
FROM job_blocks;
DROP TABLE job_blocks;
ALTER TABLE temp_job_blocks RENAME TO `job_blocks`;
-- Revert sleep_schedules.
CREATE TABLE "temp_sleep_schedules" (
`id` integer,
`created_at` datetime,
`updated_at` datetime,
`worker_id` integer UNIQUE DEFAULT 0,
`is_active` numeric DEFAULT false,
`days_of_week` text DEFAULT "",
`start_time` text DEFAULT "",
`end_time` text DEFAULT "",
`next_check` datetime,
PRIMARY KEY (`id`),
CONSTRAINT `fk_sleep_schedules_worker` FOREIGN KEY (`worker_id`) REFERENCES `workers`(`id`) ON DELETE CASCADE
);
INSERT INTO temp_sleep_schedules SELECT
id,
created_at,
updated_at,
worker_id,
is_active,
days_of_week,
start_time,
end_time,
next_check
FROM sleep_schedules;
DROP TABLE sleep_schedules;
ALTER TABLE temp_sleep_schedules RENAME TO `sleep_schedules`;
-- Revert tasks.
CREATE TABLE "temp_tasks" (
`id` integer,
`created_at` datetime,
`updated_at` datetime,
`uuid` char(36) UNIQUE DEFAULT "",
`name` varchar(64) DEFAULT "",
`type` varchar(32) DEFAULT "",
`job_id` integer DEFAULT 0,
`priority` smallint DEFAULT 50,
`status` varchar(16) DEFAULT "",
`worker_id` integer,
`last_touched_at` datetime,
`commands` jsonb,
`activity` varchar(255) DEFAULT "",
PRIMARY KEY (`id`),
CONSTRAINT `fk_tasks_job` FOREIGN KEY (`job_id`) REFERENCES `jobs`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_tasks_worker` FOREIGN KEY (`worker_id`) REFERENCES `workers`(`id`) ON DELETE
SET NULL
);
INSERT INTO temp_tasks SELECT
id,
created_at,
updated_at,
uuid,
name,
type,
job_id,
priority,
status,
worker_id,
last_touched_at,
commands,
activity
FROM tasks;
DROP TABLE tasks;
ALTER TABLE temp_tasks RENAME TO `tasks`;
-- Recreate the indices that were dropped together with the old tables.
CREATE INDEX `idx_worker_tags_uuid` ON `worker_tags`(`uuid`);
CREATE INDEX `idx_jobs_uuid` ON `jobs`(`uuid`);
CREATE INDEX `idx_workers_address` ON `workers`(`address`);
CREATE INDEX `idx_workers_last_seen_at` ON `workers`(`last_seen_at`);
CREATE INDEX `idx_workers_deleted_at` ON `workers`(`deleted_at`);
CREATE INDEX `idx_workers_uuid` ON `workers`(`uuid`);
CREATE UNIQUE INDEX `job_worker_tasktype` ON `job_blocks`(`job_id`, `worker_id`, `task_type`);
CREATE INDEX `idx_sleep_schedules_is_active` ON `sleep_schedules`(`is_active`);
CREATE INDEX `idx_sleep_schedules_worker_id` ON `sleep_schedules`(`worker_id`);
CREATE INDEX `idx_tasks_uuid` ON `tasks`(`uuid`);
CREATE INDEX `idx_tasks_last_touched_at` ON `tasks`(`last_touched_at`);

View File

@ -15,7 +15,11 @@ itself. This means you can replace a table like this, without `ON DELETE`
effects running.
```sql
INSERT INTO `temp_table` SELECT * FROM `actual_table`;
INSERT INTO `temp_table` SELECT field1, field2, etc FROM `actual_table`;
DROP TABLE `actual_table`;
ALTER TABLE `temp_table` RENAME TO `actual_table`;
```
Note that the `SELECT` clause lists each field explicitly instead of using
`SELECT *`. This ensures the fields are selected in the order the new table
expects; without an explicit list, rows can be copied with their values in
the wrong columns, mangling the data.

View File

@ -0,0 +1,31 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.25.0
package sqlc
import (
"context"
"database/sql"
)
// DBTX is the subset of database operations shared by *sql.DB and *sql.Tx,
// so Queries can run against either a plain connection or a transaction.
type DBTX interface {
	ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
	PrepareContext(context.Context, string) (*sql.Stmt, error)
	QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
	QueryRowContext(context.Context, string, ...interface{}) *sql.Row
}

// New returns a Queries that executes against the given database or
// transaction handle.
func New(db DBTX) *Queries {
	return &Queries{db: db}
}

// Queries holds the handle that all generated query methods run on.
type Queries struct {
	db DBTX
}

// WithTx returns a copy of q whose queries execute inside the given
// transaction.
func (q *Queries) WithTx(tx *sql.Tx) *Queries {
	return &Queries{
		db: tx,
	}
}

View File

@ -0,0 +1,115 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.25.0
package sqlc
import (
"database/sql"
"encoding/json"
"time"
)
// Job is the sqlc model for the `jobs` table. Settings and Metadata are kept
// as raw JSON; decoding is left to the caller.
type Job struct {
	ID                      int64
	CreatedAt               time.Time
	UpdatedAt               sql.NullTime
	UUID                    string
	Name                    string
	JobType                 string
	Priority                int64
	Status                  string
	Activity                string
	Settings                json.RawMessage
	Metadata                json.RawMessage
	DeleteRequestedAt       sql.NullTime
	StorageShamanCheckoutID string
	WorkerTagID             sql.NullInt64
}
// JobBlock is the sqlc model for the `job_blocks` table, recording that a
// worker is blocked from a task type of a job.
type JobBlock struct {
	ID        int64
	CreatedAt time.Time
	JobID     int64
	WorkerID  int64
	TaskType  string
}
// LastRendered is the sqlc model for the `last_rendereds` table.
type LastRendered struct {
	ID        int64
	CreatedAt time.Time
	UpdatedAt sql.NullTime
	JobID     int64
}
// SleepSchedule is the sqlc model for the `sleep_schedules` table.
// NOTE(review): IsActive maps the column's `numeric` type to float64 even
// though it holds a boolean — confirm callers treat it as 0/1.
type SleepSchedule struct {
	ID         int64
	CreatedAt  time.Time
	UpdatedAt  sql.NullTime
	WorkerID   int64
	IsActive   float64
	DaysOfWeek string
	StartTime  string
	EndTime    string
	NextCheck  sql.NullTime
}
// Task is the sqlc model for the `tasks` table. WorkerID is nullable because
// the FK is declared ON DELETE SET NULL; Commands stays raw JSON.
type Task struct {
	ID            int64
	CreatedAt     time.Time
	UpdatedAt     sql.NullTime
	UUID          string
	Name          string
	Type          string
	JobID         int64
	Priority      int64
	Status        string
	WorkerID      sql.NullInt64
	LastTouchedAt sql.NullTime
	Commands      json.RawMessage
	Activity      string
}
// TaskDependency is the sqlc model for the `task_dependencies` join table.
type TaskDependency struct {
	TaskID       int64
	DependencyID int64
}

// TaskFailure is the sqlc model for the `task_failures` join table.
type TaskFailure struct {
	CreatedAt time.Time
	TaskID    int64
	WorkerID  int64
}
// Worker is the sqlc model for the `workers` table. The smallint boolean
// columns (lazy_status_request, can_restart) map to int64 here.
type Worker struct {
	ID                 int64
	CreatedAt          time.Time
	UpdatedAt          sql.NullTime
	UUID               string
	Secret             string
	Name               string
	Address            string
	Platform           string
	Software           string
	Status             string
	LastSeenAt         sql.NullTime
	StatusRequested    string
	LazyStatusRequest  int64
	SupportedTaskTypes string
	DeletedAt          sql.NullTime
	CanRestart         int64
}
// WorkerTag is the sqlc model for the `worker_tags` table.
type WorkerTag struct {
	ID          int64
	CreatedAt   time.Time
	UpdatedAt   sql.NullTime
	UUID        string
	Name        string
	Description string
}

// WorkerTagMembership is the sqlc model for the `worker_tag_membership` join
// table linking workers to their tags.
type WorkerTagMembership struct {
	WorkerTagID int64
	WorkerID    int64
}

View File

@ -0,0 +1,57 @@
-- Jobs / Tasks queries
--
-- Parameter syntax: plain `?` placeholders, `@name` / sqlc.arg('name') for
-- named parameters, and sqlc.slice('xs') for a variable-length IN list.
-- (Comments are kept out of the per-query bodies so sqlc's generated doc
-- comments are unaffected.)
-- name: CreateJob :exec
INSERT INTO jobs (
created_at,
uuid,
name,
job_type,
priority,
status,
activity,
settings,
metadata,
storage_shaman_checkout_id
)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? );
-- name: FetchJob :one
SELECT * FROM jobs
WHERE uuid = ? LIMIT 1;
-- name: DeleteJob :exec
DELETE FROM jobs WHERE uuid = ?;
-- name: RequestJobDeletion :exec
UPDATE jobs SET
updated_at = @now,
delete_requested_at = @now
WHERE id = sqlc.arg('job_id');
-- name: FetchJobUUIDsUpdatedBefore :many
SELECT uuid FROM jobs WHERE updated_at <= @updated_at_max;
-- name: RequestMassJobDeletion :exec
UPDATE jobs SET
updated_at = @now,
delete_requested_at = @now
WHERE uuid in (sqlc.slice('uuids'));
-- name: FetchJobsDeletionRequested :many
SELECT uuid FROM jobs
WHERE delete_requested_at is not NULL
ORDER BY delete_requested_at;
-- name: FetchJobsInStatus :many
SELECT * FROM jobs WHERE status IN (sqlc.slice('statuses'));
-- name: SaveJobStatus :exec
UPDATE jobs SET updated_at=@now, status=@status, activity=@activity WHERE id=@id;
-- name: SaveJobPriority :exec
UPDATE jobs SET updated_at=@now, priority=@priority WHERE id=@id;
-- name: SaveJobStorageInfo :exec
UPDATE jobs SET storage_shaman_checkout_id=@storage_shaman_checkout_id WHERE id=@id;

View File

@ -0,0 +1,300 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.25.0
// source: query_jobs.sql
package sqlc
import (
"context"
"database/sql"
"encoding/json"
"strings"
"time"
)
const createJob = `-- name: CreateJob :exec
INSERT INTO jobs (
created_at,
uuid,
name,
job_type,
priority,
status,
activity,
settings,
metadata,
storage_shaman_checkout_id
)
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )
`

// CreateJobParams carries the column values for CreateJob, in the order of
// the INSERT's placeholders.
type CreateJobParams struct {
	CreatedAt               time.Time
	UUID                    string
	Name                    string
	JobType                 string
	Priority                int64
	Status                  string
	Activity                string
	Settings                json.RawMessage
	Metadata                json.RawMessage
	StorageShamanCheckoutID string
}

// Jobs / Tasks queries
//
// CreateJob inserts a new row into the jobs table.
// NOTE(review): this file is generated by sqlc; regenerate rather than
// hand-editing.
func (q *Queries) CreateJob(ctx context.Context, arg CreateJobParams) error {
	_, err := q.db.ExecContext(ctx, createJob,
		arg.CreatedAt,
		arg.UUID,
		arg.Name,
		arg.JobType,
		arg.Priority,
		arg.Status,
		arg.Activity,
		arg.Settings,
		arg.Metadata,
		arg.StorageShamanCheckoutID,
	)
	return err
}
const deleteJob = `-- name: DeleteJob :exec
DELETE FROM jobs WHERE uuid = ?
`

// DeleteJob removes the job with the given UUID.
func (q *Queries) DeleteJob(ctx context.Context, uuid string) error {
	_, err := q.db.ExecContext(ctx, deleteJob, uuid)
	return err
}
const fetchJob = `-- name: FetchJob :one
SELECT id, created_at, updated_at, uuid, name, job_type, priority, status, activity, settings, metadata, delete_requested_at, storage_shaman_checkout_id, worker_tag_id FROM jobs
WHERE uuid = ? LIMIT 1
`

// FetchJob returns the single job with the given UUID, or sql.ErrNoRows via
// the row scan when it does not exist.
func (q *Queries) FetchJob(ctx context.Context, uuid string) (Job, error) {
	row := q.db.QueryRowContext(ctx, fetchJob, uuid)
	var i Job
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.UUID,
		&i.Name,
		&i.JobType,
		&i.Priority,
		&i.Status,
		&i.Activity,
		&i.Settings,
		&i.Metadata,
		&i.DeleteRequestedAt,
		&i.StorageShamanCheckoutID,
		&i.WorkerTagID,
	)
	return i, err
}
const fetchJobUUIDsUpdatedBefore = `-- name: FetchJobUUIDsUpdatedBefore :many
SELECT uuid FROM jobs WHERE updated_at <= ?1
`

// FetchJobUUIDsUpdatedBefore returns the UUIDs of all jobs last updated at
// or before the given timestamp.
func (q *Queries) FetchJobUUIDsUpdatedBefore(ctx context.Context, updatedAtMax sql.NullTime) ([]string, error) {
	rows, err := q.db.QueryContext(ctx, fetchJobUUIDsUpdatedBefore, updatedAtMax)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []string
	for rows.Next() {
		var uuid string
		if err := rows.Scan(&uuid); err != nil {
			return nil, err
		}
		items = append(items, uuid)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const fetchJobsDeletionRequested = `-- name: FetchJobsDeletionRequested :many
SELECT uuid FROM jobs
WHERE delete_requested_at is not NULL
ORDER BY delete_requested_at
`

// FetchJobsDeletionRequested returns the UUIDs of jobs marked for deletion,
// oldest request first.
func (q *Queries) FetchJobsDeletionRequested(ctx context.Context) ([]string, error) {
	rows, err := q.db.QueryContext(ctx, fetchJobsDeletionRequested)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []string
	for rows.Next() {
		var uuid string
		if err := rows.Scan(&uuid); err != nil {
			return nil, err
		}
		items = append(items, uuid)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const fetchJobsInStatus = `-- name: FetchJobsInStatus :many
SELECT id, created_at, updated_at, uuid, name, job_type, priority, status, activity, settings, metadata, delete_requested_at, storage_shaman_checkout_id, worker_tag_id FROM jobs WHERE status IN (/*SLICE:statuses*/?)
`

// FetchJobsInStatus returns all jobs whose status is one of the given set.
// The /*SLICE:statuses*/ marker is expanded at runtime into one '?' per
// element; an empty slice is replaced with NULL so the IN clause matches
// nothing.
func (q *Queries) FetchJobsInStatus(ctx context.Context, statuses []string) ([]Job, error) {
	query := fetchJobsInStatus
	var queryParams []interface{}
	if len(statuses) > 0 {
		for _, v := range statuses {
			queryParams = append(queryParams, v)
		}
		query = strings.Replace(query, "/*SLICE:statuses*/?", strings.Repeat(",?", len(statuses))[1:], 1)
	} else {
		query = strings.Replace(query, "/*SLICE:statuses*/?", "NULL", 1)
	}
	rows, err := q.db.QueryContext(ctx, query, queryParams...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Job
	for rows.Next() {
		var i Job
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.UUID,
			&i.Name,
			&i.JobType,
			&i.Priority,
			&i.Status,
			&i.Activity,
			&i.Settings,
			&i.Metadata,
			&i.DeleteRequestedAt,
			&i.StorageShamanCheckoutID,
			&i.WorkerTagID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const requestJobDeletion = `-- name: RequestJobDeletion :exec
UPDATE jobs SET
updated_at = ?1,
delete_requested_at = ?1
WHERE id = ?2
`

// RequestJobDeletionParams: Now is written to both updated_at and
// delete_requested_at.
type RequestJobDeletionParams struct {
	Now   sql.NullTime
	JobID int64
}

// RequestJobDeletion marks a single job (by numeric ID) for deletion.
func (q *Queries) RequestJobDeletion(ctx context.Context, arg RequestJobDeletionParams) error {
	_, err := q.db.ExecContext(ctx, requestJobDeletion, arg.Now, arg.JobID)
	return err
}

const requestMassJobDeletion = `-- name: RequestMassJobDeletion :exec
UPDATE jobs SET
updated_at = ?1,
delete_requested_at = ?1
WHERE uuid in (/*SLICE:uuids*/?)
`

type RequestMassJobDeletionParams struct {
	Now   sql.NullTime
	UUIDs []string
}

// RequestMassJobDeletion marks all jobs with the given UUIDs for deletion.
// The /*SLICE:uuids*/ marker expands to one '?' per UUID; an empty slice
// becomes NULL and matches no rows.
func (q *Queries) RequestMassJobDeletion(ctx context.Context, arg RequestMassJobDeletionParams) error {
	query := requestMassJobDeletion
	var queryParams []interface{}
	queryParams = append(queryParams, arg.Now)
	if len(arg.UUIDs) > 0 {
		for _, v := range arg.UUIDs {
			queryParams = append(queryParams, v)
		}
		query = strings.Replace(query, "/*SLICE:uuids*/?", strings.Repeat(",?", len(arg.UUIDs))[1:], 1)
	} else {
		query = strings.Replace(query, "/*SLICE:uuids*/?", "NULL", 1)
	}
	_, err := q.db.ExecContext(ctx, query, queryParams...)
	return err
}
const saveJobPriority = `-- name: SaveJobPriority :exec
UPDATE jobs SET updated_at=?1, priority=?2 WHERE id=?3
`

type SaveJobPriorityParams struct {
	Now      sql.NullTime
	Priority int64
	ID       int64
}

// SaveJobPriority updates a job's priority and its updated_at timestamp.
func (q *Queries) SaveJobPriority(ctx context.Context, arg SaveJobPriorityParams) error {
	_, err := q.db.ExecContext(ctx, saveJobPriority, arg.Now, arg.Priority, arg.ID)
	return err
}

const saveJobStatus = `-- name: SaveJobStatus :exec
UPDATE jobs SET updated_at=?1, status=?2, activity=?3 WHERE id=?4
`

type SaveJobStatusParams struct {
	Now      sql.NullTime
	Status   string
	Activity string
	ID       int64
}

// SaveJobStatus updates a job's status, activity, and updated_at timestamp.
func (q *Queries) SaveJobStatus(ctx context.Context, arg SaveJobStatusParams) error {
	_, err := q.db.ExecContext(ctx, saveJobStatus,
		arg.Now,
		arg.Status,
		arg.Activity,
		arg.ID,
	)
	return err
}

const saveJobStorageInfo = `-- name: SaveJobStorageInfo :exec
UPDATE jobs SET storage_shaman_checkout_id=?1 WHERE id=?2
`

type SaveJobStorageInfoParams struct {
	StorageShamanCheckoutID string
	ID                      int64
}

// SaveJobStorageInfo updates only the Shaman checkout ID; it deliberately
// leaves updated_at untouched.
func (q *Queries) SaveJobStorageInfo(ctx context.Context, arg SaveJobStorageInfoParams) error {
	_, err := q.db.ExecContext(ctx, saveJobStorageInfo, arg.StorageShamanCheckoutID, arg.ID)
	return err
}

View File

@ -0,0 +1,128 @@
-- Full database schema, matching the end state of the goose migrations.
-- NOTE(review): presumably this file is only read by sqlc for code
-- generation and is never executed against a live database — verify against
-- the sqlc configuration.
CREATE TABLE job_blocks (
id integer NOT NULL,
created_at datetime NOT NULL,
job_id integer DEFAULT 0 NOT NULL,
worker_id integer DEFAULT 0 NOT NULL,
task_type text NOT NULL,
PRIMARY KEY (id),
CONSTRAINT fk_job_blocks_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE,
CONSTRAINT fk_job_blocks_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE
);
CREATE TABLE jobs (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
uuid varchar(36) UNIQUE DEFAULT '' NOT NULL,
name varchar(64) DEFAULT '' NOT NULL,
job_type varchar(32) DEFAULT '' NOT NULL,
priority smallint DEFAULT 0 NOT NULL,
status varchar(32) DEFAULT '' NOT NULL,
activity varchar(255) DEFAULT '' NOT NULL,
settings jsonb NOT NULL,
metadata jsonb NOT NULL,
delete_requested_at datetime,
storage_shaman_checkout_id varchar(255) DEFAULT '' NOT NULL,
worker_tag_id integer,
PRIMARY KEY (id),
CONSTRAINT fk_jobs_worker_tag FOREIGN KEY (worker_tag_id) REFERENCES worker_tags(id) ON DELETE SET NULL
);
CREATE TABLE last_rendereds (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
job_id integer DEFAULT 0 NOT NULL,
PRIMARY KEY (id),
CONSTRAINT fk_last_rendereds_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE
);
CREATE TABLE sleep_schedules (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
worker_id integer UNIQUE DEFAULT 0 NOT NULL,
is_active numeric DEFAULT false NOT NULL,
days_of_week text DEFAULT '' NOT NULL,
start_time text DEFAULT '' NOT NULL,
end_time text DEFAULT '' NOT NULL,
next_check datetime,
PRIMARY KEY (id),
CONSTRAINT fk_sleep_schedules_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE
);
CREATE TABLE task_dependencies (
task_id integer NOT NULL,
dependency_id integer NOT NULL,
PRIMARY KEY (task_id, dependency_id),
CONSTRAINT fk_task_dependencies_task FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE,
CONSTRAINT fk_task_dependencies_dependencies FOREIGN KEY (dependency_id) REFERENCES tasks(id) ON DELETE CASCADE
);
CREATE TABLE task_failures (
created_at datetime NOT NULL,
task_id integer NOT NULL,
worker_id integer NOT NULL,
PRIMARY KEY (task_id, worker_id),
CONSTRAINT fk_task_failures_task FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE,
CONSTRAINT fk_task_failures_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE
);
CREATE TABLE tasks (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
uuid varchar(36) UNIQUE DEFAULT '' NOT NULL,
name varchar(64) DEFAULT '' NOT NULL,
type varchar(32) DEFAULT '' NOT NULL,
job_id integer DEFAULT 0 NOT NULL,
priority smallint DEFAULT 50 NOT NULL,
status varchar(16) DEFAULT '' NOT NULL,
worker_id integer,
last_touched_at datetime,
commands jsonb NOT NULL,
activity varchar(255) DEFAULT '' NOT NULL,
PRIMARY KEY (id),
CONSTRAINT fk_tasks_job FOREIGN KEY (job_id) REFERENCES jobs(id) ON DELETE CASCADE,
CONSTRAINT fk_tasks_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE SET NULL
);
CREATE TABLE worker_tag_membership (
worker_tag_id integer NOT NULL,
worker_id integer NOT NULL,
PRIMARY KEY (worker_tag_id, worker_id),
CONSTRAINT fk_worker_tag_membership_worker_tag FOREIGN KEY (worker_tag_id) REFERENCES worker_tags(id) ON DELETE CASCADE,
CONSTRAINT fk_worker_tag_membership_worker FOREIGN KEY (worker_id) REFERENCES workers(id) ON DELETE CASCADE
);
CREATE TABLE worker_tags (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
uuid varchar(36) UNIQUE DEFAULT '' NOT NULL,
name varchar(64) UNIQUE DEFAULT '' NOT NULL,
description varchar(255) DEFAULT '' NOT NULL,
PRIMARY KEY (id)
);
CREATE TABLE workers (
id integer NOT NULL,
created_at datetime NOT NULL,
updated_at datetime,
uuid varchar(36) UNIQUE DEFAULT '' NOT NULL,
secret varchar(255) DEFAULT '' NOT NULL,
name varchar(64) DEFAULT '' NOT NULL,
address varchar(39) DEFAULT '' NOT NULL,
platform varchar(16) DEFAULT '' NOT NULL,
software varchar(32) DEFAULT '' NOT NULL,
status varchar(16) DEFAULT '' NOT NULL,
last_seen_at datetime,
status_requested varchar(16) DEFAULT '' NOT NULL,
lazy_status_request smallint DEFAULT false NOT NULL,
supported_task_types varchar(255) DEFAULT '' NOT NULL,
deleted_at datetime,
can_restart smallint DEFAULT false NOT NULL,
PRIMARY KEY (id)
);
-- Indices, matching those created by the migrations.
CREATE INDEX idx_jobs_uuid ON jobs(uuid);
CREATE INDEX idx_sleep_schedules_is_active ON sleep_schedules(is_active);
CREATE INDEX idx_sleep_schedules_worker_id ON sleep_schedules(worker_id);
CREATE INDEX idx_tasks_last_touched_at ON tasks(last_touched_at);
CREATE INDEX idx_tasks_uuid ON tasks(uuid);
CREATE INDEX idx_worker_tags_uuid ON worker_tags(uuid);
CREATE INDEX idx_workers_address ON workers(address);
CREATE INDEX idx_workers_deleted_at ON workers(deleted_at);
CREATE INDEX idx_workers_last_seen_at ON workers(last_seen_at);
CREATE INDEX idx_workers_uuid ON workers(uuid);
CREATE UNIQUE INDEX job_worker_tasktype ON job_blocks(job_id, worker_id, task_type);

View File

@ -26,7 +26,7 @@ func TestNoTasks(t *testing.T) {
task, err := db.ScheduleTask(ctx, &w)
assert.Nil(t, task)
assert.NoError(t, err)
require.NoError(t, err)
}
func TestOneJobOneTask(t *testing.T) {
@ -40,7 +40,7 @@ func TestOneJobOneTask(t *testing.T) {
job := constructTestJob(ctx, t, db, atj)
task, err := db.ScheduleTask(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
// Check the returned task.
if task == nil {
@ -55,7 +55,7 @@ func TestOneJobOneTask(t *testing.T) {
// Check the task in the database.
now := db.gormDB.NowFunc()
dbTask, err := db.FetchTask(context.Background(), authTask.UUID)
assert.NoError(t, err)
require.NoError(t, err)
if dbTask == nil {
t.Fatal("task cannot be fetched from database")
}
@ -84,7 +84,7 @@ func TestOneJobThreeTasksByPrio(t *testing.T) {
job := constructTestJob(ctx, t, db, atj)
task, err := db.ScheduleTask(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
if task == nil {
t.Fatal("task is nil")
}
@ -115,7 +115,7 @@ func TestOneJobThreeTasksByDependencies(t *testing.T) {
job := constructTestJob(ctx, t, db, atj)
task, err := db.ScheduleTask(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
if task == nil {
t.Fatal("task is nil")
}
@ -155,7 +155,7 @@ func TestTwoJobsThreeTasks(t *testing.T) {
job2 := constructTestJob(ctx, t, db, atj2)
task, err := db.ScheduleTask(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
if task == nil {
t.Fatal("task is nil")
}
@ -183,7 +183,7 @@ func TestSomeButNotAllDependenciesCompleted(t *testing.T) {
w := linuxWorker(t, db)
task, err := db.ScheduleTask(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
if task != nil {
t.Fatalf("there should not be any task assigned, but received %q", task.Name)
}
@ -210,14 +210,14 @@ func TestAlreadyAssigned(t *testing.T) {
// This should make it get returned by the scheduler, even when there is
// another, higher-prio task to be done.
dbTask3, err := db.FetchTask(ctx, att3.UUID)
assert.NoError(t, err)
require.NoError(t, err)
dbTask3.WorkerID = &w.ID
dbTask3.Status = api.TaskStatusActive
err = db.SaveTask(ctx, dbTask3)
assert.NoError(t, err)
require.NoError(t, err)
task, err := db.ScheduleTask(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
if task == nil {
t.Fatal("task is nil")
}
@ -245,14 +245,14 @@ func TestAssignedToOtherWorker(t *testing.T) {
// Assign the high-prio task to the other worker. Because the task is queued,
// it shouldn't matter which worker it's assigned to.
dbTask2, err := db.FetchTask(ctx, att2.UUID)
assert.NoError(t, err)
require.NoError(t, err)
dbTask2.WorkerID = &w2.ID
dbTask2.Status = api.TaskStatusQueued
err = db.SaveTask(ctx, dbTask2)
assert.NoError(t, err)
require.NoError(t, err)
task, err := db.ScheduleTask(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
if task == nil {
t.Fatal("task is nil")
}
@ -277,14 +277,14 @@ func TestPreviouslyFailed(t *testing.T) {
// Mimick that this worker already failed the first task.
tasks, err := db.FetchTasksOfJob(ctx, job)
assert.NoError(t, err)
require.NoError(t, err)
numFailed, err := db.AddWorkerToTaskFailedList(ctx, tasks[0], &w)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, 1, numFailed)
// This should assign the 2nd task.
task, err := db.ScheduleTask(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
if task == nil {
t.Fatal("task is nil")
}
@ -391,11 +391,11 @@ func TestBlocklisted(t *testing.T) {
// Mimick that this worker was already blocked for 'blender' tasks of this job.
err := db.AddWorkerToJobBlocklist(ctx, job, &w, "blender")
assert.NoError(t, err)
require.NoError(t, err)
// This should assign the 2nd task.
task, err := db.ScheduleTask(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
if task == nil {
t.Fatal("task is nil")
}
@ -410,21 +410,15 @@ func constructTestJob(
ctx context.Context, t *testing.T, db *DB, authoredJob job_compilers.AuthoredJob,
) *Job {
err := db.StoreAuthoredJob(ctx, authoredJob)
if err != nil {
t.Fatalf("storing authored job: %v", err)
}
require.NoError(t, err, "storing authored job")
dbJob, err := db.FetchJob(ctx, authoredJob.JobID)
if err != nil {
t.Fatalf("fetching authored job: %v", err)
}
require.NoError(t, err, "fetching authored job")
// Queue the job.
dbJob.Status = api.JobStatusQueued
err = db.SaveJobStatus(ctx, dbJob)
if err != nil {
t.Fatalf("queueing job: %v", err)
}
require.NoError(t, err, "queueing job")
return dbJob
}
@ -457,16 +451,11 @@ func authorTestTask(name, taskType string, dependencies ...*job_compilers.Author
func setTaskStatus(t *testing.T, db *DB, taskUUID string, status api.TaskStatus) {
ctx := context.Background()
task, err := db.FetchTask(ctx, taskUUID)
if err != nil {
t.Fatal(err)
}
require.NoError(t, err)
task.Status = status
err = db.SaveTask(ctx, task)
if err != nil {
t.Fatal(err)
}
require.NoError(t, db.SaveTask(ctx, task))
}
func linuxWorker(t *testing.T, db *DB, updaters ...func(worker *Worker)) Worker {
@ -483,10 +472,7 @@ func linuxWorker(t *testing.T, db *DB, updaters ...func(worker *Worker)) Worker
}
err := db.gormDB.Save(&w).Error
if err != nil {
t.Logf("cannot save Linux worker: %v", err)
t.FailNow()
}
require.NoError(t, err, "cannot save Linux worker")
return w
}
@ -501,10 +487,6 @@ func windowsWorker(t *testing.T, db *DB) Worker {
}
err := db.gormDB.Save(&w).Error
if err != nil {
t.Logf("cannot save Windows worker: %v", err)
t.FailNow()
}
require.NoError(t, err, "cannot save Windows worker")
return w
}

View File

@ -7,6 +7,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var emptyToD = TimeOfDay{timeOfDayNoValue, timeOfDayNoValue}
@ -60,53 +61,56 @@ func TestOnDate(t *testing.T) {
}
func TestValue(t *testing.T) {
// Test zero -> "00:00"
{ // Test zero -> "00:00"
tod := TimeOfDay{}
if value, err := tod.Value(); assert.NoError(t, err) {
value, err := tod.Value()
require.NoError(t, err)
assert.Equal(t, "00:00", value)
}
// Test 22:47 -> "22:47"
tod = TimeOfDay{22, 47}
if value, err := tod.Value(); assert.NoError(t, err) {
{ // Test 22:47 -> "22:47"
tod := TimeOfDay{22, 47}
value, err := tod.Value()
require.NoError(t, err)
assert.Equal(t, "22:47", value)
}
// Test empty -> ""
tod = emptyToD
if value, err := tod.Value(); assert.NoError(t, err) {
{ // Test empty -> ""
tod := emptyToD
value, err := tod.Value()
require.NoError(t, err)
assert.Equal(t, "", value)
}
}
func TestScan(t *testing.T) {
// Test zero -> empty
{ // Test zero -> empty
tod := TimeOfDay{}
if assert.NoError(t, tod.Scan("")) {
require.NoError(t, tod.Scan(""))
assert.Equal(t, emptyToD, tod)
}
// Test 22:47 -> empty
tod = TimeOfDay{22, 47}
if assert.NoError(t, tod.Scan("")) {
{ // Test 22:47 -> empty
tod := TimeOfDay{22, 47}
require.NoError(t, tod.Scan(""))
assert.Equal(t, emptyToD, tod)
}
// Test 22:47 -> 12:34
tod = TimeOfDay{22, 47}
if assert.NoError(t, tod.Scan("12:34")) {
{ // Test 22:47 -> 12:34
tod := TimeOfDay{22, 47}
require.NoError(t, tod.Scan("12:34"))
assert.Equal(t, TimeOfDay{12, 34}, tod)
}
// Test empty -> empty
tod = emptyToD
if assert.NoError(t, tod.Scan("")) {
{ // Test empty -> empty
tod := emptyToD
require.NoError(t, tod.Scan(""))
assert.Equal(t, emptyToD, tod)
}
// Test empty -> 12:34
tod = emptyToD
if assert.NoError(t, tod.Scan("12:34")) {
{ // Test empty -> 12:34
tod := emptyToD
require.NoError(t, tod.Scan("12:34"))
assert.Equal(t, TimeOfDay{12, 34}, tod)
}
}

View File

@ -5,6 +5,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/pkg/api"
)
@ -15,9 +16,7 @@ func TestFetchTimedOutTasks(t *testing.T) {
defer close()
tasks, err := db.FetchTasksOfJob(ctx, job)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
now := db.gormDB.NowFunc()
deadline := now.Add(-5 * time.Minute)
@ -25,23 +24,23 @@ func TestFetchTimedOutTasks(t *testing.T) {
// Mark the task as last touched before the deadline, i.e. old enough for a timeout.
task := tasks[0]
task.LastTouchedAt = deadline.Add(-1 * time.Minute)
assert.NoError(t, db.SaveTask(ctx, task))
require.NoError(t, db.SaveTask(ctx, task))
w := createWorker(ctx, t, db)
assert.NoError(t, db.TaskAssignToWorker(ctx, task, w))
require.NoError(t, db.TaskAssignToWorker(ctx, task, w))
// The task should still not be returned, as it's not in 'active' state.
timedout, err := db.FetchTimedOutTasks(ctx, deadline)
assert.NoError(t, err)
require.NoError(t, err)
assert.Empty(t, timedout)
// Mark as Active:
task.Status = api.TaskStatusActive
assert.NoError(t, db.SaveTask(ctx, task))
require.NoError(t, db.SaveTask(ctx, task))
// Now it should time out:
timedout, err = db.FetchTimedOutTasks(ctx, deadline)
assert.NoError(t, err)
require.NoError(t, err)
if assert.Len(t, timedout, 1) {
// Other fields will be different, like the 'UpdatedAt' field -- this just
// tests that the expected task is returned.
@ -92,15 +91,13 @@ func TestFetchTimedOutWorkers(t *testing.T) {
workers := []*Worker{&worker0, &worker1, &worker2, &worker3, &worker4}
for _, worker := range workers {
err := db.CreateWorker(ctx, worker)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
}
timedout, err := db.FetchTimedOutWorkers(ctx, timeoutDeadline)
if assert.NoError(t, err) && assert.Len(t, timedout, 3) {
require.NoError(t, err)
require.Len(t, timedout, 3)
assert.Equal(t, worker1.UUID, timedout[0].UUID)
assert.Equal(t, worker2.UUID, timedout[1].UUID)
assert.Equal(t, worker3.UUID, timedout[2].UUID)
}
}

View File

@ -26,18 +26,16 @@ func TestFetchWorkerSleepSchedule(t *testing.T) {
SupportedTaskTypes: "blender,ffmpeg,file-management",
}
err := db.CreateWorker(ctx, &linuxWorker)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
// Not an existing Worker.
fetched, err := db.FetchWorkerSleepSchedule(ctx, "2cf6153a-3d4e-49f4-a5c0-1c9fc176e155")
assert.NoError(t, err, "non-existent worker should not cause an error")
require.NoError(t, err, "non-existent worker should not cause an error")
assert.Nil(t, fetched)
// No sleep schedule.
fetched, err = db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID)
assert.NoError(t, err, "non-existent schedule should not cause an error")
require.NoError(t, err, "non-existent schedule should not cause an error")
assert.Nil(t, fetched)
// Create a sleep schedule.
@ -51,12 +49,10 @@ func TestFetchWorkerSleepSchedule(t *testing.T) {
EndTime: TimeOfDay{9, 0},
}
tx := db.gormDB.Create(&created)
if !assert.NoError(t, tx.Error) {
t.FailNow()
}
require.NoError(t, tx.Error)
fetched, err = db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assertEqualSleepSchedule(t, linuxWorker.ID, created, *fetched)
}
@ -74,9 +70,7 @@ func TestFetchSleepScheduleWorker(t *testing.T) {
SupportedTaskTypes: "blender,ffmpeg,file-management",
}
err := db.CreateWorker(ctx, &linuxWorker)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
// Create a sleep schedule.
created := SleepSchedule{
@ -89,16 +83,14 @@ func TestFetchSleepScheduleWorker(t *testing.T) {
EndTime: TimeOfDay{9, 0},
}
tx := db.gormDB.Create(&created)
if !assert.NoError(t, tx.Error) {
t.FailNow()
}
require.NoError(t, tx.Error)
dbSchedule, err := db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Nil(t, dbSchedule.Worker, "worker should be nil when fetching schedule")
err = db.FetchSleepScheduleWorker(ctx, dbSchedule)
assert.NoError(t, err)
require.NoError(t, err)
if assert.NotNil(t, dbSchedule.Worker) {
// Compare a few fields. If these are good, the correct worker has been fetched.
assert.Equal(t, linuxWorker.ID, dbSchedule.Worker.ID)
@ -125,9 +117,7 @@ func TestSetWorkerSleepSchedule(t *testing.T) {
SupportedTaskTypes: "blender,ffmpeg,file-management",
}
err := db.CreateWorker(ctx, &linuxWorker)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
schedule := SleepSchedule{
WorkerID: linuxWorker.ID,
@ -145,13 +135,9 @@ func TestSetWorkerSleepSchedule(t *testing.T) {
// Create the sleep schedule.
err = db.SetWorkerSleepSchedule(ctx, linuxWorker.UUID, &schedule)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
fetched, err := db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
assertEqualSleepSchedule(t, linuxWorker.ID, schedule, *fetched)
// Overwrite the schedule with one that already has a database ID.
@ -161,13 +147,9 @@ func TestSetWorkerSleepSchedule(t *testing.T) {
newSchedule.StartTime = TimeOfDay{2, 0}
newSchedule.EndTime = TimeOfDay{6, 0}
err = db.SetWorkerSleepSchedule(ctx, linuxWorker.UUID, &newSchedule)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
fetched, err = db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
assertEqualSleepSchedule(t, linuxWorker.ID, newSchedule, *fetched)
// Overwrite the schedule with a freshly constructed one.
@ -181,13 +163,9 @@ func TestSetWorkerSleepSchedule(t *testing.T) {
EndTime: TimeOfDay{15, 0},
}
err = db.SetWorkerSleepSchedule(ctx, linuxWorker.UUID, &newerSchedule)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
fetched, err = db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
assertEqualSleepSchedule(t, linuxWorker.ID, newerSchedule, *fetched)
// Clear the sleep schedule.
@ -201,13 +179,9 @@ func TestSetWorkerSleepSchedule(t *testing.T) {
EndTime: emptyToD,
}
err = db.SetWorkerSleepSchedule(ctx, linuxWorker.UUID, &emptySchedule)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
fetched, err = db.FetchWorkerSleepSchedule(ctx, linuxWorker.UUID)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
assertEqualSleepSchedule(t, linuxWorker.ID, emptySchedule, *fetched)
}
@ -236,14 +210,10 @@ func TestSetWorkerSleepScheduleNextCheck(t *testing.T) {
schedule.NextCheck = future
err := db.SetWorkerSleepScheduleNextCheck(ctx, &schedule)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
fetched, err := db.FetchWorkerSleepSchedule(ctx, schedule.Worker.UUID)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
assertEqualSleepSchedule(t, schedule.Worker.ID, schedule, *fetched)
}
@ -322,12 +292,13 @@ func TestFetchSleepSchedulesToCheck(t *testing.T) {
}
toCheck, err := db.FetchSleepSchedulesToCheck(ctx)
if assert.NoError(t, err) && assert.Len(t, toCheck, 2) {
require.NoError(t, err)
require.Len(t, toCheck, 2)
assertEqualSleepSchedule(t, schedule0.Worker.ID, schedule0, *toCheck[0])
assert.Nil(t, toCheck[0].Worker, "the Worker should NOT be fetched")
assertEqualSleepSchedule(t, schedule2.Worker.ID, schedule1, *toCheck[1])
assert.Nil(t, toCheck[1].Worker, "the Worker should NOT be fetched")
}
}
func assertEqualSleepSchedule(t *testing.T, workerID uint, expect, actual SleepSchedule) {

View File

@ -62,6 +62,15 @@ func (db *DB) SaveWorkerTag(ctx context.Context, tag *WorkerTag) error {
// DeleteWorkerTag deletes the given tag, after unassigning all workers from it.
func (db *DB) DeleteWorkerTag(ctx context.Context, uuid string) error {
// As a safety measure, refuse to delete unless foreign key constraints are active.
fkEnabled, err := db.areForeignKeysEnabled()
if err != nil {
return fmt.Errorf("checking whether foreign keys are enabled: %w", err)
}
if !fkEnabled {
return ErrDeletingWithoutFK
}
tx := db.gormDB.WithContext(ctx).
Where("uuid = ?", uuid).
Delete(&WorkerTag{})

View File

@ -3,6 +3,7 @@ package persistence
// SPDX-License-Identifier: GPL-3.0-or-later
import (
"slices"
"testing"
"time"
@ -50,17 +51,7 @@ func TestFetchDeleteTags(t *testing.T) {
}
require.NoError(t, f.db.CreateWorkerTag(f.ctx, &secondTag))
allTags, err := f.db.FetchWorkerTags(f.ctx)
require.NoError(t, err)
require.Len(t, allTags, 2)
var allTagIDs [2]string
for idx := range allTags {
allTagIDs[idx] = allTags[idx].UUID
}
assert.Contains(t, allTagIDs, f.tag.UUID)
assert.Contains(t, allTagIDs, secondTag.UUID)
assertTagsMatch(t, f, f.tag.UUID, secondTag.UUID)
has, err = f.db.HasWorkerTags(f.ctx)
require.NoError(t, err)
@ -68,11 +59,7 @@ func TestFetchDeleteTags(t *testing.T) {
// Test deleting the 2nd tag.
require.NoError(t, f.db.DeleteWorkerTag(f.ctx, secondTag.UUID))
allTags, err = f.db.FetchWorkerTags(f.ctx)
require.NoError(t, err)
require.Len(t, allTags, 1)
assert.Equal(t, f.tag.UUID, allTags[0].UUID)
assertTagsMatch(t, f, f.tag.UUID)
// Test deleting the 1st tag.
require.NoError(t, f.db.DeleteWorkerTag(f.ctx, f.tag.UUID))
@ -81,6 +68,31 @@ func TestFetchDeleteTags(t *testing.T) {
assert.False(t, has, "expecting HasWorkerTags to return false")
}
// TestDeleteTagsWithoutFK verifies that tag deletion is refused when SQLite
// foreign key enforcement is switched off, and that no tags are lost.
func TestDeleteTagsWithoutFK(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	// The fixture already created one tag; confirm it is visible.
	has, err := f.db.HasWorkerTags(f.ctx)
	require.NoError(t, err)
	assert.True(t, has, "expecting HasWorkerTags to return true")

	// Add a second tag so there is more than one row at stake.
	dutchTag := WorkerTag{
		UUID:        uuid.New(),
		Name:        "arbeiderskaartje",
		Description: "Worker tag in Dutch",
	}
	require.NoError(t, f.db.CreateWorkerTag(f.ctx, &dutchTag))

	// With foreign key constraints disabled, deletion must be refused.
	require.NoError(t, f.db.pragmaForeignKeys(false))
	err = f.db.DeleteWorkerTag(f.ctx, f.tag.UUID)
	require.ErrorIs(t, err, ErrDeletingWithoutFK)

	// Both tags should still be in the database.
	assertTagsMatch(t, f, f.tag.UUID, dutchTag.UUID)
}
func TestAssignUnassignWorkerTags(t *testing.T) {
f := workerTestFixtures(t, 1*time.Second)
defer f.done()
@ -163,3 +175,19 @@ func TestDeleteWorkerTagWithWorkersAssigned(t *testing.T) {
require.NoError(t, err)
assert.Empty(t, w.Tags)
}
// assertTagsMatch fails the test unless the worker tags currently stored in
// the database are exactly those with the given UUIDs, in any order.
func assertTagsMatch(t *testing.T, f WorkerTestFixture, expectUUIDs ...string) {
	allTags, err := f.db.FetchWorkerTags(f.ctx)
	require.NoError(t, err)
	require.Len(t, allTags, len(expectUUIDs))

	// Collect the UUIDs of the fetched tags. Keep the nil-when-empty slice so
	// the comparison below behaves the same when no UUIDs are expected.
	var actualUUIDs []string
	for idx := range allTags {
		actualUUIDs = append(actualUUIDs, allTags[idx].UUID)
	}

	// Sort both sides so the comparison is order-independent.
	slices.Sort(expectUUIDs)
	slices.Sort(actualUUIDs)

	// testify's convention is Equal(t, expected, actual); the original had
	// these swapped, which mislabels the values in failure messages.
	assert.Equal(t, expectUUIDs, actualUUIDs)
}

View File

@ -8,6 +8,7 @@ import (
"strings"
"time"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"projects.blender.org/studio/flamenco/pkg/api"
)
@ -87,6 +88,15 @@ func (db *DB) FetchWorker(ctx context.Context, uuid string) (*Worker, error) {
}
func (db *DB) DeleteWorker(ctx context.Context, uuid string) error {
// As a safety measure, refuse to delete unless foreign key constraints are active.
fkEnabled, err := db.areForeignKeysEnabled()
if err != nil {
return fmt.Errorf("checking whether foreign keys are enabled: %w", err)
}
if !fkEnabled {
return ErrDeletingWithoutFK
}
tx := db.gormDB.WithContext(ctx).
Where("uuid = ?", uuid).
Delete(&Worker{})
@ -176,3 +186,33 @@ func (db *DB) WorkerSeen(ctx context.Context, w *Worker) error {
}
return nil
}
// WorkerStatusCount is a mapping from worker status to the number of workers in that status.
type WorkerStatusCount map[api.WorkerStatus]int
// SummarizeWorkerStatuses returns, for each worker status present in the
// database, how many workers are currently in that status.
func (db *DB) SummarizeWorkerStatuses(ctx context.Context) (WorkerStatusCount, error) {
	log.Ctx(ctx).Debug().Msg("database: summarizing worker statuses")

	// Intermediate row type so GORM can scan the grouped query directly.
	type queryResult struct {
		Status      api.WorkerStatus
		StatusCount int
	}
	rows := []*queryResult{}

	tx := db.gormDB.WithContext(ctx).Model(&Worker{}).
		Select("status as Status", "count(id) as StatusCount").
		Group("status").
		Scan(&rows)
	if tx.Error != nil {
		return nil, workerError(tx.Error, "summarizing worker statuses")
	}

	// Turn the scanned rows into a status → count map for the caller.
	counts := make(WorkerStatusCount, len(rows))
	for _, row := range rows {
		counts[row.Status] = row.StatusCount
	}
	return counts, nil
}

View File

@ -4,6 +4,7 @@ package persistence
// SPDX-License-Identifier: GPL-3.0-or-later
import (
"context"
"testing"
"time"
@ -35,10 +36,10 @@ func TestCreateFetchWorker(t *testing.T) {
}
err = db.CreateWorker(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
fetchedWorker, err = db.FetchWorker(ctx, w.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.NotNil(t, fetchedWorker)
// Test contents of fetched job
@ -68,16 +69,13 @@ func TestFetchWorkerTask(t *testing.T) {
}
err := db.CreateWorker(ctx, &w)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
{ // Test without any task assigned.
task, err := db.FetchWorkerTask(ctx, &w)
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Nil(t, task)
}
}
// Create a job with tasks.
authTask1 := authorTestTask("the task", "blender")
@ -87,53 +85,52 @@ func TestFetchWorkerTask(t *testing.T) {
constructTestJob(ctx, t, db, atj)
assignedTask, err := db.ScheduleTask(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
{ // Assigned task should be returned.
foundTask, err := db.FetchWorkerTask(ctx, &w)
if assert.NoError(t, err) && assert.NotNil(t, foundTask) {
require.NoError(t, err)
require.NotNil(t, foundTask)
assert.Equal(t, assignedTask.UUID, foundTask.UUID)
assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well")
}
}
// Set the task to 'completed'.
assignedTask.Status = api.TaskStatusCompleted
assert.NoError(t, db.SaveTaskStatus(ctx, assignedTask))
require.NoError(t, db.SaveTaskStatus(ctx, assignedTask))
{ // Completed-but-last-assigned task should be returned.
foundTask, err := db.FetchWorkerTask(ctx, &w)
if assert.NoError(t, err) && assert.NotNil(t, foundTask) {
require.NoError(t, err)
require.NotNil(t, foundTask)
assert.Equal(t, assignedTask.UUID, foundTask.UUID)
assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well")
}
}
// Assign another task.
newlyAssignedTask, err := db.ScheduleTask(ctx, &w)
if !assert.NoError(t, err) || !assert.NotNil(t, newlyAssignedTask) {
t.FailNow()
}
require.NoError(t, err)
require.NotNil(t, newlyAssignedTask)
{ // Newly assigned task should be returned.
foundTask, err := db.FetchWorkerTask(ctx, &w)
if assert.NoError(t, err) && assert.NotNil(t, foundTask) {
require.NoError(t, err)
require.NotNil(t, foundTask)
assert.Equal(t, newlyAssignedTask.UUID, foundTask.UUID)
assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well")
}
}
// Set the new task to 'completed'.
newlyAssignedTask.Status = api.TaskStatusCompleted
assert.NoError(t, db.SaveTaskStatus(ctx, newlyAssignedTask))
require.NoError(t, db.SaveTaskStatus(ctx, newlyAssignedTask))
{ // Completed-but-last-assigned task should be returned.
foundTask, err := db.FetchWorkerTask(ctx, &w)
if assert.NoError(t, err) && assert.NotNil(t, foundTask) {
require.NoError(t, err)
require.NotNil(t, foundTask)
assert.Equal(t, newlyAssignedTask.UUID, foundTask.UUID)
assert.Equal(t, jobUUID, foundTask.Job.UUID, "the job UUID should be returned as well")
}
}
}
@ -152,10 +149,10 @@ func TestSaveWorker(t *testing.T) {
}
err := db.CreateWorker(ctx, &w)
assert.NoError(t, err)
require.NoError(t, err)
fetchedWorker, err := db.FetchWorker(ctx, w.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.NotNil(t, fetchedWorker)
// Update all updatable fields of the Worker
@ -169,23 +166,23 @@ func TestSaveWorker(t *testing.T) {
// Saving only the status should just do that.
err = db.SaveWorkerStatus(ctx, &updatedWorker)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, "7 မှ 9", updatedWorker.Name, "Saving status should not touch the name")
// Check saved worker
fetchedWorker, err = db.FetchWorker(ctx, w.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.NotNil(t, fetchedWorker)
assert.Equal(t, updatedWorker.Status, fetchedWorker.Status, "new status should have been saved")
assert.NotEqual(t, updatedWorker.Name, fetchedWorker.Name, "non-status fields should not have been updated")
// Saving the entire worker should save everything.
err = db.SaveWorker(ctx, &updatedWorker)
assert.NoError(t, err)
require.NoError(t, err)
// Check saved worker
fetchedWorker, err = db.FetchWorker(ctx, w.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.NotNil(t, fetchedWorker)
assert.Equal(t, updatedWorker.Status, fetchedWorker.Status, "new status should have been saved")
assert.Equal(t, updatedWorker.Name, fetchedWorker.Name, "non-status fields should also have been updated")
@ -198,10 +195,8 @@ func TestFetchWorkers(t *testing.T) {
// No workers
workers, err := db.FetchWorkers(ctx)
if !assert.NoError(t, err) {
t.Fatal("error fetching empty list of workers, no use in continuing the test")
}
assert.Empty(t, workers)
require.NoError(t, err)
require.Empty(t, workers)
linuxWorker := Worker{
UUID: uuid.New(),
@ -215,12 +210,12 @@ func TestFetchWorkers(t *testing.T) {
// One worker:
err = db.CreateWorker(ctx, &linuxWorker)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, time.Now().UTC().Location(), linuxWorker.CreatedAt.Location(),
"Timestamps should be using UTC timezone")
workers, err = db.FetchWorkers(ctx)
assert.NoError(t, err)
require.NoError(t, err)
if assert.Len(t, workers, 1) {
// FIXME: this fails, because the fetched timestamps have nil location instead of UTC.
// assert.Equal(t, time.Now().UTC().Location(), workers[0].CreatedAt.Location(),
@ -244,10 +239,10 @@ func TestFetchWorkers(t *testing.T) {
SupportedTaskTypes: "blender,ffmpeg,file-management",
}
err = db.CreateWorker(ctx, &windowsWorker)
assert.NoError(t, err)
require.NoError(t, err)
workers, err = db.FetchWorkers(ctx)
assert.NoError(t, err)
require.NoError(t, err)
if assert.Len(t, workers, 2) {
assert.Equal(t, linuxWorker.UUID, workers[0].UUID)
assert.Equal(t, windowsWorker.UUID, workers[1].UUID)
@ -274,11 +269,11 @@ func TestDeleteWorker(t *testing.T) {
Status: api.WorkerStatusOffline,
}
assert.NoError(t, db.CreateWorker(ctx, &w1))
assert.NoError(t, db.CreateWorker(ctx, &w2))
require.NoError(t, db.CreateWorker(ctx, &w1))
require.NoError(t, db.CreateWorker(ctx, &w2))
// Delete the 2nd worker, just to have a test with ID != 1.
assert.NoError(t, db.DeleteWorker(ctx, w2.UUID))
require.NoError(t, db.DeleteWorker(ctx, w2.UUID))
// The deleted worker should now no longer be found.
{
@ -290,7 +285,7 @@ func TestDeleteWorker(t *testing.T) {
// The other worker should still exist.
{
fetchedWorker, err := db.FetchWorker(ctx, w1.UUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, w1.UUID, fetchedWorker.UUID)
}
@ -300,18 +295,18 @@ func TestDeleteWorker(t *testing.T) {
taskUUID := authJob.Tasks[0].UUID
{
task, err := db.FetchTask(ctx, taskUUID)
assert.NoError(t, err)
require.NoError(t, err)
task.Worker = &w1
assert.NoError(t, db.SaveTask(ctx, task))
require.NoError(t, db.SaveTask(ctx, task))
}
// Delete the worker.
assert.NoError(t, db.DeleteWorker(ctx, w1.UUID))
require.NoError(t, db.DeleteWorker(ctx, w1.UUID))
// Check the task after deletion of the Worker.
{
fetchedTask, err := db.FetchTask(ctx, taskUUID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, taskUUID, fetchedTask.UUID)
assert.Equal(t, w1.UUID, fetchedTask.Worker.UUID)
assert.NotZero(t, fetchedTask.Worker.DeletedAt.Time)
@ -319,6 +314,30 @@ func TestDeleteWorker(t *testing.T) {
}
}
// TestDeleteWorkerNoForeignKeys verifies that worker deletion is refused when
// SQLite foreign key enforcement is switched off, and that the worker survives.
func TestDeleteWorkerNoForeignKeys(t *testing.T) {
	ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
	defer cancel()

	// Create a Worker to delete.
	w1 := Worker{
		UUID:   "fd97a35b-a5bd-44b4-ac2b-64c193ca877d",
		Name:   "Worker 1",
		Status: api.WorkerStatusAwake,
	}
	require.NoError(t, db.CreateWorker(ctx, &w1))

	// Try deleting with foreign key constraints disabled.
	require.NoError(t, db.pragmaForeignKeys(false))

	// require.ErrorIs takes (t, err, target); the original had the error and
	// the sentinel swapped, which would break as soon as the error is wrapped.
	err := db.DeleteWorker(ctx, w1.UUID)
	require.ErrorIs(t, err, ErrDeletingWithoutFK)

	// The worker should still exist.
	{
		fetchedWorker, err := db.FetchWorker(ctx, w1.UUID)
		require.NoError(t, err)
		assert.Equal(t, w1.UUID, fetchedWorker.UUID)
	}
}
func TestDeleteWorkerWithTagAssigned(t *testing.T) {
f := workerTestFixtures(t, 1*time.Second)
defer f.done()
@ -334,3 +353,65 @@ func TestDeleteWorkerWithTagAssigned(t *testing.T) {
require.NoError(t, err)
assert.Empty(t, tag.Workers)
}
// TestSummarizeWorkerStatuses checks the per-status worker counts as workers
// are created and deleted.
func TestSummarizeWorkerStatuses(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	// The fixture provides a single awake worker.
	summary, err := f.db.SummarizeWorkerStatuses(f.ctx)
	require.NoError(t, err)
	assert.Equal(t, WorkerStatusCount{api.WorkerStatusAwake: 1}, summary)

	// Add one awake and one offline worker.
	w1 := Worker{
		UUID:   "fd97a35b-a5bd-44b4-ac2b-64c193ca877d",
		Name:   "Worker 1",
		Status: api.WorkerStatusAwake,
	}
	w2 := Worker{
		UUID:   "82b2d176-cb8c-4bfa-8300-41c216d766df",
		Name:   "Worker 2",
		Status: api.WorkerStatusOffline,
	}
	for _, worker := range []*Worker{&w1, &w2} {
		require.NoError(t, f.db.CreateWorker(f.ctx, worker))
	}

	// Two awake workers and one offline worker are expected now.
	summary, err = f.db.SummarizeWorkerStatuses(f.ctx)
	require.NoError(t, err)
	assert.Equal(t, WorkerStatusCount{
		api.WorkerStatusAwake:   2,
		api.WorkerStatusOffline: 1,
	}, summary)

	// Remove every worker again.
	for _, workerUUID := range []string{f.worker.UUID, w1.UUID, w2.UUID} {
		require.NoError(t, f.db.DeleteWorker(f.ctx, workerUUID))
	}

	// Without workers the summary should be empty.
	summary, err = f.db.SummarizeWorkerStatuses(f.ctx)
	require.NoError(t, err)
	assert.Equal(t, WorkerStatusCount{}, summary)
}
// TestSummarizeWorkerStatusesTimeout checks that a context timeout can be
// detected by inspecting the returned error.
func TestSummarizeWorkerStatusesTimeout(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	timeoutCtx, cancelTimeoutCtx := context.WithTimeout(f.ctx, 1*time.Nanosecond)
	defer cancelTimeoutCtx()

	// Make sure the context has actually expired. Even with a one-nanosecond
	// deadline, an explicit wait is still necessary.
	time.Sleep(2 * time.Nanosecond)

	summary, err := f.db.SummarizeWorkerStatuses(timeoutCtx)
	assert.ErrorIs(t, err, context.DeadlineExceeded)
	assert.Nil(t, summary)
}

View File

@ -10,6 +10,7 @@ import (
"github.com/benbjohnson/clock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/persistence"
"projects.blender.org/studio/flamenco/internal/manager/sleep_scheduler/mocks"
@ -24,9 +25,8 @@ func TestFetchSchedule(t *testing.T) {
mocks.persist.EXPECT().FetchWorkerSleepSchedule(ctx, workerUUID).Return(&dbSched, nil)
sched, err := ss.FetchSchedule(ctx, workerUUID)
if assert.NoError(t, err) {
require.NoError(t, err)
assert.Equal(t, &dbSched, sched)
}
}
func TestSetSchedule(t *testing.T) {
@ -59,7 +59,7 @@ func TestSetSchedule(t *testing.T) {
mocks.broadcaster.EXPECT().BroadcastWorkerUpdate(gomock.Any())
err := ss.SetSchedule(ctx, workerUUID, &sched)
assert.NoError(t, err)
require.NoError(t, err)
}
func TestSetScheduleSwappedStartEnd(t *testing.T) {
@ -92,7 +92,7 @@ func TestSetScheduleSwappedStartEnd(t *testing.T) {
mocks.persist.EXPECT().SetWorkerSleepSchedule(ctx, workerUUID, &expectSavedSchedule)
err := ss.SetSchedule(ctx, workerUUID, &sched)
assert.NoError(t, err)
require.NoError(t, err)
}
// Test that a sleep check that happens at shutdown of the Manager doesn't cause any panics.
@ -157,9 +157,7 @@ func TestApplySleepSchedule(t *testing.T) {
// Actually apply the sleep schedule.
err := ss.ApplySleepSchedule(ctx, &testSchedule)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
// Check the SocketIO broadcast.
if sioUpdate.Id != "" {
@ -220,9 +218,7 @@ func TestApplySleepScheduleNoStatusChange(t *testing.T) {
// Apply the sleep schedule. This should not trigger any persistence or broadcasts.
err := ss.ApplySleepSchedule(ctx, &testSchedule)
if !assert.NoError(t, err) {
t.FailNow()
}
require.NoError(t, err)
}
// Move the clock to the middle of the sleep schedule, so the schedule always
@ -271,9 +267,8 @@ func testFixtures(t *testing.T) (*SleepScheduler, TestMocks, context.Context) {
mockedClock := clock.NewMock()
mockedNow, err := time.Parse(time.RFC3339, "2022-06-07T11:14:47+02:00")
if err != nil {
panic(err)
}
require.NoError(t, err)
mockedClock.Set(mockedNow)
if !assert.Equal(t, time.Tuesday.String(), mockedNow.Weekday().String()) {
t.Fatal("tests assume 'now' is a Tuesday")

View File

@ -12,11 +12,12 @@ import (
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func setUpTest(t *testing.T) string {
temppath, err := ioutil.TempDir("", "testlogs")
assert.NoError(t, err)
require.NoError(t, err)
return temppath
}
@ -55,7 +56,7 @@ func TestNoFiles(t *testing.T) {
filepath := filepath.Join(temppath, "nonexisting.txt")
err := rotateLogFile(zerolog.Nop(), filepath)
assert.NoError(t, err)
require.NoError(t, err)
assert.False(t, fileExists(filepath))
}
@ -67,7 +68,7 @@ func TestOneFile(t *testing.T) {
fileTouch(filepath)
err := rotateLogFile(zerolog.Nop(), filepath)
assert.NoError(t, err)
require.NoError(t, err)
assert.False(t, fileExists(filepath))
assert.True(t, fileExists(filepath+".1"))
}
@ -77,16 +78,16 @@ func TestMultipleFilesWithHoles(t *testing.T) {
defer tearDownTest(temppath)
filepath := filepath.Join(temppath, "existing.txt")
assert.NoError(t, ioutil.WriteFile(filepath, []byte("thefile"), 0666))
assert.NoError(t, ioutil.WriteFile(filepath+".1", []byte("file .1"), 0666))
assert.NoError(t, ioutil.WriteFile(filepath+".2", []byte("file .2"), 0666))
assert.NoError(t, ioutil.WriteFile(filepath+".3", []byte("file .3"), 0666))
assert.NoError(t, ioutil.WriteFile(filepath+".5", []byte("file .5"), 0666))
assert.NoError(t, ioutil.WriteFile(filepath+".7", []byte("file .7"), 0666))
require.NoError(t, ioutil.WriteFile(filepath, []byte("thefile"), 0666))
require.NoError(t, ioutil.WriteFile(filepath+".1", []byte("file .1"), 0666))
require.NoError(t, ioutil.WriteFile(filepath+".2", []byte("file .2"), 0666))
require.NoError(t, ioutil.WriteFile(filepath+".3", []byte("file .3"), 0666))
require.NoError(t, ioutil.WriteFile(filepath+".5", []byte("file .5"), 0666))
require.NoError(t, ioutil.WriteFile(filepath+".7", []byte("file .7"), 0666))
err := rotateLogFile(zerolog.Nop(), filepath)
assert.NoError(t, err)
require.NoError(t, err)
assert.False(t, fileExists(filepath))
assert.True(t, fileExists(filepath+".1"))
assert.True(t, fileExists(filepath+".2"))
@ -100,7 +101,7 @@ func TestMultipleFilesWithHoles(t *testing.T) {
read := func(filename string) string {
content, err := ioutil.ReadFile(filename)
assert.NoError(t, err)
require.NoError(t, err)
return string(content)
}

View File

@ -19,6 +19,7 @@ import (
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/task_logs/mocks"
)
@ -36,14 +37,14 @@ func TestLogWriting(t *testing.T) {
mocks.localStorage.EXPECT().ForJob(jobUUID).Times(numWriteCalls).Return(jobDir)
err := s.Write(zerolog.Nop(), jobUUID, taskUUID, "Ovo je priča")
assert.NoError(t, err)
require.NoError(t, err)
err = s.Write(zerolog.Nop(), jobUUID, taskUUID, "Ima dvije linije")
assert.NoError(t, err)
require.NoError(t, err)
filename := filepath.Join(jobDir, "task-20ff9d06-53ec-4019-9e2e-1774f05f170a.txt")
contents, err := ioutil.ReadFile(filename)
assert.NoError(t, err, "the log file should exist")
require.NoError(t, err, "the log file should exist")
assert.Equal(t, "Ovo je priča\nIma dvije linije\n", string(contents))
}
@ -59,7 +60,7 @@ func TestLogRotation(t *testing.T) {
mocks.localStorage.EXPECT().ForJob(jobUUID).Return(jobDir).AnyTimes()
err := s.Write(zerolog.Nop(), jobUUID, taskUUID, "Ovo je priča")
assert.NoError(t, err)
require.NoError(t, err)
s.RotateFile(zerolog.Nop(), jobUUID, taskUUID)
@ -67,7 +68,7 @@ func TestLogRotation(t *testing.T) {
rotatedFilename := filename + ".1"
contents, err := ioutil.ReadFile(rotatedFilename)
assert.NoError(t, err, "the rotated log file should exist")
require.NoError(t, err, "the rotated log file should exist")
assert.Equal(t, "Ovo je priča\n", string(contents))
_, err = os.Stat(filename)
@ -97,16 +98,16 @@ func TestLogTailAndSize(t *testing.T) {
// Test a single line.
err = s.Write(zerolog.Nop(), jobID, taskID, "Just a single line")
assert.NoError(t, err)
require.NoError(t, err)
contents, err = s.Tail(jobID, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, "Just a single line\n", string(contents))
// A short file shouldn't do any line stripping.
err = s.Write(zerolog.Nop(), jobID, taskID, "And another line!")
assert.NoError(t, err)
require.NoError(t, err)
contents, err = s.Tail(jobID, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, "Just a single line\nAnd another line!\n", string(contents))
bigString := ""
@ -114,18 +115,17 @@ func TestLogTailAndSize(t *testing.T) {
bigString += fmt.Sprintf("This is line #%d\n", lineNum)
}
err = s.Write(zerolog.Nop(), jobID, taskID, bigString)
assert.NoError(t, err)
require.NoError(t, err)
// Check the log size, it should be the entire bigString plus what was written before that.
size, err = s.TaskLogSize(jobID, taskID)
if assert.NoError(t, err) {
require.NoError(t, err)
expect := int64(len("Just a single line\nAnd another line!\n" + bigString))
assert.Equal(t, expect, size)
}
// Check the tail, it should only be the few last lines of bigString.
contents, err = s.Tail(jobID, taskID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t,
"This is line #887\nThis is line #888\nThis is line #889\nThis is line #890\nThis is line #891\n"+
"This is line #892\nThis is line #893\nThis is line #894\nThis is line #895\nThis is line #896\n"+
@ -183,7 +183,7 @@ func TestLogWritingParallel(t *testing.T) {
}
logText := strings.Repeat(string(letter), runLength)
assert.NoError(t, s.Write(logger, jobID, taskID, logText))
require.NoError(t, s.Write(logger, jobID, taskID, logText))
}(int32(i))
}
wg.Wait()
@ -191,7 +191,7 @@ func TestLogWritingParallel(t *testing.T) {
// Test that the final log contains 1000 lines of of 100 characters, without
// any run getting interrupted by another one.
contents, err := os.ReadFile(s.Filepath(jobID, taskID))
assert.NoError(t, err)
require.NoError(t, err)
lines := strings.Split(string(contents), "\n")
assert.Equal(t, numGoroutines+1, len(lines),
"each goroutine should have written a single line, and the file should have a newline at the end")
@ -217,9 +217,7 @@ func taskLogsTestFixtures(t *testing.T) (*Storage, func(), *TaskLogsMocks) {
mockCtrl := gomock.NewController(t)
temppath, err := ioutil.TempDir("", "testlogs")
if err != nil {
panic(err)
}
require.NoError(t, err)
mocks := &TaskLogsMocks{
temppath: temppath,
@ -229,9 +227,8 @@ func taskLogsTestFixtures(t *testing.T) (*Storage, func(), *TaskLogsMocks) {
}
mockedNow, err := time.Parse(time.RFC3339, "2022-06-09T16:52:04+02:00")
if err != nil {
panic(err)
}
require.NoError(t, err)
mocks.clock.Set(mockedNow)
// This should be called at the end of each unit test.

View File

@ -10,6 +10,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/persistence"
"projects.blender.org/studio/flamenco/internal/manager/task_state_machine/mocks"
@ -37,7 +38,7 @@ func TestTaskStatusChangeQueuedToActive(t *testing.T) {
mocks.expectBroadcastJobChange(task.Job, api.JobStatusQueued, api.JobStatusActive)
mocks.expectBroadcastTaskChange(task, api.TaskStatusQueued, api.TaskStatusActive)
assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusActive))
require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusActive))
}
func TestTaskStatusChangeSaveTaskAfterJobChangeFailure(t *testing.T) {
@ -75,20 +76,20 @@ func TestTaskStatusChangeActiveToCompleted(t *testing.T) {
mocks.expectWriteTaskLogTimestamped(t, task, "task changed status active -> completed")
mocks.expectBroadcastTaskChange(task, api.TaskStatusActive, api.TaskStatusCompleted)
mocks.persist.EXPECT().CountTasksOfJobInStatus(ctx, task.Job, api.TaskStatusCompleted).Return(1, 3, nil) // 1 of 3 complete.
assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusCompleted))
require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusCompleted))
// Second task hickup: T: active > soft-failed --> J: active > active
mocks.expectSaveTaskWithStatus(t, task2, api.TaskStatusSoftFailed)
mocks.expectWriteTaskLogTimestamped(t, task2, "task changed status active -> soft-failed")
mocks.expectBroadcastTaskChange(task2, api.TaskStatusActive, api.TaskStatusSoftFailed)
assert.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusSoftFailed))
require.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusSoftFailed))
// Second task completing: T: soft-failed > completed --> J: active > active
mocks.expectSaveTaskWithStatus(t, task2, api.TaskStatusCompleted)
mocks.expectWriteTaskLogTimestamped(t, task2, "task changed status soft-failed -> completed")
mocks.expectBroadcastTaskChange(task2, api.TaskStatusSoftFailed, api.TaskStatusCompleted)
mocks.persist.EXPECT().CountTasksOfJobInStatus(ctx, task.Job, api.TaskStatusCompleted).Return(2, 3, nil) // 2 of 3 complete.
assert.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusCompleted))
require.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusCompleted))
// Third task completing: T: active > completed --> J: active > completed
mocks.expectSaveTaskWithStatus(t, task3, api.TaskStatusCompleted)
@ -98,7 +99,7 @@ func TestTaskStatusChangeActiveToCompleted(t *testing.T) {
mocks.expectSaveJobWithStatus(t, task.Job, api.JobStatusCompleted)
mocks.expectBroadcastJobChange(task.Job, api.JobStatusActive, api.JobStatusCompleted)
assert.NoError(t, sm.TaskStatusChange(ctx, task3, api.TaskStatusCompleted))
require.NoError(t, sm.TaskStatusChange(ctx, task3, api.TaskStatusCompleted))
}
func TestTaskStatusChangeQueuedToFailed(t *testing.T) {
@ -114,7 +115,7 @@ func TestTaskStatusChangeQueuedToFailed(t *testing.T) {
mocks.persist.EXPECT().CountTasksOfJobInStatus(ctx, task.Job, api.TaskStatusFailed).Return(1, 100, nil) // 1 out of 100 failed.
mocks.expectBroadcastJobChange(task.Job, api.JobStatusQueued, api.JobStatusActive)
assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusFailed))
require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusFailed))
}
func TestTaskStatusChangeActiveToFailedFailJob(t *testing.T) {
@ -144,7 +145,7 @@ func TestTaskStatusChangeActiveToFailedFailJob(t *testing.T) {
"Manager cancelled this task because the job got status \"failed\".",
)
assert.NoError(t, sm.TaskStatusChange(ctx, task1, api.TaskStatusFailed))
require.NoError(t, sm.TaskStatusChange(ctx, task1, api.TaskStatusFailed))
}
func TestTaskStatusChangeRequeueOnCompletedJob(t *testing.T) {
@ -168,7 +169,7 @@ func TestTaskStatusChangeRequeueOnCompletedJob(t *testing.T) {
)
mocks.expectSaveJobWithStatus(t, task.Job, api.JobStatusQueued)
assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusQueued))
require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusQueued))
}
func TestTaskStatusChangeCancelSingleTask(t *testing.T) {
@ -186,7 +187,7 @@ func TestTaskStatusChangeCancelSingleTask(t *testing.T) {
mocks.persist.EXPECT().CountTasksOfJobInStatus(ctx, job,
api.TaskStatusActive, api.TaskStatusQueued, api.TaskStatusSoftFailed).
Return(1, 2, nil)
assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusCanceled))
require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatusCanceled))
// T2: queued > cancelled --> J: cancel-requested > canceled
mocks.expectSaveTaskWithStatus(t, task2, api.TaskStatusCanceled)
@ -198,7 +199,7 @@ func TestTaskStatusChangeCancelSingleTask(t *testing.T) {
mocks.expectSaveJobWithStatus(t, job, api.JobStatusCanceled)
mocks.expectBroadcastJobChange(task.Job, api.JobStatusCancelRequested, api.JobStatusCanceled)
assert.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusCanceled))
require.NoError(t, sm.TaskStatusChange(ctx, task2, api.TaskStatusCanceled))
}
func TestTaskStatusChangeCancelSingleTaskWithOtherFailed(t *testing.T) {
@ -222,7 +223,7 @@ func TestTaskStatusChangeCancelSingleTaskWithOtherFailed(t *testing.T) {
// The paused task just stays paused, so don't expectBroadcastTaskChange(task3).
assert.NoError(t, sm.TaskStatusChange(ctx, task1, api.TaskStatusCanceled))
require.NoError(t, sm.TaskStatusChange(ctx, task1, api.TaskStatusCanceled))
}
func TestTaskStatusChangeUnknownStatus(t *testing.T) {
@ -235,7 +236,7 @@ func TestTaskStatusChangeUnknownStatus(t *testing.T) {
mocks.expectWriteTaskLogTimestamped(t, task, "task changed status queued -> borked")
mocks.expectBroadcastTaskChange(task, api.TaskStatusQueued, api.TaskStatus("borked"))
assert.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatus("borked")))
require.NoError(t, sm.TaskStatusChange(ctx, task, api.TaskStatus("borked")))
}
func TestJobRequeueWithSomeCompletedTasks(t *testing.T) {
@ -269,7 +270,7 @@ func TestJobRequeueWithSomeCompletedTasks(t *testing.T) {
mocks.expectBroadcastJobChangeWithTaskRefresh(job, api.JobStatusActive, api.JobStatusRequeueing)
mocks.expectBroadcastJobChangeWithTaskRefresh(job, api.JobStatusRequeueing, api.JobStatusQueued)
assert.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusRequeueing, "someone wrote a unittest"))
require.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusRequeueing, "someone wrote a unittest"))
}
func TestJobRequeueWithAllCompletedTasks(t *testing.T) {
@ -301,7 +302,7 @@ func TestJobRequeueWithAllCompletedTasks(t *testing.T) {
mocks.expectBroadcastJobChangeWithTaskRefresh(job, api.JobStatusCompleted, api.JobStatusRequeueing)
mocks.expectBroadcastJobChangeWithTaskRefresh(job, api.JobStatusRequeueing, api.JobStatusQueued)
assert.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusRequeueing, "someone wrote a unit test"))
require.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusRequeueing, "someone wrote a unit test"))
}
func TestJobCancelWithSomeCompletedTasks(t *testing.T) {
@ -332,7 +333,7 @@ func TestJobCancelWithSomeCompletedTasks(t *testing.T) {
mocks.expectBroadcastJobChangeWithTaskRefresh(job, api.JobStatusActive, api.JobStatusCancelRequested)
mocks.expectBroadcastJobChange(job, api.JobStatusCancelRequested, api.JobStatusCanceled)
assert.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusCancelRequested, "someone wrote a unittest"))
require.NoError(t, sm.JobStatusChange(ctx, job, api.JobStatusCancelRequested, "someone wrote a unittest"))
}
func TestCheckStuck(t *testing.T) {

View File

@ -6,7 +6,7 @@ import (
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/persistence"
"projects.blender.org/studio/flamenco/pkg/api"
)
@ -66,5 +66,5 @@ func TestRequeueActiveTasksOfWorker(t *testing.T) {
})
err := sm.RequeueActiveTasksOfWorker(ctx, &worker, "worker had to test")
assert.NoError(t, err)
require.NoError(t, err)
}

View File

@ -11,6 +11,7 @@ import (
"github.com/benbjohnson/clock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"projects.blender.org/studio/flamenco/internal/manager/timeout_checker/mocks"
)
@ -50,9 +51,7 @@ func timeoutCheckerTestFixtures(t *testing.T) (*TimeoutChecker, func(), *Timeout
}
mockedNow, err := time.Parse(time.RFC3339, "2022-06-09T12:00:00+00:00")
if err != nil {
panic(err)
}
require.NoError(t, err)
mocks.clock.Set(mockedNow)
ctx, cancel := context.WithCancel(context.Background())

View File

@ -11,6 +11,7 @@ import (
"github.com/alessio/shellescape"
"github.com/rs/zerolog"
"projects.blender.org/studio/flamenco/pkg/oomscore"
)
// The buffer size used to read stdout/stderr output from subprocesses, in
@ -20,11 +21,19 @@ const StdoutBufferSize = 40 * 1024
// CLIRunner is a wrapper around exec.CommandContext() to allow mocking.
type CLIRunner struct {
// oomScoreAdjust is the amount by which the OOM score of started
// subprocesses is adjusted. Only meaningful when useOOMScoreAdjust is true.
oomScoreAdjust int
// useOOMScoreAdjust distinguishes "adjust by zero" from "do not adjust at
// all"; it is only set by NewCLIRunnerWithOOMScoreAdjuster.
useOOMScoreAdjust bool
}

// NewCLIRunner constructs a CLIRunner that leaves OOM scores untouched.
func NewCLIRunner() *CLIRunner {
return &CLIRunner{}
}

// NewCLIRunnerWithOOMScoreAdjuster constructs a CLIRunner that adjusts the
// OOM score by the given amount while starting subprocesses
// (see startWithOOMAdjust).
func NewCLIRunnerWithOOMScoreAdjuster(oomScoreAdjust int) *CLIRunner {
return &CLIRunner{
oomScoreAdjust: oomScoreAdjust,
useOOMScoreAdjust: true,
}
}
func (cli *CLIRunner) CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd {
return exec.CommandContext(ctx, name, arg...)
@ -55,7 +64,7 @@ func (cli *CLIRunner) RunWithTextOutput(
return err
}
if err := execCmd.Start(); err != nil {
if err := cli.startWithOOMAdjust(execCmd); err != nil {
logger.Error().Err(err).Msg("error starting CLI execution")
return err
}
@ -171,3 +180,13 @@ func (cli *CLIRunner) logCmd(
}
return nil
}
// startWithOOMAdjust starts the command, with the OOM score adjusted while
// the start happens. When the runner was constructed with
// NewCLIRunnerWithOOMScoreAdjuster, oomscore.Adjust() is applied first and
// the returned restore function is deferred, so the original score is put
// back once Start() has returned — presumably the child process inherits the
// adjusted score at start time (NOTE(review): confirm against the oomscore
// package). Without the adjuster flag this is a plain execCmd.Start().
func (cli *CLIRunner) startWithOOMAdjust(execCmd *exec.Cmd) error {
if cli.useOOMScoreAdjust {
oomScoreRestore := oomscore.Adjust(cli.oomScoreAdjust)
defer oomScoreRestore()
}
return execCmd.Start()
}

View File

@ -64,7 +64,7 @@ loop:
select {
case <-runDone:
break loop
default:
case <-time.After(1 * time.Millisecond):
mocks.clock.Add(timeStepSize)
}
}

Some files were not shown because too many files have changed in this diff Show More