Rename worker clusters to tags #104223

Manually merged
Sybren A. Stüvel merged 8 commits from Evelinealy/flamenco:worker-tag-rename into main 2023-07-10 11:13:35 +02:00
38 changed files with 663 additions and 660 deletions
Showing only changes of commit ce00c3758d

View File

@@ -8,7 +8,7 @@ bugs in actually-released versions.
 - Improve speed of queueing up >100 simultaneous job deletions.
 - Improve logging of job deletion.
-- Add Worker Cluster support. Workers can be members of any number of clusters. Workers will only work on jobs that are assigned to that cluster. Jobs that do not have a cluster will be available to all workers, regardless of their cluster assignment. As a result, clusterless workers will only work on clusterless jobs.
+- Add Worker Tag support. Workers can be members of any number of tags. Workers will only work on jobs that are assigned to that tag. Jobs that do not have a tag will be available to all workers, regardless of their tag assignment. As a result, tagless workers will only work on tagless jobs.
 - Fix limitation where a job could have no more than 1000 tasks ([#104201](https://projects.blender.org/studio/flamenco/issues/104201))
 - Add support for finding the top-level 'project' directory. When submitting files to Flamenco, the add-on will try to retain the directory structure of your Blender project as precisely as possible. This new feature allows the add-on to find the top-level directory of your project by finding a `.blender_project`, `.git`, or `.subversion` directory. This can be configured in the add-on preferences.
 - Worker status is remembered when they sign off, so that workers when they come back online do so to the same state ([#99549](https://projects.blender.org/studio/flamenco/issues/99549)).
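The matching rule from the "Add Worker Tag support" entry above can be summed up in a small sketch (a hypothetical Go helper for illustration, not code from this PR):

```go
package main

import "fmt"

// workerCanTakeJob illustrates the changelog's rule: a tagless job is open
// to every worker, a tagged job only to workers carrying that tag, and so
// a tagless worker ends up with tagless jobs only.
func workerCanTakeJob(jobTagUUID string, workerTagUUIDs []string) bool {
	if jobTagUUID == "" {
		return true // tagless job: any worker may take it
	}
	for _, tagUUID := range workerTagUUIDs {
		if tagUUID == jobTagUUID {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(workerCanTakeJob("", nil))                    // true: tagless job, tagless worker
	fmt.Println(workerCanTakeJob("tag-a", nil))               // false: tagged job, tagless worker
	fmt.Println(workerCanTakeJob("tag-a", []string{"tag-a"})) // true: matching tag
}
```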

View File

@@ -26,7 +26,7 @@ if __is_first_load:
        comms,
        preferences,
        projects,
-        worker_clusters,
+        worker_tags,
    )
 else:
    import importlib
@@ -37,7 +37,7 @@ else:
    comms = importlib.reload(comms)
    preferences = importlib.reload(preferences)
    projects = importlib.reload(projects)
-    worker_clusters = importlib.reload(worker_clusters)
+    worker_tags = importlib.reload(worker_tags)
 
 import bpy
@@ -155,7 +155,7 @@ def register() -> None:
    )
 
    preferences.register()
-    worker_clusters.register()
+    worker_tags.register()
    operators.register()
    gui.register()
    job_types.register()
@@ -173,5 +173,5 @@ def unregister() -> None:
    job_types.unregister()
    gui.unregister()
    operators.unregister()
-    worker_clusters.unregister()
+    worker_tags.unregister()
    preferences.unregister()

View File

@@ -43,10 +43,10 @@ class FLAMENCO_PT_job_submission(bpy.types.Panel):
        col.prop(context.scene, "flamenco_job_name", text="Job Name")
        col.prop(context.scene, "flamenco_job_priority", text="Priority")
 
-        # Worker cluster:
+        # Worker tag:
        row = col.row(align=True)
-        row.prop(context.scene, "flamenco_worker_cluster", text="Cluster")
-        row.operator("flamenco.fetch_worker_clusters", text="", icon="FILE_REFRESH")
+        row.prop(context.scene, "flamenco_worker_tag", text="Tag")
+        row.operator("flamenco.fetch_worker_tags", text="", icon="FILE_REFRESH")
 
        layout.separator()

View File

@@ -54,9 +54,9 @@ def job_for_scene(scene: bpy.types.Scene) -> Optional[_SubmittedJob]:
        type_etag=propgroup.job_type.etag,
    )
 
-    worker_cluster: str = getattr(scene, "flamenco_worker_cluster", "")
-    if worker_cluster and worker_cluster != "-":
-        job.worker_cluster = worker_cluster
+    worker_tag: str = getattr(scene, "flamenco_worker_tag", "")
+    if worker_tag and worker_tag != "-":
+        job.worker_tag = worker_tag
 
    return job
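On the Manager side, this optional scene property ends up as the `WorkerTag` field of `api.SubmittedJob`. A minimal sketch of that payload, with field names taken from the Go tests further down in this diff (the import path and `ptr` helper are assumptions):

```go
package example

import "projects.blender.org/studio/flamenco/pkg/api" // module path assumed

// ptr mirrors the ptr() helper used in the tests below.
func ptr[T any](v T) *T { return &v }

// exampleSubmission sketches the job the add-on submits. WorkerTag is
// optional: the add-on leaves it unset when "All" ("-") is selected.
func exampleSubmission() api.SubmittedJob {
	return api.SubmittedJob{
		Name:      "example render",
		Type:      "simple-blender-render",
		Priority:  50,
		WorkerTag: ptr("04435762-9dc8-4f13-80b7-643a6fa5b6fd"),
	}
}
```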

View File

@@ -10,7 +10,7 @@ from urllib3.exceptions import HTTPError, MaxRetryError
 
 import bpy
 
-from . import job_types, job_submission, preferences, worker_clusters
+from . import job_types, job_submission, preferences, worker_tags
 from .job_types_propgroup import JobTypePropertyGroup
 from .bat.submodules import bpathlib
@@ -83,10 +83,10 @@ class FLAMENCO_OT_fetch_worker_clusters(FlamencoOpMixin, bpy.types.Operator):
        return {"FINISHED"}
 
-class FLAMENCO_OT_fetch_worker_clusters(FlamencoOpMixin, bpy.types.Operator):
-    bl_idname = "flamenco.fetch_worker_clusters"
-    bl_label = "Fetch Worker Clusters"
-    bl_description = "Query Flamenco Manager to obtain the available worker clusters"
+class FLAMENCO_OT_fetch_worker_tags(FlamencoOpMixin, bpy.types.Operator):
+    bl_idname = "flamenco.fetch_worker_tags"
+    bl_label = "Fetch Worker Tags"
+    bl_description = "Query Flamenco Manager to obtain the available worker tags"
 
    def execute(self, context: bpy.types.Context) -> set[str]:
        api_client = self.get_api_client(context)
@@ -94,10 +94,10 @@ class FLAMENCO_OT_fetch_worker_clusters(FlamencoOpMixin, bpy.types.Operator):
        from flamenco.manager import ApiException
 
        scene = context.scene
-        old_cluster = getattr(scene, "flamenco_worker_cluster", "")
+        old_tag = getattr(scene, "flamenco_worker_tag", "")
 
        try:
-            worker_clusters.refresh(context, api_client)
+            worker_tags.refresh(context, api_client)
        except ApiException as ex:
            self.report({"ERROR"}, "Error getting job types: %s" % ex)
            return {"CANCELLED"}
@@ -107,9 +107,9 @@ class FLAMENCO_OT_fetch_worker_clusters(FlamencoOpMixin, bpy.types.Operator):
            self.report({"ERROR"}, "Unable to reach Manager")
            return {"CANCELLED"}
 
-        if old_cluster:
-            # TODO: handle cases where the old cluster no longer exists.
-            scene.flamenco_worker_cluster = old_cluster
+        if old_tag:
+            # TODO: handle cases where the old tag no longer exists.
+            scene.flamenco_worker_tag = old_tag
 
        return {"FINISHED"}
@@ -669,7 +669,7 @@ class FLAMENCO3_OT_explore_file_path(bpy.types.Operator):
 classes = (
    FLAMENCO_OT_fetch_job_types,
-    FLAMENCO_OT_fetch_worker_clusters,
+    FLAMENCO_OT_fetch_worker_tags,
    FLAMENCO_OT_ping_manager,
    FLAMENCO_OT_eval_setting,
    FLAMENCO_OT_submit_job,

View File

@@ -43,7 +43,7 @@ _project_finder_enum_items = [
 ]
 
-class WorkerCluster(bpy.types.PropertyGroup):
+class WorkerTag(bpy.types.PropertyGroup):
    id: bpy.props.StringProperty(name="id")  # type: ignore
    name: bpy.props.StringProperty(name="Name")  # type: ignore
    description: bpy.props.StringProperty(name="Description")  # type: ignore
@@ -93,10 +93,10 @@ class FlamencoPreferences(bpy.types.AddonPreferences):
        get=lambda prefs: prefs.job_storage,
    )
 
-    worker_clusters: bpy.props.CollectionProperty(  # type: ignore
-        type=WorkerCluster,
-        name="Worker Clusters",
-        description="Cache for the worker clusters available on the configured Manager",
+    worker_tags: bpy.props.CollectionProperty(  # type: ignore
+        type=WorkerTag,
+        name="Worker Tags",
+        description="Cache for the worker tags available on the configured Manager",
        options={"HIDDEN"},
    )
@@ -169,7 +169,7 @@ def manager_url(context: bpy.types.Context) -> str:
 classes = (
-    WorkerCluster,
+    WorkerTag,
    FlamencoPreferences,
 )
 _register, _unregister = bpy.utils.register_classes_factory(classes)

View File

@@ -16,25 +16,25 @@ _enum_items: list[Union[tuple[str, str, str], tuple[str, str, str, int, int]]] =
 def refresh(context: bpy.types.Context, api_client: _ApiClient) -> None:
-    """Fetch the available worker clusters from the Manager."""
+    """Fetch the available worker tags from the Manager."""
    from flamenco.manager import ApiClient
    from flamenco.manager.api import worker_mgt_api
-    from flamenco.manager.model.worker_cluster_list import WorkerClusterList
+    from flamenco.manager.model.worker_tag_list import WorkerTagList
 
    assert isinstance(api_client, ApiClient)
 
    api = worker_mgt_api.WorkerMgtApi(api_client)
-    response: WorkerClusterList = api.fetch_worker_clusters()
+    response: WorkerTagList = api.fetch_worker_tags()
 
    # Store on the preferences, so a cached version persists until the next refresh.
    prefs = preferences.get(context)
-    prefs.worker_clusters.clear()
-    for cluster in response.clusters:
-        rna_cluster = prefs.worker_clusters.add()
-        rna_cluster.id = cluster.id
-        rna_cluster.name = cluster.name
-        rna_cluster.description = getattr(cluster, "description", "")
+    prefs.worker_tags.clear()
+    for tag in response.tags:
+        rna_tag = prefs.worker_tags.add()
+        rna_tag.id = tag.id
+        rna_tag.name = tag.name
+        rna_tag.description = getattr(tag, "description", "")
 
    # Preferences have changed, so make sure that Blender saves them (assuming
    # auto-save here).
@@ -46,25 +46,25 @@ def _get_enum_items(self, context):
    prefs = preferences.get(context)
    _enum_items = [
-        ("-", "All", "No specific cluster assigned, any worker can handle this job"),
+        ("-", "All", "No specific tag assigned, any worker can handle this job"),
    ]
    _enum_items.extend(
-        (cluster.id, cluster.name, cluster.description)
-        for cluster in prefs.worker_clusters
+        (tag.id, tag.name, tag.description)
+        for tag in prefs.worker_tags
    )
    return _enum_items
 
 
 def register() -> None:
-    bpy.types.Scene.flamenco_worker_cluster = bpy.props.EnumProperty(
-        name="Worker Cluster",
+    bpy.types.Scene.flamenco_worker_tag = bpy.props.EnumProperty(
+        name="Worker Tag",
        items=_get_enum_items,
        description="The set of Workers that can handle tasks of this job",
    )
 
 
 def unregister() -> None:
-    to_del = ((bpy.types.Scene, "flamenco_worker_cluster"),)
+    to_del = ((bpy.types.Scene, "flamenco_worker_tag"),)
    for ob, attr in to_del:
        try:
            delattr(ob, attr)

View File

@@ -65,13 +65,13 @@ type PersistenceService interface {
    RemoveFromJobBlocklist(ctx context.Context, jobUUID, workerUUID, taskType string) error
    ClearJobBlocklist(ctx context.Context, job *persistence.Job) error
 
-    // Worker cluster management.
-    WorkerSetClusters(ctx context.Context, worker *persistence.Worker, clusterUUIDs []string) error
-    CreateWorkerCluster(ctx context.Context, cluster *persistence.WorkerCluster) error
-    FetchWorkerCluster(ctx context.Context, uuid string) (*persistence.WorkerCluster, error)
-    FetchWorkerClusters(ctx context.Context) ([]*persistence.WorkerCluster, error)
-    DeleteWorkerCluster(ctx context.Context, uuid string) error
-    SaveWorkerCluster(ctx context.Context, cluster *persistence.WorkerCluster) error
+    // Worker tag management.
+    WorkerSetTags(ctx context.Context, worker *persistence.Worker, tagUUIDs []string) error
+    CreateWorkerTag(ctx context.Context, tag *persistence.WorkerTag) error
+    FetchWorkerTag(ctx context.Context, uuid string) (*persistence.WorkerTag, error)
+    FetchWorkerTags(ctx context.Context) ([]*persistence.WorkerTag, error)
+    DeleteWorkerTag(ctx context.Context, uuid string) error
+    SaveWorkerTag(ctx context.Context, tag *persistence.WorkerTag) error
 
    // WorkersLeftToRun returns a set of worker UUIDs that can run tasks of the given type on the given job.
    WorkersLeftToRun(ctx context.Context, job *persistence.Job, taskType string) (map[string]bool, error)
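A hedged sketch of how a caller might drive the renamed methods; the method names and signatures are exactly those in the interface above, while the wrapper function, placeholder UUID, and tag name are invented for illustration:

```go
package example

import (
	"context"

	"projects.blender.org/studio/flamenco/internal/manager/persistence" // path assumed
)

// tagStore is the subset of the PersistenceService interface this sketch needs.
type tagStore interface {
	CreateWorkerTag(ctx context.Context, tag *persistence.WorkerTag) error
	WorkerSetTags(ctx context.Context, worker *persistence.Worker, tagUUIDs []string) error
}

// createAndAssign creates a new tag and makes the given worker a member of it.
func createAndAssign(ctx context.Context, db tagStore, worker *persistence.Worker) error {
	tag := persistence.WorkerTag{
		UUID: "00000000-0000-0000-0000-000000000001", // placeholder; normally uuid.New()
		Name: "gpu-nodes",                            // hypothetical tag name
	}
	if err := db.CreateWorkerTag(ctx, &tag); err != nil {
		return err
	}
	return db.WorkerSetTags(ctx, worker, []string{tag.UUID})
}
```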

View File

@@ -618,8 +618,8 @@ func jobDBtoAPI(dbJob *persistence.Job) api.Job {
    if dbJob.DeleteRequestedAt.Valid {
        apiJob.DeleteRequestedAt = &dbJob.DeleteRequestedAt.Time
    }
-    if dbJob.WorkerCluster != nil {
-        apiJob.WorkerCluster = &dbJob.WorkerCluster.UUID
+    if dbJob.WorkerTag != nil {
+        apiJob.WorkerTag = &dbJob.WorkerTag.UUID
    }
 
    return apiJob

View File

@@ -320,19 +320,19 @@ func TestSubmitJobWithShamanCheckoutID(t *testing.T) {
    assert.NoError(t, err)
 }
 
-func TestSubmitJobWithWorkerCluster(t *testing.T) {
+func TestSubmitJobWithWorkerTag(t *testing.T) {
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
 
    mf := newMockedFlamenco(mockCtrl)
    worker := testWorker()
 
-    workerClusterUUID := "04435762-9dc8-4f13-80b7-643a6fa5b6fd"
-    cluster := persistence.WorkerCluster{
+    workerTagUUID := "04435762-9dc8-4f13-80b7-643a6fa5b6fd"
+    tag := persistence.WorkerTag{
        Model:       persistence.Model{ID: 47},
-        UUID:        workerClusterUUID,
-        Name:        "first cluster",
-        Description: "my first cluster",
+        UUID:        workerTagUUID,
+        Name:        "first tag",
+        Description: "my first tag",
    }
 
    submittedJob := api.SubmittedJob{
@@ -340,7 +340,7 @@ func TestSubmitJobWithWorkerCluster(t *testing.T) {
        Type:              "test",
        Priority:          50,
        SubmitterPlatform: worker.Platform,
-        WorkerCluster:     &workerClusterUUID,
+        WorkerTag:         &workerTagUUID,
    }
 
    mf.expectConvertTwoWayVariables(t,
@@ -352,7 +352,7 @@ func TestSubmitJobWithWorkerCluster(t *testing.T) {
    // Expect the job compiler to be called.
    authoredJob := job_compilers.AuthoredJob{
        JobID:         "afc47568-bd9d-4368-8016-e91d945db36d",
-        WorkerClusterUUID: workerClusterUUID,
+        WorkerTagUUID: workerTagUUID,
 
        Name:    submittedJob.Name,
        JobType: submittedJob.Type,
@@ -382,8 +382,8 @@ func TestSubmitJobWithWorkerCluster(t *testing.T) {
        Settings: persistence.StringInterfaceMap{},
        Metadata: persistence.StringStringMap{},
 
-        WorkerClusterID: &cluster.ID,
-        WorkerCluster:   &cluster,
+        WorkerTagID: &tag.ID,
+        WorkerTag:   &tag,
    }
    mf.persistence.EXPECT().FetchJob(gomock.Any(), queuedJob.JobID).Return(&dbJob, nil)

View File

@@ -182,7 +182,7 @@ func (f *Flamenco) RequestWorkerStatusChange(e echo.Context, workerUUID string)
    return e.NoContent(http.StatusNoContent)
 }
 
-func (f *Flamenco) SetWorkerClusters(e echo.Context, workerUUID string) error {
+func (f *Flamenco) SetWorkerTags(e echo.Context, workerUUID string) error {
    ctx := e.Request().Context()
    logger := requestLogger(e)
    logger = logger.With().Str("worker", workerUUID).Logger()
@@ -192,7 +192,7 @@ func (f *Flamenco) SetWorkerClusters(e echo.Context, workerUUID string) error {
    }
 
    // Decode the request body.
-    var change api.WorkerClusterChangeRequest
+    var change api.WorkerTagChangeRequest
    if err := e.Bind(&change); err != nil {
        logger.Warn().Err(err).Msg("bad request received")
        return sendAPIError(e, http.StatusBadRequest, "invalid format")
@@ -210,13 +210,13 @@ func (f *Flamenco) SetWorkerClusters(e echo.Context, workerUUID string) error {
    }
 
    logger = logger.With().
-        Strs("clusters", change.ClusterIds).
+        Strs("tags", change.TagIds).
        Logger()
-    logger.Info().Msg("worker cluster change requested")
+    logger.Info().Msg("worker tag change requested")
 
-    // Store the new cluster assignment.
-    if err := f.persist.WorkerSetClusters(ctx, dbWorker, change.ClusterIds); err != nil {
-        logger.Error().Err(err).Msg("saving worker after cluster change request")
+    // Store the new tag assignment.
+    if err := f.persist.WorkerSetTags(ctx, dbWorker, change.TagIds); err != nil {
+        logger.Error().Err(err).Msg("saving worker after tag change request")
        return sendAPIError(e, http.StatusInternalServerError, "error saving worker: %v", err)
    }
@@ -227,155 +227,155 @@ func (f *Flamenco) SetWorkerClusters(e echo.Context, workerUUID string) error {
 
    return e.NoContent(http.StatusNoContent)
 }
 
-func (f *Flamenco) DeleteWorkerCluster(e echo.Context, clusterUUID string) error {
+func (f *Flamenco) DeleteWorkerTag(e echo.Context, tagUUID string) error {
    ctx := e.Request().Context()
    logger := requestLogger(e)
-    logger = logger.With().Str("cluster", clusterUUID).Logger()
+    logger = logger.With().Str("tag", tagUUID).Logger()
 
-    if !uuid.IsValid(clusterUUID) {
+    if !uuid.IsValid(tagUUID) {
        return sendAPIError(e, http.StatusBadRequest, "not a valid UUID")
    }
 
-    err := f.persist.DeleteWorkerCluster(ctx, clusterUUID)
+    err := f.persist.DeleteWorkerTag(ctx, tagUUID)
    switch {
-    case errors.Is(err, persistence.ErrWorkerClusterNotFound):
-        logger.Debug().Msg("non-existent worker cluster requested")
-        return sendAPIError(e, http.StatusNotFound, "worker cluster %q not found", clusterUUID)
+    case errors.Is(err, persistence.ErrWorkerTagNotFound):
+        logger.Debug().Msg("non-existent worker tag requested")
+        return sendAPIError(e, http.StatusNotFound, "worker tag %q not found", tagUUID)
    case err != nil:
-        logger.Error().Err(err).Msg("deleting worker cluster")
-        return sendAPIError(e, http.StatusInternalServerError, "error deleting worker cluster: %v", err)
+        logger.Error().Err(err).Msg("deleting worker tag")
+        return sendAPIError(e, http.StatusInternalServerError, "error deleting worker tag: %v", err)
    }
 
-    // TODO: SocketIO broadcast of cluster deletion.
+    // TODO: SocketIO broadcast of tag deletion.
 
-    logger.Info().Msg("worker cluster deleted")
+    logger.Info().Msg("worker tag deleted")
    return e.NoContent(http.StatusNoContent)
 }
 
-func (f *Flamenco) FetchWorkerCluster(e echo.Context, clusterUUID string) error {
+func (f *Flamenco) FetchWorkerTag(e echo.Context, tagUUID string) error {
    ctx := e.Request().Context()
    logger := requestLogger(e)
-    logger = logger.With().Str("cluster", clusterUUID).Logger()
+    logger = logger.With().Str("tag", tagUUID).Logger()
 
-    if !uuid.IsValid(clusterUUID) {
+    if !uuid.IsValid(tagUUID) {
        return sendAPIError(e, http.StatusBadRequest, "not a valid UUID")
    }
 
-    cluster, err := f.persist.FetchWorkerCluster(ctx, clusterUUID)
+    tag, err := f.persist.FetchWorkerTag(ctx, tagUUID)
    switch {
-    case errors.Is(err, persistence.ErrWorkerClusterNotFound):
-        logger.Debug().Msg("non-existent worker cluster requested")
-        return sendAPIError(e, http.StatusNotFound, "worker cluster %q not found", clusterUUID)
+    case errors.Is(err, persistence.ErrWorkerTagNotFound):
+        logger.Debug().Msg("non-existent worker tag requested")
+        return sendAPIError(e, http.StatusNotFound, "worker tag %q not found", tagUUID)
    case err != nil:
-        logger.Error().Err(err).Msg("fetching worker cluster")
-        return sendAPIError(e, http.StatusInternalServerError, "error fetching worker cluster: %v", err)
+        logger.Error().Err(err).Msg("fetching worker tag")
+        return sendAPIError(e, http.StatusInternalServerError, "error fetching worker tag: %v", err)
    }
 
-    return e.JSON(http.StatusOK, workerClusterDBtoAPI(*cluster))
+    return e.JSON(http.StatusOK, workerTagDBtoAPI(*tag))
 }
 
-func (f *Flamenco) UpdateWorkerCluster(e echo.Context, clusterUUID string) error {
+func (f *Flamenco) UpdateWorkerTag(e echo.Context, tagUUID string) error {
    ctx := e.Request().Context()
    logger := requestLogger(e)
-    logger = logger.With().Str("cluster", clusterUUID).Logger()
+    logger = logger.With().Str("tag", tagUUID).Logger()
 
-    if !uuid.IsValid(clusterUUID) {
+    if !uuid.IsValid(tagUUID) {
        return sendAPIError(e, http.StatusBadRequest, "not a valid UUID")
    }
 
    // Decode the request body.
-    var update api.UpdateWorkerClusterJSONBody
+    var update api.UpdateWorkerTagJSONBody
    if err := e.Bind(&update); err != nil {
        logger.Warn().Err(err).Msg("bad request received")
        return sendAPIError(e, http.StatusBadRequest, "invalid format")
    }
 
-    dbCluster, err := f.persist.FetchWorkerCluster(ctx, clusterUUID)
+    dbTag, err := f.persist.FetchWorkerTag(ctx, tagUUID)
    switch {
-    case errors.Is(err, persistence.ErrWorkerClusterNotFound):
-        logger.Debug().Msg("non-existent worker cluster requested")
-        return sendAPIError(e, http.StatusNotFound, "worker cluster %q not found", clusterUUID)
+    case errors.Is(err, persistence.ErrWorkerTagNotFound):
+        logger.Debug().Msg("non-existent worker tag requested")
+        return sendAPIError(e, http.StatusNotFound, "worker tag %q not found", tagUUID)
    case err != nil:
-        logger.Error().Err(err).Msg("fetching worker cluster")
-        return sendAPIError(e, http.StatusInternalServerError, "error fetching worker cluster: %v", err)
+        logger.Error().Err(err).Msg("fetching worker tag")
+        return sendAPIError(e, http.StatusInternalServerError, "error fetching worker tag: %v", err)
    }
 
-    // Update the cluster.
-    dbCluster.Name = update.Name
+    // Update the tag.
+    dbTag.Name = update.Name
    if update.Description == nil {
-        dbCluster.Description = ""
+        dbTag.Description = ""
    } else {
-        dbCluster.Description = *update.Description
+        dbTag.Description = *update.Description
    }
 
-    if err := f.persist.SaveWorkerCluster(ctx, dbCluster); err != nil {
-        logger.Error().Err(err).Msg("saving worker cluster")
-        return sendAPIError(e, http.StatusInternalServerError, "error saving worker cluster")
+    if err := f.persist.SaveWorkerTag(ctx, dbTag); err != nil {
+        logger.Error().Err(err).Msg("saving worker tag")
+        return sendAPIError(e, http.StatusInternalServerError, "error saving worker tag")
    }
 
-    // TODO: SocketIO broadcast of cluster update.
+    // TODO: SocketIO broadcast of tag update.
 
    return e.NoContent(http.StatusNoContent)
 }
 
-func (f *Flamenco) FetchWorkerClusters(e echo.Context) error {
+func (f *Flamenco) FetchWorkerTags(e echo.Context) error {
    ctx := e.Request().Context()
    logger := requestLogger(e)
 
-    dbClusters, err := f.persist.FetchWorkerClusters(ctx)
+    dbTags, err := f.persist.FetchWorkerTags(ctx)
    if err != nil {
-        logger.Error().Err(err).Msg("fetching worker clusters")
-        return sendAPIError(e, http.StatusInternalServerError, "error saving worker cluster")
+        logger.Error().Err(err).Msg("fetching worker tags")
+        return sendAPIError(e, http.StatusInternalServerError, "error saving worker tag")
    }
 
-    apiClusters := []api.WorkerCluster{}
-    for _, dbCluster := range dbClusters {
-        apiCluster := workerClusterDBtoAPI(*dbCluster)
-        apiClusters = append(apiClusters, apiCluster)
+    apiTags := []api.WorkerTag{}
+    for _, dbTag := range dbTags {
+        apiTag := workerTagDBtoAPI(*dbTag)
+        apiTags = append(apiTags, apiTag)
    }
 
-    clusterList := api.WorkerClusterList{
-        Clusters: &apiClusters,
+    tagList := api.WorkerTagList{
+        Tags: &apiTags,
    }
-    return e.JSON(http.StatusOK, &clusterList)
+    return e.JSON(http.StatusOK, &tagList)
 }
 
-func (f *Flamenco) CreateWorkerCluster(e echo.Context) error {
+func (f *Flamenco) CreateWorkerTag(e echo.Context) error {
    ctx := e.Request().Context()
    logger := requestLogger(e)
 
    // Decode the request body.
-    var apiCluster api.CreateWorkerClusterJSONBody
-    if err := e.Bind(&apiCluster); err != nil {
+    var apiTag api.CreateWorkerTagJSONBody
+    if err := e.Bind(&apiTag); err != nil {
        logger.Warn().Err(err).Msg("bad request received")
        return sendAPIError(e, http.StatusBadRequest, "invalid format")
    }
 
    // Convert to persistence layer model.
-    var clusterUUID string
-    if apiCluster.Id != nil && *apiCluster.Id != "" {
-        clusterUUID = *apiCluster.Id
+    var tagUUID string
+    if apiTag.Id != nil && *apiTag.Id != "" {
+        tagUUID = *apiTag.Id
    } else {
-        clusterUUID = uuid.New()
+        tagUUID = uuid.New()
    }
 
-    dbCluster := persistence.WorkerCluster{
-        UUID: clusterUUID,
-        Name: apiCluster.Name,
+    dbTag := persistence.WorkerTag{
+        UUID: tagUUID,
+        Name: apiTag.Name,
    }
-    if apiCluster.Description != nil {
-        dbCluster.Description = *apiCluster.Description
+    if apiTag.Description != nil {
+        dbTag.Description = *apiTag.Description
    }
 
    // Store in the database.
-    if err := f.persist.CreateWorkerCluster(ctx, &dbCluster); err != nil {
-        logger.Error().Err(err).Msg("creating worker cluster")
-        return sendAPIError(e, http.StatusInternalServerError, "error creating worker cluster")
+    if err := f.persist.CreateWorkerTag(ctx, &dbTag); err != nil {
+        logger.Error().Err(err).Msg("creating worker tag")
+        return sendAPIError(e, http.StatusInternalServerError, "error creating worker tag")
    }
 
-    // TODO: SocketIO broadcast of cluster creation.
+    // TODO: SocketIO broadcast of tag creation.
 
-    return e.JSON(http.StatusOK, workerClusterDBtoAPI(dbCluster))
+    return e.JSON(http.StatusOK, workerTagDBtoAPI(dbTag))
 }
 
 func workerSummary(w persistence.Worker) api.WorkerSummary {
@@ -407,26 +407,26 @@ func workerDBtoAPI(w persistence.Worker) api.Worker {
        SupportedTaskTypes: w.TaskTypes(),
    }
 
-    if len(w.Clusters) > 0 {
-        clusters := []api.WorkerCluster{}
-        for i := range w.Clusters {
-            clusters = append(clusters, workerClusterDBtoAPI(*w.Clusters[i]))
+    if len(w.Tags) > 0 {
+        tags := []api.WorkerTag{}
+        for i := range w.Tags {
+            tags = append(tags, workerTagDBtoAPI(*w.Tags[i]))
        }
-        apiWorker.Clusters = &clusters
+        apiWorker.Tags = &tags
    }
 
    return apiWorker
 }
 
-func workerClusterDBtoAPI(wc persistence.WorkerCluster) api.WorkerCluster {
+func workerTagDBtoAPI(wc persistence.WorkerTag) api.WorkerTag {
    uuid := wc.UUID // Take a copy for safety.
 
-    apiCluster := api.WorkerCluster{
+    apiTag := api.WorkerTag{
        Id:   &uuid,
        Name: wc.Name,
    }
    if len(wc.Description) > 0 {
-        apiCluster.Description = &wc.Description
+        apiTag.Description = &wc.Description
    }
-    return apiCluster
+    return apiTag
 }

View File

@@ -262,58 +262,58 @@ func TestRequestWorkerStatusChangeRevert(t *testing.T) {
    assertResponseNoContent(t, echo)
 }
 
-func TestWorkerClusterCRUDHappyFlow(t *testing.T) {
+func TestWorkerTagCRUDHappyFlow(t *testing.T) {
    mockCtrl := gomock.NewController(t)
    defer mockCtrl.Finish()
 
    mf := newMockedFlamenco(mockCtrl)
 
-    // Create a cluster.
+    // Create a tag.
    UUID := "18d9234e-5135-458f-a1ba-a350c3d4e837"
-    apiCluster := api.WorkerCluster{
+    apiTag := api.WorkerTag{
        Id:          &UUID,
        Name:        "ʻO nā manu ʻino",
        Description: ptr("Ke aloha"),
    }
-    expectDBCluster := persistence.WorkerCluster{
+    expectDBTag := persistence.WorkerTag{
        UUID:        UUID,
-        Name:        apiCluster.Name,
-        Description: *apiCluster.Description,
+        Name:        apiTag.Name,
+        Description: *apiTag.Description,
    }
-    mf.persistence.EXPECT().CreateWorkerCluster(gomock.Any(), &expectDBCluster)
-    // TODO: expect SocketIO broadcast of the cluster creation.
-    echo := mf.prepareMockedJSONRequest(apiCluster)
-    require.NoError(t, mf.flamenco.CreateWorkerCluster(echo))
-    assertResponseJSON(t, echo, http.StatusOK, &apiCluster)
+    mf.persistence.EXPECT().CreateWorkerTag(gomock.Any(), &expectDBTag)
+    // TODO: expect SocketIO broadcast of the tag creation.
+    echo := mf.prepareMockedJSONRequest(apiTag)
+    require.NoError(t, mf.flamenco.CreateWorkerTag(echo))
+    assertResponseJSON(t, echo, http.StatusOK, &apiTag)
 
-    // Fetch the cluster
-    mf.persistence.EXPECT().FetchWorkerCluster(gomock.Any(), UUID).Return(&expectDBCluster, nil)
+    // Fetch the tag
+    mf.persistence.EXPECT().FetchWorkerTag(gomock.Any(), UUID).Return(&expectDBTag, nil)
    echo = mf.prepareMockedRequest(nil)
-    require.NoError(t, mf.flamenco.FetchWorkerCluster(echo, UUID))
-    assertResponseJSON(t, echo, http.StatusOK, &apiCluster)
+    require.NoError(t, mf.flamenco.FetchWorkerTag(echo, UUID))
+    assertResponseJSON(t, echo, http.StatusOK, &apiTag)
 
    // Update & save.
    newUUID := "60442762-83d3-4fc3-bf75-6ab5799cdbaa"
-    newAPICluster := api.WorkerCluster{
+    newAPITag := api.WorkerTag{
        Id:   &newUUID, // Intentionally change the UUID. This should just be ignored.
        Name: "updated name",
    }
-    expectNewDBCluster := persistence.WorkerCluster{
+    expectNewDBTag := persistence.WorkerTag{
        UUID:        UUID,
-        Name:        newAPICluster.Name,
+        Name:        newAPITag.Name,
        Description: "",
    }
-    // TODO: expect SocketIO broadcast of the cluster update.
-    mf.persistence.EXPECT().FetchWorkerCluster(gomock.Any(), UUID).Return(&expectDBCluster, nil)
-    mf.persistence.EXPECT().SaveWorkerCluster(gomock.Any(), &expectNewDBCluster)
-    echo = mf.prepareMockedJSONRequest(newAPICluster)
-    require.NoError(t, mf.flamenco.UpdateWorkerCluster(echo, UUID))
+    // TODO: expect SocketIO broadcast of the tag update.
+    mf.persistence.EXPECT().FetchWorkerTag(gomock.Any(), UUID).Return(&expectDBTag, nil)
+    mf.persistence.EXPECT().SaveWorkerTag(gomock.Any(), &expectNewDBTag)
+    echo = mf.prepareMockedJSONRequest(newAPITag)
+    require.NoError(t, mf.flamenco.UpdateWorkerTag(echo, UUID))
    assertResponseNoContent(t, echo)
 
    // Delete.
-    mf.persistence.EXPECT().DeleteWorkerCluster(gomock.Any(), UUID)
-    // TODO: expect SocketIO broadcast of the cluster deletion.
-    echo = mf.prepareMockedJSONRequest(newAPICluster)
-    require.NoError(t, mf.flamenco.DeleteWorkerCluster(echo, UUID))
+    mf.persistence.EXPECT().DeleteWorkerTag(gomock.Any(), UUID)
+    // TODO: expect SocketIO broadcast of the tag deletion.
+    echo = mf.prepareMockedJSONRequest(newAPITag)
+    require.NoError(t, mf.flamenco.DeleteWorkerTag(echo, UUID))
    assertResponseNoContent(t, echo)
 }

View File

@@ -21,7 +21,7 @@ type Author struct {
 
 type AuthoredJob struct {
    JobID string
-    WorkerClusterUUID string
+    WorkerTagUUID string
 
    Name    string
    JobType string

View File

@@ -127,8 +127,8 @@ func (s *Service) Compile(ctx context.Context, sj api.SubmittedJob) (*AuthoredJo
        aj.Storage.ShamanCheckoutID = *sj.Storage.ShamanCheckoutId
    }
 
-    if sj.WorkerCluster != nil {
-        aj.WorkerClusterUUID = *sj.WorkerCluster
+    if sj.WorkerTag != nil {
+        aj.WorkerTagUUID = *sj.WorkerTag
    }
 
    compiler, err := vm.getCompileJob()

View File

@@ -50,7 +50,7 @@ func exampleSubmittedJob() api.SubmittedJob {
        Type:          "simple-blender-render",
        Settings:      &settings,
        Metadata:      &metadata,
-        WorkerCluster: ptr("acce9983-e663-4210-b3cc-f7bfa629cb21"),
+        WorkerTag:     ptr("acce9983-e663-4210-b3cc-f7bfa629cb21"),
    }
    return sj
 }
@@ -80,7 +80,7 @@ func TestSimpleBlenderRenderHappy(t *testing.T) {
    // Properties should be copied as-is.
    assert.Equal(t, sj.Name, aj.Name)
-    assert.Equal(t, *sj.WorkerCluster, aj.WorkerClusterUUID)
+    assert.Equal(t, *sj.WorkerTag, aj.WorkerTagUUID)
    assert.Equal(t, sj.Type, aj.JobType)
    assert.Equal(t, sj.Priority, aj.Priority)
    assert.EqualValues(t, sj.Settings.AdditionalProperties, aj.Settings)
@@ -139,7 +139,7 @@ func TestSimpleBlenderRenderHappy(t *testing.T) {
    assert.Equal(t, expectDeps, tVideo.Dependencies)
 }
 
-func TestJobWithoutCluster(t *testing.T) {
+func TestJobWithoutTag(t *testing.T) {
    c := mockedClock(t)
 
    s, err := Load(c)
@@ -151,20 +151,20 @@ func TestJobWithoutCluster(t *testing.T) {
    sj := exampleSubmittedJob()
 
-    // Try with nil WorkerCluster.
+    // Try with nil WorkerTag.
    {
-        sj.WorkerCluster = nil
+        sj.WorkerTag = nil
        aj, err := s.Compile(ctx, sj)
        require.NoError(t, err)
-        assert.Zero(t, aj.WorkerClusterUUID)
+        assert.Zero(t, aj.WorkerTagUUID)
    }
 
-    // Try with empty WorkerCluster.
+    // Try with empty WorkerTag.
    {
-        sj.WorkerCluster = ptr("")
+        sj.WorkerTag = ptr("")
        aj, err := s.Compile(ctx, sj)
        require.NoError(t, err)
-        assert.Zero(t, aj.WorkerClusterUUID)
+        assert.Zero(t, aj.WorkerTagUUID)
    }
 }

View File

@@ -16,7 +16,7 @@ func (db *DB) migrate() error {
        &Task{},
        &TaskFailure{},
        &Worker{},
-        &WorkerCluster{},
+        &WorkerTag{},
    )
    if err != nil {
        return fmt.Errorf("failed to automigrate database: %v", err)

View File

@@ -12,7 +12,7 @@ var (
    ErrJobNotFound    = PersistenceError{Message: "job not found", Err: gorm.ErrRecordNotFound}
    ErrTaskNotFound   = PersistenceError{Message: "task not found", Err: gorm.ErrRecordNotFound}
    ErrWorkerNotFound = PersistenceError{Message: "worker not found", Err: gorm.ErrRecordNotFound}
-    ErrWorkerClusterNotFound = PersistenceError{Message: "worker cluster not found", Err: gorm.ErrRecordNotFound}
+    ErrWorkerTagNotFound = PersistenceError{Message: "worker tag not found", Err: gorm.ErrRecordNotFound}
 )
 
 type PersistenceError struct {
@@ -40,8 +40,8 @@ func workerError(errorToWrap error, message string, msgArgs ...interface{}) erro
    return wrapError(translateGormWorkerError(errorToWrap), message, msgArgs...)
 }
 
-func workerClusterError(errorToWrap error, message string, msgArgs ...interface{}) error {
-    return wrapError(translateGormWorkerClusterError(errorToWrap), message, msgArgs...)
+func workerTagError(errorToWrap error, message string, msgArgs ...interface{}) error {
+    return wrapError(translateGormWorkerTagError(errorToWrap), message, msgArgs...)
 }
 
 func wrapError(errorToWrap error, message string, format ...interface{}) error {
@@ -86,11 +86,11 @@ func translateGormWorkerError(gormError error) error {
    return gormError
 }
 
-// translateGormWorkerClusterError translates a Gorm error to a persistence layer error.
+// translateGormWorkerTagError translates a Gorm error to a persistence layer error.
 // This helps to keep Gorm as "implementation detail" of the persistence layer.
-func translateGormWorkerClusterError(gormError error) error {
+func translateGormWorkerTagError(gormError error) error {
    if errors.Is(gormError, gorm.ErrRecordNotFound) {
-        return ErrWorkerClusterNotFound
+        return ErrWorkerTagNotFound
    }
    return gormError
 }

View File

@@ -36,8 +36,8 @@ type Job struct {
    Storage JobStorageInfo `gorm:"embedded;embeddedPrefix:storage_"`
 
-    WorkerClusterID *uint
-    WorkerCluster   *WorkerCluster `gorm:"foreignkey:WorkerClusterID;references:ID;constraint:OnDelete:SET NULL"`
+    WorkerTagID *uint
+    WorkerTag   *WorkerTag `gorm:"foreignkey:WorkerTagID;references:ID;constraint:OnDelete:SET NULL"`
 }
 
 type StringInterfaceMap map[string]interface{}
@@ -148,14 +148,14 @@ func (db *DB) StoreAuthoredJob(ctx context.Context, authoredJob job_compilers.Au
        },
    }
 
-    // Find and assign the worker cluster.
-    if authoredJob.WorkerClusterUUID != "" {
-        dbCluster, err := fetchWorkerCluster(tx, authoredJob.WorkerClusterUUID)
+    // Find and assign the worker tag.
+    if authoredJob.WorkerTagUUID != "" {
+        dbTag, err := fetchWorkerTag(tx, authoredJob.WorkerTagUUID)
        if err != nil {
            return err
        }
-        dbJob.WorkerClusterID = &dbCluster.ID
-        dbJob.WorkerCluster = dbCluster
+        dbJob.WorkerTagID = &dbTag.ID
+        dbJob.WorkerTag = dbTag
    }
 
    if err := tx.Create(&dbJob).Error; err != nil {
@@ -233,7 +233,7 @@ func (db *DB) FetchJob(ctx context.Context, jobUUID string) (*Job, error) {
    dbJob := Job{}
    findResult := db.gormDB.WithContext(ctx).
        Limit(1).
-        Preload("WorkerCluster").
+        Preload("WorkerTag").
        Find(&dbJob, "uuid = ?", jobUUID)
    if findResult.Error != nil {
        return nil, jobError(findResult.Error, "fetching job")
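Roughly the schema the renamed model maps to (illustrative SQL only, inferred from the GORM tags above and the `worker_tag_membership` table used by the scheduler queries below; not part of this PR):

```go
package example

// illustrativeSchema is an assumption-laden sketch of what GORM's
// automigration produces for the renamed models: a nullable tag foreign
// key on jobs (ON DELETE SET NULL), plus a many-to-many membership table.
const illustrativeSchema = `
ALTER TABLE jobs ADD COLUMN worker_tag_id INTEGER
    REFERENCES worker_tags (id) ON DELETE SET NULL;

CREATE TABLE worker_tag_membership (
    worker_id     INTEGER NOT NULL REFERENCES workers (id) ON DELETE CASCADE,
    worker_tag_id INTEGER NOT NULL REFERENCES worker_tags (id) ON DELETE CASCADE
);`
```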

View File

@@ -108,16 +108,16 @@ func (db *DB) WorkersLeftToRun(ctx context.Context, job *Job, taskType string) (
        Select("uuid").
        Where("id not in (?)", blockedWorkers)
 
-    if job.WorkerClusterID == nil {
+    if job.WorkerTagID == nil {
        // Count all workers, so no extra restrictions are necessary.
    } else {
-        // Only count workers in the job's cluster.
-        jobCluster := db.gormDB.
-            Table("worker_cluster_membership").
+        // Only count workers in the job's tag.
+        jobTag := db.gormDB.
+            Table("worker_tag_membership").
            Select("worker_id").
-            Where("worker_cluster_id = ?", *job.WorkerClusterID)
+            Where("worker_tag_id = ?", *job.WorkerTagID)
        query = query.
-            Where("id in (?)", jobCluster)
+            Where("id in (?)", jobTag)
    }
 
    // Find the workers NOT blocked.
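When the job does have a tag, the nested GORM query above boils down to SQL along these lines (an illustrative reading of the query builder calls, not output captured from the PR):

```go
package example

// workersLeftToRunSQL approximates the query built above for a tagged job:
// unblocked workers that are members of the job's tag.
const workersLeftToRunSQL = `
SELECT uuid FROM workers
WHERE id NOT IN (/* workers blocked on this job */)
  AND id IN (
      SELECT worker_id FROM worker_tag_membership
      WHERE worker_tag_id = ?  -- the job's WorkerTagID
  );`
```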

View File

@@ -126,14 +126,14 @@ func TestWorkersLeftToRun(t *testing.T) {
    worker1 := createWorker(ctx, t, db)
    worker2 := createWorkerFrom(ctx, t, db, *worker1)
 
-    // Create one worker cluster. It will not be used by this job, but one of the
+    // Create one worker tag. It will not be used by this job, but one of the
    // workers will be assigned to it. It can get this job's tasks, though.
-    // Because the job is clusterless, it can be run by all.
-    cluster1 := WorkerCluster{UUID: "11157623-4b14-4801-bee2-271dddab6309", Name: "Cluster 1"}
-    require.NoError(t, db.CreateWorkerCluster(ctx, &cluster1))
+    // Because the job is tagless, it can be run by all.
+    tag1 := WorkerTag{UUID: "11157623-4b14-4801-bee2-271dddab6309", Name: "Tag 1"}
+    require.NoError(t, db.CreateWorkerTag(ctx, &tag1))
    workerC1 := createWorker(ctx, t, db, func(w *Worker) {
        w.UUID = "c1c1c1c1-0000-1111-2222-333333333333"
-        w.Clusters = []*WorkerCluster{&cluster1}
+        w.Tags = []*WorkerTag{&tag1}
    })
 
    uuidMap := func(workers ...*Worker) map[string]bool {
@@ -172,43 +172,43 @@
    }
 }
 
-func TestWorkersLeftToRunWithClusters(t *testing.T) {
+func TestWorkersLeftToRunWithTags(t *testing.T) {
    ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
    defer cancel()
 
-    // Create clusters.
-    cluster1 := WorkerCluster{UUID: "11157623-4b14-4801-bee2-271dddab6309", Name: "Cluster 1"}
-    cluster2 := WorkerCluster{UUID: "22257623-4b14-4801-bee2-271dddab6309", Name: "Cluster 2"}
-    cluster3 := WorkerCluster{UUID: "33357623-4b14-4801-bee2-271dddab6309", Name: "Cluster 3"}
-    require.NoError(t, db.CreateWorkerCluster(ctx, &cluster1))
-    require.NoError(t, db.CreateWorkerCluster(ctx, &cluster2))
-    require.NoError(t, db.CreateWorkerCluster(ctx, &cluster3))
+    // Create tags.
+    tag1 := WorkerTag{UUID: "11157623-4b14-4801-bee2-271dddab6309", Name: "Tag 1"}
+    tag2 := WorkerTag{UUID: "22257623-4b14-4801-bee2-271dddab6309", Name: "Tag 2"}
+    tag3 := WorkerTag{UUID: "33357623-4b14-4801-bee2-271dddab6309", Name: "Tag 3"}
+    require.NoError(t, db.CreateWorkerTag(ctx, &tag1))
+    require.NoError(t, db.CreateWorkerTag(ctx, &tag2))
+    require.NoError(t, db.CreateWorkerTag(ctx, &tag3))
 
-    // Create a job in cluster1.
+    // Create a job in tag1.
    authoredJob := createTestAuthoredJobWithTasks()
-    authoredJob.WorkerClusterUUID = cluster1.UUID
+    authoredJob.WorkerTagUUID = tag1.UUID
    job := persistAuthoredJob(t, ctx, db, authoredJob)
 
-    // Clusters 1 + 3
+    // Tags 1 + 3
    workerC13 := createWorker(ctx, t, db, func(w *Worker) {
        w.UUID = "c13c1313-0000-1111-2222-333333333333"
-        w.Clusters = []*WorkerCluster{&cluster1, &cluster3}
+        w.Tags = []*WorkerTag{&tag1, &tag3}
    })
-    // Cluster 1
+    // Tag 1
    workerC1 := createWorker(ctx, t, db, func(w *Worker) {
        w.UUID = "c1c1c1c1-0000-1111-2222-333333333333"
-        w.Clusters = []*WorkerCluster{&cluster1}
+        w.Tags = []*WorkerTag{&tag1}
    })
-    // Cluster 2 worker, this one should never appear.
+    // Tag 2 worker, this one should never appear.
    createWorker(ctx, t, db, func(w *Worker) {
        w.UUID = "c2c2c2c2-0000-1111-2222-333333333333"
-        w.Clusters = []*WorkerCluster{&cluster2}
+        w.Tags = []*WorkerTag{&tag2}
    })
-    // No clusters, so should be able to run only clusterless jobs. Which is none
+    // No tags, so should be able to run only tagless jobs. Which is none
    // in this test.
    createWorker(ctx, t, db, func(w *Worker) {
        w.UUID = "00000000-0000-1111-2222-333333333333"
-        w.Clusters = nil
+        w.Tags = nil
    })
 
    uuidMap := func(workers ...*Worker) map[string]bool {
@@ -219,7 +219,7 @@ func TestWorkersLeftToRunWithClusters(t *testing.T) {
        return theMap
    }
 
-    // All Cluster 1 workers, no blocklist.
+    // All Tag 1 workers, no blocklist.
    left, err := db.WorkersLeftToRun(ctx, job, "blender")
    require.NoError(t, err)
    assert.Equal(t, uuidMap(workerC13, workerC1), left)
@@ -230,7 +230,7 @@ func TestWorkersLeftToRunWithClusters(t *testing.T) {
    require.NoError(t, err)
    assert.Equal(t, uuidMap(workerC13), left)
 
-    // All clustered workers blocked.
+    // All tagged workers blocked.
    _ = db.AddWorkerToJobBlocklist(ctx, job, workerC13, "blender")
    left, err = db.WorkersLeftToRun(ctx, job, "blender")
    assert.NoError(t, err)

View File

@@ -64,7 +64,7 @@ func (db *DB) QueryJobs(ctx context.Context, apiQ api.JobsQuery) ([]*Job, error)
        }
    }
 
-    q.Preload("Cluster")
+    q.Preload("Tag")
 
    result := []*Job{}
    tx := q.Scan(&result)

View File

@@ -757,7 +757,7 @@ func createWorker(ctx context.Context, t *testing.T, db *DB, updaters ...func(*W
        Software:           "3.0",
        Status:             api.WorkerStatusAwake,
        SupportedTaskTypes: "blender,ffmpeg,file-management",
-        Clusters:           nil,
+        Tags:               nil,
    }
 
    for _, updater := range updaters {

View File

@@ -26,7 +26,7 @@ func (db *DB) ScheduleTask(ctx context.Context, w *Worker) (*Task, error) {
    logger := log.With().Str("worker", w.UUID).Logger()
    logger.Trace().Msg("finding task for worker")
 
-    hasWorkerClusters, err := db.HasWorkerClusters(ctx)
+    hasWorkerTags, err := db.HasWorkerTags(ctx)
    if err != nil {
        return nil, err
    }
@@ -37,7 +37,7 @@ func (db *DB) ScheduleTask(ctx context.Context, w *Worker) (*Task, error) {
    var task *Task
    txErr := db.gormDB.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
        var err error
-        task, err = findTaskForWorker(tx, w, hasWorkerClusters)
+        task, err = findTaskForWorker(tx, w, hasWorkerTags)
        if err != nil {
            if isDatabaseBusyError(err) {
                logger.Trace().Err(err).Msg("database busy while finding task for worker")
@@ -84,7 +84,7 @@ func (db *DB) ScheduleTask(ctx context.Context, w *Worker) (*Task, error) {
    return task, nil
 }
 
-func findTaskForWorker(tx *gorm.DB, w *Worker, checkWorkerClusters bool) (*Task, error) {
+func findTaskForWorker(tx *gorm.DB, w *Worker, checkWorkerTags bool) (*Task, error) {
    task := Task{}
 
    // If a task is already active & assigned to this worker, return just that.
@@ -129,21 +129,21 @@ func findTaskForWorker(tx *gorm.DB, w *Worker, checkWorkerClusters bool) (*Task,
        Where("TF.worker_id is NULL").                         // Not failed before
        Where("tasks.type not in (?)", blockedTaskTypesQuery)  // Non-blocklisted
 
-    if checkWorkerClusters {
-        // The system has one or more clusters, so limit the available jobs to those
-        // that have no cluster, or overlap with the Worker's clusters.
-        if len(w.Clusters) == 0 {
-            // Clusterless workers only get clusterless jobs.
+    if checkWorkerTags {
+        // The system has one or more tags, so limit the available jobs to those
+        // that have no tag, or overlap with the Worker's tags.
+        if len(w.Tags) == 0 {
+            // Tagless workers only get tagless jobs.
            findTaskQuery = findTaskQuery.
-                Where("jobs.worker_cluster_id is NULL")
+                Where("jobs.worker_tag_id is NULL")
        } else {
-            // Clustered workers get clusterless jobs AND jobs of their own clusters.
-            clusterIDs := []uint{}
-            for _, cluster := range w.Clusters {
-                clusterIDs = append(clusterIDs, cluster.ID)
+            // Tagged workers get tagless jobs AND jobs of their own tags.
+            tagIDs := []uint{}
+            for _, tag := range w.Tags {
+                tagIDs = append(tagIDs, tag.ID)
            }
            findTaskQuery = findTaskQuery.
-                Where("jobs.worker_cluster_id is NULL or worker_cluster_id in ?", clusterIDs)
+                Where("jobs.worker_tag_id is NULL or worker_tag_id in ?", tagIDs)
        }
    }
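The two branches above add one of these WHERE clauses to the task query (illustrative SQL restating the filters): a tagless worker only sees tagless jobs, while a tagged worker sees tagless jobs plus jobs in any of its own tags.

```go
package example

// The filters added by findTaskForWorker, spelled out as SQL.
const (
	taglessWorkerFilter = `jobs.worker_tag_id IS NULL`
	taggedWorkerFilter  = `jobs.worker_tag_id IS NULL OR worker_tag_id IN (?)` // ? = the worker's tag IDs
)
```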

View File

@@ -291,87 +291,87 @@ func TestPreviouslyFailed(t *testing.T) {
    assert.Equal(t, att2.Name, task.Name, "the second task should have been chosen")
 }
 
-func TestWorkerClusterJobWithCluster(t *testing.T) {
+func TestWorkerTagJobWithTag(t *testing.T) {
    ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
    defer cancel()
 
-    // Create worker clusters:
-    cluster1 := WorkerCluster{UUID: "f0157623-4b14-4801-bee2-271dddab6309", Name: "Cluster 1"}
-    cluster2 := WorkerCluster{UUID: "2f71dba1-cf92-4752-8386-f5926affabd5", Name: "Cluster 2"}
-    require.NoError(t, db.CreateWorkerCluster(ctx, &cluster1))
-    require.NoError(t, db.CreateWorkerCluster(ctx, &cluster2))
+    // Create worker tags:
+    tag1 := WorkerTag{UUID: "f0157623-4b14-4801-bee2-271dddab6309", Name: "Tag 1"}
+    tag2 := WorkerTag{UUID: "2f71dba1-cf92-4752-8386-f5926affabd5", Name: "Tag 2"}
+    require.NoError(t, db.CreateWorkerTag(ctx, &tag1))
+    require.NoError(t, db.CreateWorkerTag(ctx, &tag2))
 
-    // Create a worker in cluster1:
+    // Create a worker in tag1:
    workerC := linuxWorker(t, db, func(w *Worker) {
-        w.Clusters = []*WorkerCluster{&cluster1}
+        w.Tags = []*WorkerTag{&tag1}
    })
 
-    // Create a worker without cluster:
+    // Create a worker without tag:
    workerNC := linuxWorker(t, db, func(w *Worker) {
        w.UUID = "c53f8f68-4149-4790-991c-ba73a326551e"
-        w.Clusters = nil
+        w.Tags = nil
    })
 
-    { // Test job with different cluster:
+    { // Test job with different tag:
        authTask := authorTestTask("the task", "blender")
        job := authorTestJob("499cf0f8-e83d-4cb1-837a-df94789d07db", "simple-blender-render", authTask)
-        job.WorkerClusterUUID = cluster2.UUID
+        job.WorkerTagUUID = tag2.UUID
        constructTestJob(ctx, t, db, job)
 
        task, err := db.ScheduleTask(ctx, &workerC)
        require.NoError(t, err)
-        assert.Nil(t, task, "job with different cluster should not be scheduled")
+        assert.Nil(t, task, "job with different tag should not be scheduled")
    }
 
-    { // Test job with matching cluster:
+    { // Test job with matching tag:
        authTask := authorTestTask("the task", "blender")
        job := authorTestJob("5d4c2321-0bb7-4c13-a9dd-32a2c0cd156e", "simple-blender-render", authTask)
-        job.WorkerClusterUUID = cluster1.UUID
+        job.WorkerTagUUID = tag1.UUID
        constructTestJob(ctx, t, db, job)
 
        task, err := db.ScheduleTask(ctx, &workerC)
        require.NoError(t, err)
-        require.NotNil(t, task, "job with matching cluster should be scheduled")
+        require.NotNil(t, task, "job with matching tag should be scheduled")
        assert.Equal(t, authTask.UUID, task.UUID)
 
        task, err = db.ScheduleTask(ctx, &workerNC)
        require.NoError(t, err)
-        assert.Nil(t, task, "job with cluster should not be scheduled for worker without cluster")
+        assert.Nil(t, task, "job with tag should not be scheduled for worker without tag")
    }
 }
 
-func TestWorkerClusterJobWithoutCluster(t *testing.T) {
+func TestWorkerTagJobWithoutTag(t *testing.T) {
    ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
    defer cancel()
 
-    // Create worker cluster:
-    cluster1 := WorkerCluster{UUID: "f0157623-4b14-4801-bee2-271dddab6309", Name: "Cluster 1"}
-    require.NoError(t, db.CreateWorkerCluster(ctx, &cluster1))
+    // Create worker tag:
+    tag1 := WorkerTag{UUID: "f0157623-4b14-4801-bee2-271dddab6309", Name: "Tag 1"}
+    require.NoError(t, db.CreateWorkerTag(ctx, &tag1))
 
-    // Create a worker in cluster1:
+    // Create a worker in tag1:
    workerC := linuxWorker(t, db, func(w *Worker) {
-        w.Clusters = []*WorkerCluster{&cluster1}
+        w.Tags = []*WorkerTag{&tag1}
    })
 
-    // Create a worker without cluster:
+    // Create a worker without tag:
    workerNC := linuxWorker(t, db, func(w *Worker) {
        w.UUID = "c53f8f68-4149-4790-991c-ba73a326551e"
-        w.Clusters = nil
+        w.Tags = nil
    })
 
-    // Test cluster-less job:
+    // Test tag-less job:
    authTask := authorTestTask("the task", "blender")
    job := authorTestJob("b6a1d859-122f-4791-8b78-b943329a9989", "simple-blender-render", authTask)
    constructTestJob(ctx, t, db, job)
 
    task, err := db.ScheduleTask(ctx, &workerC)
    require.NoError(t, err)
-    require.NotNil(t, task, "job without cluster should always be scheduled to worker in some cluster")
+    require.NotNil(t, task, "job without tag should always be scheduled to worker in some tag")
    assert.Equal(t, authTask.UUID, task.UUID)
 
    task, err = db.ScheduleTask(ctx, &workerNC)
    require.NoError(t, err)
-    require.NotNil(t, task, "job without cluster should always be scheduled to worker without cluster")
+    require.NotNil(t, task, "job without tag should always be scheduled to worker without tag")
    assert.Equal(t, authTask.UUID, task.UUID)
 }

View File

@@ -97,7 +97,7 @@ type WorkerTestFixture struct {
    done   func()
 
    worker *Worker
-    cluster *WorkerCluster
+    tag    *WorkerTag
 }
 
 func workerTestFixtures(t *testing.T, testContextTimeout time.Duration) WorkerTestFixture {
@@ -113,14 +113,14 @@ func workerTestFixtures(t *testing.T, testContextTimeout time.Duration) WorkerTe
        SupportedTaskTypes: "blender,ffmpeg,file-management",
    }
 
-    wc := WorkerCluster{
+    wc := WorkerTag{
        UUID:        uuid.New(),
        Name:        "arbejdsklynge",
-        Description: "Worker cluster in Danish",
+        Description: "Worker tag in Danish",
    }
 
    require.NoError(t, db.CreateWorker(ctx, &w))
-    require.NoError(t, db.CreateWorkerCluster(ctx, &wc))
+    require.NoError(t, db.CreateWorkerTag(ctx, &wc))
 
    return WorkerTestFixture{
        db:     db,
@@ -128,6 +128,6 @@ func workerTestFixtures(t *testing.T, testContextTimeout time.Duration) WorkerTe
        done:   cancel,
        worker: &w,
-        cluster: &wc,
+        tag:    &wc,
    }
 }


@@ -1,112 +0,0 @@
package persistence

// SPDX-License-Identifier: GPL-3.0-or-later

import (
	"context"
	"fmt"

	"gorm.io/gorm"
)

type WorkerCluster struct {
	Model
	UUID string `gorm:"type:char(36);default:'';unique;index"`

	Name        string `gorm:"type:varchar(64);default:'';unique"`
	Description string `gorm:"type:varchar(255);default:''"`

	Workers []*Worker `gorm:"many2many:worker_cluster_membership;constraint:OnDelete:CASCADE"`
}

func (db *DB) CreateWorkerCluster(ctx context.Context, wc *WorkerCluster) error {
	if err := db.gormDB.WithContext(ctx).Create(wc).Error; err != nil {
		return fmt.Errorf("creating new worker cluster: %w", err)
	}
	return nil
}

// HasWorkerClusters returns whether there are any clusters defined at all.
func (db *DB) HasWorkerClusters(ctx context.Context) (bool, error) {
	var count int64
	tx := db.gormDB.WithContext(ctx).
		Model(&WorkerCluster{}).
		Count(&count)
	if err := tx.Error; err != nil {
		return false, workerClusterError(err, "counting worker clusters")
	}
	return count > 0, nil
}

func (db *DB) FetchWorkerCluster(ctx context.Context, uuid string) (*WorkerCluster, error) {
	tx := db.gormDB.WithContext(ctx)
	return fetchWorkerCluster(tx, uuid)
}

// fetchWorkerCluster fetches the worker cluster using the given database instance.
func fetchWorkerCluster(gormDB *gorm.DB, uuid string) (*WorkerCluster, error) {
	w := WorkerCluster{}
	tx := gormDB.First(&w, "uuid = ?", uuid)
	if tx.Error != nil {
		return nil, workerClusterError(tx.Error, "fetching worker cluster")
	}
	return &w, nil
}

func (db *DB) SaveWorkerCluster(ctx context.Context, cluster *WorkerCluster) error {
	if err := db.gormDB.WithContext(ctx).Save(cluster).Error; err != nil {
		return workerClusterError(err, "saving worker cluster")
	}
	return nil
}

// DeleteWorkerCluster deletes the given cluster, after unassigning all workers from it.
func (db *DB) DeleteWorkerCluster(ctx context.Context, uuid string) error {
	tx := db.gormDB.WithContext(ctx).
		Where("uuid = ?", uuid).
		Delete(&WorkerCluster{})
	if tx.Error != nil {
		return workerClusterError(tx.Error, "deleting worker cluster")
	}
	if tx.RowsAffected == 0 {
		return ErrWorkerClusterNotFound
	}
	return nil
}

func (db *DB) FetchWorkerClusters(ctx context.Context) ([]*WorkerCluster, error) {
	clusters := make([]*WorkerCluster, 0)
	tx := db.gormDB.WithContext(ctx).Model(&WorkerCluster{}).Scan(&clusters)
	if tx.Error != nil {
		return nil, workerClusterError(tx.Error, "fetching all worker clusters")
	}
	return clusters, nil
}

func (db *DB) fetchWorkerClustersWithUUID(ctx context.Context, clusterUUIDs []string) ([]*WorkerCluster, error) {
	clusters := make([]*WorkerCluster, 0)
	tx := db.gormDB.WithContext(ctx).
		Model(&WorkerCluster{}).
		Where("uuid in ?", clusterUUIDs).
		Scan(&clusters)
	if tx.Error != nil {
		return nil, workerClusterError(tx.Error, "fetching all worker clusters")
	}
	return clusters, nil
}

func (db *DB) WorkerSetClusters(ctx context.Context, worker *Worker, clusterUUIDs []string) error {
	clusters, err := db.fetchWorkerClustersWithUUID(ctx, clusterUUIDs)
	if err != nil {
		return workerClusterError(err, "fetching worker clusters")
	}

	err = db.gormDB.WithContext(ctx).
		Model(worker).
		Association("Clusters").
		Replace(clusters)
	if err != nil {
		return workerClusterError(err, "updating worker clusters")
	}
	return nil
}


@@ -1,165 +0,0 @@
package persistence

// SPDX-License-Identifier: GPL-3.0-or-later

import (
	"testing"
	"time"

	"git.blender.org/flamenco/internal/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCreateFetchCluster(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	// Test fetching non-existent cluster
	fetchedCluster, err := f.db.FetchWorkerCluster(f.ctx, "7ee21bc8-ff1a-42d2-a6b6-cc4b529b189f")
	assert.ErrorIs(t, err, ErrWorkerClusterNotFound)
	assert.Nil(t, fetchedCluster)

	// New cluster creation is already done in the workerTestFixtures() call.
	assert.NotNil(t, f.cluster)

	fetchedCluster, err = f.db.FetchWorkerCluster(f.ctx, f.cluster.UUID)
	require.NoError(t, err)
	assert.NotNil(t, fetchedCluster)

	// Test contents of fetched cluster.
	assert.Equal(t, f.cluster.UUID, fetchedCluster.UUID)
	assert.Equal(t, f.cluster.Name, fetchedCluster.Name)
	assert.Equal(t, f.cluster.Description, fetchedCluster.Description)
	assert.Zero(t, fetchedCluster.Workers)
}

func TestFetchDeleteClusters(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	// Single cluster was created by fixture.
	has, err := f.db.HasWorkerClusters(f.ctx)
	require.NoError(t, err)
	assert.True(t, has, "expecting HasWorkerClusters to return true")

	secondCluster := WorkerCluster{
		UUID:        uuid.New(),
		Name:        "arbeiderscluster",
		Description: "Worker cluster in Dutch",
	}
	require.NoError(t, f.db.CreateWorkerCluster(f.ctx, &secondCluster))

	allClusters, err := f.db.FetchWorkerClusters(f.ctx)
	require.NoError(t, err)
	require.Len(t, allClusters, 2)

	var allClusterIDs [2]string
	for idx := range allClusters {
		allClusterIDs[idx] = allClusters[idx].UUID
	}
	assert.Contains(t, allClusterIDs, f.cluster.UUID)
	assert.Contains(t, allClusterIDs, secondCluster.UUID)

	has, err = f.db.HasWorkerClusters(f.ctx)
	require.NoError(t, err)
	assert.True(t, has, "expecting HasWorkerClusters to return true")

	// Test deleting the 2nd cluster.
	require.NoError(t, f.db.DeleteWorkerCluster(f.ctx, secondCluster.UUID))
	allClusters, err = f.db.FetchWorkerClusters(f.ctx)
	require.NoError(t, err)
	require.Len(t, allClusters, 1)
	assert.Equal(t, f.cluster.UUID, allClusters[0].UUID)

	// Test deleting the 1st cluster.
	require.NoError(t, f.db.DeleteWorkerCluster(f.ctx, f.cluster.UUID))
	has, err = f.db.HasWorkerClusters(f.ctx)
	require.NoError(t, err)
	assert.False(t, has, "expecting HasWorkerClusters to return false")
}

func TestAssignUnassignWorkerClusters(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	assertClusters := func(msgLabel string, clusterUUIDs ...string) {
		w, err := f.db.FetchWorker(f.ctx, f.worker.UUID)
		require.NoError(t, err)

		// Catch doubly-reported clusters, as the maps below would hide those cases.
		assert.Len(t, w.Clusters, len(clusterUUIDs), msgLabel)

		expectClusters := make(map[string]bool)
		for _, cid := range clusterUUIDs {
			expectClusters[cid] = true
		}

		actualClusters := make(map[string]bool)
		for _, c := range w.Clusters {
			actualClusters[c.UUID] = true
		}

		assert.Equal(t, expectClusters, actualClusters, msgLabel)
	}

	secondCluster := WorkerCluster{
		UUID:        uuid.New(),
		Name:        "arbeiderscluster",
		Description: "Worker cluster in Dutch",
	}
	require.NoError(t, f.db.CreateWorkerCluster(f.ctx, &secondCluster))

	// By default the Worker should not be part of a cluster.
	assertClusters("default cluster assignment")

	require.NoError(t, f.db.WorkerSetClusters(f.ctx, f.worker, []string{f.cluster.UUID}))
	assertClusters("setting one cluster", f.cluster.UUID)

	// Double assignments should also just work.
	require.NoError(t, f.db.WorkerSetClusters(f.ctx, f.worker, []string{f.cluster.UUID, f.cluster.UUID}))
	assertClusters("setting twice the same cluster", f.cluster.UUID)

	// Multiple cluster memberships.
	require.NoError(t, f.db.WorkerSetClusters(f.ctx, f.worker, []string{f.cluster.UUID, secondCluster.UUID}))
	assertClusters("setting two different clusters", f.cluster.UUID, secondCluster.UUID)

	// Remove memberships.
	require.NoError(t, f.db.WorkerSetClusters(f.ctx, f.worker, []string{secondCluster.UUID}))
	assertClusters("unassigning from first cluster", secondCluster.UUID)

	require.NoError(t, f.db.WorkerSetClusters(f.ctx, f.worker, []string{}))
	assertClusters("unassigning from second cluster")
}

func TestSaveWorkerCluster(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	f.cluster.Name = "übercluster"
	f.cluster.Description = "ʻO kēlā hui ma laila"
	require.NoError(t, f.db.SaveWorkerCluster(f.ctx, f.cluster))

	fetched, err := f.db.FetchWorkerCluster(f.ctx, f.cluster.UUID)
	require.NoError(t, err)
	assert.Equal(t, f.cluster.Name, fetched.Name)
	assert.Equal(t, f.cluster.Description, fetched.Description)
}

func TestDeleteWorkerClusterWithWorkersAssigned(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	// Assign the worker.
	require.NoError(t, f.db.WorkerSetClusters(f.ctx, f.worker, []string{f.cluster.UUID}))

	// Delete the cluster.
	require.NoError(t, f.db.DeleteWorkerCluster(f.ctx, f.cluster.UUID))

	// Check the Worker has been unassigned from the cluster.
	w, err := f.db.FetchWorker(f.ctx, f.worker.UUID)
	require.NoError(t, err)
	assert.Empty(t, w.Clusters)
}


@@ -0,0 +1,112 @@
package persistence

// SPDX-License-Identifier: GPL-3.0-or-later

import (
	"context"
	"fmt"

	"gorm.io/gorm"
)

type WorkerTag struct {
	Model
	UUID string `gorm:"type:char(36);default:'';unique;index"`

	Name        string `gorm:"type:varchar(64);default:'';unique"`
	Description string `gorm:"type:varchar(255);default:''"`

	Workers []*Worker `gorm:"many2many:worker_tag_membership;constraint:OnDelete:CASCADE"`
}

func (db *DB) CreateWorkerTag(ctx context.Context, wc *WorkerTag) error {
	if err := db.gormDB.WithContext(ctx).Create(wc).Error; err != nil {
		return fmt.Errorf("creating new worker tag: %w", err)
	}
	return nil
}

// HasWorkerTags returns whether there are any tags defined at all.
func (db *DB) HasWorkerTags(ctx context.Context) (bool, error) {
	var count int64
	tx := db.gormDB.WithContext(ctx).
		Model(&WorkerTag{}).
		Count(&count)
	if err := tx.Error; err != nil {
		return false, workerTagError(err, "counting worker tags")
	}
	return count > 0, nil
}

func (db *DB) FetchWorkerTag(ctx context.Context, uuid string) (*WorkerTag, error) {
	tx := db.gormDB.WithContext(ctx)
	return fetchWorkerTag(tx, uuid)
}

// fetchWorkerTag fetches the worker tag using the given database instance.
func fetchWorkerTag(gormDB *gorm.DB, uuid string) (*WorkerTag, error) {
	w := WorkerTag{}
	tx := gormDB.First(&w, "uuid = ?", uuid)
	if tx.Error != nil {
		return nil, workerTagError(tx.Error, "fetching worker tag")
	}
	return &w, nil
}

func (db *DB) SaveWorkerTag(ctx context.Context, tag *WorkerTag) error {
	if err := db.gormDB.WithContext(ctx).Save(tag).Error; err != nil {
		return workerTagError(err, "saving worker tag")
	}
	return nil
}

// DeleteWorkerTag deletes the given tag, after unassigning all workers from it.
func (db *DB) DeleteWorkerTag(ctx context.Context, uuid string) error {
	tx := db.gormDB.WithContext(ctx).
		Where("uuid = ?", uuid).
		Delete(&WorkerTag{})
	if tx.Error != nil {
		return workerTagError(tx.Error, "deleting worker tag")
	}
	if tx.RowsAffected == 0 {
		return ErrWorkerTagNotFound
	}
	return nil
}

func (db *DB) FetchWorkerTags(ctx context.Context) ([]*WorkerTag, error) {
	tags := make([]*WorkerTag, 0)
	tx := db.gormDB.WithContext(ctx).Model(&WorkerTag{}).Scan(&tags)
	if tx.Error != nil {
		return nil, workerTagError(tx.Error, "fetching all worker tags")
	}
	return tags, nil
}

func (db *DB) fetchWorkerTagsWithUUID(ctx context.Context, tagUUIDs []string) ([]*WorkerTag, error) {
	tags := make([]*WorkerTag, 0)
	tx := db.gormDB.WithContext(ctx).
		Model(&WorkerTag{}).
		Where("uuid in ?", tagUUIDs).
		Scan(&tags)
	if tx.Error != nil {
		return nil, workerTagError(tx.Error, "fetching all worker tags")
	}
	return tags, nil
}

func (db *DB) WorkerSetTags(ctx context.Context, worker *Worker, tagUUIDs []string) error {
	tags, err := db.fetchWorkerTagsWithUUID(ctx, tagUUIDs)
	if err != nil {
		return workerTagError(err, "fetching worker tags")
	}

	err = db.gormDB.WithContext(ctx).
		Model(worker).
		Association("Tags").
		Replace(tags)
	if err != nil {
		return workerTagError(err, "updating worker tags")
	}
	return nil
}
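
Taken together, the new file gives the Manager a small CRUD-plus-membership API around `WorkerTag`. A usage sketch follows, assuming an initialised `*DB` and an existing `Worker` such as the test fixtures in this package provide; the tag name and description are made up for the example, and error handling is kept minimal.

```go
package persistence

import (
	"context"

	"git.blender.org/flamenco/internal/uuid"
)

// exampleTagLifecycle is a sketch only, not part of the PR; it just strings
// the new WorkerTag functions together in their intended order.
func exampleTagLifecycle(ctx context.Context, db *DB, worker *Worker) error {
	// Create a tag and make the worker its only member.
	tag := WorkerTag{
		UUID:        uuid.New(),
		Name:        "gpu-workers",                // Illustrative name.
		Description: "Workers with a capable GPU", // Illustrative description.
	}
	if err := db.CreateWorkerTag(ctx, &tag); err != nil {
		return err
	}
	if err := db.WorkerSetTags(ctx, worker, []string{tag.UUID}); err != nil {
		return err
	}

	// WorkerSetTags replaces the entire membership list, so an empty
	// slice unassigns the worker from every tag.
	if err := db.WorkerSetTags(ctx, worker, []string{}); err != nil {
		return err
	}

	// Deleting a tag unassigns any remaining workers via the
	// OnDelete:CASCADE constraint on the membership table.
	return db.DeleteWorkerTag(ctx, tag.UUID)
}
```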


@@ -0,0 +1,165 @@
package persistence

// SPDX-License-Identifier: GPL-3.0-or-later

import (
	"testing"
	"time"

	"git.blender.org/flamenco/internal/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestCreateFetchTag(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	// Test fetching non-existent tag
	fetchedTag, err := f.db.FetchWorkerTag(f.ctx, "7ee21bc8-ff1a-42d2-a6b6-cc4b529b189f")
	assert.ErrorIs(t, err, ErrWorkerTagNotFound)
	assert.Nil(t, fetchedTag)

	// New tag creation is already done in the workerTestFixtures() call.
	assert.NotNil(t, f.tag)

	fetchedTag, err = f.db.FetchWorkerTag(f.ctx, f.tag.UUID)
	require.NoError(t, err)
	assert.NotNil(t, fetchedTag)

	// Test contents of fetched tag.
	assert.Equal(t, f.tag.UUID, fetchedTag.UUID)
	assert.Equal(t, f.tag.Name, fetchedTag.Name)
	assert.Equal(t, f.tag.Description, fetchedTag.Description)
	assert.Zero(t, fetchedTag.Workers)
}

func TestFetchDeleteTags(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	// Single tag was created by fixture.
	has, err := f.db.HasWorkerTags(f.ctx)
	require.NoError(t, err)
	assert.True(t, has, "expecting HasWorkerTags to return true")

	secondTag := WorkerTag{
		UUID:        uuid.New(),
		Name:        "arbeiderstag",
		Description: "Worker tag in Dutch",
	}
	require.NoError(t, f.db.CreateWorkerTag(f.ctx, &secondTag))

	allTags, err := f.db.FetchWorkerTags(f.ctx)
	require.NoError(t, err)
	require.Len(t, allTags, 2)

	var allTagIDs [2]string
	for idx := range allTags {
		allTagIDs[idx] = allTags[idx].UUID
	}
	assert.Contains(t, allTagIDs, f.tag.UUID)
	assert.Contains(t, allTagIDs, secondTag.UUID)

	has, err = f.db.HasWorkerTags(f.ctx)
	require.NoError(t, err)
	assert.True(t, has, "expecting HasWorkerTags to return true")

	// Test deleting the 2nd tag.
	require.NoError(t, f.db.DeleteWorkerTag(f.ctx, secondTag.UUID))
	allTags, err = f.db.FetchWorkerTags(f.ctx)
	require.NoError(t, err)
	require.Len(t, allTags, 1)
	assert.Equal(t, f.tag.UUID, allTags[0].UUID)

	// Test deleting the 1st tag.
	require.NoError(t, f.db.DeleteWorkerTag(f.ctx, f.tag.UUID))
	has, err = f.db.HasWorkerTags(f.ctx)
	require.NoError(t, err)
	assert.False(t, has, "expecting HasWorkerTags to return false")
}

func TestAssignUnassignWorkerTags(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	assertTags := func(msgLabel string, tagUUIDs ...string) {
		w, err := f.db.FetchWorker(f.ctx, f.worker.UUID)
		require.NoError(t, err)

		// Catch doubly-reported tags, as the maps below would hide those cases.
		assert.Len(t, w.Tags, len(tagUUIDs), msgLabel)

		expectTags := make(map[string]bool)
		for _, cid := range tagUUIDs {
			expectTags[cid] = true
		}

		actualTags := make(map[string]bool)
		for _, c := range w.Tags {
			actualTags[c.UUID] = true
		}

		assert.Equal(t, expectTags, actualTags, msgLabel)
	}

	secondTag := WorkerTag{
		UUID:        uuid.New(),
		Name:        "arbeiderstag",
		Description: "Worker tag in Dutch",
	}
	require.NoError(t, f.db.CreateWorkerTag(f.ctx, &secondTag))

	// By default the Worker should not be part of a tag.
	assertTags("default tag assignment")

	require.NoError(t, f.db.WorkerSetTags(f.ctx, f.worker, []string{f.tag.UUID}))
	assertTags("setting one tag", f.tag.UUID)

	// Double assignments should also just work.
	require.NoError(t, f.db.WorkerSetTags(f.ctx, f.worker, []string{f.tag.UUID, f.tag.UUID}))
	assertTags("setting twice the same tag", f.tag.UUID)

	// Multiple tag memberships.
	require.NoError(t, f.db.WorkerSetTags(f.ctx, f.worker, []string{f.tag.UUID, secondTag.UUID}))
	assertTags("setting two different tags", f.tag.UUID, secondTag.UUID)

	// Remove memberships.
	require.NoError(t, f.db.WorkerSetTags(f.ctx, f.worker, []string{secondTag.UUID}))
	assertTags("unassigning from first tag", secondTag.UUID)

	require.NoError(t, f.db.WorkerSetTags(f.ctx, f.worker, []string{}))
	assertTags("unassigning from second tag")
}

func TestSaveWorkerTag(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	f.tag.Name = "übertag"
	f.tag.Description = "ʻO kēlā hui ma laila"
	require.NoError(t, f.db.SaveWorkerTag(f.ctx, f.tag))

	fetched, err := f.db.FetchWorkerTag(f.ctx, f.tag.UUID)
	require.NoError(t, err)
	assert.Equal(t, f.tag.Name, fetched.Name)
	assert.Equal(t, f.tag.Description, fetched.Description)
}

func TestDeleteWorkerTagWithWorkersAssigned(t *testing.T) {
	f := workerTestFixtures(t, 1*time.Second)
	defer f.done()

	// Assign the worker.
	require.NoError(t, f.db.WorkerSetTags(f.ctx, f.worker, []string{f.tag.UUID}))

	// Delete the tag.
	require.NoError(t, f.db.DeleteWorkerTag(f.ctx, f.tag.UUID))

	// Check the Worker has been unassigned from the tag.
	w, err := f.db.FetchWorker(f.ctx, f.worker.UUID)
	require.NoError(t, err)
	assert.Empty(t, w.Tags)
}


@@ -31,7 +31,7 @@ type Worker struct {
 	SupportedTaskTypes string `gorm:"type:varchar(255);default:''"` // comma-separated list of task types.

-	Clusters []*WorkerCluster `gorm:"many2many:worker_cluster_membership;constraint:OnDelete:CASCADE"`
+	Tags []*WorkerTag `gorm:"many2many:worker_tag_membership;constraint:OnDelete:CASCADE"`
 }

 func (w *Worker) Identifier() string {
@@ -73,7 +73,7 @@ func (db *DB) CreateWorker(ctx context.Context, w *Worker) error {
 func (db *DB) FetchWorker(ctx context.Context, uuid string) (*Worker, error) {
 	w := Worker{}
 	tx := db.gormDB.WithContext(ctx).
-		Preload("Clusters").
+		Preload("Tags").
 		First(&w, "uuid = ?", uuid)
 	if tx.Error != nil {
 		return nil, workerError(tx.Error, "fetching worker")


@@ -319,18 +319,18 @@ func TestDeleteWorker(t *testing.T) {
 	}
 }

-func TestDeleteWorkerWithClusterAssigned(t *testing.T) {
+func TestDeleteWorkerWithTagAssigned(t *testing.T) {
 	f := workerTestFixtures(t, 1*time.Second)
 	defer f.done()

 	// Assign the worker.
-	require.NoError(t, f.db.WorkerSetClusters(f.ctx, f.worker, []string{f.cluster.UUID}))
+	require.NoError(t, f.db.WorkerSetTags(f.ctx, f.worker, []string{f.tag.UUID}))

 	// Delete the Worker.
 	require.NoError(t, f.db.DeleteWorker(f.ctx, f.worker.UUID))

-	// Check the Worker has been unassigned from the cluster.
-	cluster, err := f.db.FetchWorkerCluster(f.ctx, f.cluster.UUID)
+	// Check the Worker has been unassigned from the tag.
+	tag, err := f.db.FetchWorkerTag(f.ctx, f.tag.UUID)
 	require.NoError(t, err)
-	assert.Empty(t, cluster.Workers)
+	assert.Empty(t, tag.Workers)
 }


@@ -32,7 +32,7 @@ func NewWorkerUpdate(worker *persistence.Worker) api.SocketIOWorkerUpdate {
 		workerUpdate.LastSeen = &worker.LastSeenAt
 	}

-	// TODO: add cluster IDs.
+	// TODO: add tag IDs.

 	return workerUpdate
 }
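
The TODO above is carried over verbatim from the cluster version. If it were addressed, the shape might look like the sketch below; the helper is hypothetical, it assumes the same `persistence` import as the surrounding file, and in particular the generated `api.SocketIOWorkerUpdate` type is not known to have a field for tag IDs yet, which is presumably why the TODO still stands.

```go
// Hypothetical sketch of resolving the TODO. collectTagUUIDs compiles
// against the persistence model above, but the SocketIOWorkerUpdate field
// that would receive its result is an assumption, not a known part of the
// generated api package.
func collectTagUUIDs(worker *persistence.Worker) []string {
	tagUUIDs := make([]string, 0, len(worker.Tags))
	for _, tag := range worker.Tags {
		tagUUIDs = append(tagUUIDs, tag.UUID)
	}
	return tagUUIDs
}
```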


@@ -215,7 +215,6 @@ func fileCopy(src, dest string) (error, string) {
 	return nil, msg
 }

-
 func fileExists(filename string) bool {
 	_, err := os.Stat(filename)
 	return !errors.Is(err, fs.ErrNotExist)


@@ -345,7 +345,6 @@ func TestCmdCopyFileDestinationExists(t *testing.T) {
 	assert.Error(t, f.run())
 }

-
 func TestCmdCopyFileSourceIsDir(t *testing.T) {
 	f := newCmdCopyFileFixture(t)
 	defer f.finish(t)
@@ -372,7 +371,6 @@ func TestCmdCopyFileSourceIsDir(t *testing.T) {
 	assert.Error(t, f.run())
 }

-
 func newCmdCopyFileFixture(t *testing.T) cmdCopyFileFixture {
 	mockCtrl := gomock.NewController(t)
 	ce, mocks := testCommandExecutor(t, mockCtrl)

@@ -32,12 +32,17 @@
       <dt class="field-name" title="ID">ID</dt>
       <dd><span @click="copyElementText" class="click-to-copy">{{ jobData.id }}</span></dd>

-      <template v-if="workerCluster">
-        <!-- TODO: fetch cluster name and show that instead, and allow editing of the cluster. -->
-        <dt class="field-name" title="Worker Cluster">Cluster</dt>
-        <dd :title="workerCluster.description"><span @click="copyElementData" class="click-to-copy"
-            :data-clipboard="workerCluster.id">{{
-              workerCluster.name }}</span></dd>
+      <template v-if="workerTag">
+        <!-- TODO: fetch tag name and show that instead, and allow editing of the tag. -->
+        <dt class="field-name" title="Worker Tag">Tag</dt>
+        <dd :title="workerTag.description">
+          <span
+            @click="copyElementData"
+            class="click-to-copy"
+            :data-clipboard="workerTag.id"
+            >{{ workerTag.name }}</span
+          >
+        </dd>
       </template>

       <dt class="field-name" title="Name">Name</dt>
@@ -128,8 +133,7 @@ export default {
       this._refreshJobSettings(this.jobData);
     }

-    this.workers.refreshClusters()
-      .catch((error) => {
+    this.workers.refreshTags().catch((error) => {
       const errorMsg = JSON.stringify(error); // TODO: handle API errors better.
       this.notifs.add(`Error: ${errorMsg}`);
     });
@@ -156,9 +160,9 @@ export default {
       }
       return this.jobData.settings;
     },
-    workerCluster() {
-      if (!this.jobData.worker_cluster) return undefined;
-      return this.workers.clustersByID[this.jobData.worker_cluster];
+    workerTag() {
+      if (!this.jobData.worker_tag) return undefined;
+      return this.workers.tagsByID[this.jobData.worker_tag];
     },
   },
   watch: {


@@ -34,21 +34,24 @@
         </dd>
       </dl>

-      <section class="worker-clusters" v-if="workers.clusters && workers.clusters.length">
-        <h3 class="sub-title">Clusters</h3>
+      <section class="worker-tags" v-if="workers.tags && workers.tags.length">
+        <h3 class="sub-title">Tags</h3>
         <ul>
-          <li v-for="cluster in workers.clusters">
-            <switch-checkbox :isChecked="thisWorkerClusters[cluster.id]" :label="cluster.name" :title="cluster.description"
-              @switch-toggle="toggleWorkerCluster(cluster.id)">
+          <li v-for="tag in workers.tags">
+            <switch-checkbox
+              :isChecked="thisWorkerTags[tag.id]"
+              :label="tag.name"
+              :title="tag.description"
+              @switch-toggle="toggleWorkerTag(tag.id)"
+            >
             </switch-checkbox>
           </li>
         </ul>
-        <p class="hint" v-if="hasClustersAssigned">
-          This worker will only pick up jobs assigned to one of its clusters, and clusterless jobs.
-        </p>
-        <p class="hint" v-else>
-          This worker will only pick up clusterless jobs.
+        <p class="hint" v-if="hasTagsAssigned">
+          This worker will only pick up jobs assigned to one of its tags, and
+          tagless jobs.
         </p>
+        <p class="hint" v-else>This worker will only pick up tagless jobs.</p>
       </section>

       <section class="sleep-schedule" :class="{ 'is-schedule-active': workerSleepSchedule.is_active }">
@@ -165,15 +168,14 @@ export default {
       notifs: useNotifs(),
       copyElementText: copyElementText,
       workers: useWorkers(),
-      thisWorkerClusters: {}, // Mapping from UUID to 'isAssigned' boolean.
+      thisWorkerTags: {}, // Mapping from UUID to 'isAssigned' boolean.
     };
   },
   mounted() {
     // Allow testing from the JS console:
     window.workerDetailsVue = this;

-    this.workers.refreshClusters()
-      .catch((error) => {
+    this.workers.refreshTags().catch((error) => {
       const errorMsg = JSON.stringify(error); // TODO: handle API errors better.
       this.notifs.add(`Error: ${errorMsg}`);
     });
@@ -191,7 +193,7 @@ export default {
         this.fetchWorkerSleepSchedule();
       }

-      this.updateThisWorkerClusters(newData);
+      this.updateThisWorkerTags(newData);
     },
   },
   computed: {
@@ -209,11 +211,10 @@ export default {
     },
     workerSleepScheduleStatusLabel() {
       return this.workerSleepSchedule.is_active ? 'Enabled' : 'Disabled';
+    },
+    hasTagsAssigned() {
+      const tagIDs = this.getAssignedTagIDs();
+      return tagIDs && tagIDs.length > 0;
     },
-    hasClustersAssigned() {
-      const clusterIDs = this.getAssignedClusterIDs();
-      return clusterIDs && clusterIDs.length > 0;
-    }
   },
   methods: {
     fetchWorkerSleepSchedule() {
@@ -262,46 +263,48 @@ export default {
       }
       this.api.deleteWorker(this.workerData.id);
     },
-    updateThisWorkerClusters(newWorkerData) {
-      if (!newWorkerData || !newWorkerData.clusters) {
-        this.thisWorkerClusters = {};
+    updateThisWorkerTags(newWorkerData) {
+      if (!newWorkerData || !newWorkerData.tags) {
+        this.thisWorkerTags = {};
         return;
       }

-      const assignedClusters = newWorkerData.clusters.reduce(
-        (accu, cluster) => { accu[cluster.id] = true; return accu; },
-        {});
-      this.thisWorkerClusters = assignedClusters;
+      const assignedTags = newWorkerData.tags.reduce((accu, tag) => {
+        accu[tag.id] = true;
+        return accu;
+      }, {});
+      this.thisWorkerTags = assignedTags;
     },
-    toggleWorkerCluster(clusterID) {
-      console.log("Toggled", clusterID);
-      this.thisWorkerClusters[clusterID] = !this.thisWorkerClusters[clusterID];
-      console.log("New assignment:", plain(this.thisWorkerClusters))
+    toggleWorkerTag(tagID) {
+      console.log("Toggled", tagID);
+      this.thisWorkerTags[tagID] = !this.thisWorkerTags[tagID];
+      console.log("New assignment:", plain(this.thisWorkerTags));

-      // Construct cluster change request.
-      const clusterIDs = this.getAssignedClusterIDs();
-      const changeRequest = new WorkerClusterChangeRequest(clusterIDs);
+      // Construct tag change request.
+      const tagIDs = this.getAssignedTagIDs();
+      const changeRequest = new WorkerTagChangeRequest(tagIDs);

       // Send to the Manager.
-      this.api.setWorkerClusters(this.workerData.id, changeRequest)
+      this.api
+        .setWorkerTags(this.workerData.id, changeRequest)
         .then(() => {
-          this.notifs.add('Cluster assignment updated');
+          this.notifs.add("Tag assignment updated");
         })
         .catch((error) => {
           const errorMsg = JSON.stringify(error); // TODO: handle API errors better.
           this.notifs.add(`Error: ${errorMsg}`);
         });
     },
-    getAssignedClusterIDs() {
-      const clusterIDs = [];
-      for (let clusterID in this.thisWorkerClusters) {
+    getAssignedTagIDs() {
+      const tagIDs = [];
+      for (let tagID in this.thisWorkerTags) {
         // Values can exist and be set to 'false'.
-        const isAssigned = this.thisWorkerClusters[clusterID];
-        if (isAssigned) clusterIDs.push(clusterID);
-      }
-      return clusterIDs;
-    }
-  }
+        const isAssigned = this.thisWorkerTags[tagID];
+        if (isAssigned) tagIDs.push(tagID);
+      }
+      return tagIDs;
+    },
+  },
 };
 </script>
@@ -377,11 +380,11 @@ export default {
   white-space: nowrap;
 }

-.worker-clusters ul {
+.worker-tags ul {
   list-style: none;
 }

-.worker-clusters ul li {
+.worker-tags ul li {
   margin-bottom: 0.25rem;
 }
 </style>


@@ -15,16 +15,16 @@ export const useWorkers = defineStore('workers', {
     */
    activeWorkerID: "",

-    /** @type {API.WorkerCluster[]} */
-    clusters: [],
+    /** @type {API.WorkerTag[]} */
+    tags: [],

-    /* Mapping from cluster UUID to API.WorkerCluster. */
-    clustersByID: {},
+    /* Mapping from tag UUID to API.WorkerTag. */
+    tagsByID: {},
   }),
   actions: {
     setActiveWorkerID(workerID) {
       this.$patch({
-        activeWorker: {id: workerID, settings: {}, metadata: {}},
+        activeWorker: { id: workerID, settings: {}, metadata: {} },
         activeWorkerID: workerID,
       });
     },
@@ -47,22 +47,21 @@ export const useWorkers = defineStore('workers', {
       });
     },

     /**
-     * Fetch the available worker clusters from the Manager.
+     * Fetch the available worker tags from the Manager.
      *
      * @returns a promise.
      */
-    refreshClusters() {
+    refreshTags() {
       const api = new WorkerMgtApi(getAPIClient());
-      return api.fetchWorkerClusters()
-        .then((resp) => {
-          this.clusters = resp.clusters;
+      return api.fetchWorkerTags().then((resp) => {
+        this.tags = resp.tags;

-          let clustersByID = {};
-          for (let cluster of this.clusters) {
-            clustersByID[cluster.id] = cluster;
-          }
-          this.clustersByID = clustersByID;
-        })
+        let tagsByID = {};
+        for (let tag of this.tags) {
+          tagsByID[tag.id] = tag;
+        }
+        this.tagsByID = tagsByID;
+      });
     },
   },
-})
+});