Manager: allow setup to finish without Blender #104306

Manually merged
Sybren A. Stüvel merged 34 commits from abelli/flamenco:issue100195 into main 2024-09-09 11:22:42 +02:00
55 changed files with 3127 additions and 600 deletions
Showing only changes of commit a9750c1fc8 - Show all commits

5
.gitattributes vendored
View File

@ -4,6 +4,10 @@
/web/app/src/manager-api/** linguist-generated=true
**/*.gen.go linguist-generated=true
# In your Git config, set:
# git config core.eol native
# git config core.autocrlf true
# Set the default newline behavior, in case people don't have core.autocrlf set.
* text=auto
@ -19,6 +23,7 @@
*.md text
*.py text
*.sh text
*.sql text
*.svg text
*.toml text
*.txt text

View File

@ -1,9 +1,9 @@
name: Bug Report
about: File a bug report
labels:
- "type::Report"
- "status::Needs Triage"
- "priority::Normal"
- "Type/Report"
- "Status/Needs Triage"
- "Priority/Normal"
body:
- type: markdown
attributes:

View File

@ -1,7 +1,7 @@
name: Design
about: Create a design task (for developers only)
labels:
- "type::Design"
- "Type/Design"
body:
- type: textarea
id: body

View File

@ -0,0 +1,41 @@
name: Custom Job Type
about: Submit your custom job type
labels:
- "Type/Job Type"
body:
- type: markdown
attributes:
value: |
## Thanks for contributing!
With this form you can submit your custom job type for listing on https://flamenco.blender.org/third-party-jobs/
- type: input
id: blender_version
attributes:
label: "Blender Version(s)"
description: "Which version(s) of Blender are known to work with this job type?"
required: true
- type: input
id: flamenco_version
attributes:
label: "Flamenco Version(s)"
description: "Which version(s) of Flamenco are known to work with this job type?"
required: true
- type: textarea
id: description
attributes:
label: "Description"
description: "Please describe what this job type does, what the target audience is, how to use it, etc. Feel free to include images as well."
required: true
- type: markdown
attributes:
value: |
Please understand that both Flamenco and Blender are under constant
development. By their very nature, this means that every once in a while
your job type will need some attention and updating.
- type: checkboxes
id: understanding
attributes:
label: "Will you help to keep things up to date?"
options:
- label: "Yes, I'll check with new versions of Blender and/or Flamenco, and send in a report when updating is necessary"

View File

@ -1,7 +1,7 @@
name: To Do
about: Create a to do task (for developers only)
labels:
- "type::To Do"
- "Type/To Do"
body:
- type: textarea
id: body

View File

@ -6,7 +6,7 @@ The `flamenco.manager` package is automatically generated by the [OpenAPI Genera
- API version: 1.0.0
- Package version: 3.6-alpha0
- Build package: org.openapitools.codegen.languages.PythonClientCodegen
For more information, please visit [https://flamenco.io/](https://flamenco.io/)
For more information, please visit [https://flamenco.blender.org/](https://flamenco.blender.org/)
## Requirements.

2
go.mod
View File

@ -1,6 +1,6 @@
module projects.blender.org/studio/flamenco
go 1.22.2
go 1.22.3
require (
github.com/adrg/xdg v0.4.0

6
go.sum
View File

@ -199,8 +199,6 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@ -223,8 +221,6 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -264,8 +260,6 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=

View File

@ -36,8 +36,9 @@ type PersistenceService interface {
SaveJobPriority(ctx context.Context, job *persistence.Job) error
// FetchTask fetches the given task and the accompanying job.
FetchTask(ctx context.Context, taskID string) (*persistence.Task, error)
// FetchTaskJobUUID fetches the UUID of the job this task belongs to.
FetchTaskJobUUID(ctx context.Context, taskID string) (string, error)
FetchTaskFailureList(context.Context, *persistence.Task) ([]*persistence.Worker, error)
SaveTask(ctx context.Context, task *persistence.Task) error
SaveTaskActivity(ctx context.Context, t *persistence.Task) error
// TaskTouchedByWorker marks the task as 'touched' by a worker. This is used for timeout detection.
TaskTouchedByWorker(context.Context, *persistence.Task) error

View File

@ -439,7 +439,7 @@ func (f *Flamenco) FetchTaskLogInfo(e echo.Context, taskID string) error {
return sendAPIError(e, http.StatusBadRequest, "bad task ID")
}
dbTask, err := f.persist.FetchTask(ctx, taskID)
jobUUID, err := f.persist.FetchTaskJobUUID(ctx, taskID)
if err != nil {
if errors.Is(err, persistence.ErrTaskNotFound) {
return sendAPIError(e, http.StatusNotFound, "no such task")
@ -447,9 +447,9 @@ func (f *Flamenco) FetchTaskLogInfo(e echo.Context, taskID string) error {
logger.Error().Err(err).Msg("error fetching task")
return sendAPIError(e, http.StatusInternalServerError, "error fetching task: %v", err)
}
logger = logger.With().Str("job", dbTask.Job.UUID).Logger()
logger = logger.With().Str("job", jobUUID).Logger()
size, err := f.logStorage.TaskLogSize(dbTask.Job.UUID, taskID)
size, err := f.logStorage.TaskLogSize(jobUUID, taskID)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
logger.Debug().Msg("task log unavailable, task has no log on disk")
@ -475,11 +475,11 @@ func (f *Flamenco) FetchTaskLogInfo(e echo.Context, taskID string) error {
taskLogInfo := api.TaskLogInfo{
TaskId: taskID,
JobId: dbTask.Job.UUID,
JobId: jobUUID,
Size: int(size),
}
fullLogPath := f.logStorage.Filepath(dbTask.Job.UUID, taskID)
fullLogPath := f.logStorage.Filepath(jobUUID, taskID)
relPath, err := f.localStorage.RelPath(fullLogPath)
if err != nil {
logger.Error().Err(err).Msg("task log is outside the manager storage, cannot construct its URL for download")
@ -501,7 +501,7 @@ func (f *Flamenco) FetchTaskLogTail(e echo.Context, taskID string) error {
return sendAPIError(e, http.StatusBadRequest, "bad task ID")
}
dbTask, err := f.persist.FetchTask(ctx, taskID)
jobUUID, err := f.persist.FetchTaskJobUUID(ctx, taskID)
if err != nil {
if errors.Is(err, persistence.ErrTaskNotFound) {
return sendAPIError(e, http.StatusNotFound, "no such task")
@ -509,9 +509,9 @@ func (f *Flamenco) FetchTaskLogTail(e echo.Context, taskID string) error {
logger.Error().Err(err).Msg("error fetching task")
return sendAPIError(e, http.StatusInternalServerError, "error fetching task: %v", err)
}
logger = logger.With().Str("job", dbTask.Job.UUID).Logger()
logger = logger.With().Str("job", jobUUID).Logger()
tail, err := f.logStorage.Tail(dbTask.Job.UUID, taskID)
tail, err := f.logStorage.Tail(jobUUID, taskID)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
logger.Debug().Msg("task tail unavailable, task has no log on disk")
@ -700,7 +700,11 @@ func taskDBtoAPI(dbTask *persistence.Task) api.Task {
Status: dbTask.Status,
Activity: dbTask.Activity,
Commands: make([]api.Command, len(dbTask.Commands)),
// TODO: convert this to just store dbTask.WorkerUUID.
Worker: workerToTaskWorker(dbTask.Worker),
JobId: dbTask.JobUUID,
}
if dbTask.Job != nil {

View File

@ -70,9 +70,13 @@ func (f *Flamenco) QueryJobs(e echo.Context) error {
ctx := e.Request().Context()
dbJobs, err := f.persist.QueryJobs(ctx, api.JobsQuery(jobsQuery))
if err != nil {
switch {
case errors.Is(err, context.Canceled):
logger.Debug().AnErr("cause", err).Msg("could not query for jobs, remote end probably closed the connection")
return sendAPIError(e, http.StatusInternalServerError, "error querying for jobs: %v", err)
case err != nil:
logger.Warn().Err(err).Msg("error querying for jobs")
return sendAPIError(e, http.StatusInternalServerError, "error querying for jobs")
return sendAPIError(e, http.StatusInternalServerError, "error querying for jobs: %v", err)
}
apiJobs := make([]api.Job, len(dbJobs))
@ -97,9 +101,13 @@ func (f *Flamenco) FetchJobTasks(e echo.Context, jobID string) error {
}
tasks, err := f.persist.QueryJobTaskSummaries(ctx, jobID)
if err != nil {
logger.Warn().Err(err).Msg("error querying for jobs")
return sendAPIError(e, http.StatusInternalServerError, "error querying for jobs")
switch {
case errors.Is(err, context.Canceled):
logger.Debug().AnErr("cause", err).Msg("could not fetch job tasks, remote end probably closed connection")
return sendAPIError(e, http.StatusInternalServerError, "error fetching job tasks: %v", err)
case err != nil:
logger.Warn().Err(err).Msg("error fetching job tasks")
return sendAPIError(e, http.StatusInternalServerError, "error fetching job tasks: %v", err)
}
summaries := make([]api.TaskSummary, len(tasks))

View File

@ -753,22 +753,10 @@ func TestFetchTaskLogTail(t *testing.T) {
jobID := "18a9b096-d77e-438c-9be2-74397038298b"
taskID := "2e020eee-20f8-4e95-8dcf-65f7dfc3ebab"
dbJob := persistence.Job{
UUID: jobID,
Name: "test job",
Status: api.JobStatusActive,
Settings: persistence.StringInterfaceMap{},
Metadata: persistence.StringStringMap{},
}
dbTask := persistence.Task{
UUID: taskID,
Job: &dbJob,
Name: "test task",
}
// The task can be found, but has no on-disk task log.
// This should not cause any error, but instead be returned as "no content".
mf.persistence.EXPECT().FetchTask(gomock.Any(), taskID).Return(&dbTask, nil)
mf.persistence.EXPECT().FetchTaskJobUUID(gomock.Any(), taskID).Return(jobID, nil)
mf.logStorage.EXPECT().Tail(jobID, taskID).
Return("", fmt.Errorf("wrapped error: %w", os.ErrNotExist))
@ -778,7 +766,7 @@ func TestFetchTaskLogTail(t *testing.T) {
assertResponseNoContent(t, echoCtx)
// Check that a 204 No Content is also returned when the task log file on disk exists, but is empty.
mf.persistence.EXPECT().FetchTask(gomock.Any(), taskID).Return(&dbTask, nil)
mf.persistence.EXPECT().FetchTaskJobUUID(gomock.Any(), taskID).Return(jobID, nil)
mf.logStorage.EXPECT().Tail(jobID, taskID).
Return("", fmt.Errorf("wrapped error: %w", os.ErrNotExist))
@ -796,21 +784,9 @@ func TestFetchTaskLogInfo(t *testing.T) {
jobID := "18a9b096-d77e-438c-9be2-74397038298b"
taskID := "2e020eee-20f8-4e95-8dcf-65f7dfc3ebab"
dbJob := persistence.Job{
UUID: jobID,
Name: "test job",
Status: api.JobStatusActive,
Settings: persistence.StringInterfaceMap{},
Metadata: persistence.StringStringMap{},
}
dbTask := persistence.Task{
UUID: taskID,
Job: &dbJob,
Name: "test task",
}
mf.persistence.EXPECT().
FetchTask(gomock.Any(), taskID).
Return(&dbTask, nil).
FetchTaskJobUUID(gomock.Any(), taskID).
Return(jobID, nil).
AnyTimes()
// The task can be found, but has no on-disk task log.

View File

@ -244,6 +244,21 @@ func (mr *MockPersistenceServiceMockRecorder) FetchTaskFailureList(arg0, arg1 in
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchTaskFailureList", reflect.TypeOf((*MockPersistenceService)(nil).FetchTaskFailureList), arg0, arg1)
}
// FetchTaskJobUUID mocks base method.
// NOTE(review): this file is generated by MockGen; regenerate via `go generate`
// instead of editing by hand.
func (m *MockPersistenceService) FetchTaskJobUUID(arg0 context.Context, arg1 string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FetchTaskJobUUID", arg0, arg1)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// FetchTaskJobUUID indicates an expected call of FetchTaskJobUUID.
func (mr *MockPersistenceServiceMockRecorder) FetchTaskJobUUID(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchTaskJobUUID", reflect.TypeOf((*MockPersistenceService)(nil).FetchTaskJobUUID), arg0, arg1)
}
// FetchWorker mocks base method.
func (m *MockPersistenceService) FetchWorker(arg0 context.Context, arg1 string) (*persistence.Worker, error) {
m.ctrl.T.Helper()
@ -392,20 +407,6 @@ func (mr *MockPersistenceServiceMockRecorder) SaveJobPriority(arg0, arg1 interfa
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveJobPriority", reflect.TypeOf((*MockPersistenceService)(nil).SaveJobPriority), arg0, arg1)
}
// SaveTask mocks base method.
// NOTE(review): generated by MockGen; do not edit by hand.
func (m *MockPersistenceService) SaveTask(arg0 context.Context, arg1 *persistence.Task) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SaveTask", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// SaveTask indicates an expected call of SaveTask.
func (mr *MockPersistenceServiceMockRecorder) SaveTask(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveTask", reflect.TypeOf((*MockPersistenceService)(nil).SaveTask), arg0, arg1)
}
// SaveTaskActivity mocks base method.
func (m *MockPersistenceService) SaveTaskActivity(arg0 context.Context, arg1 *persistence.Task) error {
m.ctrl.T.Helper()

View File

@ -18,6 +18,7 @@ import (
type PersistenceService interface {
FetchJob(ctx context.Context, jobUUID string) (*persistence.Job, error)
FetchJobShamanCheckoutID(ctx context.Context, jobUUID string) (string, error)
RequestJobDeletion(ctx context.Context, j *persistence.Job) error
RequestJobMassDeletion(ctx context.Context, lastUpdatedMax time.Time) ([]string, error)

View File

@ -150,19 +150,28 @@ func (s *Service) Run(ctx context.Context) {
log.Debug().Msg("job deleter: running")
defer log.Debug().Msg("job deleter: shutting down")
waitTime := jobDeletionCheckInterval
for {
select {
case <-ctx.Done():
return
case jobUUID := <-s.queue:
s.deleteJob(ctx, jobUUID)
case <-time.After(jobDeletionCheckInterval):
if len(s.queue) == 0 {
waitTime = 100 * time.Millisecond
}
case <-time.After(waitTime):
// Inspect the database to see if there was anything marked for deletion
// without getting into our queue. This can happen when lots of jobs are
// queued in quick succession, as then the queue channel gets full.
if len(s.queue) == 0 {
s.queuePendingDeletions(ctx)
}
// The next iteration should just wait for the default duration.
waitTime = jobDeletionCheckInterval
}
}
}
@ -196,7 +205,9 @@ queueLoop:
func (s *Service) deleteJob(ctx context.Context, jobUUID string) error {
logger := log.With().Str("job", jobUUID).Logger()
startTime := time.Now()
logger.Debug().Msg("job deleter: starting job deletion")
err := s.deleteShamanCheckout(ctx, logger, jobUUID)
if err != nil {
return err
@ -224,11 +235,10 @@ func (s *Service) deleteJob(ctx context.Context, jobUUID string) error {
}
s.changeBroadcaster.BroadcastJobUpdate(jobUpdate)
logger.Info().Msg("job deleter: job removal complete")
// Request a consistency check on the database. In the past there have been
// some issues after deleting a job.
s.persist.RequestIntegrityCheck()
duration := time.Since(startTime)
logger.Info().
Stringer("duration", duration).
Msg("job deleter: job removal complete")
return nil
}
@ -258,12 +268,10 @@ func (s *Service) deleteShamanCheckout(ctx context.Context, logger zerolog.Logge
}
// To erase the Shaman checkout we need more info than just its UUID.
dbJob, err := s.persist.FetchJob(ctx, jobUUID)
checkoutID, err := s.persist.FetchJobShamanCheckoutID(ctx, jobUUID)
if err != nil {
return fmt.Errorf("unable to fetch job from database: %w", err)
}
checkoutID := dbJob.Storage.ShamanCheckoutID
if checkoutID == "" {
logger.Info().Msg("job deleter: job was not created with Shaman (or before Flamenco v3.2), skipping job file deletion")
return nil
@ -272,10 +280,10 @@ func (s *Service) deleteShamanCheckout(ctx context.Context, logger zerolog.Logge
err = s.shaman.EraseCheckout(checkoutID)
switch {
case errors.Is(err, shaman.ErrDoesNotExist):
logger.Info().Msg("job deleter: Shaman checkout directory does not exist, ignoring")
logger.Debug().Msg("job deleter: Shaman checkout directory does not exist, ignoring")
return nil
case err != nil:
logger.Info().Err(err).Msg("job deleter: Shaman checkout directory could not be erased")
logger.Warn().Err(err).Msg("job deleter: Shaman checkout directory could not be erased")
return err
}

View File

@ -110,7 +110,6 @@ func TestDeleteJobWithoutShaman(t *testing.T) {
// Mock that everything went OK.
mocks.storage.EXPECT().RemoveJobStorage(mocks.ctx, jobUUID)
mocks.persist.EXPECT().DeleteJob(mocks.ctx, jobUUID)
mocks.persist.EXPECT().RequestIntegrityCheck()
mocks.broadcaster.EXPECT().BroadcastJobUpdate(gomock.Any())
require.NoError(t, s.deleteJob(mocks.ctx, jobUUID))
}
@ -128,14 +127,7 @@ func TestDeleteJobWithShaman(t *testing.T) {
AnyTimes()
shamanCheckoutID := "010_0431_lighting"
dbJob := persistence.Job{
UUID: jobUUID,
Name: "сцена/shot/010_0431_lighting",
Storage: persistence.JobStorageInfo{
ShamanCheckoutID: shamanCheckoutID,
},
}
mocks.persist.EXPECT().FetchJob(mocks.ctx, jobUUID).Return(&dbJob, nil).AnyTimes()
mocks.persist.EXPECT().FetchJobShamanCheckoutID(mocks.ctx, jobUUID).Return(shamanCheckoutID, nil).AnyTimes()
// Mock that Shaman deletion failed. The rest of the deletion should be
// blocked by this.
@ -162,7 +154,6 @@ func TestDeleteJobWithShaman(t *testing.T) {
mocks.shaman.EXPECT().EraseCheckout(shamanCheckoutID)
mocks.storage.EXPECT().RemoveJobStorage(mocks.ctx, jobUUID)
mocks.persist.EXPECT().DeleteJob(mocks.ctx, jobUUID)
mocks.persist.EXPECT().RequestIntegrityCheck()
mocks.broadcaster.EXPECT().BroadcastJobUpdate(gomock.Any())
require.NoError(t, s.deleteJob(mocks.ctx, jobUUID))
}

View File

@ -66,6 +66,21 @@ func (mr *MockPersistenceServiceMockRecorder) FetchJob(arg0, arg1 interface{}) *
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchJob", reflect.TypeOf((*MockPersistenceService)(nil).FetchJob), arg0, arg1)
}
// FetchJobShamanCheckoutID mocks base method.
// NOTE(review): generated by MockGen; regenerate rather than hand-editing.
func (m *MockPersistenceService) FetchJobShamanCheckoutID(arg0 context.Context, arg1 string) (string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "FetchJobShamanCheckoutID", arg0, arg1)
	ret0, _ := ret[0].(string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// FetchJobShamanCheckoutID indicates an expected call of FetchJobShamanCheckoutID.
func (mr *MockPersistenceServiceMockRecorder) FetchJobShamanCheckoutID(arg0, arg1 interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchJobShamanCheckoutID", reflect.TypeOf((*MockPersistenceService)(nil).FetchJobShamanCheckoutID), arg0, arg1)
}
// FetchJobsDeletionRequested mocks base method.
func (m *MockPersistenceService) FetchJobsDeletionRequested(arg0 context.Context) ([]string, error) {
m.ctrl.T.Helper()

View File

@ -184,7 +184,9 @@ func (db *DB) queries() (*sqlc.Queries, error) {
if err != nil {
return nil, fmt.Errorf("could not get low-level database driver: %w", err)
}
return sqlc.New(sqldb), nil
loggingWrapper := LoggingDBConn{sqldb}
return sqlc.New(&loggingWrapper), nil
}
// now returns the result of `nowFunc()` wrapped in a sql.NullTime.

View File

@ -2,6 +2,7 @@
package persistence
import (
"database/sql"
"errors"
"fmt"
@ -9,12 +10,20 @@ import (
)
var (
// TODO: let these errors wrap database/sql.ErrNoRows.
ErrJobNotFound = PersistenceError{Message: "job not found", Err: gorm.ErrRecordNotFound}
ErrTaskNotFound = PersistenceError{Message: "task not found", Err: gorm.ErrRecordNotFound}
ErrWorkerNotFound = PersistenceError{Message: "worker not found", Err: gorm.ErrRecordNotFound}
ErrWorkerTagNotFound = PersistenceError{Message: "worker tag not found", Err: gorm.ErrRecordNotFound}
ErrDeletingWithoutFK = errors.New("refusing to delete a job when foreign keys are not enabled on the database")
// ErrContextCancelled wraps the SQLite error "interrupted (9)". That error is
// (as far as Sybren could figure out) caused by the context being closed.
// Unfortunately there is no wrapping of the context error, so it's not
// possible to determine whether it was due to a 'deadline exceeded' error or
// another cancellation cause (like upstream HTTP connection closing).
ErrContextCancelled = errors.New("context cancelled")
)
type PersistenceError struct {
@ -55,6 +64,12 @@ func wrapError(errorToWrap error, message string, format ...interface{}) error {
formattedMsg = message
}
// Translate the SQLite "interrupted" error into something the error-handling
// code can check for.
if errorToWrap.Error() == "interrupted (9)" {
errorToWrap = ErrContextCancelled
}
return PersistenceError{
Message: formattedMsg,
Err: errorToWrap,
@ -63,36 +78,48 @@ func wrapError(errorToWrap error, message string, format ...interface{}) error {
// translateGormJobError translates a Gorm error to a persistence layer error.
// This helps to keep Gorm as "implementation detail" of the persistence layer.
func translateGormJobError(gormError error) error {
if errors.Is(gormError, gorm.ErrRecordNotFound) {
func translateGormJobError(err error) error {
if errors.Is(err, sql.ErrNoRows) {
return ErrTaskNotFound
}
if errors.Is(err, gorm.ErrRecordNotFound) {
return ErrJobNotFound
}
return gormError
return err
}
// translateGormTaskError translates a Gorm error to a persistence layer error.
// This helps to keep Gorm as "implementation detail" of the persistence layer.
func translateGormTaskError(gormError error) error {
if errors.Is(gormError, gorm.ErrRecordNotFound) {
func translateGormTaskError(err error) error {
if errors.Is(err, sql.ErrNoRows) {
return ErrTaskNotFound
}
return gormError
if errors.Is(err, gorm.ErrRecordNotFound) {
return ErrTaskNotFound
}
return err
}
// translateGormWorkerError translates a Gorm error to a persistence layer error.
// This helps to keep Gorm as "implementation detail" of the persistence layer.
func translateGormWorkerError(gormError error) error {
if errors.Is(gormError, gorm.ErrRecordNotFound) {
func translateGormWorkerError(err error) error {
if errors.Is(err, sql.ErrNoRows) {
return ErrWorkerNotFound
}
return gormError
if errors.Is(err, gorm.ErrRecordNotFound) {
return ErrWorkerNotFound
}
return err
}
// translateGormWorkerTagError translates a Gorm error to a persistence layer error.
// This helps to keep Gorm as "implementation detail" of the persistence layer.
func translateGormWorkerTagError(gormError error) error {
if errors.Is(gormError, gorm.ErrRecordNotFound) {
func translateGormWorkerTagError(err error) error {
if errors.Is(err, sql.ErrNoRows) {
return ErrWorkerTagNotFound
}
return gormError
if errors.Is(err, gorm.ErrRecordNotFound) {
return ErrWorkerTagNotFound
}
return err
}

View File

@ -14,7 +14,6 @@ import (
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"projects.blender.org/studio/flamenco/internal/manager/job_compilers"
"projects.blender.org/studio/flamenco/internal/manager/persistence/sqlc"
@ -66,12 +65,14 @@ type Task struct {
Type string `gorm:"type:varchar(32);default:''"`
JobID uint `gorm:"default:0"`
Job *Job `gorm:"foreignkey:JobID;references:ID;constraint:OnDelete:CASCADE"`
JobUUID string `gorm:"-"` // Fetched by SQLC, handled by GORM in Task.AfterFind()
Priority int `gorm:"type:smallint;default:50"`
Status api.TaskStatus `gorm:"type:varchar(16);default:''"`
// Which worker is/was working on this.
WorkerID *uint
Worker *Worker `gorm:"foreignkey:WorkerID;references:ID;constraint:OnDelete:SET NULL"`
WorkerUUID string `gorm:"-"` // Fetched by SQLC, handled by GORM in Task.AfterFind()
LastTouchedAt time.Time `gorm:"index"` // Should contain UTC timestamps.
// Dependencies are tasks that need to be completed before this one can run.
@ -81,6 +82,17 @@ type Task struct {
Activity string `gorm:"type:varchar(255);default:''"`
}
// AfterFind is a GORM hook that denormalises the JobUUID and WorkerUUID
// fields from the associated Job / Worker records, when those were loaded
// as part of the query and the UUID fields were not already filled in
// (e.g. by the SQLC code path).
func (t *Task) AfterFind(tx *gorm.DB) error {
	if t.Job != nil && t.JobUUID == "" {
		t.JobUUID = t.Job.UUID
	}
	if t.Worker != nil && t.WorkerUUID == "" {
		t.WorkerUUID = t.Worker.UUID
	}
	return nil
}
type Commands []Command
type Command struct {
@ -269,6 +281,23 @@ func (db *DB) FetchJob(ctx context.Context, jobUUID string) (*Job, error) {
return convertSqlcJob(sqlcJob)
}
// FetchJobShamanCheckoutID fetches the job's Shaman Checkout ID.
// Returns ErrJobNotFound when no job with the given UUID exists.
func (db *DB) FetchJobShamanCheckoutID(ctx context.Context, jobUUID string) (string, error) {
	queries, err := db.queries()
	if err != nil {
		return "", err
	}

	checkoutID, err := queries.FetchJobShamanCheckoutID(ctx, jobUUID)
	if errors.Is(err, sql.ErrNoRows) {
		return "", ErrJobNotFound
	}
	if err != nil {
		return "", jobError(err, "fetching job")
	}
	return checkoutID, nil
}
// DeleteJob deletes a job from the database.
// The deletion cascades to its tasks and other job-related tables.
func (db *DB) DeleteJob(ctx context.Context, jobUUID string) error {
@ -454,129 +483,297 @@ func (db *DB) SaveJobStorageInfo(ctx context.Context, j *Job) error {
}
func (db *DB) FetchTask(ctx context.Context, taskUUID string) (*Task, error) {
dbTask := Task{}
tx := db.gormDB.WithContext(ctx).
// Allow finding the Worker, even after it was deleted. Jobs and Tasks
// don't have soft-deletion.
Unscoped().
Joins("Job").
Joins("Worker").
First(&dbTask, "tasks.uuid = ?", taskUUID)
if tx.Error != nil {
return nil, taskError(tx.Error, "fetching task")
queries, err := db.queries()
if err != nil {
return nil, err
}
return &dbTask, nil
taskRow, err := queries.FetchTask(ctx, taskUUID)
if err != nil {
return nil, taskError(err, "fetching task %s", taskUUID)
}
convertedTask, err := convertSqlcTask(taskRow.Task, taskRow.JobUUID.String, taskRow.WorkerUUID.String)
if err != nil {
return nil, err
}
// TODO: remove this code, and let the caller fetch the job explicitly when needed.
if taskRow.Task.JobID > 0 {
dbJob, err := queries.FetchJobByID(ctx, taskRow.Task.JobID)
if err != nil {
return nil, jobError(err, "fetching job of task %s", taskUUID)
}
convertedJob, err := convertSqlcJob(dbJob)
if err != nil {
return nil, jobError(err, "converting job of task %s", taskUUID)
}
convertedTask.Job = convertedJob
if convertedTask.JobUUID != convertedJob.UUID {
panic("Conversion to SQLC is incomplete")
}
}
// TODO: remove this code, and let the caller fetch the Worker explicitly when needed.
if taskRow.WorkerUUID.Valid {
worker, err := queries.FetchWorkerUnconditional(ctx, taskRow.WorkerUUID.String)
if err != nil {
return nil, taskError(err, "fetching worker assigned to task %s", taskUUID)
}
convertedWorker := convertSqlcWorker(worker)
convertedTask.Worker = &convertedWorker
}
return convertedTask, nil
}
// FetchTaskJobUUID fetches the job UUID of the given task.
// An error is returned when the task does not exist, or when it exists but
// has no job associated with it (NULL job reference).
func (db *DB) FetchTaskJobUUID(ctx context.Context, taskUUID string) (string, error) {
	q, err := db.queries()
	if err != nil {
		return "", err
	}

	nullableUUID, err := q.FetchTaskJobUUID(ctx, taskUUID)
	switch {
	case err != nil:
		return "", taskError(err, "fetching job UUID of task %s", taskUUID)
	case !nullableUUID.Valid:
		// The task row exists but its job reference is NULL.
		return "", PersistenceError{Message: fmt.Sprintf("unable to find job of task %s", taskUUID)}
	}
	return nullableUUID.String, nil
}
// SaveTask updates a task that already exists in the database.
// This function is not used by the Flamenco API, only by unit tests.
func (db *DB) SaveTask(ctx context.Context, t *Task) error {
tx := db.gormDB.WithContext(ctx).
Omit("job").
Omit("worker").
Save(t)
if tx.Error != nil {
return taskError(tx.Error, "saving task")
if t.ID == 0 {
panic(fmt.Errorf("cannot use this function to insert a task"))
}
queries, err := db.queries()
if err != nil {
return err
}
commandsJSON, err := json.Marshal(t.Commands)
if err != nil {
return fmt.Errorf("cannot convert commands to JSON: %w", err)
}
param := sqlc.UpdateTaskParams{
UpdatedAt: db.now(),
Name: t.Name,
Type: t.Type,
Priority: int64(t.Priority),
Status: string(t.Status),
Commands: commandsJSON,
Activity: t.Activity,
ID: int64(t.ID),
}
if t.WorkerID != nil {
param.WorkerID = sql.NullInt64{
Int64: int64(*t.WorkerID),
Valid: true,
}
} else if t.Worker != nil && t.Worker.ID > 0 {
param.WorkerID = sql.NullInt64{
Int64: int64(t.Worker.ID),
Valid: true,
}
}
if !t.LastTouchedAt.IsZero() {
param.LastTouchedAt = sql.NullTime{
Time: t.LastTouchedAt,
Valid: true,
}
}
err = queries.UpdateTask(ctx, param)
if err != nil {
return taskError(err, "updating task")
}
return nil
}
func (db *DB) SaveTaskStatus(ctx context.Context, t *Task) error {
tx := db.gormDB.WithContext(ctx).
Select("Status").
Save(t)
if tx.Error != nil {
return taskError(tx.Error, "saving task")
queries, err := db.queries()
if err != nil {
return err
}
err = queries.UpdateTaskStatus(ctx, sqlc.UpdateTaskStatusParams{
UpdatedAt: db.now(),
Status: string(t.Status),
ID: int64(t.ID),
})
if err != nil {
return taskError(err, "saving task status")
}
return nil
}
func (db *DB) SaveTaskActivity(ctx context.Context, t *Task) error {
if err := db.gormDB.WithContext(ctx).
Model(t).
Select("Activity").
Updates(Task{Activity: t.Activity}).Error; err != nil {
queries, err := db.queries()
if err != nil {
return err
}
err = queries.UpdateTaskActivity(ctx, sqlc.UpdateTaskActivityParams{
UpdatedAt: db.now(),
Activity: t.Activity,
ID: int64(t.ID),
})
if err != nil {
return taskError(err, "saving task activity")
}
return nil
}
// TaskAssignToWorker assigns the given task to the given worker.
// This function is only used by unit tests. During normal operation, Flamenco
// uses the code in task_scheduler.go to assign tasks to workers.
func (db *DB) TaskAssignToWorker(ctx context.Context, t *Task, w *Worker) error {
tx := db.gormDB.WithContext(ctx).
Model(t).
Select("WorkerID").
Updates(Task{WorkerID: &w.ID})
if tx.Error != nil {
return taskError(tx.Error, "assigning task %s to worker %s", t.UUID, w.UUID)
queries, err := db.queries()
if err != nil {
return err
}
// Gorm updates t.WorkerID itself, but not t.Worker (even when it's added to
// the Updates() call above).
err = queries.TaskAssignToWorker(ctx, sqlc.TaskAssignToWorkerParams{
UpdatedAt: db.now(),
WorkerID: sql.NullInt64{
Int64: int64(w.ID),
Valid: true,
},
ID: int64(t.ID),
})
if err != nil {
return taskError(err, "assigning task %s to worker %s", t.UUID, w.UUID)
}
// Update the task itself.
t.Worker = w
t.WorkerID = &w.ID
return nil
}
func (db *DB) FetchTasksOfWorkerInStatus(ctx context.Context, worker *Worker, taskStatus api.TaskStatus) ([]*Task, error) {
result := []*Task{}
tx := db.gormDB.WithContext(ctx).
Model(&Task{}).
Joins("Job").
Where("tasks.worker_id = ?", worker.ID).
Where("tasks.status = ?", taskStatus).
Scan(&result)
if tx.Error != nil {
return nil, taskError(tx.Error, "finding tasks of worker %s in status %q", worker.UUID, taskStatus)
queries, err := db.queries()
if err != nil {
return nil, err
}
rows, err := queries.FetchTasksOfWorkerInStatus(ctx, sqlc.FetchTasksOfWorkerInStatusParams{
WorkerID: sql.NullInt64{
Int64: int64(worker.ID),
Valid: true,
},
TaskStatus: string(taskStatus),
})
if err != nil {
return nil, taskError(err, "finding tasks of worker %s in status %q", worker.UUID, taskStatus)
}
jobCache := make(map[uint]*Job)
result := make([]*Task, len(rows))
for i := range rows {
jobUUID := rows[i].JobUUID.String
gormTask, err := convertSqlcTask(rows[i].Task, jobUUID, worker.UUID)
if err != nil {
return nil, err
}
gormTask.Worker = worker
gormTask.WorkerID = &worker.ID
// Fetch the job, either from the cache or from the database. This is done
// here because the task_state_machine functionality expects that task.Job
// is set.
// TODO: make that code fetch the job details it needs, rather than fetching
// the entire job here.
job := jobCache[gormTask.JobID]
if job == nil {
job, err = db.FetchJob(ctx, jobUUID)
if err != nil {
return nil, jobError(err, "finding job %s of task %s", jobUUID, gormTask.UUID)
}
}
gormTask.Job = job
result[i] = gormTask
}
return result, nil
}
func (db *DB) FetchTasksOfWorkerInStatusOfJob(ctx context.Context, worker *Worker, taskStatus api.TaskStatus, job *Job) ([]*Task, error) {
result := []*Task{}
tx := db.gormDB.WithContext(ctx).
Model(&Task{}).
Joins("Job").
Where("tasks.worker_id = ?", worker.ID).
Where("tasks.status = ?", taskStatus).
Where("job.id = ?", job.ID).
Scan(&result)
if tx.Error != nil {
return nil, taskError(tx.Error, "finding tasks of worker %s in status %q and job %s", worker.UUID, taskStatus, job.UUID)
queries, err := db.queries()
if err != nil {
return nil, err
}
rows, err := queries.FetchTasksOfWorkerInStatusOfJob(ctx, sqlc.FetchTasksOfWorkerInStatusOfJobParams{
WorkerID: sql.NullInt64{
Int64: int64(worker.ID),
Valid: true,
},
JobID: int64(job.ID),
TaskStatus: string(taskStatus),
})
if err != nil {
return nil, taskError(err, "finding tasks of worker %s in status %q and job %s", worker.UUID, taskStatus, job.UUID)
}
result := make([]*Task, len(rows))
for i := range rows {
gormTask, err := convertSqlcTask(rows[i].Task, job.UUID, worker.UUID)
if err != nil {
return nil, err
}
gormTask.Job = job
gormTask.JobID = job.ID
gormTask.Worker = worker
gormTask.WorkerID = &worker.ID
result[i] = gormTask
}
return result, nil
}
func (db *DB) JobHasTasksInStatus(ctx context.Context, job *Job, taskStatus api.TaskStatus) (bool, error) {
var numTasksInStatus int64
tx := db.gormDB.WithContext(ctx).
Model(&Task{}).
Where("job_id", job.ID).
Where("status", taskStatus).
Count(&numTasksInStatus)
if tx.Error != nil {
return false, taskError(tx.Error, "counting tasks of job %s in status %q", job.UUID, taskStatus)
queries, err := db.queries()
if err != nil {
return false, err
}
return numTasksInStatus > 0, nil
count, err := queries.JobCountTasksInStatus(ctx, sqlc.JobCountTasksInStatusParams{
JobID: int64(job.ID),
TaskStatus: string(taskStatus),
})
if err != nil {
return false, taskError(err, "counting tasks of job %s in status %q", job.UUID, taskStatus)
}
return count > 0, nil
}
// CountTasksOfJobInStatus counts the number of tasks in the job.
// It returns two counts, one is the number of tasks in the given statuses, the
// other is the total number of tasks of the job.
func (db *DB) CountTasksOfJobInStatus(
ctx context.Context,
job *Job,
taskStatuses ...api.TaskStatus,
) (numInStatus, numTotal int, err error) {
type Result struct {
Status api.TaskStatus
NumTasks int
queries, err := db.queries()
if err != nil {
return 0, 0, err
}
var results []Result
tx := db.gormDB.WithContext(ctx).
Model(&Task{}).
Select("status, count(*) as num_tasks").
Where("job_id", job.ID).
Group("status").
Scan(&results)
if tx.Error != nil {
return 0, 0, jobError(tx.Error, "count tasks of job %s in status %q", job.UUID, taskStatuses)
results, err := queries.JobCountTaskStatuses(ctx, int64(job.ID))
if err != nil {
return 0, 0, jobError(err, "count tasks of job %s in status %q", job.UUID, taskStatuses)
}
// Create lookup table for which statuses to count.
@ -587,10 +784,10 @@ func (db *DB) CountTasksOfJobInStatus(
// Count the number of tasks per status.
for _, result := range results {
if countStatus[result.Status] {
numInStatus += result.NumTasks
if countStatus[api.TaskStatus(result.Status)] {
numInStatus += int(result.NumTasks)
}
numTotal += result.NumTasks
numTotal += int(result.NumTasks)
}
return
@ -598,39 +795,53 @@ func (db *DB) CountTasksOfJobInStatus(
// FetchTaskIDsOfJob returns all tasks of the given job.
func (db *DB) FetchTasksOfJob(ctx context.Context, job *Job) ([]*Task, error) {
var tasks []*Task
tx := db.gormDB.WithContext(ctx).
Model(&Task{}).
Where("job_id", job.ID).
Scan(&tasks)
if tx.Error != nil {
return nil, taskError(tx.Error, "fetching tasks of job %s", job.UUID)
queries, err := db.queries()
if err != nil {
return nil, err
}
for i := range tasks {
tasks[i].Job = job
rows, err := queries.FetchTasksOfJob(ctx, int64(job.ID))
if err != nil {
return nil, taskError(err, "fetching tasks of job %s", job.UUID)
}
return tasks, nil
result := make([]*Task, len(rows))
for i := range rows {
gormTask, err := convertSqlcTask(rows[i].Task, job.UUID, rows[i].WorkerUUID.String)
if err != nil {
return nil, err
}
gormTask.Job = job
result[i] = gormTask
}
return result, nil
}
// FetchTasksOfJobInStatus returns those tasks of the given job that have any of the given statuses.
func (db *DB) FetchTasksOfJobInStatus(ctx context.Context, job *Job, taskStatuses ...api.TaskStatus) ([]*Task, error) {
var tasks []*Task
tx := db.gormDB.WithContext(ctx).
Model(&Task{}).
Where("job_id", job.ID).
Where("status in ?", taskStatuses).
Scan(&tasks)
if tx.Error != nil {
return nil, taskError(tx.Error, "fetching tasks of job %s in status %q", job.UUID, taskStatuses)
queries, err := db.queries()
if err != nil {
return nil, err
}
for i := range tasks {
tasks[i].Job = job
rows, err := queries.FetchTasksOfJobInStatus(ctx, sqlc.FetchTasksOfJobInStatusParams{
JobID: int64(job.ID),
TaskStatus: convertTaskStatuses(taskStatuses),
})
if err != nil {
return nil, taskError(err, "fetching tasks of job %s in status %q", job.UUID, taskStatuses)
}
return tasks, nil
result := make([]*Task, len(rows))
for i := range rows {
gormTask, err := convertSqlcTask(rows[i].Task, job.UUID, rows[i].WorkerUUID.String)
if err != nil {
return nil, err
}
gormTask.Job = job
result[i] = gormTask
}
return result, nil
}
// UpdateJobsTaskStatuses updates the status & activity of all tasks of `job`.
@ -641,13 +852,20 @@ func (db *DB) UpdateJobsTaskStatuses(ctx context.Context, job *Job,
return taskError(nil, "empty status not allowed")
}
tx := db.gormDB.WithContext(ctx).
Model(Task{}).
Where("job_Id = ?", job.ID).
Updates(Task{Status: taskStatus, Activity: activity})
queries, err := db.queries()
if err != nil {
return err
}
if tx.Error != nil {
return taskError(tx.Error, "updating status of all tasks of job %s", job.UUID)
err = queries.UpdateJobsTaskStatuses(ctx, sqlc.UpdateJobsTaskStatusesParams{
UpdatedAt: db.now(),
Status: string(taskStatus),
Activity: activity,
JobID: int64(job.ID),
})
if err != nil {
return taskError(err, "updating status of all tasks of job %s", job.UUID)
}
return nil
}
@ -661,26 +879,45 @@ func (db *DB) UpdateJobsTaskStatusesConditional(ctx context.Context, job *Job,
return taskError(nil, "empty status not allowed")
}
tx := db.gormDB.WithContext(ctx).
Model(Task{}).
Where("job_Id = ?", job.ID).
Where("status in ?", statusesToUpdate).
Updates(Task{Status: taskStatus, Activity: activity})
if tx.Error != nil {
return taskError(tx.Error, "updating status of all tasks in status %v of job %s", statusesToUpdate, job.UUID)
queries, err := db.queries()
if err != nil {
return err
}
err = queries.UpdateJobsTaskStatusesConditional(ctx, sqlc.UpdateJobsTaskStatusesConditionalParams{
UpdatedAt: db.now(),
Status: string(taskStatus),
Activity: activity,
JobID: int64(job.ID),
StatusesToUpdate: convertTaskStatuses(statusesToUpdate),
})
if err != nil {
return taskError(err, "updating status of all tasks in status %v of job %s", statusesToUpdate, job.UUID)
}
return nil
}
// TaskTouchedByWorker marks the task as 'touched' by a worker. This is used for timeout detection.
func (db *DB) TaskTouchedByWorker(ctx context.Context, t *Task) error {
tx := db.gormDB.WithContext(ctx).
Model(t).
Select("LastTouchedAt").
Updates(Task{LastTouchedAt: db.gormDB.NowFunc()})
if err := tx.Error; err != nil {
queries, err := db.queries()
if err != nil {
return err
}
now := db.now()
err = queries.TaskTouchedByWorker(ctx, sqlc.TaskTouchedByWorkerParams{
UpdatedAt: now,
LastTouchedAt: now,
ID: int64(t.ID),
})
if err != nil {
return taskError(err, "saving task 'last touched at'")
}
// Also update the given task, so that it's consistent with the database.
t.LastTouchedAt = now.Time
return nil
}
@ -693,64 +930,72 @@ func (db *DB) TaskTouchedByWorker(ctx context.Context, t *Task) error {
//
// Returns the new number of workers that failed this task.
func (db *DB) AddWorkerToTaskFailedList(ctx context.Context, t *Task, w *Worker) (numFailed int, err error) {
entry := TaskFailure{
Task: t,
Worker: w,
}
tx := db.gormDB.WithContext(ctx).
Clauses(clause.OnConflict{DoNothing: true}).
Create(&entry)
if tx.Error != nil {
return 0, tx.Error
queries, err := db.queries()
if err != nil {
return 0, err
}
var numFailed64 int64
tx = db.gormDB.WithContext(ctx).Model(&TaskFailure{}).
Where("task_id=?", t.ID).
Count(&numFailed64)
err = queries.AddWorkerToTaskFailedList(ctx, sqlc.AddWorkerToTaskFailedListParams{
CreatedAt: db.now().Time,
TaskID: int64(t.ID),
WorkerID: int64(w.ID),
})
if err != nil {
return 0, err
}
numFailed64, err := queries.CountWorkersFailingTask(ctx, int64(t.ID))
if err != nil {
return 0, err
}
// Integer literals are of type `int`, so that's just a bit nicer to work with
// than `int64`.
if numFailed64 > math.MaxInt32 {
log.Warn().Int64("numFailed", numFailed64).Msg("number of failed workers is crazy high, something is wrong here")
return math.MaxInt32, tx.Error
return math.MaxInt32, nil
}
return int(numFailed64), tx.Error
return int(numFailed64), nil
}
// ClearFailureListOfTask clears the list of workers that failed this task.
func (db *DB) ClearFailureListOfTask(ctx context.Context, t *Task) error {
	queries, err := db.queries()
	if err != nil {
		return err
	}
	return queries.ClearFailureListOfTask(ctx, int64(t.ID))
}
// ClearFailureListOfJob en-mass, for all tasks of this job, clears the list of
// workers that failed those tasks.
func (db *DB) ClearFailureListOfJob(ctx context.Context, j *Job) error {
	queries, err := db.queries()
	if err != nil {
		return err
	}
	return queries.ClearFailureListOfJob(ctx, int64(j.ID))
}
// FetchTaskFailureList returns the workers that have failed the given task.
func (db *DB) FetchTaskFailureList(ctx context.Context, t *Task) ([]*Worker, error) {
	queries, err := db.queries()
	if err != nil {
		return nil, err
	}

	failureList, err := queries.FetchTaskFailureList(ctx, int64(t.ID))
	if err != nil {
		return nil, err
	}

	// Convert the SQLC worker models to the ones used by the rest of the code.
	workers := make([]*Worker, len(failureList))
	for idx := range failureList {
		worker := convertSqlcWorker(failureList[idx].Worker)
		workers[idx] = &worker
	}
	return workers, nil
}
// convertSqlcJob converts a job from the SQLC-generated model to the model
@ -791,3 +1036,52 @@ func convertSqlcJob(job sqlc.Job) (*Job, error) {
return &dbJob, nil
}
// convertSqlcTask converts a FetchTaskRow from the SQLC-generated model to the
// model expected by the rest of the code. This is mostly in place to aid in the
// GORM to SQLC migration. It is intended that eventually the rest of the code
// will use the same SQLC-generated model.
//
// The job and worker UUIDs are passed separately, as they come from joined
// tables rather than from the tasks table itself.
func convertSqlcTask(task sqlc.Task, jobUUID string, workerUUID string) (*Task, error) {
	dbTask := Task{
		Model: Model{
			ID:        uint(task.ID),
			CreatedAt: task.CreatedAt,
			// UpdatedAt and LastTouchedAt are nullable in the database; their
			// zero time value is used when NULL.
			UpdatedAt: task.UpdatedAt.Time,
		},
		UUID:          task.UUID,
		Name:          task.Name,
		Type:          task.Type,
		Priority:      int(task.Priority),
		Status:        api.TaskStatus(task.Status),
		LastTouchedAt: task.LastTouchedAt.Time,
		Activity:      task.Activity,
		JobID:         uint(task.JobID),
		JobUUID:       jobUUID,
		WorkerUUID:    workerUUID,
	}

	// TODO: convert dependencies?

	// WorkerID is nullable; only set the pointer when a worker is assigned.
	if task.WorkerID.Valid {
		workerID := uint(task.WorkerID.Int64)
		dbTask.WorkerID = &workerID
	}

	// Commands are stored as a JSON blob in the database.
	if err := json.Unmarshal(task.Commands, &dbTask.Commands); err != nil {
		return nil, taskError(err, fmt.Sprintf("task %s of job %s has invalid commands: %v",
			task.UUID, jobUUID, err))
	}
	return &dbTask, nil
}
// convertTaskStatuses converts from []api.TaskStatus to []string for feeding to sqlc.
func convertTaskStatuses(taskStatuses []api.TaskStatus) []string {
	statusesAsStrings := make([]string, 0, len(taskStatuses))
	for _, status := range taskStatuses {
		statusesAsStrings = append(statusesAsStrings, string(status))
	}
	return statusesAsStrings
}

View File

@ -170,7 +170,7 @@ func TestWorkersLeftToRun(t *testing.T) {
}
func TestWorkersLeftToRunWithTags(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
// Create tags.
@ -238,9 +238,12 @@ func TestCountTaskFailuresOfWorker(t *testing.T) {
ctx, close, db, dbJob, authoredJob := jobTasksTestFixtures(t)
defer close()
task0, _ := db.FetchTask(ctx, authoredJob.Tasks[0].UUID)
task1, _ := db.FetchTask(ctx, authoredJob.Tasks[1].UUID)
task2, _ := db.FetchTask(ctx, authoredJob.Tasks[2].UUID)
task0, err := db.FetchTask(ctx, authoredJob.Tasks[0].UUID)
require.NoError(t, err)
task1, err := db.FetchTask(ctx, authoredJob.Tasks[1].UUID)
require.NoError(t, err)
task2, err := db.FetchTask(ctx, authoredJob.Tasks[2].UUID)
require.NoError(t, err)
// Sanity check on the test data.
assert.Equal(t, "blender", task0.Type)

View File

@ -45,7 +45,7 @@ func TestSimpleQuery(t *testing.T) {
}
func TestQueryMetadata(t *testing.T) {
ctx, close, db := persistenceTestFixtures(t, 0)
ctx, close, db := persistenceTestFixtures(0)
defer close()
testJob := persistAuthoredJob(t, ctx, db, createTestAuthoredJobWithTasks())

View File

@ -19,7 +19,7 @@ import (
)
func TestStoreAuthoredJob(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
job := createTestAuthoredJobWithTasks()
@ -59,7 +59,7 @@ func TestStoreAuthoredJob(t *testing.T) {
}
func TestStoreAuthoredJobWithShamanCheckoutID(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
job := createTestAuthoredJobWithTasks()
@ -75,10 +75,23 @@ func TestStoreAuthoredJobWithShamanCheckoutID(t *testing.T) {
assert.Equal(t, job.Storage.ShamanCheckoutID, fetchedJob.Storage.ShamanCheckoutID)
}
// TestFetchTaskJobUUID checks that FetchTaskJobUUID returns the UUID of the
// job that the given task belongs to.
func TestFetchTaskJobUUID(t *testing.T) {
	ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
	defer cancel()

	// Store a job with tasks, so there is a task to look up.
	job := createTestAuthoredJobWithTasks()
	err := db.StoreAuthoredJob(ctx, job)
	require.NoError(t, err)

	// The returned UUID must be the job's, looked up via the task's UUID.
	jobUUID, err := db.FetchTaskJobUUID(ctx, job.Tasks[0].UUID)
	require.NoError(t, err)
	assert.Equal(t, job.JobID, jobUUID)
}
func TestSaveJobStorageInfo(t *testing.T) {
// Test that saving job storage info doesn't count as "update".
// This is necessary for `cmd/shaman-checkout-id-setter` to do its work quietly.
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
startTime := time.Date(2023, time.February, 7, 15, 0, 0, 0, time.UTC)
@ -109,7 +122,7 @@ func TestSaveJobStorageInfo(t *testing.T) {
}
func TestSaveJobPriority(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
// Create test job.
@ -133,7 +146,7 @@ func TestSaveJobPriority(t *testing.T) {
}
func TestDeleteJob(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
authJob := createTestAuthoredJobWithTasks()
@ -179,8 +192,31 @@ func TestDeleteJob(t *testing.T) {
"all remaining tasks should belong to the other job")
}
// TestFetchJobShamanCheckoutID checks FetchJobShamanCheckoutID for both a
// non-existent job (which should report ErrJobNotFound) and an existing job.
func TestFetchJobShamanCheckoutID(t *testing.T) {
	ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
	defer cancel()

	authJob := createTestAuthoredJobWithTasks()
	authJob.JobID = "e1a034cc-b709-45f5-b80f-9cf16511c678"
	// NOTE(review): the name looks copied from TestDeleteJob; harmless here,
	// but potentially confusing.
	authJob.Name = "Job to delete"
	// Non-ASCII checkout ID, to check that it round-trips unmangled.
	authJob.Storage.ShamanCheckoutID = "some-✓out-id-string"
	persistAuthoredJob(t, ctx, db, authJob)

	{ // Test fetching a non-existing job.
		checkoutID, err := db.FetchJobShamanCheckoutID(ctx, "4cb20f0d-f1f6-4d56-8277-9b208a99fed0")
		assert.ErrorIs(t, err, ErrJobNotFound)
		assert.Equal(t, "", checkoutID)
	}

	{ // Test existing job.
		checkoutID, err := db.FetchJobShamanCheckoutID(ctx, authJob.JobID)
		require.NoError(t, err)
		assert.Equal(t, authJob.Storage.ShamanCheckoutID, checkoutID)
	}
}
func TestDeleteJobWithoutFK(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
authJob := createTestAuthoredJobWithTasks()
@ -383,6 +419,12 @@ func TestCountTasksOfJobInStatus(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, 0, numActive)
assert.Equal(t, 3, numTotal)
numCounted, numTotal, err := db.CountTasksOfJobInStatus(ctx, job,
api.TaskStatusFailed, api.TaskStatusQueued)
require.NoError(t, err)
assert.Equal(t, 3, numCounted)
assert.Equal(t, 3, numTotal)
}
func TestCheckIfJobsHoldLargeNumOfTasks(t *testing.T) {
@ -473,6 +515,26 @@ func TestFetchTasksOfJobInStatus(t *testing.T) {
assert.Empty(t, tasks)
}
// TestSaveTaskActivity checks that SaveTaskActivity stores only the Activity
// field, leaving other locally-modified fields (like Status) untouched in the
// database.
func TestSaveTaskActivity(t *testing.T) {
	ctx, close, db, _, authoredJob := jobTasksTestFixtures(t)
	defer close()

	taskUUID := authoredJob.Tasks[0].UUID
	task, err := db.FetchTask(ctx, taskUUID)
	require.NoError(t, err)
	require.Equal(t, api.TaskStatusQueued, task.Status)

	task.Activity = "Somebody ran a ünit test"
	task.Status = api.TaskStatusPaused // Should not be saved.
	require.NoError(t, db.SaveTaskActivity(ctx, task))

	// Re-fetch to verify what actually landed in the database.
	dbTask, err := db.FetchTask(ctx, taskUUID)
	require.NoError(t, err)
	require.Equal(t, "Somebody ran a ünit test", dbTask.Activity)
	require.Equal(t, api.TaskStatusQueued, dbTask.Status,
		"SaveTaskActivity() should not save the task status")
}
func TestTaskAssignToWorker(t *testing.T) {
ctx, close, db, _, authoredJob := jobTasksTestFixtures(t)
defer close()
@ -857,7 +919,7 @@ func createTestAuthoredJob(jobID string, tasks ...job_compilers.AuthoredTask) jo
return job
}
func persistAuthoredJob(t *testing.T, ctx context.Context, db *DB, authoredJob job_compilers.AuthoredJob) *Job {
func persistAuthoredJob(t require.TestingT, ctx context.Context, db *DB, authoredJob job_compilers.AuthoredJob) *Job {
err := db.StoreAuthoredJob(ctx, authoredJob)
require.NoError(t, err, "error storing authored job in DB")
@ -906,7 +968,7 @@ func duplicateJobAndTasks(job job_compilers.AuthoredJob) job_compilers.AuthoredJ
}
func jobTasksTestFixtures(t *testing.T) (context.Context, context.CancelFunc, *DB, *Job, job_compilers.AuthoredJob) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
authoredJob := createTestAuthoredJobWithTasks()
dbJob := persistAuthoredJob(t, ctx, db, authoredJob)
@ -916,7 +978,7 @@ func jobTasksTestFixtures(t *testing.T) (context.Context, context.CancelFunc, *D
// This creates test jobs using the new function createTestAuthoredJobWithNumTasks, so that the number of tasks can be set.
func jobTasksTestFixturesWithTaskNum(t *testing.T, numtasks int) (context.Context, context.CancelFunc, *DB, *Job, job_compilers.AuthoredJob) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeoutlong)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeoutlong)
authoredJob := createTestAuthoredJobWithNumTasks(numtasks)
dbJob := persistAuthoredJob(t, ctx, db, authoredJob)

View File

@ -4,13 +4,16 @@ package persistence
import (
"context"
"database/sql"
"errors"
"fmt"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
gormlogger "gorm.io/gorm/logger"
"projects.blender.org/studio/flamenco/internal/manager/persistence/sqlc"
)
// dbLogger implements the behaviour of Gorm's default logger on top of Zerolog.
@ -82,7 +85,7 @@ func (l *dbLogger) Trace(ctx context.Context, begin time.Time, fc func() (sql st
// Function to lazily get the SQL, affected rows, and logger.
buildLogger := func() (loggerPtr *zerolog.Logger, sql string) {
sql, rows := fc()
logCtx = logCtx.Err(err)
logCtx = logCtx.AnErr("cause", err)
if rows >= 0 {
logCtx = logCtx.Int64("rowsAffected", rows)
}
@ -91,9 +94,13 @@ func (l *dbLogger) Trace(ctx context.Context, begin time.Time, fc func() (sql st
}
switch {
case err != nil && zlogLevel <= zerolog.ErrorLevel && (!errors.Is(err, gorm.ErrRecordNotFound) || !l.IgnoreRecordNotFoundError):
case err != nil && zlogLevel <= zerolog.ErrorLevel:
logger, sql := buildLogger()
if l.silenceLoggingError(err) {
logger.Debug().Msg(sql)
} else {
logger.Error().Msg(sql)
}
case elapsed > l.SlowThreshold && l.SlowThreshold != 0 && zlogLevel <= zerolog.WarnLevel:
logger, sql := buildLogger()
@ -109,13 +116,27 @@ func (l *dbLogger) Trace(ctx context.Context, begin time.Time, fc func() (sql st
}
}
// silenceLoggingError returns whether the given database error should be
// logged quietly instead of as an error.
func (l dbLogger) silenceLoggingError(err error) bool {
	if l.IgnoreRecordNotFoundError && errors.Is(err, gorm.ErrRecordNotFound) {
		return true
	}
	// context.Canceled is usually caused by the HTTP client closing its
	// connection. Stopping a database query then is normal behaviour, so it
	// shouldn't be logged as an error.
	return errors.Is(err, context.Canceled)
}
// logEvent logs an event at the given level, if the logger's level allows it.
func (l dbLogger) logEvent(level zerolog.Level, msg string, args ...interface{}) {
	if l.zlog.GetLevel() > level {
		return
	}

	logger := l.logger(args)
	// NOTE(review): the "logEvent: " prefix looks like leftover debugging
	// output — confirm whether it should stay in the logged message.
	logger.WithLevel(level).Msg("logEvent: " + msg)
}
// logger constructs a zerolog logger. The given arguments are added via reflection.
@ -126,3 +147,28 @@ func (l dbLogger) logger(args ...interface{}) zerolog.Logger {
}
return logCtx.Logger()
}
// LoggingDBConn wraps a database/sql.DB connection, so that it can be used with
// sqlc and log all the queries.
type LoggingDBConn struct {
	wrappedConn sqlc.DBTX
}

// Compile-time check that LoggingDBConn implements the sqlc.DBTX interface.
var _ sqlc.DBTX = (*LoggingDBConn)(nil)

// ExecContext logs the query and its arguments at trace level, then delegates
// to the wrapped connection.
func (ldbc *LoggingDBConn) ExecContext(ctx context.Context, sql string, args ...interface{}) (sql.Result, error) {
	log.Trace().Str("sql", sql).Interface("args", args).Msg("database: query Exec")
	return ldbc.wrappedConn.ExecContext(ctx, sql, args...)
}

// PrepareContext logs the query at trace level, then delegates to the wrapped
// connection.
func (ldbc *LoggingDBConn) PrepareContext(ctx context.Context, sql string) (*sql.Stmt, error) {
	log.Trace().Str("sql", sql).Msg("database: query Prepare")
	return ldbc.wrappedConn.PrepareContext(ctx, sql)
}

// QueryContext logs the query and its arguments at trace level, then delegates
// to the wrapped connection.
func (ldbc *LoggingDBConn) QueryContext(ctx context.Context, sql string, args ...interface{}) (*sql.Rows, error) {
	log.Trace().Str("sql", sql).Interface("args", args).Msg("database: query Query")
	return ldbc.wrappedConn.QueryContext(ctx, sql, args...)
}

// QueryRowContext logs the query and its arguments at trace level, then
// delegates to the wrapped connection.
func (ldbc *LoggingDBConn) QueryRowContext(ctx context.Context, sql string, args ...interface{}) *sql.Row {
	log.Trace().Str("sql", sql).Interface("args", args).Msg("database: query QueryRow")
	return ldbc.wrappedConn.QueryRowContext(ctx, sql, args...)
}

View File

@ -0,0 +1,84 @@
-- Some booleans were modeled as `smallint`. These are turned into `boolean` instead.
--
-- SQLite cannot change a column's type in place, so this uses the usual
-- table-rebuild pattern: create a new table with the desired schema, copy all
-- rows over, drop the old table, and rename the new one into place.
--
-- +goose Up
CREATE TABLE temp_workers (
	id integer NOT NULL,
	created_at datetime NOT NULL,
	updated_at datetime,
	uuid varchar(36) UNIQUE DEFAULT '' NOT NULL,
	secret varchar(255) DEFAULT '' NOT NULL,
	name varchar(64) DEFAULT '' NOT NULL,
	address varchar(39) DEFAULT '' NOT NULL,
	platform varchar(16) DEFAULT '' NOT NULL,
	software varchar(32) DEFAULT '' NOT NULL,
	status varchar(16) DEFAULT '' NOT NULL,
	last_seen_at datetime,
	status_requested varchar(16) DEFAULT '' NOT NULL,
	lazy_status_request boolean DEFAULT false NOT NULL,
	supported_task_types varchar(255) DEFAULT '' NOT NULL,
	deleted_at datetime,
	can_restart boolean DEFAULT false NOT NULL,
	PRIMARY KEY (id)
);
INSERT INTO temp_workers SELECT
	id,
	created_at,
	updated_at,
	uuid,
	secret,
	name,
	address,
	platform,
	software,
	status,
	last_seen_at,
	status_requested,
	lazy_status_request,
	supported_task_types,
	deleted_at,
	can_restart
FROM workers;
DROP TABLE workers;
ALTER TABLE temp_workers RENAME TO workers;

-- +goose Down
-- Revert the boolean columns back to smallint, using the same table-rebuild
-- pattern as above.
CREATE TABLE temp_workers (
	id integer NOT NULL,
	created_at datetime NOT NULL,
	updated_at datetime,
	uuid varchar(36) UNIQUE DEFAULT '' NOT NULL,
	secret varchar(255) DEFAULT '' NOT NULL,
	name varchar(64) DEFAULT '' NOT NULL,
	address varchar(39) DEFAULT '' NOT NULL,
	platform varchar(16) DEFAULT '' NOT NULL,
	software varchar(32) DEFAULT '' NOT NULL,
	status varchar(16) DEFAULT '' NOT NULL,
	last_seen_at datetime,
	status_requested varchar(16) DEFAULT '' NOT NULL,
	lazy_status_request smallint DEFAULT false NOT NULL,
	supported_task_types varchar(255) DEFAULT '' NOT NULL,
	deleted_at datetime,
	can_restart smallint DEFAULT false NOT NULL,
	PRIMARY KEY (id)
);
INSERT INTO temp_workers SELECT
	id,
	created_at,
	updated_at,
	uuid,
	secret,
	name,
	address,
	platform,
	software,
	status,
	last_seen_at,
	status_requested,
	lazy_status_request,
	supported_task_types,
	deleted_at,
	can_restart
FROM workers;
DROP TABLE workers;
ALTER TABLE temp_workers RENAME TO workers;

View File

@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.25.0
// sqlc v1.26.0
package sqlc

View File

@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.25.0
// sqlc v1.26.0
package sqlc
@ -94,10 +94,10 @@ type Worker struct {
Status string
LastSeenAt sql.NullTime
StatusRequested string
LazyStatusRequest int64
LazyStatusRequest bool
SupportedTaskTypes string
DeletedAt sql.NullTime
CanRestart int64
CanRestart bool
}
type WorkerTag struct {

View File

@ -18,9 +18,18 @@ INSERT INTO jobs (
VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ? );
-- name: FetchJob :one
-- Fetch a job by its UUID.
SELECT * FROM jobs
WHERE uuid = ? LIMIT 1;
-- name: FetchJobByID :one
-- Fetch a job by its numerical ID.
SELECT * FROM jobs
WHERE id = ? LIMIT 1;
-- name: FetchJobShamanCheckoutID :one
SELECT storage_shaman_checkout_id FROM jobs WHERE uuid=@uuid;
-- name: DeleteJob :exec
DELETE FROM jobs WHERE uuid = ?;
@ -55,3 +64,129 @@ UPDATE jobs SET updated_at=@now, priority=@priority WHERE id=@id;
-- name: SaveJobStorageInfo :exec
UPDATE jobs SET storage_shaman_checkout_id=@storage_shaman_checkout_id WHERE id=@id;
-- name: FetchTask :one
-- Fetch a single task by its UUID, including the UUIDs of its job and worker.
SELECT sqlc.embed(tasks), jobs.UUID as jobUUID, workers.UUID as workerUUID
FROM tasks
LEFT JOIN jobs ON (tasks.job_id = jobs.id)
LEFT JOIN workers ON (tasks.worker_id = workers.id)
WHERE tasks.uuid = @uuid;

-- name: FetchTasksOfWorkerInStatus :many
-- Fetch all tasks assigned to the given worker that are in the given status.
SELECT sqlc.embed(tasks), jobs.UUID as jobUUID
FROM tasks
LEFT JOIN jobs ON (tasks.job_id = jobs.id)
WHERE tasks.worker_id = @worker_id
	AND tasks.status = @task_status;

-- name: FetchTasksOfWorkerInStatusOfJob :many
-- Fetch those tasks of the given job that are assigned to the given worker
-- and are in the given status.
SELECT sqlc.embed(tasks)
FROM tasks
WHERE tasks.worker_id = @worker_id
	AND tasks.job_id = @job_id
	AND tasks.status = @task_status;

-- name: FetchTasksOfJob :many
-- Fetch all tasks of the given job, including each task's worker UUID.
SELECT sqlc.embed(tasks), workers.UUID as workerUUID
FROM tasks
LEFT JOIN workers ON (tasks.worker_id = workers.id)
WHERE tasks.job_id = @job_id;

-- name: FetchTasksOfJobInStatus :many
-- Fetch those tasks of the given job that are in any of the given statuses.
SELECT sqlc.embed(tasks), workers.UUID as workerUUID
FROM tasks
LEFT JOIN workers ON (tasks.worker_id = workers.id)
WHERE tasks.job_id = @job_id
	AND tasks.status in (sqlc.slice('task_status'));

-- name: FetchTaskJobUUID :one
-- Fetch only the UUID of the job the given task belongs to.
SELECT jobs.UUID as jobUUID
FROM tasks
LEFT JOIN jobs ON (tasks.job_id = jobs.id)
WHERE tasks.uuid = @uuid;

-- name: UpdateTask :exec
-- Update a Task, except its id, created_at, uuid, or job_id fields.
UPDATE tasks SET
	updated_at = @updated_at,
	name = @name,
	type = @type,
	priority = @priority,
	status = @status,
	worker_id = @worker_id,
	last_touched_at = @last_touched_at,
	commands = @commands,
	activity = @activity
WHERE id=@id;

-- name: UpdateTaskStatus :exec
-- Update only the status (and updated_at timestamp) of a task.
UPDATE tasks SET
	updated_at = @updated_at,
	status = @status
WHERE id=@id;

-- name: UpdateTaskActivity :exec
-- Update only the activity (and updated_at timestamp) of a task.
UPDATE tasks SET
	updated_at = @updated_at,
	activity = @activity
WHERE id=@id;

-- name: UpdateJobsTaskStatusesConditional :exec
-- Update the status & activity of those tasks of a job that are in any of the
-- given statuses.
UPDATE tasks SET
	updated_at = @updated_at,
	status = @status,
	activity = @activity
WHERE job_id = @job_id AND status in (sqlc.slice('statuses_to_update'));

-- name: UpdateJobsTaskStatuses :exec
-- Update the status & activity of all tasks of a job.
UPDATE tasks SET
	updated_at = @updated_at,
	status = @status,
	activity = @activity
WHERE job_id = @job_id;

-- name: TaskAssignToWorker :exec
-- Assign the task to the given worker.
UPDATE tasks SET
	updated_at = @updated_at,
	worker_id = @worker_id
WHERE id=@id;

-- name: TaskTouchedByWorker :exec
-- Mark the task as 'touched' by a worker; used for timeout detection.
UPDATE tasks SET
	updated_at = @updated_at,
	last_touched_at = @last_touched_at
WHERE id=@id;

-- name: JobCountTasksInStatus :one
-- Fetch number of tasks in the given status, of the given job.
SELECT count(*) as num_tasks FROM tasks
WHERE job_id = @job_id AND status = @task_status;

-- name: JobCountTaskStatuses :many
-- Fetch (status, num tasks in that status) rows for the given job.
SELECT status, count(*) as num_tasks FROM tasks
WHERE job_id = @job_id
GROUP BY status;

-- name: AddWorkerToTaskFailedList :exec
-- Record that the given worker failed the given task. Re-inserting the same
-- (task, worker) pair is a no-op.
INSERT INTO task_failures (created_at, task_id, worker_id)
VALUES (@created_at, @task_id, @worker_id)
ON CONFLICT DO NOTHING;

-- name: CountWorkersFailingTask :one
-- Count how many workers have failed a given task.
SELECT count(*) as num_failed FROM task_failures
WHERE task_id=@task_id;

-- name: ClearFailureListOfTask :exec
-- Clear the list of workers that failed the given task.
DELETE FROM task_failures WHERE task_id=@task_id;

-- name: ClearFailureListOfJob :exec
-- SQLite doesn't support JOIN in DELETE queries, so use a sub-query instead.
DELETE FROM task_failures
WHERE task_id in (SELECT id FROM tasks WHERE job_id=@job_id);

-- name: FetchTaskFailureList :many
-- Fetch the workers that failed the given task.
SELECT sqlc.embed(workers) FROM workers
INNER JOIN task_failures TF on TF.worker_id=workers.id
WHERE TF.task_id=@task_id;

View File

@ -1,6 +1,6 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.25.0
// sqlc v1.26.0
// source: query_jobs.sql
package sqlc
@ -13,6 +13,56 @@ import (
"time"
)
const addWorkerToTaskFailedList = `-- name: AddWorkerToTaskFailedList :exec
INSERT INTO task_failures (created_at, task_id, worker_id)
VALUES (?1, ?2, ?3)
ON CONFLICT DO NOTHING
`
type AddWorkerToTaskFailedListParams struct {
CreatedAt time.Time
TaskID int64
WorkerID int64
}
func (q *Queries) AddWorkerToTaskFailedList(ctx context.Context, arg AddWorkerToTaskFailedListParams) error {
_, err := q.db.ExecContext(ctx, addWorkerToTaskFailedList, arg.CreatedAt, arg.TaskID, arg.WorkerID)
return err
}
const clearFailureListOfJob = `-- name: ClearFailureListOfJob :exec
DELETE FROM task_failures
WHERE task_id in (SELECT id FROM tasks WHERE job_id=?1)
`
// SQLite doesn't support JOIN in DELETE queries, so use a sub-query instead.
func (q *Queries) ClearFailureListOfJob(ctx context.Context, jobID int64) error {
_, err := q.db.ExecContext(ctx, clearFailureListOfJob, jobID)
return err
}
const clearFailureListOfTask = `-- name: ClearFailureListOfTask :exec
DELETE FROM task_failures WHERE task_id=?1
`

// ClearFailureListOfTask removes all failure records of a single task,
// identified by its numerical ID.
func (q *Queries) ClearFailureListOfTask(ctx context.Context, taskID int64) error {
	_, err := q.db.ExecContext(ctx, clearFailureListOfTask, taskID)
	return err
}
const countWorkersFailingTask = `-- name: CountWorkersFailingTask :one
SELECT count(*) as num_failed FROM task_failures
WHERE task_id=?1
`

// CountWorkersFailingTask counts how many workers have failed a given task.
func (q *Queries) CountWorkersFailingTask(ctx context.Context, taskID int64) (int64, error) {
	row := q.db.QueryRowContext(ctx, countWorkersFailingTask, taskID)
	var num_failed int64
	err := row.Scan(&num_failed)
	return num_failed, err
}
const createJob = `-- name: CreateJob :exec
INSERT INTO jobs (
@ -74,6 +124,7 @@ SELECT id, created_at, updated_at, uuid, name, job_type, priority, status, activ
WHERE uuid = ? LIMIT 1
`
// Fetch a job by its UUID.
func (q *Queries) FetchJob(ctx context.Context, uuid string) (Job, error) {
row := q.db.QueryRowContext(ctx, fetchJob, uuid)
var i Job
@ -96,6 +147,45 @@ func (q *Queries) FetchJob(ctx context.Context, uuid string) (Job, error) {
return i, err
}
const fetchJobByID = `-- name: FetchJobByID :one
SELECT id, created_at, updated_at, uuid, name, job_type, priority, status, activity, settings, metadata, delete_requested_at, storage_shaman_checkout_id, worker_tag_id FROM jobs
WHERE id = ? LIMIT 1
`

// FetchJobByID fetches a job by its numerical ID.
func (q *Queries) FetchJobByID(ctx context.Context, id int64) (Job, error) {
	row := q.db.QueryRowContext(ctx, fetchJobByID, id)
	var i Job
	// Scan the columns in the same order as the SELECT clause above.
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.UUID,
		&i.Name,
		&i.JobType,
		&i.Priority,
		&i.Status,
		&i.Activity,
		&i.Settings,
		&i.Metadata,
		&i.DeleteRequestedAt,
		&i.StorageShamanCheckoutID,
		&i.WorkerTagID,
	)
	return i, err
}
const fetchJobShamanCheckoutID = `-- name: FetchJobShamanCheckoutID :one
SELECT storage_shaman_checkout_id FROM jobs WHERE uuid=?1
`

// FetchJobShamanCheckoutID returns only the Shaman storage checkout ID of the
// job with the given UUID.
func (q *Queries) FetchJobShamanCheckoutID(ctx context.Context, uuid string) (string, error) {
	row := q.db.QueryRowContext(ctx, fetchJobShamanCheckoutID, uuid)
	var storage_shaman_checkout_id string
	err := row.Scan(&storage_shaman_checkout_id)
	return storage_shaman_checkout_id, err
}
const fetchJobUUIDsUpdatedBefore = `-- name: FetchJobUUIDsUpdatedBefore :many
SELECT uuid FROM jobs WHERE updated_at <= ?1
`
@ -204,6 +294,388 @@ func (q *Queries) FetchJobsInStatus(ctx context.Context, statuses []string) ([]J
return items, nil
}
const fetchTask = `-- name: FetchTask :one
SELECT tasks.id, tasks.created_at, tasks.updated_at, tasks.uuid, tasks.name, tasks.type, tasks.job_id, tasks.priority, tasks.status, tasks.worker_id, tasks.last_touched_at, tasks.commands, tasks.activity, jobs.UUID as jobUUID, workers.UUID as workerUUID
FROM tasks
LEFT JOIN jobs ON (tasks.job_id = jobs.id)
LEFT JOIN workers ON (tasks.worker_id = workers.id)
WHERE tasks.uuid = ?1
`

// FetchTaskRow is a Task plus the UUIDs of its job and assigned worker.
// The UUIDs are nullable because of the LEFT JOINs in the query.
type FetchTaskRow struct {
	Task       Task
	JobUUID    sql.NullString
	WorkerUUID sql.NullString
}

// FetchTask fetches a single task by its UUID, together with the UUIDs of its
// job and (if assigned) its worker.
func (q *Queries) FetchTask(ctx context.Context, uuid string) (FetchTaskRow, error) {
	row := q.db.QueryRowContext(ctx, fetchTask, uuid)
	var i FetchTaskRow
	err := row.Scan(
		&i.Task.ID,
		&i.Task.CreatedAt,
		&i.Task.UpdatedAt,
		&i.Task.UUID,
		&i.Task.Name,
		&i.Task.Type,
		&i.Task.JobID,
		&i.Task.Priority,
		&i.Task.Status,
		&i.Task.WorkerID,
		&i.Task.LastTouchedAt,
		&i.Task.Commands,
		&i.Task.Activity,
		&i.JobUUID,
		&i.WorkerUUID,
	)
	return i, err
}
const fetchTaskFailureList = `-- name: FetchTaskFailureList :many
SELECT workers.id, workers.created_at, workers.updated_at, workers.uuid, workers.secret, workers.name, workers.address, workers.platform, workers.software, workers.status, workers.last_seen_at, workers.status_requested, workers.lazy_status_request, workers.supported_task_types, workers.deleted_at, workers.can_restart FROM workers
INNER JOIN task_failures TF on TF.worker_id=workers.id
WHERE TF.task_id=?1
`

// FetchTaskFailureListRow wraps an embedded Worker row.
type FetchTaskFailureListRow struct {
	Worker Worker
}

// FetchTaskFailureList returns the workers that have failed the task with the
// given numerical ID.
func (q *Queries) FetchTaskFailureList(ctx context.Context, taskID int64) ([]FetchTaskFailureListRow, error) {
	rows, err := q.db.QueryContext(ctx, fetchTaskFailureList, taskID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []FetchTaskFailureListRow
	for rows.Next() {
		var i FetchTaskFailureListRow
		if err := rows.Scan(
			&i.Worker.ID,
			&i.Worker.CreatedAt,
			&i.Worker.UpdatedAt,
			&i.Worker.UUID,
			&i.Worker.Secret,
			&i.Worker.Name,
			&i.Worker.Address,
			&i.Worker.Platform,
			&i.Worker.Software,
			&i.Worker.Status,
			&i.Worker.LastSeenAt,
			&i.Worker.StatusRequested,
			&i.Worker.LazyStatusRequest,
			&i.Worker.SupportedTaskTypes,
			&i.Worker.DeletedAt,
			&i.Worker.CanRestart,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const fetchTaskJobUUID = `-- name: FetchTaskJobUUID :one
SELECT jobs.UUID as jobUUID
FROM tasks
LEFT JOIN jobs ON (tasks.job_id = jobs.id)
WHERE tasks.uuid = ?1
`

// FetchTaskJobUUID returns the UUID of the job the task (given by its UUID)
// belongs to. Nullable because of the LEFT JOIN.
func (q *Queries) FetchTaskJobUUID(ctx context.Context, uuid string) (sql.NullString, error) {
	row := q.db.QueryRowContext(ctx, fetchTaskJobUUID, uuid)
	var jobuuid sql.NullString
	err := row.Scan(&jobuuid)
	return jobuuid, err
}
const fetchTasksOfJob = `-- name: FetchTasksOfJob :many
SELECT tasks.id, tasks.created_at, tasks.updated_at, tasks.uuid, tasks.name, tasks.type, tasks.job_id, tasks.priority, tasks.status, tasks.worker_id, tasks.last_touched_at, tasks.commands, tasks.activity, workers.UUID as workerUUID
FROM tasks
LEFT JOIN workers ON (tasks.worker_id = workers.id)
WHERE tasks.job_id = ?1
`

// FetchTasksOfJobRow is a Task plus the UUID of its assigned worker, which is
// nullable because of the LEFT JOIN.
type FetchTasksOfJobRow struct {
	Task       Task
	WorkerUUID sql.NullString
}

// FetchTasksOfJob returns all tasks of the job with the given numerical ID.
func (q *Queries) FetchTasksOfJob(ctx context.Context, jobID int64) ([]FetchTasksOfJobRow, error) {
	rows, err := q.db.QueryContext(ctx, fetchTasksOfJob, jobID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []FetchTasksOfJobRow
	for rows.Next() {
		var i FetchTasksOfJobRow
		if err := rows.Scan(
			&i.Task.ID,
			&i.Task.CreatedAt,
			&i.Task.UpdatedAt,
			&i.Task.UUID,
			&i.Task.Name,
			&i.Task.Type,
			&i.Task.JobID,
			&i.Task.Priority,
			&i.Task.Status,
			&i.Task.WorkerID,
			&i.Task.LastTouchedAt,
			&i.Task.Commands,
			&i.Task.Activity,
			&i.WorkerUUID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const fetchTasksOfJobInStatus = `-- name: FetchTasksOfJobInStatus :many
SELECT tasks.id, tasks.created_at, tasks.updated_at, tasks.uuid, tasks.name, tasks.type, tasks.job_id, tasks.priority, tasks.status, tasks.worker_id, tasks.last_touched_at, tasks.commands, tasks.activity, workers.UUID as workerUUID
FROM tasks
LEFT JOIN workers ON (tasks.worker_id = workers.id)
WHERE tasks.job_id = ?1
  AND tasks.status in (/*SLICE:task_status*/?)
`

// FetchTasksOfJobInStatusParams holds the bind parameters for FetchTasksOfJobInStatus.
type FetchTasksOfJobInStatusParams struct {
	JobID      int64
	TaskStatus []string
}

// FetchTasksOfJobInStatusRow is a Task plus the UUID of its assigned worker,
// which is nullable because of the LEFT JOIN.
type FetchTasksOfJobInStatusRow struct {
	Task       Task
	WorkerUUID sql.NullString
}

// FetchTasksOfJobInStatus returns the tasks of the given job that are in any
// of the given statuses. An empty status list matches no tasks (the slice
// placeholder is replaced with NULL).
func (q *Queries) FetchTasksOfJobInStatus(ctx context.Context, arg FetchTasksOfJobInStatusParams) ([]FetchTasksOfJobInStatusRow, error) {
	query := fetchTasksOfJobInStatus
	var queryParams []interface{}
	queryParams = append(queryParams, arg.JobID)
	// Expand the /*SLICE:...*/ marker into one '?' placeholder per status.
	if len(arg.TaskStatus) > 0 {
		for _, v := range arg.TaskStatus {
			queryParams = append(queryParams, v)
		}
		query = strings.Replace(query, "/*SLICE:task_status*/?", strings.Repeat(",?", len(arg.TaskStatus))[1:], 1)
	} else {
		query = strings.Replace(query, "/*SLICE:task_status*/?", "NULL", 1)
	}
	rows, err := q.db.QueryContext(ctx, query, queryParams...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []FetchTasksOfJobInStatusRow
	for rows.Next() {
		var i FetchTasksOfJobInStatusRow
		if err := rows.Scan(
			&i.Task.ID,
			&i.Task.CreatedAt,
			&i.Task.UpdatedAt,
			&i.Task.UUID,
			&i.Task.Name,
			&i.Task.Type,
			&i.Task.JobID,
			&i.Task.Priority,
			&i.Task.Status,
			&i.Task.WorkerID,
			&i.Task.LastTouchedAt,
			&i.Task.Commands,
			&i.Task.Activity,
			&i.WorkerUUID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const fetchTasksOfWorkerInStatus = `-- name: FetchTasksOfWorkerInStatus :many
SELECT tasks.id, tasks.created_at, tasks.updated_at, tasks.uuid, tasks.name, tasks.type, tasks.job_id, tasks.priority, tasks.status, tasks.worker_id, tasks.last_touched_at, tasks.commands, tasks.activity, jobs.UUID as jobUUID
FROM tasks
LEFT JOIN jobs ON (tasks.job_id = jobs.id)
WHERE tasks.worker_id = ?1
  AND tasks.status = ?2
`

// FetchTasksOfWorkerInStatusParams holds the bind parameters for FetchTasksOfWorkerInStatus.
type FetchTasksOfWorkerInStatusParams struct {
	WorkerID   sql.NullInt64
	TaskStatus string
}

// FetchTasksOfWorkerInStatusRow is a Task plus the UUID of its job, nullable
// because of the LEFT JOIN.
type FetchTasksOfWorkerInStatusRow struct {
	Task    Task
	JobUUID sql.NullString
}

// FetchTasksOfWorkerInStatus returns all tasks assigned to the given worker
// that are in the given status.
func (q *Queries) FetchTasksOfWorkerInStatus(ctx context.Context, arg FetchTasksOfWorkerInStatusParams) ([]FetchTasksOfWorkerInStatusRow, error) {
	rows, err := q.db.QueryContext(ctx, fetchTasksOfWorkerInStatus, arg.WorkerID, arg.TaskStatus)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []FetchTasksOfWorkerInStatusRow
	for rows.Next() {
		var i FetchTasksOfWorkerInStatusRow
		if err := rows.Scan(
			&i.Task.ID,
			&i.Task.CreatedAt,
			&i.Task.UpdatedAt,
			&i.Task.UUID,
			&i.Task.Name,
			&i.Task.Type,
			&i.Task.JobID,
			&i.Task.Priority,
			&i.Task.Status,
			&i.Task.WorkerID,
			&i.Task.LastTouchedAt,
			&i.Task.Commands,
			&i.Task.Activity,
			&i.JobUUID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const fetchTasksOfWorkerInStatusOfJob = `-- name: FetchTasksOfWorkerInStatusOfJob :many
SELECT tasks.id, tasks.created_at, tasks.updated_at, tasks.uuid, tasks.name, tasks.type, tasks.job_id, tasks.priority, tasks.status, tasks.worker_id, tasks.last_touched_at, tasks.commands, tasks.activity
FROM tasks
WHERE tasks.worker_id = ?1
  AND tasks.job_id = ?2
  AND tasks.status = ?3
`

// FetchTasksOfWorkerInStatusOfJobParams holds the bind parameters for
// FetchTasksOfWorkerInStatusOfJob.
type FetchTasksOfWorkerInStatusOfJobParams struct {
	WorkerID   sql.NullInt64
	JobID      int64
	TaskStatus string
}

// FetchTasksOfWorkerInStatusOfJobRow wraps an embedded Task row.
type FetchTasksOfWorkerInStatusOfJobRow struct {
	Task Task
}

// FetchTasksOfWorkerInStatusOfJob returns the tasks of one job that are
// assigned to the given worker and are in the given status.
func (q *Queries) FetchTasksOfWorkerInStatusOfJob(ctx context.Context, arg FetchTasksOfWorkerInStatusOfJobParams) ([]FetchTasksOfWorkerInStatusOfJobRow, error) {
	rows, err := q.db.QueryContext(ctx, fetchTasksOfWorkerInStatusOfJob, arg.WorkerID, arg.JobID, arg.TaskStatus)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []FetchTasksOfWorkerInStatusOfJobRow
	for rows.Next() {
		var i FetchTasksOfWorkerInStatusOfJobRow
		if err := rows.Scan(
			&i.Task.ID,
			&i.Task.CreatedAt,
			&i.Task.UpdatedAt,
			&i.Task.UUID,
			&i.Task.Name,
			&i.Task.Type,
			&i.Task.JobID,
			&i.Task.Priority,
			&i.Task.Status,
			&i.Task.WorkerID,
			&i.Task.LastTouchedAt,
			&i.Task.Commands,
			&i.Task.Activity,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const jobCountTaskStatuses = `-- name: JobCountTaskStatuses :many
SELECT status, count(*) as num_tasks FROM tasks
WHERE job_id = ?1
GROUP BY status
`

// JobCountTaskStatusesRow is one (status, number of tasks in that status) pair.
type JobCountTaskStatusesRow struct {
	Status   string
	NumTasks int64
}

// JobCountTaskStatuses fetches (status, num tasks in that status) rows for the
// given job.
func (q *Queries) JobCountTaskStatuses(ctx context.Context, jobID int64) ([]JobCountTaskStatusesRow, error) {
	rows, err := q.db.QueryContext(ctx, jobCountTaskStatuses, jobID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []JobCountTaskStatusesRow
	for rows.Next() {
		var i JobCountTaskStatusesRow
		if err := rows.Scan(&i.Status, &i.NumTasks); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const jobCountTasksInStatus = `-- name: JobCountTasksInStatus :one
SELECT count(*) as num_tasks FROM tasks
WHERE job_id = ?1 AND status = ?2
`

// JobCountTasksInStatusParams holds the bind parameters for JobCountTasksInStatus.
type JobCountTasksInStatusParams struct {
	JobID      int64
	TaskStatus string
}

// JobCountTasksInStatus fetches the number of tasks in the given status, of
// the given job.
func (q *Queries) JobCountTasksInStatus(ctx context.Context, arg JobCountTasksInStatusParams) (int64, error) {
	row := q.db.QueryRowContext(ctx, jobCountTasksInStatus, arg.JobID, arg.TaskStatus)
	var num_tasks int64
	err := row.Scan(&num_tasks)
	return num_tasks, err
}
const requestJobDeletion = `-- name: RequestJobDeletion :exec
UPDATE jobs SET
updated_at = ?1,
@ -298,3 +770,179 @@ func (q *Queries) SaveJobStorageInfo(ctx context.Context, arg SaveJobStorageInfo
_, err := q.db.ExecContext(ctx, saveJobStorageInfo, arg.StorageShamanCheckoutID, arg.ID)
return err
}
const taskAssignToWorker = `-- name: TaskAssignToWorker :exec
UPDATE tasks SET
updated_at = ?1,
worker_id = ?2
WHERE id=?3
`

// TaskAssignToWorkerParams holds the bind parameters for TaskAssignToWorker.
type TaskAssignToWorkerParams struct {
	UpdatedAt sql.NullTime
	WorkerID  sql.NullInt64
	ID        int64
}

// TaskAssignToWorker assigns the task (by numerical ID) to the given worker,
// updating only the worker_id and updated_at columns.
func (q *Queries) TaskAssignToWorker(ctx context.Context, arg TaskAssignToWorkerParams) error {
	_, err := q.db.ExecContext(ctx, taskAssignToWorker, arg.UpdatedAt, arg.WorkerID, arg.ID)
	return err
}
const taskTouchedByWorker = `-- name: TaskTouchedByWorker :exec
UPDATE tasks SET
updated_at = ?1,
last_touched_at = ?2
WHERE id=?3
`

// TaskTouchedByWorkerParams holds the bind parameters for TaskTouchedByWorker.
type TaskTouchedByWorkerParams struct {
	UpdatedAt     sql.NullTime
	LastTouchedAt sql.NullTime
	ID            int64
}

// TaskTouchedByWorker stores the timestamp at which a worker last touched the
// task with the given numerical ID.
func (q *Queries) TaskTouchedByWorker(ctx context.Context, arg TaskTouchedByWorkerParams) error {
	_, err := q.db.ExecContext(ctx, taskTouchedByWorker, arg.UpdatedAt, arg.LastTouchedAt, arg.ID)
	return err
}
const updateJobsTaskStatuses = `-- name: UpdateJobsTaskStatuses :exec
UPDATE tasks SET
updated_at = ?1,
status = ?2,
activity = ?3
WHERE job_id = ?4
`

// UpdateJobsTaskStatusesParams holds the bind parameters for UpdateJobsTaskStatuses.
type UpdateJobsTaskStatusesParams struct {
	UpdatedAt sql.NullTime
	Status    string
	Activity  string
	JobID     int64
}

// UpdateJobsTaskStatuses sets the status and activity of ALL tasks of the
// given job, regardless of their current status.
func (q *Queries) UpdateJobsTaskStatuses(ctx context.Context, arg UpdateJobsTaskStatusesParams) error {
	_, err := q.db.ExecContext(ctx, updateJobsTaskStatuses,
		arg.UpdatedAt,
		arg.Status,
		arg.Activity,
		arg.JobID,
	)
	return err
}
const updateJobsTaskStatusesConditional = `-- name: UpdateJobsTaskStatusesConditional :exec
UPDATE tasks SET
updated_at = ?1,
status = ?2,
activity = ?3
WHERE job_id = ?4 AND status in (/*SLICE:statuses_to_update*/?)
`

// UpdateJobsTaskStatusesConditionalParams holds the bind parameters for
// UpdateJobsTaskStatusesConditional.
type UpdateJobsTaskStatusesConditionalParams struct {
	UpdatedAt        sql.NullTime
	Status           string
	Activity         string
	JobID            int64
	StatusesToUpdate []string
}

// UpdateJobsTaskStatusesConditional sets the status and activity of those
// tasks of the given job that are currently in one of StatusesToUpdate. An
// empty slice matches no tasks (the slice placeholder is replaced with NULL).
func (q *Queries) UpdateJobsTaskStatusesConditional(ctx context.Context, arg UpdateJobsTaskStatusesConditionalParams) error {
	query := updateJobsTaskStatusesConditional
	var queryParams []interface{}
	queryParams = append(queryParams, arg.UpdatedAt)
	queryParams = append(queryParams, arg.Status)
	queryParams = append(queryParams, arg.Activity)
	queryParams = append(queryParams, arg.JobID)
	// Expand the /*SLICE:...*/ marker into one '?' placeholder per status.
	if len(arg.StatusesToUpdate) > 0 {
		for _, v := range arg.StatusesToUpdate {
			queryParams = append(queryParams, v)
		}
		query = strings.Replace(query, "/*SLICE:statuses_to_update*/?", strings.Repeat(",?", len(arg.StatusesToUpdate))[1:], 1)
	} else {
		query = strings.Replace(query, "/*SLICE:statuses_to_update*/?", "NULL", 1)
	}
	_, err := q.db.ExecContext(ctx, query, queryParams...)
	return err
}
const updateTask = `-- name: UpdateTask :exec
UPDATE tasks SET
updated_at = ?1,
name = ?2,
type = ?3,
priority = ?4,
status = ?5,
worker_id = ?6,
last_touched_at = ?7,
commands = ?8,
activity = ?9
WHERE id=?10
`

// UpdateTaskParams holds the bind parameters for UpdateTask.
type UpdateTaskParams struct {
	UpdatedAt     sql.NullTime
	Name          string
	Type          string
	Priority      int64
	Status        string
	WorkerID      sql.NullInt64
	LastTouchedAt sql.NullTime
	Commands      json.RawMessage
	Activity      string
	ID            int64
}

// UpdateTask updates a Task, except its id, created_at, uuid, or job_id fields.
func (q *Queries) UpdateTask(ctx context.Context, arg UpdateTaskParams) error {
	_, err := q.db.ExecContext(ctx, updateTask,
		arg.UpdatedAt,
		arg.Name,
		arg.Type,
		arg.Priority,
		arg.Status,
		arg.WorkerID,
		arg.LastTouchedAt,
		arg.Commands,
		arg.Activity,
		arg.ID,
	)
	return err
}
const updateTaskActivity = `-- name: UpdateTaskActivity :exec
UPDATE tasks SET
updated_at = ?1,
activity = ?2
WHERE id=?3
`

// UpdateTaskActivityParams holds the bind parameters for UpdateTaskActivity.
type UpdateTaskActivityParams struct {
	UpdatedAt sql.NullTime
	Activity  string
	ID        int64
}

// UpdateTaskActivity updates only the activity (and updated_at) of the task
// with the given numerical ID.
func (q *Queries) UpdateTaskActivity(ctx context.Context, arg UpdateTaskActivityParams) error {
	_, err := q.db.ExecContext(ctx, updateTaskActivity, arg.UpdatedAt, arg.Activity, arg.ID)
	return err
}
const updateTaskStatus = `-- name: UpdateTaskStatus :exec
UPDATE tasks SET
updated_at = ?1,
status = ?2
WHERE id=?3
`

// UpdateTaskStatusParams holds the bind parameters for UpdateTaskStatus.
type UpdateTaskStatusParams struct {
	UpdatedAt sql.NullTime
	Status    string
	ID        int64
}

// UpdateTaskStatus updates only the status (and updated_at) of the task with
// the given numerical ID.
func (q *Queries) UpdateTaskStatus(ctx context.Context, arg UpdateTaskStatusParams) error {
	_, err := q.db.ExecContext(ctx, updateTaskStatus, arg.UpdatedAt, arg.Status, arg.ID)
	return err
}

View File

@ -0,0 +1,100 @@
-- Worker queries
--
-- name: CreateWorker :one
-- Insert a new worker and return its numerical ID.
INSERT INTO workers (
created_at,
uuid,
secret,
name,
address,
platform,
software,
status,
last_seen_at,
status_requested,
lazy_status_request,
supported_task_types,
deleted_at,
can_restart
) values (
@created_at,
@uuid,
@secret,
@name,
@address,
@platform,
@software,
@status,
@last_seen_at,
@status_requested,
@lazy_status_request,
@supported_task_types,
@deleted_at,
@can_restart
)
RETURNING id;

-- name: AddWorkerTagMembership :exec
-- Make the worker a member of the given worker tag.
INSERT INTO worker_tag_membership (worker_tag_id, worker_id)
VALUES (@worker_tag_id, @worker_id);

-- name: FetchWorkers :many
-- Fetch all workers that have not been soft-deleted.
SELECT sqlc.embed(workers) FROM workers
WHERE deleted_at IS NULL;

-- name: FetchWorker :one
-- FetchWorker only returns the worker if it wasn't soft-deleted.
SELECT * FROM workers WHERE workers.uuid = @uuid and deleted_at is NULL;

-- name: FetchWorkerUnconditional :one
-- FetchWorkerUnconditional ignores soft-deletion status and just returns the worker.
SELECT * FROM workers WHERE workers.uuid = @uuid;

-- name: FetchWorkerTags :many
-- Fetch the worker tags the given worker (by UUID) is a member of.
SELECT worker_tags.*
FROM worker_tags
LEFT JOIN worker_tag_membership m ON (m.worker_tag_id = worker_tags.id)
LEFT JOIN workers on (m.worker_id = workers.id)
WHERE workers.uuid = @uuid;

-- name: SoftDeleteWorker :execrows
-- Soft-delete the worker by setting its deletion timestamp.
UPDATE workers SET deleted_at=@deleted_at
WHERE uuid=@uuid;

-- name: SaveWorkerStatus :exec
-- Save only the status-related fields of the worker.
UPDATE workers SET
updated_at=@updated_at,
status=@status,
status_requested=@status_requested,
lazy_status_request=@lazy_status_request
WHERE id=@id;

-- name: SaveWorker :exec
-- Save all mutable fields of the worker.
UPDATE workers SET
updated_at=@updated_at,
uuid=@uuid,
secret=@secret,
name=@name,
address=@address,
platform=@platform,
software=@software,
status=@status,
last_seen_at=@last_seen_at,
status_requested=@status_requested,
lazy_status_request=@lazy_status_request,
supported_task_types=@supported_task_types,
can_restart=@can_restart
WHERE id=@id;

-- name: WorkerSeen :exec
-- Update the timestamp of when the worker was last seen online.
UPDATE workers SET
updated_at=@updated_at,
last_seen_at=@last_seen_at
WHERE id=@id;

-- name: SummarizeWorkerStatuses :many
-- Count the non-deleted workers in each status.
SELECT status, count(id) as status_count FROM workers
WHERE deleted_at is NULL
GROUP BY status;

View File

@ -0,0 +1,402 @@
// Code generated by sqlc. DO NOT EDIT.
// versions:
// sqlc v1.26.0
// source: query_workers.sql
package sqlc
import (
"context"
"database/sql"
"time"
)
const addWorkerTagMembership = `-- name: AddWorkerTagMembership :exec
INSERT INTO worker_tag_membership (worker_tag_id, worker_id)
VALUES (?1, ?2)
`

// AddWorkerTagMembershipParams holds the bind parameters for AddWorkerTagMembership.
type AddWorkerTagMembershipParams struct {
	WorkerTagID int64
	WorkerID    int64
}

// AddWorkerTagMembership makes the worker a member of the given worker tag.
func (q *Queries) AddWorkerTagMembership(ctx context.Context, arg AddWorkerTagMembershipParams) error {
	_, err := q.db.ExecContext(ctx, addWorkerTagMembership, arg.WorkerTagID, arg.WorkerID)
	return err
}
const createWorker = `-- name: CreateWorker :one
INSERT INTO workers (
created_at,
uuid,
secret,
name,
address,
platform,
software,
status,
last_seen_at,
status_requested,
lazy_status_request,
supported_task_types,
deleted_at,
can_restart
) values (
?1,
?2,
?3,
?4,
?5,
?6,
?7,
?8,
?9,
?10,
?11,
?12,
?13,
?14
)
RETURNING id
`

// CreateWorkerParams holds the bind parameters for CreateWorker, one per
// inserted column.
type CreateWorkerParams struct {
	CreatedAt          time.Time
	UUID               string
	Secret             string
	Name               string
	Address            string
	Platform           string
	Software           string
	Status             string
	LastSeenAt         sql.NullTime
	StatusRequested    string
	LazyStatusRequest  bool
	SupportedTaskTypes string
	DeletedAt          sql.NullTime
	CanRestart         bool
}

// CreateWorker inserts a new worker and returns its numerical ID.
func (q *Queries) CreateWorker(ctx context.Context, arg CreateWorkerParams) (int64, error) {
	row := q.db.QueryRowContext(ctx, createWorker,
		arg.CreatedAt,
		arg.UUID,
		arg.Secret,
		arg.Name,
		arg.Address,
		arg.Platform,
		arg.Software,
		arg.Status,
		arg.LastSeenAt,
		arg.StatusRequested,
		arg.LazyStatusRequest,
		arg.SupportedTaskTypes,
		arg.DeletedAt,
		arg.CanRestart,
	)
	var id int64
	err := row.Scan(&id)
	return id, err
}
const fetchWorker = `-- name: FetchWorker :one
SELECT id, created_at, updated_at, uuid, secret, name, address, platform, software, status, last_seen_at, status_requested, lazy_status_request, supported_task_types, deleted_at, can_restart FROM workers WHERE workers.uuid = ?1 and deleted_at is NULL
`

// FetchWorker only returns the worker if it wasn't soft-deleted.
func (q *Queries) FetchWorker(ctx context.Context, uuid string) (Worker, error) {
	row := q.db.QueryRowContext(ctx, fetchWorker, uuid)
	var i Worker
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.UUID,
		&i.Secret,
		&i.Name,
		&i.Address,
		&i.Platform,
		&i.Software,
		&i.Status,
		&i.LastSeenAt,
		&i.StatusRequested,
		&i.LazyStatusRequest,
		&i.SupportedTaskTypes,
		&i.DeletedAt,
		&i.CanRestart,
	)
	return i, err
}
const fetchWorkerTags = `-- name: FetchWorkerTags :many
SELECT worker_tags.id, worker_tags.created_at, worker_tags.updated_at, worker_tags.uuid, worker_tags.name, worker_tags.description
FROM worker_tags
LEFT JOIN worker_tag_membership m ON (m.worker_tag_id = worker_tags.id)
LEFT JOIN workers on (m.worker_id = workers.id)
WHERE workers.uuid = ?1
`

// FetchWorkerTags returns the worker tags the given worker (by UUID) is a
// member of.
func (q *Queries) FetchWorkerTags(ctx context.Context, uuid string) ([]WorkerTag, error) {
	rows, err := q.db.QueryContext(ctx, fetchWorkerTags, uuid)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkerTag
	for rows.Next() {
		var i WorkerTag
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.UUID,
			&i.Name,
			&i.Description,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const fetchWorkerUnconditional = `-- name: FetchWorkerUnconditional :one
SELECT id, created_at, updated_at, uuid, secret, name, address, platform, software, status, last_seen_at, status_requested, lazy_status_request, supported_task_types, deleted_at, can_restart FROM workers WHERE workers.uuid = ?1
`

// FetchWorkerUnconditional ignores soft-deletion status and just returns the worker.
func (q *Queries) FetchWorkerUnconditional(ctx context.Context, uuid string) (Worker, error) {
	row := q.db.QueryRowContext(ctx, fetchWorkerUnconditional, uuid)
	var i Worker
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.UUID,
		&i.Secret,
		&i.Name,
		&i.Address,
		&i.Platform,
		&i.Software,
		&i.Status,
		&i.LastSeenAt,
		&i.StatusRequested,
		&i.LazyStatusRequest,
		&i.SupportedTaskTypes,
		&i.DeletedAt,
		&i.CanRestart,
	)
	return i, err
}
const fetchWorkers = `-- name: FetchWorkers :many
SELECT workers.id, workers.created_at, workers.updated_at, workers.uuid, workers.secret, workers.name, workers.address, workers.platform, workers.software, workers.status, workers.last_seen_at, workers.status_requested, workers.lazy_status_request, workers.supported_task_types, workers.deleted_at, workers.can_restart FROM workers
WHERE deleted_at IS NULL
`

// FetchWorkersRow wraps an embedded Worker row.
type FetchWorkersRow struct {
	Worker Worker
}

// FetchWorkers returns all workers that have not been soft-deleted.
func (q *Queries) FetchWorkers(ctx context.Context) ([]FetchWorkersRow, error) {
	rows, err := q.db.QueryContext(ctx, fetchWorkers)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []FetchWorkersRow
	for rows.Next() {
		var i FetchWorkersRow
		if err := rows.Scan(
			&i.Worker.ID,
			&i.Worker.CreatedAt,
			&i.Worker.UpdatedAt,
			&i.Worker.UUID,
			&i.Worker.Secret,
			&i.Worker.Name,
			&i.Worker.Address,
			&i.Worker.Platform,
			&i.Worker.Software,
			&i.Worker.Status,
			&i.Worker.LastSeenAt,
			&i.Worker.StatusRequested,
			&i.Worker.LazyStatusRequest,
			&i.Worker.SupportedTaskTypes,
			&i.Worker.DeletedAt,
			&i.Worker.CanRestart,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const saveWorker = `-- name: SaveWorker :exec
UPDATE workers SET
updated_at=?1,
uuid=?2,
secret=?3,
name=?4,
address=?5,
platform=?6,
software=?7,
status=?8,
last_seen_at=?9,
status_requested=?10,
lazy_status_request=?11,
supported_task_types=?12,
can_restart=?13
WHERE id=?14
`

// SaveWorkerParams holds the bind parameters for SaveWorker, one per updated
// column plus the row ID.
type SaveWorkerParams struct {
	UpdatedAt          sql.NullTime
	UUID               string
	Secret             string
	Name               string
	Address            string
	Platform           string
	Software           string
	Status             string
	LastSeenAt         sql.NullTime
	StatusRequested    string
	LazyStatusRequest  bool
	SupportedTaskTypes string
	CanRestart         bool
	ID                 int64
}

// SaveWorker saves all mutable fields of the worker with the given numerical
// ID (created_at and deleted_at are not touched).
func (q *Queries) SaveWorker(ctx context.Context, arg SaveWorkerParams) error {
	_, err := q.db.ExecContext(ctx, saveWorker,
		arg.UpdatedAt,
		arg.UUID,
		arg.Secret,
		arg.Name,
		arg.Address,
		arg.Platform,
		arg.Software,
		arg.Status,
		arg.LastSeenAt,
		arg.StatusRequested,
		arg.LazyStatusRequest,
		arg.SupportedTaskTypes,
		arg.CanRestart,
		arg.ID,
	)
	return err
}
const saveWorkerStatus = `-- name: SaveWorkerStatus :exec
UPDATE workers SET
updated_at=?1,
status=?2,
status_requested=?3,
lazy_status_request=?4
WHERE id=?5
`

// SaveWorkerStatusParams holds the bind parameters for SaveWorkerStatus.
type SaveWorkerStatusParams struct {
	UpdatedAt         sql.NullTime
	Status            string
	StatusRequested   string
	LazyStatusRequest bool
	ID                int64
}

// SaveWorkerStatus saves only the status-related fields of the worker with
// the given numerical ID.
func (q *Queries) SaveWorkerStatus(ctx context.Context, arg SaveWorkerStatusParams) error {
	_, err := q.db.ExecContext(ctx, saveWorkerStatus,
		arg.UpdatedAt,
		arg.Status,
		arg.StatusRequested,
		arg.LazyStatusRequest,
		arg.ID,
	)
	return err
}
const softDeleteWorker = `-- name: SoftDeleteWorker :execrows
UPDATE workers SET deleted_at=?1
WHERE uuid=?2
`

// SoftDeleteWorkerParams holds the bind parameters for SoftDeleteWorker.
type SoftDeleteWorkerParams struct {
	DeletedAt sql.NullTime
	UUID      string
}

// SoftDeleteWorker marks the worker as deleted by setting its deletion
// timestamp, and returns the number of affected rows.
func (q *Queries) SoftDeleteWorker(ctx context.Context, arg SoftDeleteWorkerParams) (int64, error) {
	result, err := q.db.ExecContext(ctx, softDeleteWorker, arg.DeletedAt, arg.UUID)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}
const summarizeWorkerStatuses = `-- name: SummarizeWorkerStatuses :many
SELECT status, count(id) as status_count FROM workers
WHERE deleted_at is NULL
GROUP BY status
`

// SummarizeWorkerStatusesRow is one (status, number of workers in that
// status) pair.
type SummarizeWorkerStatusesRow struct {
	Status      string
	StatusCount int64
}

// SummarizeWorkerStatuses counts the non-deleted workers per status.
func (q *Queries) SummarizeWorkerStatuses(ctx context.Context) ([]SummarizeWorkerStatusesRow, error) {
	rows, err := q.db.QueryContext(ctx, summarizeWorkerStatuses)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []SummarizeWorkerStatusesRow
	for rows.Next() {
		var i SummarizeWorkerStatusesRow
		if err := rows.Scan(&i.Status, &i.StatusCount); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const workerSeen = `-- name: WorkerSeen :exec
UPDATE workers SET
updated_at=?1,
last_seen_at=?2
WHERE id=?3
`

// WorkerSeenParams holds the bind parameters for WorkerSeen.
type WorkerSeenParams struct {
	UpdatedAt  sql.NullTime
	LastSeenAt sql.NullTime
	ID         int64
}

// WorkerSeen updates the timestamp of when the worker (by numerical ID) was
// last seen online.
func (q *Queries) WorkerSeen(ctx context.Context, arg WorkerSeenParams) error {
	_, err := q.db.ExecContext(ctx, workerSeen, arg.UpdatedAt, arg.LastSeenAt, arg.ID)
	return err
}

View File

@ -109,10 +109,10 @@ CREATE TABLE workers (
status varchar(16) DEFAULT '' NOT NULL,
last_seen_at datetime,
status_requested varchar(16) DEFAULT '' NOT NULL,
lazy_status_request smallint DEFAULT false NOT NULL,
lazy_status_request boolean DEFAULT false NOT NULL,
supported_task_types varchar(255) DEFAULT '' NOT NULL,
deleted_at datetime,
can_restart smallint DEFAULT false NOT NULL,
can_restart boolean DEFAULT false NOT NULL,
PRIMARY KEY (id)
);
CREATE INDEX idx_jobs_uuid ON jobs(uuid);

View File

@ -19,7 +19,7 @@ const schedulerTestTimeout = 100 * time.Millisecond
const schedulerTestTimeoutlong = 5000 * time.Millisecond
func TestNoTasks(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
w := linuxWorker(t, db)
@ -30,7 +30,7 @@ func TestNoTasks(t *testing.T) {
}
func TestOneJobOneTask(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
w := linuxWorker(t, db)
@ -67,7 +67,7 @@ func TestOneJobOneTask(t *testing.T) {
}
func TestOneJobThreeTasksByPrio(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
w := linuxWorker(t, db)
@ -98,7 +98,7 @@ func TestOneJobThreeTasksByPrio(t *testing.T) {
}
func TestOneJobThreeTasksByDependencies(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
w := linuxWorker(t, db)
@ -124,7 +124,7 @@ func TestOneJobThreeTasksByDependencies(t *testing.T) {
}
func TestTwoJobsThreeTasks(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
w := linuxWorker(t, db)
@ -167,7 +167,7 @@ func TestSomeButNotAllDependenciesCompleted(t *testing.T) {
// There was a bug in the task scheduler query, where it would schedule a task
// if any of its dependencies was completed (instead of all dependencies).
// This test reproduces that problematic scenario.
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
att1 := authorTestTask("1.1 completed task", "blender")
@ -190,7 +190,7 @@ func TestSomeButNotAllDependenciesCompleted(t *testing.T) {
}
func TestAlreadyAssigned(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
w := linuxWorker(t, db)
@ -226,7 +226,7 @@ func TestAlreadyAssigned(t *testing.T) {
}
func TestAssignedToOtherWorker(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
w := linuxWorker(t, db)
@ -262,7 +262,7 @@ func TestAssignedToOtherWorker(t *testing.T) {
}
func TestPreviouslyFailed(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
w := linuxWorker(t, db)
@ -292,7 +292,7 @@ func TestPreviouslyFailed(t *testing.T) {
}
func TestWorkerTagJobWithTag(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
// Create worker tags:
@ -341,7 +341,7 @@ func TestWorkerTagJobWithTag(t *testing.T) {
}
func TestWorkerTagJobWithoutTag(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
// Create worker tag:
@ -376,7 +376,7 @@ func TestWorkerTagJobWithoutTag(t *testing.T) {
}
func TestBlocklisted(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, schedulerTestTimeout)
ctx, cancel, db := persistenceTestFixtures(schedulerTestTimeout)
defer cancel()
w := linuxWorker(t, db)

View File

@ -6,6 +6,7 @@ package persistence
import (
"context"
"database/sql"
"fmt"
"os"
"testing"
"time"
@ -15,7 +16,6 @@ import (
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"projects.blender.org/studio/flamenco/internal/uuid"
"projects.blender.org/studio/flamenco/pkg/api"
)
@ -23,11 +23,11 @@ import (
// resulting database.
const TestDSN = "file::memory:"
func CreateTestDB(t *testing.T) (db *DB, closer func()) {
func CreateTestDB() (db *DB, closer func()) {
// Delete the SQLite file if it exists on disk.
if _, err := os.Stat(TestDSN); err == nil {
if err := os.Remove(TestDSN); err != nil {
t.Fatalf("unable to remove %s: %v", TestDSN, err)
panic(fmt.Sprintf("unable to remove %s: %v", TestDSN, err))
}
}
@ -39,7 +39,7 @@ func CreateTestDB(t *testing.T) (db *DB, closer func()) {
// can be closed when the unit test is done running.
sqliteConn, err := sql.Open(sqlite.DriverName, TestDSN)
if err != nil {
t.Fatalf("opening SQLite connection: %v", err)
panic(fmt.Sprintf("opening SQLite connection: %v", err))
}
config := gorm.Config{
@ -50,19 +50,19 @@ func CreateTestDB(t *testing.T) (db *DB, closer func()) {
db, err = openDBWithConfig(TestDSN, &config)
if err != nil {
t.Fatalf("opening DB: %v", err)
panic(fmt.Sprintf("opening DB: %v", err))
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
err = db.migrate(ctx)
if err != nil {
t.Fatalf("migrating DB: %v", err)
panic(fmt.Sprintf("migrating DB: %v", err))
}
closer = func() {
if err := sqliteConn.Close(); err != nil {
t.Fatalf("closing DB: %v", err)
if err := db.Close(); err != nil {
panic(fmt.Sprintf("closing DB: %v", err))
}
}
@ -71,8 +71,8 @@ func CreateTestDB(t *testing.T) (db *DB, closer func()) {
// persistenceTestFixtures creates a test database and returns it and a context.
// Tests should call the returned cancel function when they're done.
func persistenceTestFixtures(t *testing.T, testContextTimeout time.Duration) (context.Context, context.CancelFunc, *DB) {
db, dbCloser := CreateTestDB(t)
func persistenceTestFixtures(testContextTimeout time.Duration) (context.Context, context.CancelFunc, *DB) {
db, dbCloser := CreateTestDB()
var (
ctx context.Context
@ -103,10 +103,10 @@ type WorkerTestFixture struct {
}
func workerTestFixtures(t *testing.T, testContextTimeout time.Duration) WorkerTestFixture {
ctx, cancel, db := persistenceTestFixtures(t, testContextTimeout)
ctx, cancel, db := persistenceTestFixtures(testContextTimeout)
w := Worker{
UUID: uuid.New(),
UUID: "557930e7-5b55-469e-a6d7-fc800f3685be",
Name: "дрон",
Address: "fe80::5054:ff:fede:2ad7",
Platform: "linux",
@ -116,7 +116,7 @@ func workerTestFixtures(t *testing.T, testContextTimeout time.Duration) WorkerTe
}
wc := WorkerTag{
UUID: uuid.New(),
UUID: "e0e05417-9793-4829-b1d0-d446dd819f3d",
Name: "arbejdsklynge",
Description: "Worker tag in Danish",
}

View File

@ -51,7 +51,7 @@ func TestFetchTimedOutTasks(t *testing.T) {
}
func TestFetchTimedOutWorkers(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
timeoutDeadline := mustParseTime("2022-06-07T11:14:47+02:00")

View File

@ -13,7 +13,7 @@ import (
)
func TestFetchWorkerSleepSchedule(t *testing.T) {
ctx, finish, db := persistenceTestFixtures(t, 1*time.Second)
ctx, finish, db := persistenceTestFixtures(1 * time.Second)
defer finish()
linuxWorker := Worker{
@ -57,7 +57,7 @@ func TestFetchWorkerSleepSchedule(t *testing.T) {
}
func TestFetchSleepScheduleWorker(t *testing.T) {
ctx, finish, db := persistenceTestFixtures(t, 1*time.Second)
ctx, finish, db := persistenceTestFixtures(1 * time.Second)
defer finish()
linuxWorker := Worker{
@ -104,7 +104,7 @@ func TestFetchSleepScheduleWorker(t *testing.T) {
}
func TestSetWorkerSleepSchedule(t *testing.T) {
ctx, finish, db := persistenceTestFixtures(t, 1*time.Second)
ctx, finish, db := persistenceTestFixtures(1 * time.Second)
defer finish()
linuxWorker := Worker{
@ -187,7 +187,7 @@ func TestSetWorkerSleepSchedule(t *testing.T) {
}
func TestSetWorkerSleepScheduleNextCheck(t *testing.T) {
ctx, finish, db := persistenceTestFixtures(t, 1*time.Second)
ctx, finish, db := persistenceTestFixtures(1 * time.Second)
defer finish()
schedule := SleepSchedule{
@ -218,7 +218,7 @@ func TestSetWorkerSleepScheduleNextCheck(t *testing.T) {
}
func TestFetchSleepSchedulesToCheck(t *testing.T) {
ctx, finish, db := persistenceTestFixtures(t, 1*time.Second)
ctx, finish, db := persistenceTestFixtures(1 * time.Second)
defer finish()
mockedNow := mustParseTime("2022-06-07T11:14:47+02:00").UTC()

View File

@ -4,12 +4,14 @@ package persistence
import (
"context"
"database/sql"
"fmt"
"strings"
"time"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"projects.blender.org/studio/flamenco/internal/manager/persistence/sqlc"
"projects.blender.org/studio/flamenco/pkg/api"
)
@ -66,25 +68,78 @@ func (w *Worker) StatusChangeClear() {
}
func (db *DB) CreateWorker(ctx context.Context, w *Worker) error {
if err := db.gormDB.WithContext(ctx).Create(w).Error; err != nil {
queries, err := db.queries()
if err != nil {
return err
}
now := db.now().Time
workerID, err := queries.CreateWorker(ctx, sqlc.CreateWorkerParams{
CreatedAt: now,
UUID: w.UUID,
Secret: w.Secret,
Name: w.Name,
Address: w.Address,
Platform: w.Platform,
Software: w.Software,
Status: string(w.Status),
LastSeenAt: sql.NullTime{
Time: w.LastSeenAt,
Valid: !w.LastSeenAt.IsZero(),
},
StatusRequested: string(w.StatusRequested),
LazyStatusRequest: w.LazyStatusRequest,
SupportedTaskTypes: w.SupportedTaskTypes,
DeletedAt: sql.NullTime(w.DeletedAt),
CanRestart: w.CanRestart,
})
if err != nil {
return fmt.Errorf("creating new worker: %w", err)
}
w.ID = uint(workerID)
w.CreatedAt = now
// TODO: remove the create-with-tags functionality to a higher-level function.
// This code is just here to make this function work like the GORM code did.
for _, tag := range w.Tags {
err := queries.AddWorkerTagMembership(ctx, sqlc.AddWorkerTagMembershipParams{
WorkerTagID: int64(tag.ID),
WorkerID: workerID,
})
if err != nil {
return err
}
}
return nil
}
func (db *DB) FetchWorker(ctx context.Context, uuid string) (*Worker, error) {
w := Worker{}
tx := db.gormDB.WithContext(ctx).
Preload("Tags").
Find(&w, "uuid = ?", uuid).
Limit(1)
if tx.Error != nil {
return nil, workerError(tx.Error, "fetching worker")
queries, err := db.queries()
if err != nil {
return nil, err
}
if w.ID == 0 {
return nil, ErrWorkerNotFound
worker, err := queries.FetchWorker(ctx, uuid)
if err != nil {
return nil, workerError(err, "fetching worker %s", uuid)
}
return &w, nil
// TODO: remove this code, and let the caller fetch the tags when interested in them.
workerTags, err := queries.FetchWorkerTags(ctx, uuid)
if err != nil {
return nil, workerTagError(err, "fetching tags of worker %s", uuid)
}
convertedWorker := convertSqlcWorker(worker)
convertedWorker.Tags = make([]*WorkerTag, len(workerTags))
for index := range workerTags {
convertedTag := convertSqlcWorkerTag(workerTags[index])
convertedWorker.Tags[index] = &convertedTag
}
return &convertedWorker, nil
}
func (db *DB) DeleteWorker(ctx context.Context, uuid string) error {
@ -97,25 +152,41 @@ func (db *DB) DeleteWorker(ctx context.Context, uuid string) error {
return ErrDeletingWithoutFK
}
tx := db.gormDB.WithContext(ctx).
Where("uuid = ?", uuid).
Delete(&Worker{})
if tx.Error != nil {
return workerError(tx.Error, "deleting worker")
queries, err := db.queries()
if err != nil {
return err
}
if tx.RowsAffected == 0 {
rowsAffected, err := queries.SoftDeleteWorker(ctx, sqlc.SoftDeleteWorkerParams{
DeletedAt: db.now(),
UUID: uuid,
})
if err != nil {
return workerError(err, "deleting worker")
}
if rowsAffected == 0 {
return ErrWorkerNotFound
}
return nil
}
func (db *DB) FetchWorkers(ctx context.Context) ([]*Worker, error) {
workers := make([]*Worker, 0)
tx := db.gormDB.WithContext(ctx).Model(&Worker{}).Scan(&workers)
if tx.Error != nil {
return nil, workerError(tx.Error, "fetching all workers")
queries, err := db.queries()
if err != nil {
return nil, err
}
return workers, nil
workers, err := queries.FetchWorkers(ctx)
if err != nil {
return nil, workerError(err, "fetching all workers")
}
gormWorkers := make([]*Worker, len(workers))
for idx := range workers {
worker := convertSqlcWorker(workers[idx].Worker)
gormWorkers[idx] = &worker
}
return gormWorkers, nil
}
// FetchWorkerTask returns the most recent task assigned to the given Worker.
@ -155,22 +226,52 @@ func (db *DB) FetchWorkerTask(ctx context.Context, worker *Worker) (*Task, error
}
func (db *DB) SaveWorkerStatus(ctx context.Context, w *Worker) error {
err := db.gormDB.WithContext(ctx).
Model(w).
Select("status", "status_requested", "lazy_status_request").
Updates(Worker{
Status: w.Status,
StatusRequested: w.StatusRequested,
LazyStatusRequest: w.LazyStatusRequest,
}).Error
queries, err := db.queries()
if err != nil {
return fmt.Errorf("saving worker: %w", err)
return err
}
err = queries.SaveWorkerStatus(ctx, sqlc.SaveWorkerStatusParams{
UpdatedAt: db.now(),
Status: string(w.Status),
StatusRequested: string(w.StatusRequested),
LazyStatusRequest: w.LazyStatusRequest,
ID: int64(w.ID),
})
if err != nil {
return fmt.Errorf("saving worker status: %w", err)
}
return nil
}
func (db *DB) SaveWorker(ctx context.Context, w *Worker) error {
if err := db.gormDB.WithContext(ctx).Save(w).Error; err != nil {
// TODO: remove this code, and just let the caller call CreateWorker() directly.
if w.ID == 0 {
return db.CreateWorker(ctx, w)
}
queries, err := db.queries()
if err != nil {
return err
}
err = queries.SaveWorker(ctx, sqlc.SaveWorkerParams{
UpdatedAt: db.now(),
UUID: w.UUID,
Secret: w.Secret,
Name: w.Name,
Address: w.Address,
Platform: w.Platform,
Software: w.Software,
Status: string(w.Status),
LastSeenAt: sql.NullTime{Time: w.LastSeenAt, Valid: !w.LastSeenAt.IsZero()},
StatusRequested: string(w.StatusRequested),
LazyStatusRequest: w.LazyStatusRequest,
SupportedTaskTypes: w.SupportedTaskTypes,
CanRestart: w.CanRestart,
ID: int64(w.ID),
})
if err != nil {
return fmt.Errorf("saving worker: %w", err)
}
return nil
@ -178,10 +279,18 @@ func (db *DB) SaveWorker(ctx context.Context, w *Worker) error {
// WorkerSeen marks the worker as 'seen' by this Manager. This is used for timeout detection.
func (db *DB) WorkerSeen(ctx context.Context, w *Worker) error {
tx := db.gormDB.WithContext(ctx).
Model(w).
Updates(Worker{LastSeenAt: db.gormDB.NowFunc()})
if err := tx.Error; err != nil {
queries, err := db.queries()
if err != nil {
return err
}
now := db.now()
err = queries.WorkerSeen(ctx, sqlc.WorkerSeenParams{
UpdatedAt: now,
LastSeenAt: now,
ID: int64(w.ID),
})
if err != nil {
return workerError(err, "saving worker 'last seen at'")
}
return nil
@ -194,25 +303,65 @@ func (db *DB) SummarizeWorkerStatuses(ctx context.Context) (WorkerStatusCount, e
logger := log.Ctx(ctx)
logger.Debug().Msg("database: summarizing worker statuses")
// Query the database using a data structure that's easy to handle in GORM.
type queryResult struct {
Status api.WorkerStatus
StatusCount int
}
result := []*queryResult{}
tx := db.gormDB.WithContext(ctx).Model(&Worker{}).
Select("status as Status", "count(id) as StatusCount").
Group("status").
Scan(&result)
if tx.Error != nil {
return nil, workerError(tx.Error, "summarizing worker statuses")
queries, err := db.queries()
if err != nil {
return nil, err
}
rows, err := queries.SummarizeWorkerStatuses(ctx)
if err != nil {
return nil, workerError(err, "summarizing worker statuses")
}
// Convert the array-of-structs to a map that's easier to handle by the caller.
statusCounts := make(WorkerStatusCount)
for _, singleStatusCount := range result {
statusCounts[singleStatusCount.Status] = singleStatusCount.StatusCount
for _, row := range rows {
statusCounts[api.WorkerStatus(row.Status)] = int(row.StatusCount)
}
return statusCounts, nil
}
// convertSqlcWorker translates an SQLC-generated Worker record into the
// GORM-era Worker model still used throughout the codebase. It exists purely
// to bridge the ongoing GORM-to-SQLC migration; once that migration finishes,
// callers should consume the SQLC-generated model directly.
func convertSqlcWorker(worker sqlc.Worker) Worker {
	converted := Worker{
		DeletedAt:          gorm.DeletedAt(worker.DeletedAt),
		UUID:               worker.UUID,
		Secret:             worker.Secret,
		Name:               worker.Name,
		Address:            worker.Address,
		Platform:           worker.Platform,
		Software:           worker.Software,
		Status:             api.WorkerStatus(worker.Status),
		LastSeenAt:         worker.LastSeenAt.Time,
		CanRestart:         worker.CanRestart,
		StatusRequested:    api.WorkerStatus(worker.StatusRequested),
		LazyStatusRequest:  worker.LazyStatusRequest,
		SupportedTaskTypes: worker.SupportedTaskTypes,
	}
	// Embedded GORM model fields: nullable timestamps collapse to their
	// zero value via .Time when NULL in the database.
	converted.Model = Model{
		ID:        uint(worker.ID),
		CreatedAt: worker.CreatedAt,
		UpdatedAt: worker.UpdatedAt.Time,
	}
	return converted
}
// convertSqlcWorkerTag translates an SQLC-generated WorkerTag record into the
// GORM-era WorkerTag model still used throughout the codebase. Like
// convertSqlcWorker, it is a temporary bridge for the GORM-to-SQLC migration;
// eventually callers should consume the SQLC-generated model directly.
func convertSqlcWorkerTag(tag sqlc.WorkerTag) WorkerTag {
	converted := WorkerTag{
		UUID:        tag.UUID,
		Name:        tag.Name,
		Description: tag.Description,
	}
	// Embedded GORM model fields: UpdatedAt is nullable in the database, so
	// a NULL collapses to the zero time via .Time.
	converted.Model = Model{
		ID:        uint(tag.ID),
		CreatedAt: tag.CreatedAt,
		UpdatedAt: tag.UpdatedAt.Time,
	}
	return converted
}

View File

@ -5,6 +5,7 @@ package persistence
import (
"context"
"errors"
"testing"
"time"
@ -16,7 +17,7 @@ import (
)
func TestCreateFetchWorker(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
// Test fetching non-existent worker
@ -37,6 +38,7 @@ func TestCreateFetchWorker(t *testing.T) {
err = db.CreateWorker(ctx, &w)
require.NoError(t, err)
assert.NotZero(t, w.ID)
fetchedWorker, err = db.FetchWorker(ctx, w.UUID)
require.NoError(t, err)
@ -49,12 +51,13 @@ func TestCreateFetchWorker(t *testing.T) {
assert.Equal(t, w.Platform, fetchedWorker.Platform)
assert.Equal(t, w.Software, fetchedWorker.Software)
assert.Equal(t, w.Status, fetchedWorker.Status)
assert.False(t, fetchedWorker.DeletedAt.Valid)
assert.EqualValues(t, w.SupportedTaskTypes, fetchedWorker.SupportedTaskTypes)
}
func TestFetchWorkerTask(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 10*time.Second)
ctx, cancel, db := persistenceTestFixtures(10 * time.Second)
defer cancel()
// Worker without task.
@ -135,7 +138,7 @@ func TestFetchWorkerTask(t *testing.T) {
}
func TestSaveWorker(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
w := Worker{
@ -190,7 +193,7 @@ func TestSaveWorker(t *testing.T) {
}
func TestFetchWorkers(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
// No workers
@ -250,7 +253,7 @@ func TestFetchWorkers(t *testing.T) {
}
func TestDeleteWorker(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
// Test deleting non-existent worker
@ -315,7 +318,7 @@ func TestDeleteWorker(t *testing.T) {
}
func TestDeleteWorkerNoForeignKeys(t *testing.T) {
ctx, cancel, db := persistenceTestFixtures(t, 1*time.Second)
ctx, cancel, db := persistenceTestFixtures(1 * time.Second)
defer cancel()
// Create a Worker to delete.
@ -408,10 +411,21 @@ func TestSummarizeWorkerStatusesTimeout(t *testing.T) {
// Force a timeout of the context. And yes, even when a nanosecond is quite
// short, it is still necessary to wait.
time.Sleep(2 * time.Nanosecond)
time.Sleep(1 * time.Millisecond)
// Test the summary.
summary, err := f.db.SummarizeWorkerStatuses(subCtx)
assert.ErrorIs(t, err, context.DeadlineExceeded)
// Unfortunately, the exact error returned seems to be non-deterministic.
switch {
case errors.Is(err, context.DeadlineExceeded):
// Good!
case errors.Is(err, ErrContextCancelled):
// Also good!
case err == nil:
t.Fatal("no error returned where a timeout error was expected")
default:
t.Fatalf("unexpected error returned: %v", err)
}
assert.Nil(t, summary)
}

View File

@ -59,9 +59,8 @@ func (sm *StateMachine) taskStatusChangeOnly(
task *persistence.Task,
newTaskStatus api.TaskStatus,
) error {
job := task.Job
if job == nil {
log.Panic().Str("task", task.UUID).Msg("task without job, cannot handle this")
if task.JobUUID == "" {
log.Panic().Str("task", task.UUID).Msg("task without job UUID, cannot handle this")
return nil // Will not run because of the panic.
}
@ -70,7 +69,7 @@ func (sm *StateMachine) taskStatusChangeOnly(
logger := log.With().
Str("task", task.UUID).
Str("job", job.UUID).
Str("job", task.JobUUID).
Str("taskStatusOld", string(oldTaskStatus)).
Str("taskStatusNew", string(newTaskStatus)).
Logger()
@ -83,7 +82,7 @@ func (sm *StateMachine) taskStatusChangeOnly(
if oldTaskStatus != newTaskStatus {
// logStorage already logs any error, and an error here shouldn't block the
// rest of the function.
_ = sm.logStorage.WriteTimestamped(logger, job.UUID, task.UUID,
_ = sm.logStorage.WriteTimestamped(logger, task.JobUUID, task.UUID,
fmt.Sprintf("task changed status %s -> %s", oldTaskStatus, newTaskStatus))
}
@ -101,9 +100,13 @@ func (sm *StateMachine) updateJobAfterTaskStatusChange(
ctx context.Context, task *persistence.Task, oldTaskStatus api.TaskStatus,
) error {
job := task.Job
if job == nil {
log.Panic().Str("task", task.UUID).Msg("task without job, cannot handle this")
return nil // Will not run because of the panic.
}
logger := log.With().
Str("job", job.UUID).
Str("job", task.JobUUID).
Str("task", task.UUID).
Str("taskStatusOld", string(oldTaskStatus)).
Str("taskStatusNew", string(task.Status)).

View File

@ -473,6 +473,7 @@ func taskWithStatus(jobStatus api.JobStatus, taskStatus api.TaskStatus) *persist
Model: persistence.Model{ID: 327},
UUID: "testtask-0001-4e28-aeea-8cbaf2fc96a5",
JobUUID: job.UUID,
JobID: job.ID,
Job: &job,
@ -488,6 +489,7 @@ func taskOfSameJob(task *persistence.Task, taskStatus api.TaskStatus) *persisten
return &persistence.Task{
Model: persistence.Model{ID: newTaskID},
UUID: fmt.Sprintf("testtask-%04d-4e28-aeea-8cbaf2fc96a5", newTaskID),
JobUUID: task.JobUUID,
JobID: task.JobID,
Job: task.Job,
Status: taskStatus,

View File

@ -5,7 +5,7 @@ info:
description: Render Farm manager API
contact:
name: Flamenco Team
url: https://flamenco.io/
url: https://flamenco.blender.org/
license:
name: GPLv3
url: https://www.gnu.org/licenses/gpl-3.0.en.html

View File

@ -50,202 +50,202 @@ var swaggerSpec = []string{
"cC4iObziSjsyCHS9H/u6mOYsC9fb+FmD3fbsup4itkFLVU6oXjxbsOTyLVNWk2+ZHoxW0918R+taOXlD",
"LwzCfSek/t4yg+gtAKE8fslQXgeMXFKF5g2DeTMuUpzF8ZHowOoCp41aS1CuWjC/UMuvZGmI4zgqGQHH",
"jK4UBvELnclKpNE1KVmVyUaxJjiSU/ygfaQINLsiP2y456E9sA1H/pKLtD7xrfCvB2EiVqHuPo4+NqUV",
"qpRMONVI981uLpi4uqLlwCKGl2csN46xBWcE7ZyM/YGUzGicINFTotDiZk13QPk+sKTSbJNxtt/y6RlJ",
"8LODdpwCBZ/EDuhFWcqyu58fjXLDE8LMz6RkqpBCsZgZOY0g/U9nZycEbZ3EvOG1BT8QOTacO8mqFI1C",
"eD1WmaQpURLx2wMQV9uAbZbZpXGBVlkujRb9zEz2cP/Q8x9vSUmpplOKmvW0UivDpxiBhbpFWTYmhaZc",
"EEruvWW6XI2ezjQr7+GrC0bBWGOWx0XKE6qZsuY41Mc1z9G6YI6CKa9ql0yXnKVj8hL0cicF2QG5AjnJ",
"oAk1srgTHe4pywHNu0nGmQAjUSqJkjkzavC8oXwa6Y19wGvEaUamNLmUsxnyTm++dpJr13aeM6XoPIZ7",
"LeSCc6/fj2LWFRP6JS3z060M8vWbb5nhaH6In+X0XWEkgKhupJj2puwhMdgBVg1yKpNLpo/f7L3+t7Mz",
"RAMUdlFMUeYgSiLY0jxUQzIpSnbFZaUuEG8n3hLFPiCaIhDbwlvGNLuwZ83SCxrhL8czqz1nDHiXodv+",
"CytGOXsPz5nSNC+Ioe+IUAbXHDKZT5WWJUpWLzOaM5FIz/Kbx2xgNjIjRllWhIi9e3f83MmDP4PbYoPH",
"oxaymgP9QvNQX4192AL3Juwwkpf31oT+H687PdyPIXTJZiVTiwuwdkeOxt9hL4zaW6YWYEG33wPBsbu5",
"p9B2Xku6gHWo+yhzYQ3g1dAgHUiwKQWlh9FkAUTjiqcVzdBvt4RZvClJS2mIwMoNYu3nRUkTsOv1GlJ2",
"B2K/twumjqDHmUdOOSMZVdqucmucW1J1gTcm7XEr4RU1WP7e6Pb25fqOmNuuJZnosmITq6rYX2pbHaiP",
"YHPl6b3aaq6YHlrKbG6Su915oVdb2TnhAjjgBK4866ALXHhNpOulja+o0m+tabePwlkElWWNoAbytUmY",
"53Re81cHPbvMuA6wlTNzONCLKp8KyrMt0CrcyrFZEbhlYtoBzkXVpf2Xn6QfTHzGnq2SmHDtCWDGZ2yU",
"mJcIuwLTg/U0GD0SuKJaVGh7SOVSDI1wUsKfVTEkTCcx4r6NYdEvDpaKOlJr171WQPyEqstXct53/uDm",
"z+ScJItKXFoGpyWhBPialgVP9hyvI6WUOUkZ0rQU37MylAH5EJ5cSZ6acVKQQVoEJwaHTEZsB8/MehyN",
"13aVY/KarrwElVeZ5gWIJYIpeJd90FFlxSHEWpYEARHDHb3wNaqZbaw9hm2kjDMA4wYxA8DRkTOAGlxX",
"0DD0/6oZ8rA9L98OcMNdiMNmvq9x0s9l/M04jet8c1P8LMYePIWzyleEXfiT7MVF1ArPaC9RwBfIGZ1v",
"QEWuPRrG6BvaBNdB0i9lW/YN1sAt2fdmlttnKQvAtM2lxTc3XtslgnUNxBIqLoz0QEu9ztLDlZ0SlD9a",
"aTmyX8WNPRZOUeXByZhoeWe61mjtcg207QDjLyb94/K3oRnm3lwoxkTM0aq004e5Ctdr3nc2kMBcud3a",
"N5OepVv95xIfBMOu5Cf+1QXi1S4fP4Mv3qLud7Oi+RUrlfVAbEHm+qmbG2fYuCuxO9y0DDhTHVBHMC+m",
"YFlcUojEMHRTZYwVYKwzV5La9ypxKeRS4BpApIsa7jrWBTMnxltA+KVdCE77qX3v1Y4WjG6MBD6OwsHK",
"sH+tTyBY2JyDW/BwfDB6/Gg0T9PDB+nDwx/cGRwN/l9Zle4ODSCIp9T+MAeH48MRzYoF3Q/OJnxMvuuM",
"/X13/7CKnVwsjWV8XItvTUy2YPAajfel5YxaLXtR5VQYKVNVOXyGMlbJMkYVI9OKZ6kLhwX3kiENVJFJ",
"uKoJqggSSHb9CcRnWcMkfj2Zcz0h9iswN0Y9Ua0Dr+9BAxT+6hiIxrDhZwylpVn2ZjY4+tt6hDt1fjPz",
"1afhxzUy41pPitMqifuCSOH1yai8jgEoMTu4+QHcfI4ibU2C/ultadcw4uzMEMafIdy6Q98g1n76DfH4",
"z5lMLjOudL8bExm1Nb7RkoERHOJeWUoSVoIaCdoUOjulEdOspSdxyLmVJylczwuhy1XMidR9qeOaXB8o",
"jvvZVoeyb/cQ0dYJ1EOHceE9JOS5vR7x4FjzlNCprDRGrjr900qRTsK05iTeEC9bfHFBcyoukgVLLmWl",
"13s/T+Fl4l4OAo/cAkqWyyuWEppJMccwcRcpsk0YYnMtPaCJW6o6C38hZDVfhN4lYBc0cMIUnCWMaDnH",
"LaZ8NmMlmI7hBMF2a74mlCwkmOwyEFrIu7evnEsnYssbkzMJzA2ClDBW5+2roXmUUM0E1YycDz5OqWKf",
"9j5K4aVeVc1m/ANTn84HMd3FfNBEyzKLUiE7TMNJuyEqv3UUMFUwUs9RvKZKOUw9ZRlL4jEwJ96BiUHj",
"5rcpsxT9vZwqZ6uvUdigSyBEgY5iadZFTj8MjgYH+weHo/1Ho/37Z/cPj+4/OLr/8F/3D47297vCT/fr",
"TjxnluFC0C3PShaSXLOwmSzB3+/4as2bWpdvB/ocBSnTNKWaAvtPU4jVpNlJxKzZYLyNzZRTrktarkhu",
"B3MIPSavzTYMdc3YhzCKzvo4c2l2AZEoleJiTiZ0PB0nE0PW6ztkQ2lbZ1SUEvZxNDgtSq4ZeVny+UIb",
"ZqNYOWY5GKIHajUtmfi/pzYYQ5Zz94aVh0/hBXKq//f/umLZoAdOJ9ZY/8zrZM0zDz1MOf3Ac6Od3N/f",
"Hw5yLvCviLupdQ38ID34fxrEIcUPS5cV6/m2X3NKqEjMMWDSUIH2muFgRjk+LGil4B9/r1iFr8EXIy9H",
"DXAfrGKoelUG1iNPk5px3TUe+WX1QRU91fGwFvwtSBCw0QMYVPZFxKW4TjZ0y+o7JS3LXjZhfwQ+4eMp",
"XWi+FynN9agUBDIiizNvIT9gKZnxjClkuoIlTClarmIEvMXgoubye88cdz1+fi+IgADRzcUctBlxmAM0",
"Jk+50YQErtR9EmPazg5lhQTHvGelzP3W+1SlGKDPqLpUp1We03IVy17LiwwcfCSz0iNmMDmoj8kz9Dtg",
"dIi1trsIVPPIHRI4Ys3v44hJ1LqJtxIqwc5sF7xFZFwvI1T/VjHcc8i0eG607ofDQR4Q9T4y+Wk4gLyq",
"i+kKcg8tu4LA5Nr4YC1RXDQIhqcDlkT81mWBuJaPNfW7H48e+Wzu85Jn2ijkNfcZOl7y6vgvL2pWEk13",
"kLOZYs2FRqMCalB93CHzUG1Jr/t2FAa37rKr4NTat+It01Up0DgMEggIzdRRT27FDdjCLrpSO0wgQOp+",
"BO4L5wTU3/ZOoSnjmncp4o0NOCRGppcjMBRWxWBYP1lUOpXLOFuzBoFnUsz4vCqpk1Kbm+TqJS+VfluJ",
"DZ4BrkC65yjyGwI6Mx/WgWN2PlJWIogx8alrIF5RMmNLMqOGFKshsVH7QooR5HcaLSQJ1wtMxgigTqn2",
"QdZTBrEpeaENSTdv6QVbWZFa3NNkynqDToCPYBpgupXuB6vQJRVqxkry9OQYUlBckPG4J7QFWOwrmdC4",
"fvDcsyTgd4abmZsGc9mPxxsNHO1Z2rsbhgccQz17an+lJXeBwG0EudBLuaQR3vZGsNGSrsiV/RhD3yH/",
"UyoN8aPSXHKbaQjJKRxSBUsGOaQ5BCAZxjv5aOTgTxOrYPIScxudSLKAdB7lPF6uiIAPd3a+sjE5W8rI",
"msA8aidNO2kdXvphdvlFRrXRZkbeZoPZvSAu2EGmK7/oPkSDjzabSKxptQa0+3KL83papZyJZtiwtU5Z",
"BUOtIw5uGLWO9a0je2306TDG17QoDIzhlN2hELNlSNnTPhGQYzJ/ZMOrvzBWvK2EiJYHqEPhlsHFtU67",
"nK7IJWOFIUrCCYVxESrvzNM90FoR6JHqG56vGHFpBe7Rpr5Qm4S9xrm0eH3sQ/tAIl8wMll6lxubEOtb",
"wkSVOl8Yr4+ZBOA9l+a/gn3QjSA0dGwPyaQJhAl5/e70zGjIE8i9nGwVb9YCpIdaH4xiWO4j549d6kNL",
"z7VpBusvVit9LzL8rWdyfLWEC9CEWLqZo9hsgu3SJN6yuWHbJUut570DSZqmJVNqx0Iplv7Gb5qc6SUt",
"2ZpruLOn2yUjXXgTtdpNxv6sUiuWAThQheVWHCCGgwRTZi9sfJKHQs/qY6d1ypKq5HrlcydaFHDbIPp1",
"0fOnTFfFU6W40lRoFD5jaSehkCenRrZzOjjIXWYU4ofpUmtrSHsBeSl0izzo/pScryWodbcQhSeIc896",
"PRWnGCxkjTHW9cBLcvrT04OHj/DaqyofEsX/AXnF0xUEeRuBzFZLIJldlEto6VpNWkZPmA3cvEh+BnWG",
"/XguUQgdHA0OH073Hzy5nxw8nu4fHh6m92fTBw9nyf7jH57Q+wcJ3X80vZ8+erCfHjx89OTxD/vTH/Yf",
"p+zh/oP08f7BE7ZvBuL/YIOj+w8OHoCfGGfL5HzOxTyc6tHh9PFB8uhw+uTBwYNZev9w+uTw8f5s+mh/",
"/9GT/R/2k0N6/+Hj+4+T2SFNHzw4eHT4cHr/h8fJI/rDk4f7j5/UUx08/tQ1JDiInESprXkaSI9OEbL8",
"Oix64MZxZVW8b8X6VdomLqDhVHmlCH2+YfgRORYEK7FYX71yfhU7FsYwudA288O53w45fn4+QGOTU7l9",
"wIDPAKK4CtDVJtaOM1JZNd+D8hwjQ732sMTF6Pj5pCff1aLMlto0rv0lz9hpwZKNijUOPmwe0+bbVHP/",
"mF3X/IZWutapxGpOXQM9rFu6jRigOFvQ1745vaDCej2bkQNUNQYFt4zNU6au8Eh9jclZIF18PvJtEVCy",
"5ZH4o+4SOKuCUSd1UaS8llbZRQd0OC4pthz5sh4PTRn1iN4TG601RCMrbJLacMzoGEBnPnbNbaxJowcb",
"HTVmNXa8Yb+w2wTwr1wvaifMVqB2SnjivJVR0A+tmDokKStslD7QEecT+cbPZlvZMziOHv9O51SH6+Lw",
"OuMFloA6yLAqMklT1McweChqFsDB3uJqoMCPi+K8ruABgkYDdr2yxA0JDbciINwCe+s//OZ5YVJwnKvh",
"aYGYTUkZfOZYyjA8SmubkM3rzsorI3e85BkLIqAA0Qwnsa+ZZy4xpJbrw4Ts28KB+mL6+3AzaBFO5K/b",
"F8aVgHx/LtZgXc0m4Wh7ifH8d+W5X4oQriV6JUtPN2lubVai4LOaY9HUCMVWpwsi9Ki1qpLzan//4JG3",
"B1vprFIG8zuGZi3tgJG5UJjy98AKUPdU090RzaAKLLw7WGK9YfjTcJAFANrR1nILrpLWqWe1huy33jCE",
"NNcUxQ6bJXNaTdfUKDplAqz4PgsRQ+QUhFzvqeDbCSZn2ppxWtpaUY5KBm+aH9/Lqc9KJM/cmFjias50",
"+DuqXmDqperSJ0+7vzM5V+jWEozZOhxFxhOus5WbdsowihwcK+an1dBvxGgRmH/j3jVjSIGxD99BLUDd",
"nHrmMnbfy+n3wLvN6+aVewryOcForXnOxufC+fiE1Ggama4gvRO0EstHqCZFKbVMZOZqJnlooW8GgekL",
"P0Nm07SUkPlkRm7GZDQvhyw2UpkILrxxtvJty/DFBnF1hZzlrz+MGstdaNk8hj1SifqBoQzjnZNEZbGu",
"Wt/6rQdiol8GxEzVf0UlxD5QRIgD1eSSi9TmRGwNAx8ZlmU/yykEaWfZr96pZQszUHWZyTn+GAbHhq+f",
"0Xnc/dXIQIiWSKstWkGZLy1rbGxKMNvEunx+SKD94fD3/4/817///h+//+fv/+P3//ivf//9f/7+n7//",
"/2EuP1SVCOM+YBbQeo4Gexi4u6dme+/lVKEZ5/7B4RheAjNKJS4vUK45DHDy5JcfDYoWanBkxCqo6mqk",
"nfuj+/tYOfECEtXYUvlqnRAbjNUU2QfNhM3kGRfWNWRWciEr7QsZNdaHU/gV7sV3bss+dsYrpdRrx7O1",
"PLGI4EXNCQcZF9WH4PqB13pkj8oGPncjbkMk2BAr4gNet60Xv6FeSHjWm2Jk3Ku17XuryJo6nLAHap3w",
"AKQ1Yk7USmmW1wHf9ttWzT0IM0zkXHDFuuKVfbmOmaYkk0tWjhKqmDdb2incomyIyTke6PlgSM4HSy5S",
"uVT4R0rLJRf4b1kwMVWp+YPpZExO/VQyL6jmvgb8j/KeIpOyEsAHf3zz5nTyJ1JWgkzAvyozknKlId5v",
"QiyXpT78z5Vf9otU43PxVDn5k2bE7GjY2Ac5dzE/5wNnHLSl7NE248KxoZxiUUI+BFXkfNCUNt1454Ma",
"9rlURp4AseaSEc2U3kvZtJrbYpWKMKo4lIW00oiLC0XvNU9IKhMoBwyJLlnW2Fm0bEJfIop5cLF90cch",
"SWTBQwVz0i79NzajTXy14W7ZyDP7V53MYYg3Swm3/nEsxJJKpsQ9TXKqE0zvoImuaOZH6hjmz7DKMYiO",
"ql1NEvBIZmkQWNcsjt+uGOqLo7sSKefiuLFArojMkU8Na1sZlA1bFVSpVlXsTjpPFOg2HVzTOYpy9va5",
"cnB19G2QRn/83Ifm2Jo2lnej+kg18aU3p4wYEpNWGV5/sxQ0GkJ4AkZ3yTLYmMEul31l0NB94VfSTH/b",
"Soqy7tduPZwIkYvJWfGGJ2euvgi2OIH4NuU0aGeud9XdhoSP2dglXPgwmSBMarxbaY0v2SblJpImMWT3",
"Yrq6cNFKuwQv22CDyFq3TGHboWIIpNFoWRk83ZCviNFpYuVLBpj/S+vkGRt3tFu5gK/fReamcjUd6dnl",
"xLfN72wXNIk1sAnb1PjLtKFjjS17tDFBEZLkpO1WE5Qy+qzKVnHvhCE0YGBvFTUaNizuXUwJahdtnLkq",
"s/jE796+CtOU69kJ14plM+/JlEuRSZpuE4FUlz7yp4g5f7D/vlP5jMwin0ig5EyP2glHMf2xnvAu5QyF",
"t/oaSUNhWkhXJ66UJqybXVqjO+Y7y0aZ9brsIIi/XezfsWzTXSKG101H35IiuZn6Tmpd5TX8zZd4hMB7",
"J8pJS6VRFUPMs2ZusDcCxYITgzKuKOphyxsj2fvTA9udLDBg+E9EWhNJ6wU+F1Cp4DuQb6SLuJ44emur",
"iAmpCSupjWz15RzaUrtZ1vebyox1Y9QzLmyHEBt9C5EU9xRJfBsKDDDnYfo2kGvy5oqVy5JrhrI8l5WC",
"gkYiqDrh8kyj4kOsCN0rObfF5TwNwDp3Tip23SvMouFUYEJGy4z3lPLWDRK4A5WIIlcdzRnVB0oGYSkJ",
"A50QlHcuMCofx4k4+9cFgn4eFVhzydyksUtU73G7qiU2aNTnzXUSJYqLYI8tyeCE2N86larWOmS2M6j0",
"j/X5ga2axjoBnVGkFI7v15XDoDdLzvIp4ulWIn2jWlt3AahdbTOAutyO5AZH1XAtBdVvojG1n34bRlLo",
"u+zQUdsazV5tU0+ke2l2VY7aOLreQ+xG778dGN8deAxqi7e1RdsnI1+7LGJFVSwpGXBKORJSjzTLshEV",
"KylYGMl8NDgcH/TB/uhvLmDWSG6zvGBz27hnVHduGQwHOVdJJBP0mqHmduEfv/zNastnOFPT0RmbwiJz",
"/5Gd8rl40z6sRgFAa5m3B/j05Bg6sQQncVFX3FJLOp+zclTxGzqYVmnCboJDf62uzmpv/pgcIYmfTGdF",
"a04pY6w4tbaviG/a/OxtYy48AdVIl+l2amAGLlomUkzD9PKNqyPl08ZTumrqaX5sQ7BBURqTp0WRcWZr",
"NmKevDQfcrBbTVK6UhdydrFk7HIC4X7wTvO5ednVpo6sEGRCQQ4ejBayKslPPx29fl1nEWMLpBptw5EH",
"R4NcEl0RiKMAN2F6AVL30eD+D0f7+5i0YpU+m9IMeOXe2n8SrZPSnKQbE0kTNlKsoCVG6y7lKGPQdMrV",
"y7FQhyLNdIV8kbHLHjCT784HuUSPg66cs+H7MXkB1s6cUaHI+YBdsXJlxnNVcbq9kfz+A9EJANqTeeRA",
"8zFeiN0DavNwbR7rxx42odkYN1jxmnuhqWZ9OrVNKC/D9Lrt03yiGnEw2FaLSvsKMNIlvbx2BcYtFrph",
"eU3Lhy8pObTrCspQQvsRc6RM2VfkbGaUETAOtOte1gjUX+Azkt2PleqQbNWKp01yrEOCoaiuLScdsQ2o",
"i4z+Y7U+7KiZP2n9E6jNhQ0hgVzVHhaUVmoN0Cq8isy44GrR10F0+AXPc+j3t+Zk+6wxf6aKJ2sEz/Fn",
"lABe7lICeBcj+leptvulMgS/WC3cbSqI+go8Lc2q9Dm117AzbV/ittbHYopfqLCQp+ispMKbgrKVjaNc",
"OWmDzgnXgeMeqrKAbWPsXYPWTFwYgUHO6hL8Rv0kipu/qWBgfOlKCR2NrFGf0QydSvLjyTuCgRveyvPi",
"xV9fvBjXNWl/PHk3gmcRIaHZ7XDnUpqazsfkme1ebL2ZrRJH1FbbR8O9Tbmg4GYvqUhlTmBAbyJSis+F",
"o1RfyHayQbc4o/MtSX9N7T0SqI6dwO7AIELzRDWdX/AUdIsHh/cP0kc/JCNGH6WjBw8fPRo9mc4ejdiT",
"2f6TKXvwQ8KmEbXCjxCI+ps7h6wT/d2Ia6Hj1PzOYnZV4aPGkE9rpkYjyXaWrGb9p4/XdUjFu6REjCRn",
"6Ab3px2wqU+oZUNaslGH8tDucUGrWILQO8VKKCBhC+ZalnH8fEgKqtRSlqkvoQxqta0TYvQfZ7+szRoG",
"9QAwwNkMX613utC6GHz6BC0Y0eEHPUISHRhAPK0+YzS3rir8Uh3t7c1cuCCXe93iGBizSF7SMrdhsBAy",
"PRgOMp4wm8XhidOrq8PO+MvlcjwX1ViW8z37jdqbF9nocLw/ZmK80DkWE+Q6a6w296W3a2X//nh/DAqS",
"LJigBQeLjHmEeUhwMnu04HtXh3tJu6zQHA0lvg7FcQrt+HSz/hDImJACAqMd7O87qDIB31Ojg2IE+N57",
"60FDvN0yAL45HxxeE+jCYHXmU1EQBZ2gZVaM0TPNDPVZp0cpXuq/QdAfEKB6jBciLSS3Vb/ntg9/Z8BO",
"5WYD+Sh49yCUZ8+ZWfqA/ZKL9M8+qfwEM8duDNzxDpkReL+UlahzzEE99j1JP9VtK7/UurC4QWQdp77z",
"4NJI/MtSivm4dfovuY14lyXJZcnIs1fHrg8mOmsg7k2RJYWIOZCh3HZiSFFIFTkpSECOHBXwzj/LdPXF",
"oNEqpBIBi+sAKkvr64PIIyweIjGIDEvf3DweNQozdFf6S/PiDnGRGOYGRzrjgt09nPorzTg4XGmITddB",
"phaeWq/tVT2+a39eH+RGooJpSqMgEHgNyjbSrr4q1p7cGn7+UyAmZqfVGNlMXtvA7nYYpxcZMTVhSyni",
"JWZvf9aR71C4+NOwMdaK5llzrLZcvAlB2gfxFnrsXrG44NGVE9aextMkYUr53ruRaoqRIUmYyoUbuwc+",
"/TcFE09Pjl2iWpbJpW0vApHmgmZ7VpK0BzohBU0uzWGfi/7jVkxXxYi6+j79ZOeUXrFoSaGbITzRqaJM",
"MwSrod30CtG7hZQPIh2fWsgAEehLNqVF4YwkqVGRZlWW1X1cta00ZuTKu0dK3tUhRT2prVhxyFqdoMmN",
"gB2uyKwSCd5EKMS+Ab0NQsQwu7dyVD8ONjjf3keXbfpp76Nzwn5aR5IazLDZsNwo4NzAzpZvsCpckM9a",
"K87WUbWLitPN8TVafGTCwJncP2Gbev12g8w0nre9O8V0WloryTpr5HuHXZgamd7mS2sScIneBjl9ljfa",
"/nfU79Ytp1FbvDf5ux9VfRLU7lhaV/j8bwy9xgbUZyBnXRmgbT4g71Sd8OyEdpqmI2Qma7LgkIz64qBs",
"ihlfMwotXQzjiCWPkClVdfWmaSmXqpEOdn2Mr/e4O467+to9nB+Sb7AF1Y2w+kYTsu4h/yynNl8557qD",
"njepcaxZELjFKiPhIe+0WWJGVLPhrUGTdgXQfnD/4OZlhDNPUX06HNN0DllzIFPWaXPNF6JJcxx7X2cr",
"kla+OpltYJTQZOGQzw8F90FKkhnR5FzcqngEPxBXErNJCRDHrGcHakbKsnNHsK4DJNSFsg8Wi28M93Mz",
"h5DZS9m5VKjab3G1QK/9uvcrCZaw7no9iKfp73ghfLanoaLYh2NhBMpf3pxhdqVtrGfTF+r0PL2Q1Xzx",
"3xfqj3KhAK02XCfAfr9vMxKY0qCEypKbE9e1d5ZHrlmjC1q/WZ7pZPFjJqe0UacCUshulovEe8ZtJdAM",
"41fuzHXXc+nQcHuoWEU7wvXIRdBHDrKJWXllu5VGPlcbju8NVA3G7jh1FtIcAN2znNb55VSpETYww626",
"fzUPEHq9Mdv47YaoZW9buajts9lYrlnrHRu6SduYbXxt0qqwIVxIXHMK+azmprhGppYiProVilgyXJOQ",
"Qdu6mhDacxnfGWr1mpaXuNIQZMNaGnddTZKSa1ZyugHjYbzc3LadBkUe4KSFOuEKCxgYpgCo4iihrUoF",
"hczMiZvnefPQuyQXBi1KibbHBfPv+pT3KU0u56WsRDo+F79ImI/inZ20WxVOiFdVIezJfMVSUhUgKwnN",
"S3DtS5G6siA5RfREr10HPFg/dyUrwj4ULNFDrO7AeEkmdc+pSZ3IrmztXaOkZbgnCk1cYdaWbROIyd9d",
"L6y4zAWdhmw5oxsiILYdV8yE1y7s2iQVc6bHt63hNFov9bMkgGrgWbFxYlgZAiqq8JlBZhBhgBTY5kTw",
"4d0hBSAE+BIwBvDbcbe6OdYM+nFBoJhIiZIQ4NvlaUZ82/to/vsLzdla05CtkLKVYcgNeGfsNO06L70q",
"Bv7WlkNsLoUXeA1MoRmNh8SG8wly/ZutnbGsTPRc1BanoQa3CLSodcu/5HejIgAMUNk2uQaVCkjq1kCs",
"p/IMxY/XBeFHjDD7tJWsthVW+/oC/Ti9KQbut23EqedIggI65hmTr+ujSz6fG2n1donWO4EckaUEMgO6",
"vkkM6Aw4KaoAQ8JFklUpKkfKatPQ58uoA3KOxYZR5ba1kvwghl27IP2OeEB+kb7Bhup0+f5uxfT3TYOl",
"x6x+/eurYsStmAY56nZdptNSkFxX8vVmJvxIpCTI4eu7j3vTZsf8+M18C31WG/31b/NAbkTiqrcSU1iq",
"wuDvdxhzOrT1MVYF+97IXEHbeO+79HDc0pPs7iZNElZAeSwmdMmZNWoBWbGT3DWiAt2E3WptPXJz5wMQ",
"7Hq/vw5e3dxFX4tcYEtZg2BGtZpLjfAMalDB7b9LqIA0CkxAzWT4urS82wOgSSohmNbquH7LqrnD9VIH",
"Rsh4VPPuOQecOJXbwdrXtr2hqe9bQMo/uEmxedTXMC9GB200Iu9HIMV0WK6oxzcDmsBJXRPoD84i3U5s",
"Tm+Pq0OwJXGwuabJ0k3k846o8owRrZQHB33luFzTTbcEFwmH3/s42q9MNNcgq5cE6i1YMDTjXTYiaJ0d",
"uQ49T33tqj82cjZKuPWgZjPBGKIzrJn5Wmh62hjuOkjaXJDFVPBc+cN2Wc3KN/Dwkv8fBI2bm9wFiUEP",
"3ciez+Ctb4Mnw158Pl9cVkQYc6bCUmqqI/ncMbGQ2nVDATiaZeGqG9iwjbwX33EciZYLqkdLWWWp9Q+O",
"UtmLU97m9OuC6l/NR8f6+bci8DmPZJ+ch70SrFknYoMwyBfIUNjC0GWCO5sOJELjKBCJ4KpKu2gNrCU6",
"BDtTJuc2Cq5XHgOTke24Us9SD4eGJahfKLz7KyWJFC4nIFu5KbgKWmtb74OrVo9dEVHwlJXuMUp9GViE",
"uIodcPZcM7w9LIC7hmk3e8jeULxPc5KYFyrsGOdiNIhtqHl7zqdoD9BYjL/rgwnts22zzsAdjvx6/8nN",
"E0u/EpqVjKYrW0zcCgwPbtX3jqcHIWhiDoGsZKJaEK3byk2Ca4Ioz5MFkcKa92+N3VQtdtMiUs+wRS+t",
"O6Xi9VerPOPi0kcXQLdkhADGl2kkKhYolRFdsiywvmEfOKQWtkGWrfGe0CzzF7yO5KvpBwK1nf1gF0SJ",
"Ci8TLKbRuZmWjK6lGWHzv20pR3iyN0pFYg0otyUoX4GWRPsvxtZbTe2xQW8PCeJ8eBDDsJaYecc2LLSu",
"lDt1ZaC/Z90cOYSB7RqLCT+FLLWyF79mvHZjGxH+KWacURet6NlGe0DfYs5FQGKfSlxFTXbgXaWNgOCX",
"0L0lMOzeR9fD9NPeR3jC/7HGoR62M5Qlc6G1LRlw6+60UDy1KzC6V3fyww878wbl4l1jR18pPjKr2/02",
"s9bNin+78YvXaWG5pSHyTl2isIxZ3Woz2nS1IWAG92Ud8fYY+c+NjMOYUcUSFVc20/ocbOv7lM1YSXwn",
"V9drJ7MZm+eDg/0fzgceseq4OlAqwL+nq1I4kb7envJyHIZV+ta5nQPHSDyaKYljKJkzKRhhmYJx6vrl",
"sWUCtgAAF4xiSQELwv9nhNOMnlExem72OXoHAwwiMAwadcZgKEs+54JmMKcZH1r3YIH0TIYF1X2LYa6D",
"flW2RTAPqbZV8lwNLEEohzegLdWcY0z6pr29sQsbvbQLG2yMVdpGnpGJZnqkdMlo3qQQXlOfcmHu93Bz",
"YvgznEO1+pJfw67oxNCuSfFg/4dNr1t0bCCiJTkY3/s4OkJpPzfqAIbhTpleMovsFpxBNJDX2m04yMz3",
"VZdlh+540dnhMig7DyNdiPASu9Tp9bfW3cD65ljEc7GrckamzHzo55+uGvcOJYpJ7xU6IubMJraCIVCX",
"RnTyLWdTbOBAwBlsPkU/3yHNeN3Gj3A/Z7JM+DRbkSSTtonDT2dnJySRQmAgu2uOJKHQpCW8ttqmapwX",
"I+wDTTRRNGdWktTSNVIjqayMkIcfKGhCi29hqiHeprrWYOQEyFSmq15WGua0mylq7aILlobk6B0nfQF+",
"L2mZn9ZtWG5IMKpneQui9/UrYIXOA67qCL0ZLfMNSfo4dWcU1h4kgB9YZ/c+2t4/n9Yb8KHc3VZhq76V",
"0N00sNqWBVHHE5akFTN5Ry3zzaZWa8yekS/WnPye7Ziy/vRdD65vBQncftbhAnTVcvjQExDWljjhwwVV",
"REAjGbJi+m6hUxjB0WlghpHuOcOsDtz7BgeiraTTCttwQ443IJ6G1sxbIN+ZefHuIJ9mH/RekVEudqxM",
"dNYGzreCV0FcGVWazNjSdlwKkAxb2m9FvcJP/Hiui9NarNouqCJoynSrWPXlLbid1njffFwFssBvILAC",
"O575fDpwY7DZjCXaqQXQxRhHoIosWZa1swvNt4zaSiGLKqdCYQw5CPfggr/itFu9pC4Fbu4INAZwNwoD",
"QuFi1fdqQrhQmtF2Ll5QXr23JI4vhH5zUriVc91U1xbCvcDcaHBel5JZL4ejaqx8w27sNOdM6NqWBvB5",
"oLSeLqLh4DGM8rne03RuTmK+XTZOXdF6W0OGpvM6MeYuR7CHLQugxDtchkpgsWvVaFftw/zN7tA3YsZQ",
"UFqgPsYazBtC3teA9cshclCNPE7Gg81HUNgL/eFrvXvdhu/NvwDbK6oITLGEXROoX547boSnzUZuAeya",
"BkGDabbbp79OWOHk7mTG2tKBVGBUA9QZ3AZZGog2tNuENi82nZ02cbOPkG2IFfQHpm7lmr3qyfeoG/Gr",
"8ZpszGX4Wv89i1f4hSCIr34BdkP8W6R05jIFoUBoT3ZxQdDkRHmXz5AoWdtLE5pl1lB6KeQSwtjevTt+",
"fncuoQ+AEWy56/VDSaSJevHbFnSz3HThbuG29V21v4AXxK11011TW8HIJpO4T52o23C4xNoAdIG399H2",
"xthB9NpKpfTD3nw6dKdetsUdz6NsLOTdlPictrS0fRiPNd78ROa5b9oMPuAEQpbBAWVr3NYGlKVvg8MF",
"mdgWbBNQrtCD2nwJQ1Zs/6ehYeIF4ZrMeKn0mDwVK7TI4Gthq5VgGOdzBbJe+R5n15M7vypOfWlSsIbj",
"bptWvfR917aRV0jKNIU6dct6mh1u/jZWJavzd5uR3fbR3ZQQEW2wdheMTXfEDtSLgNtZgxxG74SUTqDu",
"NXQ25OlvAg07TdF6cLAro5Pj56phQqj91q6HOpGzf04cDSrKG0ghNNSCF94C9uvu+JkxVoxU0HV5E5dr",
"tmn+llhec2fbNDUBb36jL/W6pG4WCnVCxr68myi4gXJ9VYy4MU66CRlcjnb7FK9tmfJ9sb+qXeqatMkI",
"cLJ0lrVGP+EImrfcGNh7kJUj/Hud/IYvenn75s7/bdAPcZ31SRK3+ls1zThIsLRfXO+4U+5OjJ1bfsO8",
"0lEUOjJafSSG5dVfqghSGX1vJGezNaIXn4s3s9lWLpi7B0vbIRRIbKM36N+g3WirRGqg81JF6vbmawH+",
"jGYZRns664yWJLNuOFfmFMx3esFW90pG5lCKxg4/7j0VseFQxI1ebTtF/6XOmaYp1fQrGFvDZv9/iCu9",
"NRo+rfSCCQ1ZBa5Pn8EGF4raZy34bJzEQG4tYQabwywDTsXrA49irLaJxFHBODi1wddGDlip0258EEev",
"QCok6f/ibmPV7hjiMuRcU39WYtaJWPUAoRcVRvhm2k/COoeVDm7a5uMnimkttf9CeTzdWUL9A1MeS9Xt",
"uTl7MoQlJN64oAhNDNnIWIq1HTHxzFKUUTMmyqEL+Fa5qBOeLJVh5SiTCc2AwNFMfWmqdsUau6li7iUI",
"DlrDZ608buPGb66+rjW894Z1Q7m6oN1LH7n6Rbp6qj6t1RcZC+weD/YPv2DrQ0SxXsQ8YaXrPPOcCY6k",
"09Y/iJvOMYTOsjyaaH6FllgG7lFXYyvL5BJ9FRYsduslny80EXJpA/gOb5fBuItEBeT0oQPPSOGwOszM",
"g4z/uYSW9jazBS/cjpfWugepHz+AxqbbBDjlFM4y3hQoGkHXf13MkGh/+xaCUe1O+q6jlY24wCW6wMBr",
"WTXsWN3o09gtqXM8VMNj5zDJlfVU0ubD+bHr0nS3bTD5TObUMOqqyyHRq4InEHtouzWBwFyUcl4ypYbQ",
"zsk1uJAlmVGeVSXbyGEcX1FMpA1HnQG3Gx2qb7OSbb4pezldjfiorPrDSl/TlTWlVOKbSEp5TVd/Yax4",
"ix7nb0w9w8BvK8bU2d+BxBy43gMGVVaC7JFLxgrniq8DwMmbwtWOgkREyoUilKCrPZRJvVMm5n/vQeSO",
"RA/KXrCy1pq4qqPS16O2rHRR6VFRyrRK1gn6hli+gZdP3Lt3gjlAza+99wWb75qNPbTfFmL+tRK5D7ZM",
"5Abpz6You7YfD+7fv/mL9oqJuV744kd/CjvHpTzFfuGGylJiQTCyn2Bevl3p4c2v9ISuIF8X2tbR0vb7",
"enD/4W24EVRVFLI0B/WapZySs1VhPWaAYgQxygmTU59uXneBDaO/Hhw8uZ0Og67+BXJKIB1SYoepmbnY",
"ttCedUvrRSm1zpgtx/eHkjwwz90AOpdKk5IlmP3vSwfCflEeCLLdOQAH+06Zj2tHCBMKa/9hDgVI7/aU",
"zZf3FEn5nCkoHtw+Y/LMVx+AOLGTX34EOP988uJHYlHJDFpkVIh4nNY6gUcvqnwqKM/UXlGyK86Wjizx",
"EgsmOmpPkPo7MQggWl45al6V2eBosDcIjFBtYnXcDILqtAVzmOLZASSpdAuJ/CynzkwKMtrfK1Zyg351",
"u9Nhqx3FuFFFU0UGfXpy3OwPGZrIZJ5XAsVNKFDSXvq47cCNTGCx4bVfE3l6cjzs786MzazMNsxdKWXm",
"VtSZDJyOkVI5WH7AzwJ8oq6dYCHoe1a+l1NfES6cw5Y7+PTbp/8TAAD//zpWTzTPEQEA",
"qpRMONVI981uLpi4uqLlwCJGv5TiTJ+d87A/kJIZPRPkeEoU2tmswQ7o3QeWVJptMsn22zs9+wh+djCO",
"053gk9ixvChLWXb386NRaXhCmPmZlEwVUigWMx6nEVT/6ezshKCFk5g3vI7gByLHhl8nWZWiKQgvxSqT",
"NCVKIlZ7AOJqG7DNMrs0LtAWy6XRnZ+ZyR7uH3qu4+0nKdV0SlGfnlZqZbgTI7BQtyjLvKTQlAtCyb23",
"TJer0dOZZuU9fHXBKJhozPK4SHlCNVPWCIdauOY52hTMUTDlFeyS6ZKzdExegjbuZB87IFcgHRk0oUYC",
"dwLDPWX5nnk3yTgTYBpKJVEyZ0b5nTdUTiOzsQ94eTjNyJQml3I2Q47pjdZOXu1azHOmFJ3HcK+FXHDu",
"9ftRzLpiQr+kZX66lRm+fvMtM3zMD/GznL4rDN+PakSKaW/AHhKDHWDLIKcyuWT6+M3e6387O0M0QBEX",
"hRNlDqIkgi3NQzUkk6JkV1xW6gLxduLtT+wDoikCsS2yZUyzC3vWLL2gEa5yPLM6c8aAYxlq7b+wwpOz",
"8vCcKU3zghiqjghlcM0hk/lUaVmiPPUyozkTifSMvnnMBmYjM2KUUUWI2Lt3x8+dFPgzOCs2+Dlq0ao5",
"0C80D7XU2IctcG/CDiNveR9N6PXxGtPD/RhCl2xWMrW4ABt35Gj8HfYiqL1lagF2c/s9EBy7m3sKLea1",
"fAtYhxqPMhfWAF4NDdKB3JpSUHUYTRZANK54WtEMvXVLmMUbkLSUhgis3CDWal6UNAFrXq/5ZHcg9vu4",
"YOoIepx55JQzklGl7Sq3xrklVRd4Y9IeZxJeUYPl741Gb1+u74i57VqSiS4rNrEKiv2lttCB0giWVp7e",
"q23liumhpczmJrnbnRd6tZV1Ey6AA07gwLNuucBx10S6Xtr4iir91hp0+yicRVBZ1ghqIF8bgnlO5zV/",
"ddCzy4xL/lu5MIcDvajyqaA82wKtwq0cmxWBMyamE+BcVF3af/lJ+sHEZ+zZKomJ1J4AZnzGRol5ibAr",
"MDhY/4LRHoErqkWFFodULsXQCCcl/FkVQ8J0EiPu25gT/eJgqagZtXbda/vDT6i6fCXnfecPzv1Mzkmy",
"qMSlZXBaEkqAr2lZ8GTP8TpSSpmTlCFNS/E9K0MZkA/hyZXkqRknBRmkRXBicMhkxGLwzKzH0XhtVzkm",
"r+nKS1B5lWlegFgimIJ32QcdVVEcQqxlSRAGMdzR916jmtnG2mPYRso4AzBuEDMAHB05A6jBdQUNQ/+v",
"moEO2/Py7QA33IU4bOb7Gif9XMbfjM64zjc3xc9i7MFTOKt8RdiFP8leXESt8Iz2EgV8gZzR+QZU5Nqj",
"YYy+oSVwHST9UrZl32AD3JJ9b2a5ffaxAEzbXFp8c+O1XSJY10AsoeLCSA+01OvsO1zZKUH5o5WWI/tV",
"3MRj4RRVHpyMifZ2pmuN1i7XQNsOMP5i0j8ufxuaYe7NhWJMxNyrSjt9mKtwveZ9ZwMJjJTbrX0z6Vm6",
"1X8u8UEw7Ep+4l9dIF7t8vEz+OIt6n43K5pfsVJZv8MWZK6furlxho27ErvDTcuAM9ABdQSjYgr2xCWF",
"+AtDN1XGWAEmOnMlqX2vEpdCLgWuAUS6qOGuY10wc2KUBQRd2oXgtJ/a917taMHoRkbg4ygcrAz71/oE",
"goXNOTgDD8cHo8ePRvM0PXyQPjz8wZ3B0eD/lVXp7tAAQndK7Q9zcDg+HNGsWND94GzCx+S7ztjfd/cP",
"q9jJsdJYxse1+NbEZAsGr9F4D1rOqNWyF1VOhZEyVZXDZyhjlSxjVDEyrXiWuiBYcCoZ0kAVmYSrmqCK",
"IIFk159AVJY1TOLXkznXE2K/AnNj1P/UOvD6HjRA4a+OgWgMG37GAFqaZW9mg6O/rUe4U+ctM199Gn5c",
"IzOu9Z84rZK4L4gUXp+MyusYdhKzg5sfwLnnKNLWJOif3pZ2DSPOzgxh/BnCrTv0DWLtp98Qj/+cyeQy",
"40r3Oy+RUVvjGy0ZGMEh2pWlJGElqJGgTaGLUxoxzVp6EoecW/mPwvW8ELpcxVxH3Zc6Dsn14eG4n211",
"KPt2DxFtnUA9dBgN3kNCntvrEQ+JNU8JncpKY7yq0z+tFOkkTGtO4g3xssUXFzSn4iJZsORSVnq9z/MU",
"Xibu5SDcyC2gZLm8YimhmRRzDA538SHbBB8219IDmrilqrPwF0JW80XoXQJ2QQMnTMFZwoiWc9xiymcz",
"VoLpGE4QbLfma0LJQoLJLgOhhbx7+8q5dCK2vDE5k8DcIDQJI3TevhqaRwnVTFDNyPng45Qq9mnvoxRe",
"6lXVbMY/MPXpfBDTXcwHTbQssygVssM0XLMbYvFbRwFTBSP1HMVrqpTD1FOWsSQe+XLiHZgYKm5+mzJL",
"0d/LqXK2+hqFDboEQhToKJZmXeT0w+BocLB/cDjafzTav392//Do/oOj+w//df/gaH+/K/x0v+5EcWYZ",
"LgSd8axkIck1C5vJErz8jq/WvKl1+Xagz1GQMk1Tqimw/zSFCE2anUTMmg3G29hMOeW6pOWK5HYwh9Bj",
"8tpsw1DXjH0IY+esjzOXZhcQf1IpLuZkQsfTcTIxZL2+QzaAtnVGRSlhH0eD06LkmpGXJZ8vtGE2ipVj",
"loMheqBW05KJ/3tqQzBkOXdvWHn4FF4gp/p//68rlg164HRijfXPvE7WPPPQw5TTDzw32sn9/f3hIOcC",
"/4q4m1rXwA/Sg/+nQfRR/LB0WbGeb/s1p4SKxBwDpgoVaK8ZDmaU48OCVgr+8feKVfgafDHyctQA98Eq",
"hqpXZWA98jSpGc1d45FfVh9U0VMdD2bB34K0ABs9gKFkX0RciutkQ7esvlPSsuxlE/ZH4BM+itIF5HuR",
"0lyPSkH4IrI48xbyA5aSGc+YQqYrWMKUouUqRsBbDC5qLr/3zHHX4+f3gggIEN1czEGbEYeZP2PylBtN",
"SOBK3Scxpu3sUFZIcMx7Vsrcb71PVYoB+oyqS3Va5TktV7GctbzIwMFHMis9Yt6Sg/qYPEO/A0aHWGu7",
"izs1j9whgSPW/D6OmEStm3groRLszHbBW8TD9TJC9W8Vwz2HTIvnRut+OBzkAVHvI5OfhgPIprqYriDj",
"0LIrCEeujQ/WEsVFg2B4OmBJxG9dFohr+VhTv/vx6JHP5j4veaaNQl5zn6HjJa+O//KiZiXRJAc5mynW",
"XGg0KqAG1ccd8g3VlvS6b0dhSOsuuwpOrX0r3jJdlQKNwyCBgNBMHfXkVtyALeyiK7XDBAKk7kfgviBO",
"QP1t7xSaMq55lyLe2IBDYjx6OQJDYVUMhvWTRaVTuYyzNWsQeCbFjM+rkjoptblJrl7yUum3ldjgGeAK",
"pHuOIr8hoDPzYR04ZucjZSWCGBOfsAbiFSUztiQzakixGhIbqy+kGEFWp9FCknC9wGSMAOqUah9aPWUQ",
"m5IX2pB085ZesJUVqcU9TaasN+gE+Agm/6Vb6X6wCl1SoWasJE9PjiHxxIUWj3tCW4DFvpIJjesHzz1L",
"An5nuJm5aTCX/Xi80cDRnqW9u2F4wDHUs6f2V1pyF/7bRpALvZRLGuFtbwQbLemKXNmPMeAdsj6l0hA/",
"Ks0lt/mFkJLCIUGwZJA5mkMAkmG8k49GDv40sQomLzGj0YkkC0jiUc7j5UoH+CBn5ysbk7OljKwJzKN2",
"0rSTzOGlH2aXX2RUG21m5G02mNML4oIdZLryi+5DNPhos4nEmlZrQLsvtzivp1XKmWgGC1vrlFUw1Dri",
"4IZR61jfOrLXRp8OY3xNi8LAGE7ZHQoxW4ZEPe3T/zim8Ec2vPoLY8XbSohoUYA6FG4ZXFzrtMvpilwy",
"VhiiJJxQGBeh8s483QOtFYEeqb7h+YoRl1bgHm3qC7VJ2GucS4vXxz60DyTyBSOTpXe5sQmxviVMT6mz",
"hPH6mEkA3nNp/ivYB90IQkPH9pBMmkCYkNfvTs+MhjyBjMvJVvFmLUB6qPXBKIblPl7+2CU8tPRcm1yw",
"/mK1wuEjw996/sZXS7MATYilmzmKzZLYLjniLZsbtl2y1HreO5CkaVoypXYsj2Lpb/ymyZle0pKtuYY7",
"e7pdCtKFN1Gr3WTszyqwYhmAA1VYZMUBYjhIMFH2wsYneSj0rD52WqcsqUquVz53okUBtw2iXxc9f8p0",
"VTxViitNhUbhM5Z2Egp5cmpkO6eDg9xlRiF+mC61toa0F5CXQrfIfu5PxPlaglp3C1F4gjj3rNdTcYrB",
"QtYYY10PvCSnPz09ePgIr72q8iFR/B+QTTxdQZC3EchsjQSS2UW5hJau1aRl9ITZwM2L5GdQ59WP5xKF",
"0MHR4PDhdP/Bk/vJwePp/uHhYXp/Nn3wcJbsP/7hCb1/kND9R9P76aMH++nBw0dPHv+wP/1h/3HKHu4/",
"SB/vHzxh+2Yg/g82OLr/4OAB+IlxtkzO51zMw6keHU4fHySPDqdPHhw8mKX3D6dPDh/vz6aP9vcfPdn/",
"YT85pPcfPr7/OJkd0vTBg4NHhw+n9394nDyiPzx5uP/4ST3VweNPXUOCg8hJlNqap4H06BQhy6/DUgdu",
"HFdMxftWrF+lbeICGk6VV4rQ5xuGH5FjQbD+ivXVK+dXsWNhDJMLbTM/nPvtkOPn5wM0NjmV2wcM+Awg",
"iqsAXW1i7TgjlVXzPSjKMTLUaw8LW4yOn096slwtymypTePaX/KMnRYs2ahY4+DD5jFtvk0194/Zdc1v",
"aKVrnUqs0tQ10MO6pduIAYqzBX3tm9MLKqzXsxk5QFVjUHDL2Oxk6sqN1NeYnAXSxecj3xYBJVseiT/q",
"LoGzKhh1UhdFymtplV10QIfjkmLLkS/r8dCUUY/oPbHRCkM0ssImqQ3HjI4BdOZj19zGmjR6sNFRY1Zj",
"xxv2C7tNAP/K9aJ2wmwFaqeEJ85bGQX90IqpQ5KywkbpAx1xPpFv/Gy2lT2D4+jx73ROdbguDq8zXmAJ",
"qIMMqyKTNEV9DIOHomYBHOwtrgbK+rgozusKHiBoNGDXK0vckNBwKwLCLbC3/sNvnhcmBce5Gp4WiNmU",
"lMFnjqUMw6O0tgnZvO6svDJyx0uesSACChDNcBL7mnnmEkNquT5MyL4tHKgvpr8PN4MW4UT+un1hXAnI",
"9+diDVbTbBKOtpcYz39XnvulCOFaoley9HST5tZmJQo+qzkWTY1QbHW6IEKPWqsqOa/29w8eeXuwlc4q",
"ZTC/Y2jW0g4YmQuFKX8PrAB1TzXdHdEMqsDCu4Ml1huGPw0HWQCgHW0tt+AqaZ16VmvIfusNQ0hzTVHs",
"sFkyp9V0TWWiUybAiu+zEDFETkHI9Z4Kvp1gcqatFKelrRDlqGTwpvnxvZz6rETyzI2Jha3mTIe/o+oF",
"pl6qLn3ytPs7k3OFbi3BmK3DUWQ84TpbuWmnDKPIwbFifloN/UaMFoH5N+5dM4YUGPvwHVQA1M2pZy5j",
"972cfg+827xuXrmnIJ8TjNaa52x8LpyPT0iNppHpCtI7QSuxfIRqUpRSy0RmrlKShxb6ZhCYvtwzZDZN",
"SwmZT2bkZkxG83LIYiOVieDCG2cr37b4XmwQV03IWf76w6ix3IWWzWPYI5WoHxjKMN45SVQW62r0rd96",
"ICb6ZUDMVP1XVELsA0WEOFBNLrlIbU7E1jDwkWFZ9rOcQpB2lv3qnVq2MANVl5mc449hcGz4+hmdx91f",
"jQyEaGG02qIVFPfSssbGpgSzTazL54cE2h8Of///yH/9++//8ft//v4/fv+P//r33//n7//5+/8f5vJD",
"VYkw7gNmAa3naLCHgbt7arb3Xk4VmnHuHxyO4SUwo1Ti8gLlmsMAJ09++dGgaKEGR0asglquRtq5P7q/",
"j/USLyBRjS2Vr9EJscFYQ5F90EzYTJ5xYV1DZiUXstK+fFFjfTiFX+FefOe22GNnvFJKvXY8W8ETSwde",
"1JxwkHFRfQiuH3itR/aobOBzN+I2RIINsSI+4HXbKvEb6oWEZ70pRsa9Wtu+t4qsqcMJe6DWCQ9AWiPm",
"RK2UZnkd8G2/bVXagzDDRM4FV6wrXtmX65hpSjK5ZOUooYp5s6Wdwi3Khpic44GeD4bkfLDkIpVLhX+k",
"tFxygf+WBRNTlZo/mE7G5NRPJfOCau4rv/8o7ykyKSsBfPDHN29OJ38iZSXIBPyrMiMpVxri/SbEclnq",
"w/9c0WW/SDU+F0+Vkz9pRsyOho19kHMX83M+cMZBW8AebTMuHBuKKBYl5ENQRc4HTWnTjXc+qGGfS2Xk",
"CRBrLhnRTOm9lE2ruS1RqQijikMxSCuNuLhQ9F7zhKQygSLAkOiSZY2dRcsm9CWimAcX25d6HJJEFjxU",
"MCftgn9jM9rE1xjuFos8s3/VyRyGeLOUcOsfx0IsqWRK3NMkpzrB9A6a6IpmfqSOYf4MaxuD6KjaNSQB",
"j2SWBoF1zZL47TqhviS6K5FyLo4bC+SKyBz51LC2lUHZsFVBlWrVwu6k80SBbtPBNZ2jKGdvnysHV0ff",
"Bmn0x899aI6taWN5N6qPVBNfcHPKiCExaZXh9TdLQaMhhCdgdJcsg40Z7HLZVwYN3Rd+Jc30t62kKOt+",
"7dbDiRC5mJwVb3Ny5uqLYGMTiG9TToN25npX3W1I+JiNXcKFD5MJwqTGu5XW+JLNUW4iaRJDdi+mqwsX",
"rbRL8LINNoisdcsUth0qhkAajZaVwdMN+YoYnSZWvmSA+b+0Tp6xcUe7lQv4+r1jbipX05GeXU582/zO",
"dkGTWNuasDmNv0wb+tTYskcbExQhSU7aHjVBKaPPqmwV904YQgMG9lZRo2HD4t7FlKB20caZqzKLT/zu",
"7aswTbmenXCtWDbznky5FJmk6TYRSHXpI3+KmPMH++87lc/ILPKJBErO9KidcBTTH+sJ71LOUHirr5E0",
"FKaFdHXiSmnCutmlNbpjvrNsFFevyw6C+NvF/h3LNt0lYnjddPQtKZKbqe+k1lVew998iUcIvHeinLRU",
"GlUxxDxr5gZ7I1AsODEo44qiHja6MZK9Pz2w3ckCA4b/RKQ1kbRe4HMBlQq+A/lGuojriaO3toqYkJqw",
"ktrIVl/OoS21m2V9v6nMWDdGPePC9gWx0bcQSXFPkcQ3n8AAcx6mbwO5Jm+uWLksuWYoy3NZKShoJIKq",
"Ey7PNCo+xIrQvZJzW1zO0wCsc+ekYtezwiwaTgUmZLTMeE8Bb90ggTtQiShy1dGcUX2gZBCWkjDQCUF5",
"5wKj8nGciLN/XSDo51GBNZfMTRq7RPUet6taYoNGfd5cJ1GiuAj22JIMToj9rVOpaq1DZjuDSv9Ynx/Y",
"qmms/88ZRUrh+H5dOQw6suQsnyKebiXSN6q1dReA2tU2A6jL7UhucFQN11JQ/SYaU/vpt2Ekhb7LDh21",
"rdHs1Tb1RLqXZlflqI2j6z3EbvT+24Hx3YHHoLZ4W1u0fTLytcsiVlTFkpIBp5QjIfVIsywbUbGSgoWR",
"zEeDw/FBH+yP/uYCZo3kNssLNrftekZ1v5bBcJBzlUQyQa8Zam4X/vHL36y2fIYzNR2dsSksMvcf2Smf",
"izftw2oUALSWeXuAT0+Oof9KcBIXdcUttaTzOStHFb+hg2mVJuwmOPTX6uqs9uaPyRGS+Ml0VrTmlDLG",
"ilNr+4r4ps3P3jbmwhNQjXSZbqcGZuCiZSLFNEwv37g6Uj5tPKWrpp7mxzYEGxSlMXlaFBlntmYj5slL",
"8yEHu9UkpSt1IWcXS8YuJxDuB+80n5uXXW3qyApBJhTk4MFoIauS/PTT0evXdRYxNj6q0TYceXA0yCXR",
"FYE4CnATphcgdR8N7v9wtL+PSStW6bMpzYBX7q39J9E6Kc1JujGRNGEjxQpaYrTuUo4yBq2mXL0cC3Uo",
"0kxXyBcZu+wBM/nufJBL9Djoyjkbvh+TF2DtzBkVipwP2BUrV2Y8VxWn2xHJ7z8QnQCgPZlHDjQf44XY",
"PaA2D9fmsX7sYROajXGDFa+5F5pq1qdT24TyMkyv2z7NJ6oRB4Nttai0rwAjXdLLa1dg3GKhG5bXtHz4",
"kpJDu66gDCW0HzFHypR9Rc5mRhkB40C77mWNQP0FPiPZ/VipDslWrXjaJMc6JBiK6tpy0hHbgLrI6D9W",
"68OOmvmT1j+B2lzYBhLIVe1hQWml1gCtwqvIjAuuFn19Q4df8DyHfn9rTrbPGvNnqniyRvAcf0YJ4OUu",
"JYB3MaJ/lWq7XypD8IvVwt2mgqivwNPSrEqfU3sNO9P2JW5rfSym+IUKC3mKzkoqvCkoW9k4ypWTNuic",
"cB047qEqC9g2xt41aM3EhREY5KwuwW/UT6K4+ZsKBsaXrpTQ0cga9RnN0KkkP568Ixi44a08L1789cWL",
"cV2T9seTdyN4FhESmj0Ody6lqel8TJ7ZnsXWm9kqcURttX003NuUCwpu9pKKVOYEBvQmIqX4XDhK9YVs",
"Jxt0izM635L019TeI4Hq2AnsDgwiNE9U0/kFT0G3eHB4/yB99EMyYvRROnrw8NGj0ZPp7NGIPZntP5my",
"Bz8kbBpRK/wIgai/uXPIOtHfjbgWOk7N7yxmVxU+agz5tGZqNJJsZ8lq1n/6eF2HVLxLSsRIcoZucH/a",
"AZv6hFo2pCUbdSgP7R4XtIolCL1TrIQCErZgrmUZx8+HpKBKLWWZ+hLKoFbbOiFG/3H2y9qsYVAPAAOc",
"zfDVeqcLrYvBp0/QeBEdftAjJNGBAcTT6jNGc+uqwi/V0d7ezIULBmF+e90qGRi8SF7SMrfxsBA7PRgO",
"Mp4wm87hqdSrq8PORMvlcjwXFYxvv1F78yIbHY73x0yMFzrHqoJcZ41l574Gd6313x/vj0FTkgUTtOBg",
"mjGPMCEJjmiPFnzv6nAvadcXmqPFxBekOE6hL59uFiICYRNyQWC0g/19B14m4HtqlFEMBd97b11piMBb",
"RsI354NTbAJdGPTOfE4K4qKTuMyKMYymmao+67Qoxdv9N4j+A0pUj/FCpIXktvz33Lbh7wzYKeFsIB8F",
"7x7E9Ow5e0sfsF9ykf7ZZ5efYArZjYE73iAzAu+XshJ1sjnoyb4lKbxsIxy/0LqwykFkHae+BeHSiP7L",
"Uor5uHX6L7kNfZclyWXJyLNXx64hJnptIABOkSWF0DkQptx2YkhRSBU5KchEjhwVMNE/y3T1xaDRqqgS",
"AYtrBSpL6/SDECSsIiIxmgxr4Nw8HjUqNHRX+kvz4g5xkRjvBkc644LdPZz6K804eF5piE3XQaYWnlr3",
"7VU9vut+Xh/kRqKC+UqjICJ4Dco28q++Ktae3Bp+/lMgJqap1RjZzGLbwO52GKcXGTFHYUsp4iWmcX/W",
"ke9QwfjTsDHWiuZZc6y2gLwJQdoH8Raa7V6xuODRlRPWnsbTJGFK+Sa8kbKKkSFJmNOFG7sHzv03BRNP",
"T45dxlqWyaXtMwIh54Jme1aStAc6IQVNLs1hn4v+41ZMV8WIukI//WTnlF6xaG2hmyE80amiTDMEq6Hd",
"9ArRu4WUDyKtn1rIAKHoSzalReGsJanRlWZVltUNXbUtOWbkyrtHSt7VsUU9Oa5Yesian6DbjYAdrsis",
"EgneRKjIvgG9DULEMLu3hFQ/DjY4395Hl3b6ae+j88Z+WkeSGsyw2bncaOLcwM7WcbAqXJDYWmvQ1mO1",
"i4rTTfY16nxkwsCr3D9hm3r9doPMNJ7AvTvFdFpaK9s6ayR+h+2YGinf5ktrG3AZ3wY5fbo3OgF21O/W",
"LadRZLw3C7wfVX021O5YWpf6/G8MvcYG1GcgZ10ioG0+IO9UnfnshHaapiNkJmvS4ZCM+iqhbIqpXzMK",
"vV0M44hlkZApVXUZp2kpl6qRF3Z9jK/3uDuOu0LbPZwfsnCwF9WNsPpGN7LuIf8spzZxOee6g543qXGs",
"WRD4xyoj4SHvtOliRlSzca5Bt3YF0H5w/+DmZYQzT1F9XhzTdA7pcyBT1vlzzRei2XMcm2BnK5JWvkyZ",
"7WSU0GThkM8PBfdBSpIZ0eRc3Kp4BD8QVxuzSQkQx6yLB4pHyrJzR7DAA2TWhbIPVo1vDPdzM5mQ2UvZ",
"uVSo2m9xtUCv/br3KwmWsO56PYjn6+94IXzap6Gi2JBjYQTKX96cYZql7bBn8xjqPD29kNV88d8X6o9y",
"oQCtNlwnwH6/bzMSmNKglsqSmxPXtZuWR65Zox1av1me6WTxYyantFGwAnLJbpaLxJvHbSXQDONX7sy1",
"2XN50XB7qFhFW8P1yEXQUA7Sill5ZduWRj5XG47vDZQPxjY5dTrSHADds5zW+eVUqRF2MsOtun81DxCa",
"vjHbAe6GqGVvf7mo7bPZYa5Z9B07u0nboW18bdKqsDNcSFxzComt5qa4jqaWIj66FYpYMlyTkEH/upoQ",
"2nMZ3xlq9ZqWl7jSEGTDWhp37U2SkmtWcroB42G83Ny2nQZFHuCkhTrzCisZGKYAqOIooS1PBRXNzImb",
"53nz0LskFwYtSom2xwXz7/rc9ylNLuelrEQ6Phe/SJiP4p2dtHsWTohXVSH+yXzFUlIVICsJzUvw8UuR",
"uvogOUX0RK9dBzxYSHclK8I+FCzRQyzzwHhJJnXzqUmd0a5sEV6jpGW4JwrdXGHWlm0TiMnfXVOsuMwF",
"LYdsXaMbIiC2L1fMhNeu8NokFXOmx7et4TR6MPWzJIBq4FmxAWNYIgJKq/CZQWYQYYAU2C5F8OHdIQUg",
"BPhaMAbw23G3ukvWDBpzQcSYSImSEOnb5WlGfNv7aP77C83ZWtOQLZWylWHIDXhn7DTtgi+9Kgb+1pZD",
"bFKFF3gNTKErjYfEhvMJkv6bPZ6xvkz0XNQWp6EGtwi0qHXLv+R3oyIADFDZdrsGlQpI6tZArKfyDMWP",
"1wXhRww1+7SVrLYVVvtCA/04vSkY7rdtxKnnSIICOuYZky/wo0s+nxtp9XaJ1juBHJGlBFIEur5JjOwM",
"OCmqAEPCRZJVKSpHymrT0PDLqANyjlWHUeW2RZP8IIZdu2j9jnhAfpG+04bqtPv+bsX0902Dpcesfv3r",
"q2LErZgGOep2XabTUpBce/L1Zib8SKQkSObru49702br/PjNfAsNVxuN9m/zQG5E4qq3ElNYqsLg73cY",
"fDq0hTJWBfveyFxB/3jvu/Rw3NKT7O4mTRJWQJ0sJnTJmTVqAVmxk9w1ogJthd1qbWFyc+cDEOx6v78O",
"Xt3cRV+LXGBLWYNgRrWaS43wDIpRwe2/S6iANApMQM2s+LrGvNsDoEkqIZjW6rh+y6q5w/VSB0bIeFTz",
"7jkHnDiV28Ha17a9oanvW0DKP7hJsXnU1zAvRgdtdCTvRyDFdFi3qMc3A5rASV0c6A/OIt1ObHJvj6tD",
"sCVxsLmmydJN5BOQqPKMEa2UBwd9dblc9023BBcJh9/7ONqvTDTXIKuXBOotWDA04102ImidJrkOPU99",
"Eas/NnI2arn1oGYz0xiiM6yZ+VpoetoY7jpI2lyQxVTwXPnDdunNynfy8JL/HwSNm5vcBYlBD93Ins/g",
"rW+DJ8NefGJfXFZEGHOmwppqqiP53DGxkNp1QyU4mmXhqhvYsI28F99xHImWC6pHS1llqfUPjlLZi1Pe",
"5vTrgupfzUfH+vm3IvA5j2SfnIdNE6xZJ2KDMMgXyFDYy9ClhDubDmRE4ygQieDKS7toDSwqOgQ7Uybn",
"NgquVx4Dk5FtvVLPUg+HhiUoZCi8+ysliRQuJyBbuSm4CnpsW++DK1uP7RFR8JSV7jFKfRlYhLiKrXD2",
"XFe8PayEu4ZpN5vJ3lC8T3OSmBcqbB3nYjSI7ax5e86naDPQWIy/a4gJfbRt187AHY78ev/JzRNLvxKa",
"lYymK1tV3AoMD27V946nByFoYg6BrGSiWhCt+8tNgmuCKM+TBZHCmvdvjd1ULXbTIlLPsFcvrVum4vVX",
"qzzj4tJHF0DbZIQAxpdpJCoWKJURXbIssL5hQzikFrZTli32ntAs8xe8juSr6QcCtZ39YBdEiQovEyym",
"0cKZloyupRlhF8BtKUd4sjdKRWKdKLclKF+BlkQbMcbWW03tsUGTDwnifHgQw7ComHnHdi60rpQ7dWWg",
"0WfdJTmEgW0fiwk/hSy1she/Zrx2YxsR/ilmnFEXrejZRntA32vORUBiw0pcRU124F2ljYDgl9C9JTDs",
"3kfXzPTT3kd4wv+xxqEe9jWUJXOhtS0ZcOs2tVBFtSswuld38sMPO/MGdeNdh0dfMj4yq9v9NrPWXYt/",
"u/GL1+lluaUh8k5dorCeWd1zM9p9tSFgBvdlHfH2GPnPjYzDmFHFEhVXP9P6HGwP/JTNWEl8S1fXdCez",
"GZvng4P9H84HHrHquDpQKsC/p6tSOJG+3p7ychyGVfoeup0Dx0g8mimJYyiZMykYYZmCcepC5rFlArYA",
"ABeMYkkBC8L/Z4TTjJ5RMXpu9jl6BwMMIjAMOnbGYChLPueCZjCnGR96+GCl9EyGldV9r2Gug8ZVtlcw",
"D6m2VfJcMSxBKIc3oD/VnGNM+qa9vbELG720CxtsjFXaRp6RiWZ6pHTJaN6kEF5Tn3Jh7vdwc2L4M5xD",
"tRqUX8Ou6MTQrknxYP+HTa9bdGwgoiU5GN/7ODpCaT836gCG4U6ZXjKL7BacQTSQ19ptOMjMN1iXZYfu",
"eNHZ4TIoOw8j7YjwErvU6fW31t3A+uZYxHOxq3JGpsx86Oefrhr3DiWKSe8VOiLmzCa2lCFQl0Z08i1n",
"U2zgQMAZbD5FP98hzXjdxo9wP2eyTPg0W5Ekk7abw09nZyckkUJgILvrkiSh4qQlvLbspmqcFyPsA000",
"UTRnVpLU0nVUI6msjJCHHyjoRotvYaoh3qa66GDkBMhUpqteVhrmtJspau2iC5aG5OgdJ30Bfi9pmZ/W",
"/VhuSDCqZ3kLovf1K2CFzgOu6gi9GS3zDUn6OHVnFNYeJIAfWGf3PtomQJ/WG/Ch7t1WYau+p9DdNLDa",
"3gVRxxPWphUzeUct883uVmvMnpEv1pz8nm2dsv70XTOubwUJ3H7W4QK013L40BMQ1pY44cMFVURARxmy",
"YvpuoVMYwdHpZIaR7jnDrA7c+wYHoq2k0wrbcEOONyCehh7NWyDfmXnx7iCfZh/0XpFRLnasTHTWBs63",
"gldBXBlVmszY0rZeCpAMe9tvRb3CT/x4rp3TWqzaLqgi6M50q1j15S24nR5533xcBbLAbyCwAluf+Xw6",
"cGOw2Ywl2qkF0M4YR6CKLFmWtbMLzbeM2kohiyqnQmEMOQj34IK/4rRbvaSuCW7uCHQIcDcKA0LhYtX3",
"akK4UJrRdi5eUGe9tySOr4h+c1K4lXPdVNcWwr3A3Oh0XpeSWS+Ho2qsfOdubDnnTOjalgbweaC0ni6i",
"4eAxjPK53tN0bk5ivl02Tl3aeltDhqbzOjHmLkewh70LoNY7XIZKYNVr1ehb7cP8ze7QN2LGUFBaoD7G",
"GswbQt7XgPXLIXJQljxOxoPNR1DYC/3ha7173Ybvzb8A2yuqCEyxhF0TqF+eO26Ep81GbgHsmgZBg2m2",
"7ae/Tljh5O5kxtrSgVRgVAPUGdwGWRqINrTbhH4vNp2dNnGzj5BtiBX0B6Zu5Zq96sn3qDvyq/GabMxl",
"+Fr/PYtX+IUgiK9+AXZD/FukdOYyBaFAaE92cUHQ7UR5l8+QKFnbSxOaZdZQeinkEsLY3r07fn53LqEP",
"gBFsuev1Q0mkiXrx2xa0tdx04W7htvVdtb+AF8StddNdU1vByCaTuE+dqNtwuMTaAHSBt/fRNsnYQfTa",
"SqX0w958OnSnXrbFHc+jbCzk3ZT4nLa0tA0ZjzXe/ETmue/eDD7gBEKWwQFla9zWBpSl74fDBZnYXmwT",
"UK7Qg9p8CUNWbCOooWHiBeGazHip9Jg8FSu0yOBrYc+VYBjncwWyXvlmZ9eTO78qTn1pUrCG426bVr30",
"Ddi2kVdIyjSFOnXLepodbv42ViWr83e7kt320d2UEBHttHYXjE13xA7Ui4DbWYMcRu+ElE6g7jV0NuTp",
"bwINO93RenCwK6OT4+eqYUKo/daumTqRs39OHA0qyhtIITTUghfeAvbr7viZMVaMVNB+eROXa/Zr/pZY",
"XnNn2zQ1AW9+o0H1uqRuFgp1Qsa+vJsouIFyfVWMuDFOugkZXI52+xSvbZnyDbK/ql3qmrTJCHCydJa1",
"RmPhCJq33BjYhJCVI/x7nfyGL3p5++bO/23QGHGd9UkSt/pbNc04SLC0X1zvuFPuToydW37DvNJRFDoy",
"Wn0khuXVX6oIUhl9byRnszWiF5+LN7PZVi6YuwdL2yoUSGyjSejfoO9oq0RqoPNSReo+52sB/oxmGUZ7",
"OuuMliSzbjhX5hTMd3rBVvdKRuZQisYOP+49FbHhUMSNXm07Rf+lzpmmKdX0Kxhbw67/f4grvTUaPq30",
"ggkNWQWuT5/BBheK2mct+GycxEBuLWEGm8MsA07F6wOPYqy2icRRwTg4tcHXRg5YqdNufBBHr0AqJOn/",
"4m5j1e4Y4jLkXHd/VmLWiVj1AKEXFUb4ZtpPwjqHlQ5u2ubjJ4ppLbX/Qnk83VlC/QNTHkvV7bk5ezKE",
"JSTeuKAITQzZyFiKtR0x8cxSlFEzJsqhC/hWuagTniyVYeUokwnNgMDRTH1pqnbFGrupYu4lCA5aw2et",
"PG7jxm+uvq41vPeGdUO5uqDdSx+5+kW6eqo+rdUXGQvsHg/2D79g60NEsV7EPGGl6zzznAmOpNPWP4ib",
"zjGEzrI8mmh+hZZYBu5RV2Mry+QSfRUWLHbrJZ8vNBFyaQP4Dm+XwbiLRAXk9KEDz0jhsDrMzIOM/7mE",
"3vY2swUv3I6X1roHqR8/gMam2wQ45RTOMt4UKBpB139dzJBof/sWglHtTvquo5WNuMAlusDAa1k17Fjd",
"6NPYLalzPFTDY+cwyZX1VNLmw/mx69J0t20w+Uzm1DDqqssh0auCJxB7aLs1gcBclHJeMqWG0M7JNbiQ",
"JZlRnlUl28hhHF9RTKQNR50Btxsdqm+zkm2+KXs5XY34qKz6w0pf05U1pVTim0hKeU1Xf2GseIse529M",
"PcPAbyvG1NnfgcQcuN4DBlVWguyRS8YK54qvA8DJm8LVjoJERMqFIpSgqz2USb1TJuZ/70HkjkQPyl6w",
"staauKqj0tejtqx0UelRUcq0StYJ+oZYvoGXT9y7d4I5QM2vvfcFm++ajT203xZi/rUSuQ+2TOQG6c+m",
"KLu2Hw/u37/5i/aKible+OJHfwo7x6U8xX7hhspSYkEwsp9gXr5d6eHNr/SEriBfF9rW0dL2+3pw/+Ft",
"uBFUVRSyNAf1mqWckrNVYT1mgGIEMcoJk1Ofbl53gQ2jvx4cPLmdDoOu/gVySiAdUmKHqZm52LbQnnVL",
"60Uptc6YLcf3h5I8MM/dADqXSpOSJZj970sHwn5RHgiy3TkAB/tOmY9rRwgTCmv/YQ4FSO/2lM2X9xRJ",
"+ZwpKB7cPmPyzFcfgDixk19+BDj/fPLiR2JRyQxaZFSIeJzWOoFHL6p8KijP1F5RsivOlo4s8RILJjpq",
"T5D6OzEIIFpeOWpeldngaLA3CIxQbWJ13AyC6rQFc5ji2QEkqXQLifwsp85MCjLa3ytWcoN+dbvTYasd",
"xbhRRVNFBn16ctzsDxmayGSeVwLFTShQ0l76uO3AjUxgseG1XxN5enI87O/OjM2szDbMXSll5lbUmQyc",
"jpFSOVh+wM8CfKKunWAh6HtWvpdTXxEunMOWO/j026f/EwAA//9t3o1qzhEBAA==",
}
// GetSwagger returns the content of the embedded swagger specification file

View File

@ -14,3 +14,23 @@ sql:
rename:
uuid: "UUID"
uuids: "UUIDs"
jobuuid: "JobUUID"
taskUUID: "TaskUUID"
workeruuid: "WorkerUUID"
- engine: "sqlite"
schema: "internal/manager/persistence/sqlc/schema.sql"
queries: "internal/manager/persistence/sqlc/query_workers.sql"
gen:
go:
out: "internal/manager/persistence/sqlc"
overrides:
- db_type: "jsonb"
go_type:
import: "encoding/json"
type: "RawMessage"
rename:
uuid: "UUID"
uuids: "UUIDs"
jobuuid: "JobUUID"
taskUUID: "TaskUUID"
workeruuid: "WorkerUUID"

View File

@ -28,7 +28,7 @@ import StatusFilterBar from '@/components/StatusFilterBar.vue';
export default {
name: 'JobsTable',
props: ['activeJobID'],
emits: ['tableRowClicked', 'activeJobDeleted'],
emits: ['tableRowClicked', 'activeJobDeleted', 'jobDeleted'],
components: {
JobActionsBar,
StatusFilterBar,
@ -166,7 +166,10 @@ export default {
if (row) promise = row.delete();
else promise = Promise.resolve();
promise.finally(() => {
this.$emit('jobDeleted', jobUpdate.id);
if (jobUpdate.id == this.activeJobID) {
this.$emit('activeJobDeleted', jobUpdate.id);
}
});
} else {
if (row) promise = this.tabulator.updateData([jobUpdate]);

View File

@ -8,15 +8,46 @@ object-relational mapper (but see the note below).
Since SQLite has limited support for altering table schemas, migration requires
copying old data to a temporary table with the new schema, then swap out the
tables. Because of this, avoid `NOT NULL` columns, as they will be problematic
in this process.
tables.
## SQLC
Flamenco mostly uses [GORM][gorm] for interfacing with its SQLite database. This
is gradually being phased out, to be replaced with [SQLC][sqlc].
Flamenco mostly uses [GORM](https://gorm.io/) for interfacing with its SQLite database. This
is gradually being phased out, to be replaced with [SQLC](https://sqlc.dev/).
To generate the SQLC schema file:
### Installing & using SQLC
SQLC can be installed ([installation docs][sqlc-install]) with a `go install`
command just like any other Go package, but that does depend on a C/C++
compiler:
```sh
go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest
```
The [precompiled sqlc binaries][sqlc-precompiled] work just as well, so choose
whatever works for you.
{{< hint type=important >}}
Installing sqlc itself is only necessary to regenerate the database code. Once
generated, the code is independent of sqlc.
Since installing sqlc via `go install` requires a C/C++ compiler, it is **not** part
of the `make with-deps` script. Because of this, it is also **not** included in the
`make generate-go` script.
{{< /hint >}}
[sqlc-install]: https://docs.sqlc.dev/en/latest/overview/install.html
[sqlc-precompiled]: https://docs.sqlc.dev/en/latest/overview/install.html#downloads
### Handling Schema changes
Database schema changes are managed with [Goose][goose]. Every change is defined
in a separate SQL file, and has the queries to make the change and to roll it
back. Of course the roll-back is only possible when no data was removed.
SQLC needs to know the final schema those Goose migrations produced. To generate
the SQLC schema from the database itself, run:
```sh
make db-migrate-up
go run ./cmd/sqlc-export-schema
@ -27,3 +58,5 @@ To generate Go code with SQLC after changing `schema.sql` or `queries.sql`:
go install github.com/sqlc-dev/sqlc/cmd/sqlc@latest
sqlc generate
```
[goose]: https://github.com/pressly/goose

View File

@ -126,7 +126,10 @@ enable the race condition checker, and all other kinds of useful things.
If you're interested in helping out with Flamenco development, please read [Get Involved][get-involved]!
If you need to change or add any database queries, read through the [database section][database].
[get-involved]: {{<ref "development/get-involved" >}}
[database]: {{<ref "development/database" >}}
## Software Design

View File

@ -137,6 +137,9 @@ Storage Services][cloud-storage].
There can be a few causes for this, each with their own solution.
1. **Check the Manager output on the terminal** for any messages related to "auto-discovery" or "UPnP/SSDP". Older versions of Spotify can interfere, so make sure to close that before you start the Manager.
![Screenshot of Flamenco Manager's log output on the terminal](ssdp-port-already-in-use.webp)
2. Ensure that the **Manager port is open** in your firewall. On Windows, the system will prompt you for this during the initial setup. If you're using a third-party firewall (sometimes presenting itself as anti-virus software), you may need to create a custom rule manually. The default port is `8080`, which can be changed in [the Manager configuration file][managercfg].
3. If that doesn't help, you'll have to **tell the Worker where it can find the Manager**. This can be done on the commandline, by running it like `flamenco-worker -manager http://192.168.0.1:8080/` (adjust the address to your situation) or more permanently by editing [the worker configuration file][workercfg].
@ -206,3 +209,18 @@ The Worker was deleted via the Flamenco Manager web interface.
This is shown on the Manager after an "unknown worker is trying to communicate"
message. It is also shown on the Worker for the same reason. See
[What does "unknown worker is trying to communicate" mean?](#what-does-unknown-worker-is-trying-to-communicate-mean)
### Why are all the `\` doubled?
You may see double backslashes in Flamenco Manager's log output, error messages,
etc. To give an example, `C:\path-to\myfile.blend` may be shown as
`C:\\path-to\\myfile.blend`. The technical reason for this is that the path
separator `\` has double duty, and in certain cases can also be used as an
'escape character', denoting that the character *following* it should be treated
specially. Common uses are a newline `\n` or a quote within quoted text: `"They
said \"hello\" to me"`. In such cases, a literal backslash should be escaped as
well, and thus `\` becomes `\\`.
In other words, even though it looks strange, this is not a bug in Flamenco. The
aim is for you to see these doubled backslashes as little as possible, but
unfortunately they cannot always be avoided.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 67 KiB

After

Width:  |  Height:  |  Size: 67 KiB

View File

@ -4,14 +4,25 @@ weight: 30
---
This section contains third-party job types for Flamenco. These have been
submitted by the community. If you wish to contribute, consider joining the
[Blender Chat channel][flamencochannel] and chime-in there.
submitted by the community. If you wish to contribute your custom job type,
either
- join the [#flamenco Blender Chat channel][flamencochannel] and poke `@dr.sybren`, or
- write an [issue in the tracker][tracker] with your proposal.
## How can I create my own Job Type?
This is described in [Job Types][jobtypes]. It is recommended to use the
[built-in scripts][built-in-scripts] as examples and adjust them from there.
## Installation
Each job type consists of a `.js` file. After downloading, place those in the
`scripts` directory next to the Flamenco Manager executable. Create the
directory if necessary. Then restart Flamenco Manager and in Blender press the
"Refresh from Manager" button.
## Third-Party Job Types
{{< flamenco/toc-children >}}
@ -19,3 +30,4 @@ This is described [Job Types][jobtypes]. It is recommended to use the
[jobtypes]: {{< ref "usage/job-types" >}}
[built-in-scripts]: https://projects.blender.org/studio/flamenco/src/branch/main/internal/manager/job_compilers/scripts
[flamencochannel]: https://blender.chat/channel/flamenco
[tracker]: https://projects.blender.org/studio/flamenco/issues/new?template=.gitea%2fissue_template%2fjobtype.yaml

View File

@ -3,18 +3,13 @@ title: Compositor Nodes
weight: 10
---
*Job type documented and maintained by: [Dylan Blanqué][author]. Please report any issues at [the script's Github project][github].*
{{< flamenco/thirdPartyCompatibility blender="v4.0" flamenco="v3.5" >}}
Documented and maintained by [Dylan Blanqué][author].
Please report any issues at [the script's GitHub project][github].
[author]: https://projects.blender.org/Dylan-Blanque
[github]: https://github.com/dblanque/flamenco-compositor-script/issues
{{< hint >}}
This is a community-made job type. It may not reflect the same design as the
rest of Flamenco, as it was made for a specific person to solve a specific need.
{{< /hint >}}
{{< /flamenco/thirdPartyCompatibility >}}
This job type updates Blender's compositor nodes to work with Flamenco.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 122 KiB

After

Width:  |  Height:  |  Size: 122 KiB

View File

@ -0,0 +1,317 @@
// SPDX-License-Identifier: GPL-3.0-or-later
// Declarative description of this job type: the label shown in Blender's
// submission UI and the list of settings (some artist-editable, some
// evaluated automatically at submission time).
const JOB_TYPE = {
label: 'Cycles OPTIX GPU',
description:
'OPTIX GPU rendering + extra checkboxes for some experimental features + extra CLI args for Blender',
settings: [
// Settings for artists to determine:
{
key: 'frames',
type: 'string',
required: true,
eval: "f'{C.scene.frame_start}-{C.scene.frame_end}'",
evalInfo: {
showLinkButton: true,
description: 'Scene frame range',
},
description: "Frame range to render. Examples: '47', '1-30', '3, 5-10, 47-327'",
},
{
key: 'chunk_size',
type: 'int32',
default: 1,
description: 'Number of frames to render in one Blender render task',
visible: 'submission',
},
// render_output_root + add_path_components determine the value of render_output_path.
{
key: 'render_output_root',
type: 'string',
subtype: 'dir_path',
required: true,
visible: 'submission',
description:
'Base directory of where render output is stored. Will have some job-specific parts appended to it',
},
{
key: 'add_path_components',
type: 'int32',
required: true,
default: 0,
propargs: { min: 0, max: 32 },
visible: 'submission',
description:
'Number of path components of the current blend file to use in the render output path',
},
{
// Derived from the two settings above; not directly editable.
key: 'render_output_path',
type: 'string',
subtype: 'file_path',
editable: false,
eval: "str(Path(abspath(settings.render_output_root), last_n_dir_parts(settings.add_path_components), jobname, '{timestamp}', '######'))",
description: 'Final file path of where render output will be saved',
},
// Toggles for experimental Blender feature flags; when enabled they are
// applied via a --python-expr snippet in authorRenderTasks().
{
key: 'experimental_gp3',
label: 'Experimental: GPv3',
description: 'Experimental Flag: Grease Pencil 3',
type: 'bool',
required: false,
},
{
key: 'experimental_new_anim',
label: 'Experimental: Baklava',
description: 'Experimental Flag: New Animation Data-block',
type: 'bool',
required: false,
},
// Extra CLI arguments for Blender, for debugging purposes.
{
key: 'blender_args_before',
label: 'Blender CLI args: Before',
description: 'CLI arguments for Blender, placed before the .blend filename',
type: 'string',
required: false,
},
{
key: 'blender_args_after',
label: 'After',
description: 'CLI arguments for Blender, placed after the .blend filename',
type: 'string',
required: false,
},
// Automatically evaluated settings:
{
key: 'blendfile',
type: 'string',
required: true,
description: 'Path of the Blend file to render',
visible: 'web',
},
{
key: 'fps',
type: 'float',
eval: 'C.scene.render.fps / C.scene.render.fps_base',
visible: 'hidden',
},
{
key: 'format',
type: 'string',
required: true,
eval: 'C.scene.render.image_settings.file_format',
visible: 'web',
},
{
key: 'image_file_extension',
type: 'string',
required: true,
eval: 'C.scene.render.file_extension',
visible: 'hidden',
description: 'File extension used when rendering images',
},
{
key: 'has_previews',
type: 'bool',
required: false,
eval: 'C.scene.render.image_settings.use_preview',
visible: 'hidden',
description: 'Whether Blender will render preview images.',
},
],
};
// Set of scene.render.image_settings.file_format values that produce
// files which FFmpeg is known not to handle as input.
// Consulted by authorCreateVideoTask() to decide whether preview JPEGs
// are needed instead of the raw render output.
const ffmpegIncompatibleImageFormats = new Set([
'EXR',
'MULTILAYER', // Old CLI-style format indicators
'OPEN_EXR',
'OPEN_EXR_MULTILAYER', // DNA values for these formats.
]);
// File formats that would cause rendering to video.
// This is not supported by this job type.
const videoFormats = ['FFMPEG', 'AVI_RAW', 'AVI_JPEG'];
// Entry point called by Flamenco Manager: turn the submitted job into render
// tasks, plus an optional preview-video task that depends on all of them.
function compileJob(job) {
  print('Blender Render job submitted');
  print('job: ', job);

  const settings = job.settings;

  // Rendering straight to a video container is not supported here.
  if (videoFormats.indexOf(settings.format) !== -1) {
    throw `This job type only renders images, and not "${settings.format}"`;
  }

  const renderOutput = renderOutputPath(job);
  // Store the actually-used render output so it is visible when the job is
  // investigated later.
  settings.render_output_path = renderOutput;

  const renderDir = path.dirname(renderOutput);
  const renderTasks = authorRenderTasks(settings, renderDir, renderOutput);
  const videoTask = authorCreateVideoTask(settings, renderDir);

  for (const renderTask of renderTasks) {
    job.addTask(renderTask);
  }

  if (videoTask) {
    // The video can only be assembled after every render task has finished.
    for (const renderTask of renderTasks) {
      videoTask.addDependency(renderTask);
    }
    job.addTask(videoTask);
  }

  cleanupJobSettings(settings);
}
// Do field replacement on the render output path.
function renderOutputPath(job) {
let path = job.settings.render_output_path;
if (!path) {
throw 'no render_output_path setting!';
}
return path.replace(/{([^}]+)}/g, (match, group0) => {
switch (group0) {
case 'timestamp':
return formatTimestampLocal(job.created);
default:
return match;
}
});
}
// Python snippet passed to Blender via `--python-expr`: switches Cycles'
// compute device type to OPTIX and enables every non-CPU OPTIX device.
const enable_all_optix = `
import bpy
cycles_prefs = bpy.context.preferences.addons['cycles'].preferences
cycles_prefs.compute_device_type = 'OPTIX'
for dev in cycles_prefs.get_devices_for_type('OPTIX'):
dev.use = (dev.type != 'CPU')
`;
// Common prologue for the experimental-flags snippet; per-flag assignment
// lines are appended to this in authorRenderTasks().
const enable_experimental_common = `
import bpy
exp_prefs = bpy.context.preferences.experimental
`;
// Create one Blender render task per frame chunk.
// Each task renders `chunk_size` frames with OPTIX GPU rendering enabled and
// any requested experimental flags applied.
function authorRenderTasks(settings, renderDir, renderOutput) {
  print('authorRenderTasks(', renderDir, renderOutput, ')');

  // Optional extra CLI arguments from the submitter.
  const argsBefore = shellSplit(settings.blender_args_before);
  const argsAfter = shellSplit(settings.blender_args_after);

  // Arguments that are identical for every render task.
  const sharedArgs = [
    '--python-expr',
    enable_all_optix,
    '--python-expr',
    "import bpy; bpy.context.scene.cycles.device = 'GPU'",
    '--render-output',
    path.join(renderDir, path.basename(renderOutput)),
    '--render-format',
    settings.format,
  ].concat(argsAfter);

  // Collect Python lines that enable the requested experimental flags.
  const pySnippets = [enable_experimental_common];
  if (settings.experimental_gp3) {
    pySnippets.push('exp_prefs.use_grease_pencil_version3 = True');
  }
  if (settings.experimental_new_anim) {
    pySnippets.push('exp_prefs.use_animation_baklava = True');
  }
  // More than just the common prologue means at least one flag was enabled.
  if (pySnippets.length > 1) {
    sharedArgs.push('--python-expr');
    sharedArgs.push(pySnippets.join('\n'));
  }

  // Construct a task for each chunk.
  const renderTasks = [];
  for (const chunk of frameChunker(settings.frames, settings.chunk_size)) {
    const task = author.Task(`render-${chunk}`, 'blender');
    const command = author.Command('blender-render', {
      exe: '{blender}',
      exeArgs: '{blenderArgs}',
      argsBefore: argsBefore,
      blendfile: settings.blendfile,
      args: sharedArgs.concat([
        '--render-frame',
        chunk.replaceAll('-', '..'), // Convert to Blender frame range notation.
      ]),
    });
    task.addCommand(command);
    renderTasks.push(task);
  }
  return renderTasks;
}
// Create the ffmpeg task that assembles rendered frames into a preview video.
// Returns undefined when no video can be made: either the render format is
// FFmpeg-incompatible and no preview images are rendered, or the FPS is unknown.
function authorCreateVideoTask(settings, renderDir) {
  const needsPreviews = ffmpegIncompatibleImageFormats.has(settings.format);
  if (needsPreviews && !settings.has_previews) {
    print('Not authoring video task, FFmpeg-incompatible render output');
    return;
  }
  if (!settings.fps) {
    print('Not authoring video task, no FPS known:', settings);
    return;
  }

  const stem = path.stem(settings.blendfile).replace('.flamenco', '');
  const outfile = path.join(renderDir, `${stem}-${settings.frames}.mp4`);
  // Use preview JPEGs as input when the real render output is unusable.
  const inputExt = needsPreviews ? '.jpg' : settings.image_file_extension;

  const task = author.Task('preview-video', 'ffmpeg');
  const command = author.Command('frames-to-video', {
    exe: 'ffmpeg',
    fps: settings.fps,
    inputGlob: path.join(renderDir, `*${inputExt}`),
    outputFile: outfile,
    args: [
      '-c:v',
      'h264',
      '-crf',
      '20',
      '-g',
      '18',
      '-vf',
      'pad=ceil(iw/2)*2:ceil(ih/2)*2',
      '-pix_fmt',
      'yuv420p',
      '-r',
      settings.fps,
      '-y', // Be sure to always pass either "-n" or "-y".
    ],
  });
  task.addCommand(command);

  print(`Creating output video for ${settings.format}`);
  return task;
}
// Clean up empty job settings so that they're no longer shown in the web UI.
// Clean up empty job settings so that they're no longer shown in the web UI.
// Only the optional settings listed below are considered; falsy values
// ('', false, undefined, …) are removed from the settings object in place.
function cleanupJobSettings(settings) {
  const optionalSettings = [
    'blender_args_before',
    'blender_args_after',
    'experimental_gp3',
    'experimental_new_anim',
  ];
  for (const name of optionalSettings) {
    if (!settings[name]) {
      delete settings[name];
    }
  }
}

View File

@ -0,0 +1,36 @@
---
title: Cycles/OPTIX + Experimental
weight: 20
resources:
- name: screenshot
src: cycles-optix-gpu.png
title: Screenshot of the Flamenco job submission panel in Blender
---
{{< flamenco/thirdPartyCompatibility blender="v4.2-alpha+" flamenco="v3.6-alpha+" >}}
Documented and maintained by [Sybren Stüvel][author].
Please report any issues at [Flamenco's tracker][tracker].
[author]: https://projects.blender.org/dr.sybren
[tracker]: https://projects.blender.org/studio/flamenco/issues
{{< /flamenco/thirdPartyCompatibility >}}
This job type is the most-used one at [Blender Studio](https://studio.blender.org/). It includes a few features:
- Always enable GPU rendering with OPTIX.
- Checkboxes to enable specific experimental flags.
- Extra input fields for arbitrary commandline arguments for Blender.
To use, download [cycles_optix_gpu.js](cycles_optix_gpu.js) and place it in the
`scripts` directory next to the Flamenco Manager executable. Create the
directory if necessary. Then restart Flamenco Manager and in Blender press the
"Refresh from Manager" button.
<style>
figure {
width: 30em;
}
</style>
{{< img name="screenshot" size="medium" >}}

View File

@ -0,0 +1,39 @@
{{/*
This is an adjusted copy of themes/hugo-geekdoc/layouts/shortcodes/hint.html
- Adds a CSS class ("compatibility-box") for custom styling.
- Uses a different default title ("Compatibility Information").
- Adds "blender" and "flamenco" version parameters, shown in a definition list
  next to the community-job-type disclaimer.
*/}}
{{ $type := default "note" (.Get "type") }}
{{ $icon := .Get "icon" }}
{{ $title := default "Compatibility Information" (.Get "title") }}
{{ $blender := default "unknown" (.Get "blender" ) }}
{{ $flamenco := default "unknown" (.Get "flamenco" ) }}
<blockquote class="gdoc-hint {{ $type | lower }} compatibility-box">
<div class="gdoc-hint__title flex align-center">
{{- with $icon -}}
<svg class="gdoc-icon {{ . }}">
<use xlink:href="#{{ . }}"></use>
</svg>
<span>{{ $title }}</span>
{{- else -}}
<i class="fa {{ $type | lower }}" title="{{ $title }}"></i>
{{- end -}}
</div>
<div class="gdoc-hint__text">
<div class="infobox">
<dl class="versions">
<dt>Blender</dt>
<dd>{{ $blender }}</dd>
<dt>Flamenco</dt>
<dd>{{ $flamenco }}</dd>
</dl>
<p class="disclaimer">This is a community-made job type. It may not reflect the same design as the
rest of Flamenco, as it was made for a specific person to solve a specific need.</p>
</div>
{{ .Inner | $.Page.RenderString }}
</div>
</blockquote>

View File

@ -208,3 +208,38 @@ article p {
table tbody td {
vertical-align: top;
}
/* 3rd party job types compatibility notes. */
/* Version list and disclaimer side by side. */
.compatibility-box .infobox {
display: flex;
justify-content: space-around;
align-items: flex-start;
}
.compatibility-box p.disclaimer {
font-style: italic;
flex-basis: 70%;
text-align: justify;
}
.compatibility-box .infobox dl, .compatibility-box .infobox p.disclaimer {
margin: 0.6ex;
}
/* Definition list of Blender/Flamenco versions, laid out as term/value rows. */
.compatibility-box dl {
flex-basis: 30%;
display: flex;
flex-flow: row wrap;
}
.compatibility-box dl dt {
margin: 0;
flex-basis: 55%;
padding: 0.2em 0.4em;
text-align: right;
}
/* Render "Blender:" instead of "Blender" without changing the markup. */
.compatibility-box dl dt::after {
content: ":";
}
.compatibility-box dl dd {
flex-basis: 45%;
flex-grow: 1;
margin: 0;
padding: 0.2em 0.4em;
}