From 2bcbd9bdbdb533ddfe50488828a800ac48fdbb82 Mon Sep 17 00:00:00 2001
From: Ethan <39577870+ethanndickson@users.noreply.github.com>
Date: Wed, 28 May 2025 17:17:38 +1000
Subject: [PATCH 001/296] fix(site): remove trailing comment from cursor.svg (#18072)

The trailing comment was preventing the SVG from rendering on Coder Desktop
macOS with the SVG loader we use.

I've moved it to a place where it apparently no longer causes the problem,
though it's not clear why; https://validator.w3.org/ had no complaints about
either version.

I tested this by hardcoding the icon to the one served by a build of coder
with this new SVG.

![image.png](https://graphite-user-uploaded-assets-prod.s3.amazonaws.com/jI7h94jB23BidWsYTSCk/4c94ae5f-d0e2-496e-90eb-4968cf40d639.png)

The first icon is without the trailing comment, the second is with it.

---
 site/static/icon/cursor.svg | 41 +++++++++++++++++++------------------
 1 file changed, 21 insertions(+), 20 deletions(-)

diff --git a/site/static/icon/cursor.svg b/site/static/icon/cursor.svg
index 5c37cb9c053b0..f224c88d6c985 100644
--- a/site/static/icon/cursor.svg
+++ b/site/static/icon/cursor.svg
@@ -1,4 +1,25 @@
+
 Cursor
@@ -24,24 +45,4 @@
-

From 110102a60afed908bd514539a0bb792b84cdba53 Mon Sep 17 00:00:00 2001
From: Yevhenii Shcherbina
Date: Wed, 28 May 2025 08:21:16 -0400
Subject: [PATCH 002/296] fix: optimize queue position sql query (#17974)

Use only online provisioner daemons in the
`GetProvisionerJobsByIDsWithQueuePosition` query. This should improve the
query's performance.

---
 coderd/database/dbauthz/dbauthz.go          |  2 +-
 coderd/database/dbauthz/dbauthz_test.go     |  2 +-
 coderd/database/dbmem/dbmem.go              |  8 ++---
 coderd/database/dbmetrics/querymetrics.go   |  2 +-
 coderd/database/dbmock/dbmock.go            |  8 ++---
 coderd/database/querier.go                  |  2 +-
 coderd/database/querier_test.go             | 27 ++++++++++++----
 coderd/database/queries.sql.go              | 21 +++++++++----
 coderd/database/queries/provisionerjobs.sql | 12 ++++---
 coderd/templateversions.go                  | 35 ++++++++++++++++-----
 coderd/workspacebuilds.go                   |  5 ++-
 11 files changed, 88 insertions(+), 36 deletions(-)

diff --git a/coderd/database/dbauthz/dbauthz.go b/coderd/database/dbauthz/dbauthz.go
index a210599d17cc4..027f5b78d7a77 100644
--- a/coderd/database/dbauthz/dbauthz.go
+++ b/coderd/database/dbauthz/dbauthz.go
@@ -2341,7 +2341,7 @@ func (q *querier) GetProvisionerJobsByIDs(ctx context.Context, ids []uuid.UUID)
 	return provisionerJobs, nil
 }
 
-func (q *querier) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, ids []uuid.UUID) ([]database.GetProvisionerJobsByIDsWithQueuePositionRow, error) {
+func (q *querier) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, ids database.GetProvisionerJobsByIDsWithQueuePositionParams) ([]database.GetProvisionerJobsByIDsWithQueuePositionRow, error) {
 	// TODO: Remove this once we have a proper rbac check for provisioner jobs.
	
// Details in https://github.com/coder/coder/issues/16160 return q.db.GetProvisionerJobsByIDsWithQueuePosition(ctx, ids) diff --git a/coderd/database/dbauthz/dbauthz_test.go b/coderd/database/dbauthz/dbauthz_test.go index 703e51d739c47..3876ccac55dc6 100644 --- a/coderd/database/dbauthz/dbauthz_test.go +++ b/coderd/database/dbauthz/dbauthz_test.go @@ -4345,7 +4345,7 @@ func (s *MethodTestSuite) TestSystemFunctions() { check.Args([]uuid.UUID{uuid.New()}).Asserts(rbac.ResourceSystem, policy.ActionRead) })) s.Run("GetProvisionerJobsByIDsWithQueuePosition", s.Subtest(func(db database.Store, check *expects) { - check.Args([]uuid.UUID{}).Asserts() + check.Args(database.GetProvisionerJobsByIDsWithQueuePositionParams{}).Asserts() })) s.Run("GetReplicaByID", s.Subtest(func(db database.Store, check *expects) { check.Args(uuid.New()).Asserts(rbac.ResourceSystem, policy.ActionRead).Errors(sql.ErrNoRows) diff --git a/coderd/database/dbmem/dbmem.go b/coderd/database/dbmem/dbmem.go index a13dd33466bf6..7ba2a5a649a0e 100644 --- a/coderd/database/dbmem/dbmem.go +++ b/coderd/database/dbmem/dbmem.go @@ -4684,14 +4684,14 @@ func (q *FakeQuerier) GetProvisionerJobsByIDs(_ context.Context, ids []uuid.UUID return jobs, nil } -func (q *FakeQuerier) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, ids []uuid.UUID) ([]database.GetProvisionerJobsByIDsWithQueuePositionRow, error) { +func (q *FakeQuerier) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, arg database.GetProvisionerJobsByIDsWithQueuePositionParams) ([]database.GetProvisionerJobsByIDsWithQueuePositionRow, error) { q.mutex.RLock() defer q.mutex.RUnlock() - if ids == nil { - ids = []uuid.UUID{} + if arg.IDs == nil { + arg.IDs = []uuid.UUID{} } - return q.getProvisionerJobsByIDsWithQueuePositionLockedTagBasedQueue(ctx, ids) + return q.getProvisionerJobsByIDsWithQueuePositionLockedTagBasedQueue(ctx, arg.IDs) } func (q *FakeQuerier) GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner(ctx context.Context, arg database.GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerParams) ([]database.GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerRow, error) { diff --git a/coderd/database/dbmetrics/querymetrics.go b/coderd/database/dbmetrics/querymetrics.go index e35ec11b02453..6229c74af2abf 100644 --- a/coderd/database/dbmetrics/querymetrics.go +++ b/coderd/database/dbmetrics/querymetrics.go @@ -1215,7 +1215,7 @@ func (m queryMetricsStore) GetProvisionerJobsByIDs(ctx context.Context, ids []uu return jobs, err } -func (m queryMetricsStore) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, ids []uuid.UUID) ([]database.GetProvisionerJobsByIDsWithQueuePositionRow, error) { +func (m queryMetricsStore) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, ids database.GetProvisionerJobsByIDsWithQueuePositionParams) ([]database.GetProvisionerJobsByIDsWithQueuePositionRow, error) { start := time.Now() r0, r1 := m.s.GetProvisionerJobsByIDsWithQueuePosition(ctx, ids) m.queryLatencies.WithLabelValues("GetProvisionerJobsByIDsWithQueuePosition").Observe(time.Since(start).Seconds()) diff --git a/coderd/database/dbmock/dbmock.go b/coderd/database/dbmock/dbmock.go index 7a1fc0c4b2a6f..bf64897208963 100644 --- a/coderd/database/dbmock/dbmock.go +++ b/coderd/database/dbmock/dbmock.go @@ -2494,18 +2494,18 @@ func (mr *MockStoreMockRecorder) GetProvisionerJobsByIDs(ctx, ids any) *gomock.C } // GetProvisionerJobsByIDsWithQueuePosition mocks base method. 
-func (m *MockStore) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, ids []uuid.UUID) ([]database.GetProvisionerJobsByIDsWithQueuePositionRow, error) { +func (m *MockStore) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, arg database.GetProvisionerJobsByIDsWithQueuePositionParams) ([]database.GetProvisionerJobsByIDsWithQueuePositionRow, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProvisionerJobsByIDsWithQueuePosition", ctx, ids) + ret := m.ctrl.Call(m, "GetProvisionerJobsByIDsWithQueuePosition", ctx, arg) ret0, _ := ret[0].([]database.GetProvisionerJobsByIDsWithQueuePositionRow) ret1, _ := ret[1].(error) return ret0, ret1 } // GetProvisionerJobsByIDsWithQueuePosition indicates an expected call of GetProvisionerJobsByIDsWithQueuePosition. -func (mr *MockStoreMockRecorder) GetProvisionerJobsByIDsWithQueuePosition(ctx, ids any) *gomock.Call { +func (mr *MockStoreMockRecorder) GetProvisionerJobsByIDsWithQueuePosition(ctx, arg any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProvisionerJobsByIDsWithQueuePosition", reflect.TypeOf((*MockStore)(nil).GetProvisionerJobsByIDsWithQueuePosition), ctx, ids) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProvisionerJobsByIDsWithQueuePosition", reflect.TypeOf((*MockStore)(nil).GetProvisionerJobsByIDsWithQueuePosition), ctx, arg) } // GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner mocks base method. diff --git a/coderd/database/querier.go b/coderd/database/querier.go index ac7497b641a05..c24016867b31a 100644 --- a/coderd/database/querier.go +++ b/coderd/database/querier.go @@ -278,7 +278,7 @@ type sqlcQuerier interface { GetProvisionerJobByIDForUpdate(ctx context.Context, id uuid.UUID) (ProvisionerJob, error) GetProvisionerJobTimingsByJobID(ctx context.Context, jobID uuid.UUID) ([]ProvisionerJobTiming, error) GetProvisionerJobsByIDs(ctx context.Context, ids []uuid.UUID) ([]ProvisionerJob, error) - GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, ids []uuid.UUID) ([]GetProvisionerJobsByIDsWithQueuePositionRow, error) + GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, arg GetProvisionerJobsByIDsWithQueuePositionParams) ([]GetProvisionerJobsByIDsWithQueuePositionRow, error) GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner(ctx context.Context, arg GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerParams) ([]GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerRow, error) GetProvisionerJobsCreatedAfter(ctx context.Context, createdAt time.Time) ([]ProvisionerJob, error) // To avoid repeatedly attempting to reap the same jobs, we randomly order and limit to @max_jobs. 
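For reference, the interval math used by the new `online_provisioner_daemons`
CTE below is a standard Postgres idiom. A minimal sketch, assuming a 90000 ms
stale interval (the literal value is illustrative; the real query binds it as
a parameter):

```sql
-- A daemon counts as online when it has reported in within the stale interval.
-- 90000::bigint || ' ms' concatenates to the text '90000 ms', which casts to interval.
SELECT id, tags
FROM provisioner_daemons
WHERE last_seen_at IS NOT NULL
  AND last_seen_at >= (NOW() - (90000::bigint || ' ms')::interval);
```
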
diff --git a/coderd/database/querier_test.go b/coderd/database/querier_test.go index 5bafa58796b7a..b5ed8b019c1cb 100644 --- a/coderd/database/querier_test.go +++ b/coderd/database/querier_test.go @@ -15,7 +15,6 @@ import ( "github.com/stretchr/testify/require" "cdr.dev/slog/sloggers/slogtest" - "github.com/coder/coder/v2/coderd/coderdtest" "github.com/coder/coder/v2/coderd/database" "github.com/coder/coder/v2/coderd/database/db2sdk" @@ -27,6 +26,7 @@ import ( "github.com/coder/coder/v2/coderd/database/migrations" "github.com/coder/coder/v2/coderd/httpmw" "github.com/coder/coder/v2/coderd/prebuilds" + "github.com/coder/coder/v2/coderd/provisionerdserver" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/rbac/policy" "github.com/coder/coder/v2/provisionersdk" @@ -1268,7 +1268,10 @@ func TestQueuePosition(t *testing.T) { Tags: database.StringMap{}, }) - queued, err := db.GetProvisionerJobsByIDsWithQueuePosition(ctx, jobIDs) + queued, err := db.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: jobIDs, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) require.NoError(t, err) require.Len(t, queued, jobCount) sort.Slice(queued, func(i, j int) bool { @@ -1296,7 +1299,10 @@ func TestQueuePosition(t *testing.T) { require.NoError(t, err) require.Equal(t, jobs[0].ID, job.ID) - queued, err = db.GetProvisionerJobsByIDsWithQueuePosition(ctx, jobIDs) + queued, err = db.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: jobIDs, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) require.NoError(t, err) require.Len(t, queued, jobCount) sort.Slice(queued, func(i, j int) bool { @@ -2550,7 +2556,10 @@ func TestGetProvisionerJobsByIDsWithQueuePosition(t *testing.T) { } // When: we fetch the jobs by their IDs - actualJobs, err := db.GetProvisionerJobsByIDsWithQueuePosition(ctx, filteredJobIDs) + actualJobs, err := db.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: filteredJobIDs, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) require.NoError(t, err) require.Len(t, actualJobs, len(filteredJobs), "should return all unskipped jobs") @@ -2693,7 +2702,10 @@ func TestGetProvisionerJobsByIDsWithQueuePosition_MixedStatuses(t *testing.T) { } // When: we fetch the jobs by their IDs - actualJobs, err := db.GetProvisionerJobsByIDsWithQueuePosition(ctx, jobIDs) + actualJobs, err := db.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: jobIDs, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) require.NoError(t, err) require.Len(t, actualJobs, len(allJobs), "should return all jobs") @@ -2788,7 +2800,10 @@ func TestGetProvisionerJobsByIDsWithQueuePosition_OrderValidation(t *testing.T) } // When: we fetch the jobs by their IDs - actualJobs, err := db.GetProvisionerJobsByIDsWithQueuePosition(ctx, jobIDs) + actualJobs, err := db.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: jobIDs, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) require.NoError(t, err) require.Len(t, actualJobs, len(allJobs), "should return all jobs") diff --git a/coderd/database/queries.sql.go b/coderd/database/queries.sql.go index c166dd5fed89a..0d620706d011c 100644 --- a/coderd/database/queries.sql.go +++ 
b/coderd/database/queries.sql.go @@ -7663,17 +7663,21 @@ pending_jobs AS ( WHERE job_status = 'pending' ), +online_provisioner_daemons AS ( + SELECT id, tags FROM provisioner_daemons pd + WHERE pd.last_seen_at IS NOT NULL AND pd.last_seen_at >= (NOW() - ($2::bigint || ' ms')::interval) +), ranked_jobs AS ( -- Step 3: Rank only pending jobs based on provisioner availability SELECT pj.id, pj.created_at, - ROW_NUMBER() OVER (PARTITION BY pd.id ORDER BY pj.created_at ASC) AS queue_position, - COUNT(*) OVER (PARTITION BY pd.id) AS queue_size + ROW_NUMBER() OVER (PARTITION BY opd.id ORDER BY pj.created_at ASC) AS queue_position, + COUNT(*) OVER (PARTITION BY opd.id) AS queue_size FROM pending_jobs pj - INNER JOIN provisioner_daemons pd - ON provisioner_tagset_contains(pd.tags, pj.tags) -- Join only on the small pending set + INNER JOIN online_provisioner_daemons opd + ON provisioner_tagset_contains(opd.tags, pj.tags) -- Join only on the small pending set ), final_jobs AS ( -- Step 4: Compute best queue position and max queue size per job @@ -7705,6 +7709,11 @@ ORDER BY fj.created_at ` +type GetProvisionerJobsByIDsWithQueuePositionParams struct { + IDs []uuid.UUID `db:"ids" json:"ids"` + StaleIntervalMS int64 `db:"stale_interval_ms" json:"stale_interval_ms"` +} + type GetProvisionerJobsByIDsWithQueuePositionRow struct { ID uuid.UUID `db:"id" json:"id"` CreatedAt time.Time `db:"created_at" json:"created_at"` @@ -7713,8 +7722,8 @@ type GetProvisionerJobsByIDsWithQueuePositionRow struct { QueueSize int64 `db:"queue_size" json:"queue_size"` } -func (q *sqlQuerier) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, ids []uuid.UUID) ([]GetProvisionerJobsByIDsWithQueuePositionRow, error) { - rows, err := q.db.QueryContext(ctx, getProvisionerJobsByIDsWithQueuePosition, pq.Array(ids)) +func (q *sqlQuerier) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, arg GetProvisionerJobsByIDsWithQueuePositionParams) ([]GetProvisionerJobsByIDsWithQueuePositionRow, error) { + rows, err := q.db.QueryContext(ctx, getProvisionerJobsByIDsWithQueuePosition, pq.Array(arg.IDs), arg.StaleIntervalMS) if err != nil { return nil, err } diff --git a/coderd/database/queries/provisionerjobs.sql b/coderd/database/queries/provisionerjobs.sql index 88bacc705601c..f3902ba2ddd38 100644 --- a/coderd/database/queries/provisionerjobs.sql +++ b/coderd/database/queries/provisionerjobs.sql @@ -80,17 +80,21 @@ pending_jobs AS ( WHERE job_status = 'pending' ), +online_provisioner_daemons AS ( + SELECT id, tags FROM provisioner_daemons pd + WHERE pd.last_seen_at IS NOT NULL AND pd.last_seen_at >= (NOW() - (@stale_interval_ms::bigint || ' ms')::interval) +), ranked_jobs AS ( -- Step 3: Rank only pending jobs based on provisioner availability SELECT pj.id, pj.created_at, - ROW_NUMBER() OVER (PARTITION BY pd.id ORDER BY pj.created_at ASC) AS queue_position, - COUNT(*) OVER (PARTITION BY pd.id) AS queue_size + ROW_NUMBER() OVER (PARTITION BY opd.id ORDER BY pj.created_at ASC) AS queue_position, + COUNT(*) OVER (PARTITION BY opd.id) AS queue_size FROM pending_jobs pj - INNER JOIN provisioner_daemons pd - ON provisioner_tagset_contains(pd.tags, pj.tags) -- Join only on the small pending set + INNER JOIN online_provisioner_daemons opd + ON provisioner_tagset_contains(opd.tags, pj.tags) -- Join only on the small pending set ), final_jobs AS ( -- Step 4: Compute best queue position and max queue size per job diff --git a/coderd/templateversions.go b/coderd/templateversions.go index 7b682eac14ea0..8dd523374d69f 100644 --- 
a/coderd/templateversions.go +++ b/coderd/templateversions.go @@ -53,7 +53,10 @@ func (api *API) templateVersion(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() templateVersion := httpmw.TemplateVersionParam(r) - jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, []uuid.UUID{templateVersion.JobID}) + jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: []uuid.UUID{templateVersion.JobID}, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) if err != nil || len(jobs) == 0 { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error fetching provisioner job.", @@ -182,7 +185,10 @@ func (api *API) patchTemplateVersion(rw http.ResponseWriter, r *http.Request) { return } - jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, []uuid.UUID{templateVersion.JobID}) + jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: []uuid.UUID{templateVersion.JobID}, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) if err != nil || len(jobs) == 0 { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error fetching provisioner job.", @@ -733,7 +739,10 @@ func (api *API) fetchTemplateVersionDryRunJob(rw http.ResponseWriter, r *http.Re return database.GetProvisionerJobsByIDsWithQueuePositionRow{}, false } - jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, []uuid.UUID{jobUUID}) + jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: []uuid.UUID{jobUUID}, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) if httpapi.Is404Error(err) { httpapi.Write(ctx, rw, http.StatusNotFound, codersdk.Response{ Message: fmt.Sprintf("Provisioner job %q not found.", jobUUID), @@ -865,7 +874,10 @@ func (api *API) templateVersionsByTemplate(rw http.ResponseWriter, r *http.Reque for _, version := range versions { jobIDs = append(jobIDs, version.JobID) } - jobs, err := store.GetProvisionerJobsByIDsWithQueuePosition(ctx, jobIDs) + jobs, err := store.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: jobIDs, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) if err != nil { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error fetching provisioner job.", @@ -933,7 +945,10 @@ func (api *API) templateVersionByName(rw http.ResponseWriter, r *http.Request) { }) return } - jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, []uuid.UUID{templateVersion.JobID}) + jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: []uuid.UUID{templateVersion.JobID}, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) if err != nil || len(jobs) == 0 { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error fetching provisioner job.", @@ -1013,7 +1028,10 @@ func (api *API) templateVersionByOrganizationTemplateAndName(rw http.ResponseWri }) return } - jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, []uuid.UUID{templateVersion.JobID}) + jobs, err := 
api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: []uuid.UUID{templateVersion.JobID}, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) if err != nil || len(jobs) == 0 { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error fetching provisioner job.", @@ -1115,7 +1133,10 @@ func (api *API) previousTemplateVersionByOrganizationTemplateAndName(rw http.Res return } - jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, []uuid.UUID{previousTemplateVersion.JobID}) + jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: []uuid.UUID{previousTemplateVersion.JobID}, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) if err != nil || len(jobs) == 0 { httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ Message: "Internal error fetching provisioner job.", diff --git a/coderd/workspacebuilds.go b/coderd/workspacebuilds.go index 1fd0c95ff3a77..1d14c4518602c 100644 --- a/coderd/workspacebuilds.go +++ b/coderd/workspacebuilds.go @@ -797,7 +797,10 @@ func (api *API) workspaceBuildsData(ctx context.Context, workspaceBuilds []datab for _, build := range workspaceBuilds { jobIDs = append(jobIDs, build.JobID) } - jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, jobIDs) + jobs, err := api.Database.GetProvisionerJobsByIDsWithQueuePosition(ctx, database.GetProvisionerJobsByIDsWithQueuePositionParams{ + IDs: jobIDs, + StaleIntervalMS: provisionerdserver.StaleInterval.Milliseconds(), + }) if err != nil && !errors.Is(err, sql.ErrNoRows) { return workspaceBuildsData{}, xerrors.Errorf("get provisioner jobs: %w", err) } From 6e255c72c6cb5e2e5c7e0c66833cd1b8b4a6de0c Mon Sep 17 00:00:00 2001 From: Danielle Maywood Date: Wed, 28 May 2025 14:21:17 +0100 Subject: [PATCH 003/296] chore(coderd/database): enforce agent name unique within workspace build (#18052) Adds a database trigger that runs on insert and update of the `workspace_agents` table. The trigger ensures that the agent name is unique within the context of the workspace build it is being inserted into. --- coderd/database/dump.sql | 43 ++++ ...rkspace_agent_name_unique_trigger.down.sql | 2 + ...workspace_agent_name_unique_trigger.up.sql | 45 ++++ coderd/database/querier_test.go | 234 ++++++++++++++++++ coderd/insights_test.go | 8 +- 5 files changed, 328 insertions(+), 4 deletions(-) create mode 100644 coderd/database/migrations/000332_workspace_agent_name_unique_trigger.down.sql create mode 100644 coderd/database/migrations/000332_workspace_agent_name_unique_trigger.up.sql diff --git a/coderd/database/dump.sql b/coderd/database/dump.sql index a50abf13c5f78..acb9780b82ea6 100644 --- a/coderd/database/dump.sql +++ b/coderd/database/dump.sql @@ -316,6 +316,43 @@ CREATE TYPE workspace_transition AS ENUM ( 'delete' ); +CREATE FUNCTION check_workspace_agent_name_unique() RETURNS trigger + LANGUAGE plpgsql + AS $$ +DECLARE + workspace_build_id uuid; + agents_with_name int; +BEGIN + -- Find the workspace build the workspace agent is being inserted into. + SELECT workspace_builds.id INTO workspace_build_id + FROM workspace_resources + JOIN workspace_builds ON workspace_builds.job_id = workspace_resources.job_id + WHERE workspace_resources.id = NEW.resource_id; + + -- If the agent doesn't have a workspace build, we'll allow the insert. 
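+    -- (This can happen when the resource was created by a non-build job,
+    -- e.g. a template version import.)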
+ IF workspace_build_id IS NULL THEN + RETURN NEW; + END IF; + + -- Count how many agents in this workspace build already have the given agent name. + SELECT COUNT(*) INTO agents_with_name + FROM workspace_agents + JOIN workspace_resources ON workspace_resources.id = workspace_agents.resource_id + JOIN workspace_builds ON workspace_builds.job_id = workspace_resources.job_id + WHERE workspace_builds.id = workspace_build_id + AND workspace_agents.name = NEW.name + AND workspace_agents.id != NEW.id; + + -- If there's already an agent with this name, raise an error + IF agents_with_name > 0 THEN + RAISE EXCEPTION 'workspace agent name "%" already exists in this workspace build', NEW.name + USING ERRCODE = 'unique_violation'; + END IF; + + RETURN NEW; +END; +$$; + CREATE FUNCTION compute_notification_message_dedupe_hash() RETURNS trigger LANGUAGE plpgsql AS $$ @@ -2773,6 +2810,12 @@ CREATE TRIGGER update_notification_message_dedupe_hash BEFORE INSERT OR UPDATE O CREATE TRIGGER user_status_change_trigger AFTER INSERT OR UPDATE ON users FOR EACH ROW EXECUTE FUNCTION record_user_status_change(); +CREATE TRIGGER workspace_agent_name_unique_trigger BEFORE INSERT OR UPDATE OF name, resource_id ON workspace_agents FOR EACH ROW EXECUTE FUNCTION check_workspace_agent_name_unique(); + +COMMENT ON TRIGGER workspace_agent_name_unique_trigger ON workspace_agents IS 'Use a trigger instead of a unique constraint because existing data may violate +the uniqueness requirement. A trigger allows us to enforce uniqueness going +forward without requiring a migration to clean up historical data.'; + ALTER TABLE ONLY api_keys ADD CONSTRAINT api_keys_user_id_uuid_fkey FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE; diff --git a/coderd/database/migrations/000332_workspace_agent_name_unique_trigger.down.sql b/coderd/database/migrations/000332_workspace_agent_name_unique_trigger.down.sql new file mode 100644 index 0000000000000..916e1d469ed69 --- /dev/null +++ b/coderd/database/migrations/000332_workspace_agent_name_unique_trigger.down.sql @@ -0,0 +1,2 @@ +DROP TRIGGER IF EXISTS workspace_agent_name_unique_trigger ON workspace_agents; +DROP FUNCTION IF EXISTS check_workspace_agent_name_unique(); diff --git a/coderd/database/migrations/000332_workspace_agent_name_unique_trigger.up.sql b/coderd/database/migrations/000332_workspace_agent_name_unique_trigger.up.sql new file mode 100644 index 0000000000000..7b10fcdc1dcde --- /dev/null +++ b/coderd/database/migrations/000332_workspace_agent_name_unique_trigger.up.sql @@ -0,0 +1,45 @@ +CREATE OR REPLACE FUNCTION check_workspace_agent_name_unique() +RETURNS TRIGGER AS $$ +DECLARE + workspace_build_id uuid; + agents_with_name int; +BEGIN + -- Find the workspace build the workspace agent is being inserted into. + SELECT workspace_builds.id INTO workspace_build_id + FROM workspace_resources + JOIN workspace_builds ON workspace_builds.job_id = workspace_resources.job_id + WHERE workspace_resources.id = NEW.resource_id; + + -- If the agent doesn't have a workspace build, we'll allow the insert. + IF workspace_build_id IS NULL THEN + RETURN NEW; + END IF; + + -- Count how many agents in this workspace build already have the given agent name. 
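+    -- NEW.id is excluded below so that updating an existing agent's other
+    -- columns does not trip the check.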
+ SELECT COUNT(*) INTO agents_with_name + FROM workspace_agents + JOIN workspace_resources ON workspace_resources.id = workspace_agents.resource_id + JOIN workspace_builds ON workspace_builds.job_id = workspace_resources.job_id + WHERE workspace_builds.id = workspace_build_id + AND workspace_agents.name = NEW.name + AND workspace_agents.id != NEW.id; + + -- If there's already an agent with this name, raise an error + IF agents_with_name > 0 THEN + RAISE EXCEPTION 'workspace agent name "%" already exists in this workspace build', NEW.name + USING ERRCODE = 'unique_violation'; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER workspace_agent_name_unique_trigger + BEFORE INSERT OR UPDATE OF name, resource_id ON workspace_agents + FOR EACH ROW + EXECUTE FUNCTION check_workspace_agent_name_unique(); + +COMMENT ON TRIGGER workspace_agent_name_unique_trigger ON workspace_agents IS +'Use a trigger instead of a unique constraint because existing data may violate +the uniqueness requirement. A trigger allows us to enforce uniqueness going +forward without requiring a migration to clean up historical data.'; diff --git a/coderd/database/querier_test.go b/coderd/database/querier_test.go index b5ed8b019c1cb..6f7ee8fe88294 100644 --- a/coderd/database/querier_test.go +++ b/coderd/database/querier_test.go @@ -4,12 +4,14 @@ import ( "context" "database/sql" "encoding/json" + "errors" "fmt" "sort" "testing" "time" "github.com/google/uuid" + "github.com/lib/pq" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -4720,6 +4722,238 @@ func TestGetPresetsAtFailureLimit(t *testing.T) { }) } +func TestWorkspaceAgentNameUniqueTrigger(t *testing.T) { + t.Parallel() + + if !dbtestutil.WillUsePostgres() { + t.Skip("This test makes use of a database trigger not implemented in dbmem") + } + + createWorkspaceWithAgent := func(t *testing.T, db database.Store, org database.Organization, agentName string) (database.WorkspaceBuild, database.WorkspaceResource, database.WorkspaceAgent) { + t.Helper() + + user := dbgen.User(t, db, database.User{}) + template := dbgen.Template(t, db, database.Template{ + OrganizationID: org.ID, + CreatedBy: user.ID, + }) + templateVersion := dbgen.TemplateVersion(t, db, database.TemplateVersion{ + TemplateID: uuid.NullUUID{Valid: true, UUID: template.ID}, + OrganizationID: org.ID, + CreatedBy: user.ID, + }) + workspace := dbgen.Workspace(t, db, database.WorkspaceTable{ + OrganizationID: org.ID, + TemplateID: template.ID, + OwnerID: user.ID, + }) + job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{ + Type: database.ProvisionerJobTypeWorkspaceBuild, + OrganizationID: org.ID, + }) + build := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{ + BuildNumber: 1, + JobID: job.ID, + WorkspaceID: workspace.ID, + TemplateVersionID: templateVersion.ID, + }) + resource := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{ + JobID: build.JobID, + }) + agent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{ + ResourceID: resource.ID, + Name: agentName, + }) + + return build, resource, agent + } + + t.Run("DuplicateNamesInSameWorkspaceResource", func(t *testing.T) { + t.Parallel() + + db, _ := dbtestutil.NewDB(t) + org := dbgen.Organization(t, db, database.Organization{}) + ctx := testutil.Context(t, testutil.WaitShort) + + // Given: A workspace with an agent + _, resource, _ := createWorkspaceWithAgent(t, db, org, "duplicate-agent") + + // When: Another agent is created for that 
workspace with the same name. + _, err := db.InsertWorkspaceAgent(ctx, database.InsertWorkspaceAgentParams{ + ID: uuid.New(), + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + Name: "duplicate-agent", // Same name as agent1 + ResourceID: resource.ID, + AuthToken: uuid.New(), + Architecture: "amd64", + OperatingSystem: "linux", + APIKeyScope: database.AgentKeyScopeEnumAll, + }) + + // Then: We expect it to fail. + require.Error(t, err) + var pqErr *pq.Error + require.True(t, errors.As(err, &pqErr)) + require.Equal(t, pq.ErrorCode("23505"), pqErr.Code) // unique_violation + require.Contains(t, pqErr.Message, `workspace agent name "duplicate-agent" already exists in this workspace build`) + }) + + t.Run("DuplicateNamesInSameProvisionerJob", func(t *testing.T) { + t.Parallel() + + db, _ := dbtestutil.NewDB(t) + org := dbgen.Organization(t, db, database.Organization{}) + ctx := testutil.Context(t, testutil.WaitShort) + + // Given: A workspace with an agent + _, resource, agent := createWorkspaceWithAgent(t, db, org, "duplicate-agent") + + // When: A child agent is created for that workspace with the same name. + _, err := db.InsertWorkspaceAgent(ctx, database.InsertWorkspaceAgentParams{ + ID: uuid.New(), + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + Name: agent.Name, + ResourceID: resource.ID, + AuthToken: uuid.New(), + Architecture: "amd64", + OperatingSystem: "linux", + APIKeyScope: database.AgentKeyScopeEnumAll, + }) + + // Then: We expect it to fail. + require.Error(t, err) + var pqErr *pq.Error + require.True(t, errors.As(err, &pqErr)) + require.Equal(t, pq.ErrorCode("23505"), pqErr.Code) // unique_violation + require.Contains(t, pqErr.Message, `workspace agent name "duplicate-agent" already exists in this workspace build`) + }) + + t.Run("DuplicateChildNamesOverMultipleResources", func(t *testing.T) { + t.Parallel() + + db, _ := dbtestutil.NewDB(t) + org := dbgen.Organization(t, db, database.Organization{}) + ctx := testutil.Context(t, testutil.WaitShort) + + // Given: A workspace with two agents + _, resource1, agent1 := createWorkspaceWithAgent(t, db, org, "parent-agent-1") + + resource2 := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{JobID: resource1.JobID}) + agent2 := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{ + ResourceID: resource2.ID, + Name: "parent-agent-2", + }) + + // Given: One agent has a child agent + agent1Child := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{ + ParentID: uuid.NullUUID{Valid: true, UUID: agent1.ID}, + Name: "child-agent", + ResourceID: resource1.ID, + }) + + // When: A child agent is inserted for the other parent. + _, err := db.InsertWorkspaceAgent(ctx, database.InsertWorkspaceAgentParams{ + ID: uuid.New(), + ParentID: uuid.NullUUID{Valid: true, UUID: agent2.ID}, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + Name: agent1Child.Name, + ResourceID: resource2.ID, + AuthToken: uuid.New(), + Architecture: "amd64", + OperatingSystem: "linux", + APIKeyScope: database.AgentKeyScopeEnumAll, + }) + + // Then: We expect it to fail. 
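+		// The trigger surfaces the conflict as a Postgres unique_violation
+		// (code 23505), just like a real unique constraint would.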
+ require.Error(t, err) + var pqErr *pq.Error + require.True(t, errors.As(err, &pqErr)) + require.Equal(t, pq.ErrorCode("23505"), pqErr.Code) // unique_violation + require.Contains(t, pqErr.Message, `workspace agent name "child-agent" already exists in this workspace build`) + }) + + t.Run("SameNamesInDifferentWorkspaces", func(t *testing.T) { + t.Parallel() + + agentName := "same-name-different-workspace" + + db, _ := dbtestutil.NewDB(t) + org := dbgen.Organization(t, db, database.Organization{}) + + // Given: A workspace with an agent + _, _, agent1 := createWorkspaceWithAgent(t, db, org, agentName) + require.Equal(t, agentName, agent1.Name) + + // When: A second workspace is created with an agent having the same name + _, _, agent2 := createWorkspaceWithAgent(t, db, org, agentName) + require.Equal(t, agentName, agent2.Name) + + // Then: We expect there to be different agents with the same name. + require.NotEqual(t, agent1.ID, agent2.ID) + require.Equal(t, agent1.Name, agent2.Name) + }) + + t.Run("NullWorkspaceID", func(t *testing.T) { + t.Parallel() + + db, _ := dbtestutil.NewDB(t) + org := dbgen.Organization(t, db, database.Organization{}) + ctx := testutil.Context(t, testutil.WaitShort) + + // Given: A resource that does not belong to a workspace build (simulating template import) + orphanJob := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{ + Type: database.ProvisionerJobTypeTemplateVersionImport, + OrganizationID: org.ID, + }) + orphanResource := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{ + JobID: orphanJob.ID, + }) + + // And this resource has a workspace agent. + agent1, err := db.InsertWorkspaceAgent(ctx, database.InsertWorkspaceAgentParams{ + ID: uuid.New(), + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + Name: "orphan-agent", + ResourceID: orphanResource.ID, + AuthToken: uuid.New(), + Architecture: "amd64", + OperatingSystem: "linux", + APIKeyScope: database.AgentKeyScopeEnumAll, + }) + require.NoError(t, err) + require.Equal(t, "orphan-agent", agent1.Name) + + // When: We created another resource that does not belong to a workspace build. + orphanJob2 := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{ + Type: database.ProvisionerJobTypeTemplateVersionImport, + OrganizationID: org.ID, + }) + orphanResource2 := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{ + JobID: orphanJob2.ID, + }) + + // Then: We expect to be able to create an agent in this new resource that has the same name. + agent2, err := db.InsertWorkspaceAgent(ctx, database.InsertWorkspaceAgentParams{ + ID: uuid.New(), + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + Name: "orphan-agent", // Same name as agent1 + ResourceID: orphanResource2.ID, + AuthToken: uuid.New(), + Architecture: "amd64", + OperatingSystem: "linux", + APIKeyScope: database.AgentKeyScopeEnumAll, + }) + require.NoError(t, err) + require.Equal(t, "orphan-agent", agent2.Name) + require.NotEqual(t, agent1.ID, agent2.ID) + }) +} + func requireUsersMatch(t testing.TB, expected []database.User, found []database.GetUsersRow, msg string) { t.Helper() require.ElementsMatch(t, expected, database.ConvertUserRows(found), msg) diff --git a/coderd/insights_test.go b/coderd/insights_test.go index 47a80df528501..693bb48811acc 100644 --- a/coderd/insights_test.go +++ b/coderd/insights_test.go @@ -609,8 +609,8 @@ func TestTemplateInsights_Golden(t *testing.T) { Name: "example", Type: "aws_instance", Agents: []*proto.Agent{{ - Id: uuid.NewString(), // Doesn't matter, not used in DB. 
-					Name: "dev",
+					Id:   uuid.NewString(), // Doesn't matter, not used in DB.
+					Name: fmt.Sprintf("dev-%d", len(resources)), // Ensure unique name per agent
 					Auth: &proto.Agent_Token{
 						Token: authToken.String(),
 					},
@@ -1525,8 +1525,8 @@ func TestUserActivityInsights_Golden(t *testing.T) {
 					Name: "example",
 					Type: "aws_instance",
 					Agents: []*proto.Agent{{
-						Id:   uuid.NewString(), // Doesn't matter, not used in DB.
-						Name: "dev",
+						Id:   uuid.NewString(), // Doesn't matter, not used in DB.
+						Name: fmt.Sprintf("dev-%d", len(resources)), // Ensure unique name per agent
 						Auth: &proto.Agent_Token{
 							Token: authToken.String(),
 						},

From ca8660cea6e3c914217f3c794efd9e376316f0f7 Mon Sep 17 00:00:00 2001
From: Steven Masley
Date: Wed, 28 May 2025 10:00:39 -0500
Subject: [PATCH 004/296] chore: keep previous workspace build parameters for
 dynamic params (#18059)

The existing code persists all static parameters and their values, using the
previous build as the source when no new inputs are provided.

Dynamic params do not have their parameter state saved to disk, so instead
all previous values are always persisted, and new inputs override them.

---
 coderd/parameters_test.go     | 97 ++++++++++++++++++++++++++++++++---
 coderd/wsbuilder/wsbuilder.go | 28 ++++++++--
 2 files changed, 114 insertions(+), 11 deletions(-)

diff --git a/coderd/parameters_test.go b/coderd/parameters_test.go
index 91809d3a037d6..98a5d546eaffc 100644
--- a/coderd/parameters_test.go
+++ b/coderd/parameters_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/coder/coder/v2/coderd/database/dbtestutil"
 	"github.com/coder/coder/v2/coderd/database/pubsub"
 	"github.com/coder/coder/v2/coderd/rbac"
+	"github.com/coder/coder/v2/coderd/util/ptr"
 	"github.com/coder/coder/v2/codersdk"
 	"github.com/coder/coder/v2/codersdk/wsjson"
 	"github.com/coder/coder/v2/provisioner/echo"
@@ -211,6 +212,86 @@ func TestDynamicParametersWithTerraformValues(t *testing.T) {
 		require.Zero(t, setup.api.FileCache.Count())
 	})
 
+	t.Run("RebuildParameters", func(t *testing.T) {
+		t.Parallel()
+
+		dynamicParametersTerraformSource, err := os.ReadFile("testdata/parameters/modules/main.tf")
+		require.NoError(t, err)
+
+		modulesArchive, err := terraform.GetModulesArchive(os.DirFS("testdata/parameters/modules"))
+		require.NoError(t, err)
+
+		setup := setupDynamicParamsTest(t, setupDynamicParamsTestParams{
+			provisionerDaemonVersion: provProto.CurrentVersion.String(),
+			mainTF:                   dynamicParametersTerraformSource,
+			modulesArchive:           modulesArchive,
+			plan:                     nil,
+			static:                   nil,
+		})
+
+		ctx := testutil.Context(t, testutil.WaitMedium)
+		stream := setup.stream
+		previews := stream.Chan()
+
+		// Should see the output of the module represented
+		preview := testutil.RequireReceive(ctx, t, previews)
+		require.Equal(t, -1, preview.ID)
+		require.Empty(t, preview.Diagnostics)
+
+		require.Len(t, preview.Parameters, 1)
+		require.Equal(t, "jetbrains_ide", preview.Parameters[0].Name)
+		require.True(t, preview.Parameters[0].Value.Valid)
+		require.Equal(t, "CL", preview.Parameters[0].Value.Value)
+		_ = stream.Close(websocket.StatusGoingAway)
+
+		wrk := coderdtest.CreateWorkspace(t, setup.client, setup.template.ID, func(request *codersdk.CreateWorkspaceRequest) {
+			request.RichParameterValues = []codersdk.WorkspaceBuildParameter{
+				{
+					Name:  preview.Parameters[0].Name,
+					Value: "GO",
+				},
+			}
+		})
+		coderdtest.AwaitWorkspaceBuildJobCompleted(t, setup.client, wrk.LatestBuild.ID)
+
+		params, err := setup.client.WorkspaceBuildParameters(ctx, wrk.LatestBuild.ID)
+		require.NoError(t, err)
+		require.Len(t, params, 1)
+		require.Equal(t, 
"jetbrains_ide", params[0].Name) + require.Equal(t, "GO", params[0].Value) + + // A helper function to assert params + doTransition := func(t *testing.T, trans codersdk.WorkspaceTransition) { + t.Helper() + + fooVal := coderdtest.RandomUsername(t) + bld, err := setup.client.CreateWorkspaceBuild(ctx, wrk.ID, codersdk.CreateWorkspaceBuildRequest{ + TemplateVersionID: setup.template.ActiveVersionID, + Transition: trans, + RichParameterValues: []codersdk.WorkspaceBuildParameter{ + // No validation, so this should work as is. + // Overwrite the value on each transition + {Name: "foo", Value: fooVal}, + }, + EnableDynamicParameters: ptr.Ref(true), + }) + require.NoError(t, err) + coderdtest.AwaitWorkspaceBuildJobCompleted(t, setup.client, wrk.LatestBuild.ID) + + latestParams, err := setup.client.WorkspaceBuildParameters(ctx, bld.ID) + require.NoError(t, err) + require.ElementsMatch(t, latestParams, []codersdk.WorkspaceBuildParameter{ + {Name: "jetbrains_ide", Value: "GO"}, + {Name: "foo", Value: fooVal}, + }) + } + + // Restart the workspace, then delete. Asserting params on all builds. + doTransition(t, codersdk.WorkspaceTransitionStop) + doTransition(t, codersdk.WorkspaceTransitionStart) + doTransition(t, codersdk.WorkspaceTransitionDelete) + }) + t.Run("BadOwner", func(t *testing.T) { t.Parallel() @@ -266,9 +347,10 @@ type setupDynamicParamsTestParams struct { } type dynamicParamsTest struct { - client *codersdk.Client - api *coderd.API - stream *wsjson.Stream[codersdk.DynamicParametersResponse, codersdk.DynamicParametersRequest] + client *codersdk.Client + api *coderd.API + stream *wsjson.Stream[codersdk.DynamicParametersResponse, codersdk.DynamicParametersRequest] + template codersdk.Template } func setupDynamicParamsTest(t *testing.T, args setupDynamicParamsTestParams) dynamicParamsTest { @@ -300,7 +382,7 @@ func setupDynamicParamsTest(t *testing.T, args setupDynamicParamsTestParams) dyn version := coderdtest.CreateTemplateVersion(t, templateAdmin, owner.OrganizationID, files) coderdtest.AwaitTemplateVersionJobCompleted(t, templateAdmin, version.ID) - _ = coderdtest.CreateTemplate(t, templateAdmin, owner.OrganizationID, version.ID) + tpl := coderdtest.CreateTemplate(t, templateAdmin, owner.OrganizationID, version.ID) ctx := testutil.Context(t, testutil.WaitShort) stream, err := templateAdmin.TemplateVersionDynamicParameters(ctx, version.ID) @@ -321,9 +403,10 @@ func setupDynamicParamsTest(t *testing.T, args setupDynamicParamsTestParams) dyn }) return dynamicParamsTest{ - client: ownerClient, - stream: stream, - api: api, + client: ownerClient, + api: api, + stream: stream, + template: tpl, } } diff --git a/coderd/wsbuilder/wsbuilder.go b/coderd/wsbuilder/wsbuilder.go index 46035f28dda77..bcc2cef40ebdc 100644 --- a/coderd/wsbuilder/wsbuilder.go +++ b/coderd/wsbuilder/wsbuilder.go @@ -623,6 +623,11 @@ func (b *Builder) getParameters() (names, values []string, err error) { return nil, nil, BuildError{http.StatusBadRequest, "Unable to build workspace with unsupported parameters", err} } + lastBuildParameterValues := db2sdk.WorkspaceBuildParameters(lastBuildParameters) + resolver := codersdk.ParameterResolver{ + Rich: lastBuildParameterValues, + } + // Dynamic parameters skip all parameter validation. // Deleting a workspace also should skip parameter validation. // Pass the user's input as is. @@ -632,19 +637,34 @@ func (b *Builder) getParameters() (names, values []string, err error) { // conditional parameter existence, the static frame of reference // is not sufficient. 
So assume the user is correct, or pull in the // dynamic param code to find the actual parameters. + latestValues := make(map[string]string, len(b.richParameterValues)) + for _, latest := range b.richParameterValues { + latestValues[latest.Name] = latest.Value + } + + // Merge the inputs with values from the previous build. + for _, last := range lastBuildParameterValues { + // TODO: Ideally we use the resolver here and look at parameter + // fields such as 'ephemeral'. This requires loading the terraform + // files. For now, just send the previous inputs as is. + if _, exists := latestValues[last.Name]; exists { + // latestValues take priority, so skip this previous value. + continue + } + names = append(names, last.Name) + values = append(values, last.Value) + } + for _, value := range b.richParameterValues { names = append(names, value.Name) values = append(values, value.Value) } + b.parameterNames = &names b.parameterValues = &values return names, values, nil } - resolver := codersdk.ParameterResolver{ - Rich: db2sdk.WorkspaceBuildParameters(lastBuildParameters), - } - for _, templateVersionParameter := range templateVersionParameters { tvp, err := db2sdk.TemplateVersionParameter(templateVersionParameter) if err != nil { From 6a2f22abf7301843bccf6f56e811c99f2707e738 Mon Sep 17 00:00:00 2001 From: Atif Ali Date: Wed, 28 May 2025 08:33:12 -0700 Subject: [PATCH 005/296] chore: add JetBrains icon (#18073) --- site/src/theme/icons.json | 1 + site/static/icon/jetbrains.svg | 1 + 2 files changed, 2 insertions(+) create mode 100644 site/static/icon/jetbrains.svg diff --git a/site/src/theme/icons.json b/site/src/theme/icons.json index 8e92dd9a48198..4e162f38b6bb5 100644 --- a/site/src/theme/icons.json +++ b/site/src/theme/icons.json @@ -61,6 +61,7 @@ "javascript.svg", "jax.svg", "jetbrains-toolbox.svg", + "jetbrains.svg", "jfrog.svg", "jupyter.svg", "k8s.png", diff --git a/site/static/icon/jetbrains.svg b/site/static/icon/jetbrains.svg new file mode 100644 index 0000000000000..b281f962fca81 --- /dev/null +++ b/site/static/icon/jetbrains.svg @@ -0,0 +1 @@ + \ No newline at end of file From b330c0803ce75e29d1d8c344469cbd3fa9082b48 Mon Sep 17 00:00:00 2001 From: Yevhenii Shcherbina Date: Wed, 28 May 2025 14:18:32 -0400 Subject: [PATCH 006/296] fix: reimplement reporting of preset-hard-limited metric (#18055) Addresses concerns raised in https://github.com/coder/coder/pull/18045 --- coderd/prebuilds/global_snapshot.go | 35 ++++++---- .../coderd/prebuilds/metricscollector.go | 11 +--- enterprise/coderd/prebuilds/reconcile.go | 64 +++++++++++++------ enterprise/coderd/prebuilds/reconcile_test.go | 7 +- 4 files changed, 72 insertions(+), 45 deletions(-) diff --git a/coderd/prebuilds/global_snapshot.go b/coderd/prebuilds/global_snapshot.go index f4c094289b54e..976461780fd07 100644 --- a/coderd/prebuilds/global_snapshot.go +++ b/coderd/prebuilds/global_snapshot.go @@ -12,11 +12,11 @@ import ( // GlobalSnapshot represents a full point-in-time snapshot of state relating to prebuilds across all templates. 
type GlobalSnapshot struct { - Presets []database.GetTemplatePresetsWithPrebuildsRow - RunningPrebuilds []database.GetRunningPrebuiltWorkspacesRow - PrebuildsInProgress []database.CountInProgressPrebuildsRow - Backoffs []database.GetPresetsBackoffRow - HardLimitedPresets []database.GetPresetsAtFailureLimitRow + Presets []database.GetTemplatePresetsWithPrebuildsRow + RunningPrebuilds []database.GetRunningPrebuiltWorkspacesRow + PrebuildsInProgress []database.CountInProgressPrebuildsRow + Backoffs []database.GetPresetsBackoffRow + HardLimitedPresetsMap map[uuid.UUID]database.GetPresetsAtFailureLimitRow } func NewGlobalSnapshot( @@ -26,12 +26,17 @@ func NewGlobalSnapshot( backoffs []database.GetPresetsBackoffRow, hardLimitedPresets []database.GetPresetsAtFailureLimitRow, ) GlobalSnapshot { + hardLimitedPresetsMap := make(map[uuid.UUID]database.GetPresetsAtFailureLimitRow, len(hardLimitedPresets)) + for _, preset := range hardLimitedPresets { + hardLimitedPresetsMap[preset.PresetID] = preset + } + return GlobalSnapshot{ - Presets: presets, - RunningPrebuilds: runningPrebuilds, - PrebuildsInProgress: prebuildsInProgress, - Backoffs: backoffs, - HardLimitedPresets: hardLimitedPresets, + Presets: presets, + RunningPrebuilds: runningPrebuilds, + PrebuildsInProgress: prebuildsInProgress, + Backoffs: backoffs, + HardLimitedPresetsMap: hardLimitedPresetsMap, } } @@ -66,9 +71,7 @@ func (s GlobalSnapshot) FilterByPreset(presetID uuid.UUID) (*PresetSnapshot, err backoffPtr = &backoff } - _, isHardLimited := slice.Find(s.HardLimitedPresets, func(row database.GetPresetsAtFailureLimitRow) bool { - return row.PresetID == preset.ID - }) + _, isHardLimited := s.HardLimitedPresetsMap[preset.ID] return &PresetSnapshot{ Preset: preset, @@ -80,6 +83,12 @@ func (s GlobalSnapshot) FilterByPreset(presetID uuid.UUID) (*PresetSnapshot, err }, nil } +func (s GlobalSnapshot) IsHardLimited(presetID uuid.UUID) bool { + _, isHardLimited := s.HardLimitedPresetsMap[presetID] + + return isHardLimited +} + // filterExpiredWorkspaces splits running workspaces into expired and non-expired // based on the preset's TTL. // If TTL is missing or zero, all workspaces are considered non-expired. diff --git a/enterprise/coderd/prebuilds/metricscollector.go b/enterprise/coderd/prebuilds/metricscollector.go index 90257c26dd580..4499849ffde0a 100644 --- a/enterprise/coderd/prebuilds/metricscollector.go +++ b/enterprise/coderd/prebuilds/metricscollector.go @@ -280,16 +280,9 @@ func (k hardLimitedPresetKey) String() string { return fmt.Sprintf("%s:%s:%s", k.orgName, k.templateName, k.presetName) } -// nolint:revive // isHardLimited determines if the preset should be reported as hard-limited in Prometheus. 
-func (mc *MetricsCollector) trackHardLimitedStatus(orgName, templateName, presetName string, isHardLimited bool) { +func (mc *MetricsCollector) registerHardLimitedPresets(isPresetHardLimited map[hardLimitedPresetKey]bool) { mc.isPresetHardLimitedMu.Lock() defer mc.isPresetHardLimitedMu.Unlock() - key := hardLimitedPresetKey{orgName: orgName, templateName: templateName, presetName: presetName} - - if isHardLimited { - mc.isPresetHardLimited[key] = true - } else { - delete(mc.isPresetHardLimited, key) - } + mc.isPresetHardLimited = isPresetHardLimited } diff --git a/enterprise/coderd/prebuilds/reconcile.go b/enterprise/coderd/prebuilds/reconcile.go index 90c97afa26d69..ebfcfaf2b3182 100644 --- a/enterprise/coderd/prebuilds/reconcile.go +++ b/enterprise/coderd/prebuilds/reconcile.go @@ -256,6 +256,9 @@ func (c *StoreReconciler) ReconcileAll(ctx context.Context) error { if err != nil { return xerrors.Errorf("determine current snapshot: %w", err) } + + c.reportHardLimitedPresets(snapshot) + if len(snapshot.Presets) == 0 { logger.Debug(ctx, "no templates found with prebuilds configured") return nil @@ -296,6 +299,49 @@ func (c *StoreReconciler) ReconcileAll(ctx context.Context) error { return err } +func (c *StoreReconciler) reportHardLimitedPresets(snapshot *prebuilds.GlobalSnapshot) { + // presetsMap is a map from key (orgName:templateName:presetName) to list of corresponding presets. + // Multiple versions of a preset can exist with the same orgName, templateName, and presetName, + // because templates can have multiple versions — or deleted templates can share the same name. + presetsMap := make(map[hardLimitedPresetKey][]database.GetTemplatePresetsWithPrebuildsRow) + for _, preset := range snapshot.Presets { + key := hardLimitedPresetKey{ + orgName: preset.OrganizationName, + templateName: preset.TemplateName, + presetName: preset.Name, + } + + presetsMap[key] = append(presetsMap[key], preset) + } + + // Report a preset as hard-limited only if all the following conditions are met: + // - The preset is marked as hard-limited + // - The preset is using the active version of its template, and the template has not been deleted + // + // The second condition is important because a hard-limited preset that has become outdated is no longer relevant. + // Its associated prebuilt workspaces were likely deleted, and it's not meaningful to continue reporting it + // as hard-limited to the admin. + // + // This approach accounts for all relevant scenarios: + // Scenario #1: The admin created a new template version with the same preset names. + // Scenario #2: The admin created a new template version and renamed the presets. + // Scenario #3: The admin deleted a template version that contained hard-limited presets. + // + // In all of these cases, only the latest and non-deleted presets will be reported. + // All other presets will be ignored and eventually removed from Prometheus. + isPresetHardLimited := make(map[hardLimitedPresetKey]bool) + for key, presets := range presetsMap { + for _, preset := range presets { + if preset.UsingActiveVersion && !preset.Deleted && snapshot.IsHardLimited(preset.ID) { + isPresetHardLimited[key] = true + break + } + } + } + + c.metrics.registerHardLimitedPresets(isPresetHardLimited) +} + // SnapshotState captures the current state of all prebuilds across templates. 
func (c *StoreReconciler) SnapshotState(ctx context.Context, store database.Store) (*prebuilds.GlobalSnapshot, error) { if err := ctx.Err(); err != nil { @@ -361,24 +407,6 @@ func (c *StoreReconciler) ReconcilePreset(ctx context.Context, ps prebuilds.Pres slog.F("preset_name", ps.Preset.Name), ) - // Report a metric only if the preset uses the latest version of the template and the template is not deleted. - // This avoids conflicts between metrics from old and new template versions. - // - // NOTE: Multiple versions of a preset can exist with the same orgName, templateName, and presetName, - // because templates can have multiple versions — or deleted templates can share the same name. - // - // The safest approach is to report the metric only for the latest version of the preset. - // When a new template version is released, the metric for the new preset should overwrite - // the old value in Prometheus. - // - // However, there’s one edge case: if an admin creates a template, it becomes hard-limited, - // then deletes the template and never creates another with the same name, - // the old preset will continue to be reported as hard-limited — - // even though it’s deleted. This will persist until `coderd` is restarted. - if ps.Preset.UsingActiveVersion && !ps.Preset.Deleted { - c.metrics.trackHardLimitedStatus(ps.Preset.OrganizationName, ps.Preset.TemplateName, ps.Preset.Name, ps.IsHardLimited) - } - // If the preset reached the hard failure limit for the first time during this iteration: // - Mark it as hard-limited in the database // - Send notifications to template admins diff --git a/enterprise/coderd/prebuilds/reconcile_test.go b/enterprise/coderd/prebuilds/reconcile_test.go index 7de22db64c8be..a0e1f9726d7d5 100644 --- a/enterprise/coderd/prebuilds/reconcile_test.go +++ b/enterprise/coderd/prebuilds/reconcile_test.go @@ -1034,8 +1034,7 @@ func TestHardLimitedPresetShouldNotBlockDeletion(t *testing.T) { require.Equal(t, database.WorkspaceTransitionDelete, workspaceBuilds[0].Transition) require.Equal(t, database.WorkspaceTransitionStart, workspaceBuilds[1].Transition) - // The metric is still set to 1, even though the preset has become outdated. - // This happens because the old value hasn't been overwritten by a newer preset yet. + // Metric is deleted after preset became outdated. 
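+		// (Only presets on the active, non-deleted template version are
+		// registered by reportHardLimitedPresets.)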
mf, err = registry.Gather() require.NoError(t, err) metric = findMetric(mf, prebuilds.MetricPresetHardLimitedGauge, map[string]string{ @@ -1043,9 +1042,7 @@ func TestHardLimitedPresetShouldNotBlockDeletion(t *testing.T) { "preset_name": preset.Name, "org_name": org.Name, }) - require.NotNil(t, metric) - require.NotNil(t, metric.GetGauge()) - require.EqualValues(t, 1, metric.GetGauge().GetValue()) + require.Nil(t, metric) }) } } From bc3b8d5a51145d93c6dbf00bdaedb533d845c708 Mon Sep 17 00:00:00 2001 From: Bruno Quaresma Date: Wed, 28 May 2025 15:19:05 -0300 Subject: [PATCH 007/296] feat: add task page (#18076) **Demo:** ![image](https://github.com/user-attachments/assets/ca59ba92-a73a-4613-ae41-910f3f0455d2) --- site/src/components/Table/Table.tsx | 28 +- site/src/modules/apps/AppStatusIcon.tsx | 47 ++ site/src/modules/tasks/tasks.ts | 8 + .../WorkspaceAppStatus/WorkspaceAppStatus.tsx | 6 +- site/src/pages/TaskPage/TaskPage.stories.tsx | 150 +++++++ site/src/pages/TaskPage/TaskPage.tsx | 407 ++++++++++++++++++ site/src/pages/TasksPage/TasksPage.tsx | 35 +- site/src/pages/TerminalPage/TerminalPage.tsx | 6 +- site/src/pages/WorkspacePage/AppStatuses.tsx | 51 +-- site/src/router.tsx | 2 + 10 files changed, 676 insertions(+), 64 deletions(-) create mode 100644 site/src/modules/apps/AppStatusIcon.tsx create mode 100644 site/src/modules/tasks/tasks.ts create mode 100644 site/src/pages/TaskPage/TaskPage.stories.tsx create mode 100644 site/src/pages/TaskPage/TaskPage.tsx diff --git a/site/src/components/Table/Table.tsx b/site/src/components/Table/Table.tsx index c20fe99428e09..b642655f5539b 100644 --- a/site/src/components/Table/Table.tsx +++ b/site/src/components/Table/Table.tsx @@ -3,6 +3,7 @@ * @see {@link https://ui.shadcn.com/docs/components/table} */ +import { type VariantProps, cva } from "class-variance-authority"; import * as React from "react"; import { cn } from "utils/cn"; @@ -60,15 +61,38 @@ const TableFooter = React.forwardRef< /> )); +const tableRowVariants = cva( + [ + "border-0 border-b border-solid border-border transition-colors", + "data-[state=selected]:bg-muted", + ], + { + variants: { + hover: { + false: null, + true: cn([ + "cursor-pointer hover:outline focus:outline outline-1 -outline-offset-1 outline-border-hover", + "first:rounded-t-md last:rounded-b-md", + ]), + }, + }, + defaultVariants: { + hover: false, + }, + }, +); + export const TableRow = React.forwardRef< HTMLTableRowElement, - React.HTMLAttributes ->(({ className, ...props }, ref) => ( + React.HTMLAttributes & + VariantProps +>(({ className, hover, ...props }, ref) => ( = ({ + status, + latest, + className: customClassName, +}) => { + const className = cn(["size-4 shrink-0", customClassName]); + + switch (status.state) { + case "complete": + return ( + + ); + case "failure": + return ( + + ); + case "working": + return latest ? 
+				{/* [working icon elided in source] */}
+			) : (
+				{/* [idle icon elided in source] */}
+			);
+		default:
+			return (
+				{/* [fallback icon elided in source] */}
+			);
+	}
+};

diff --git a/site/src/modules/tasks/tasks.ts b/site/src/modules/tasks/tasks.ts
new file mode 100644
index 0000000000000..c48f5ec1c3f22
--- /dev/null
+++ b/site/src/modules/tasks/tasks.ts
@@ -0,0 +1,8 @@
+import type { Workspace } from "api/typesGenerated";
+
+export const AI_PROMPT_PARAMETER_NAME = "AI Prompt";
+
+export type Task = {
+	workspace: Workspace;
+	prompt: string;
+};

diff --git a/site/src/modules/workspaces/WorkspaceAppStatus/WorkspaceAppStatus.tsx b/site/src/modules/workspaces/WorkspaceAppStatus/WorkspaceAppStatus.tsx
index 76e74f17c351e..f2eab7f2086ac 100644
--- a/site/src/modules/workspaces/WorkspaceAppStatus/WorkspaceAppStatus.tsx
+++ b/site/src/modules/workspaces/WorkspaceAppStatus/WorkspaceAppStatus.tsx
@@ -34,7 +34,7 @@ export const WorkspaceAppStatus = ({
 	}
 
 	return (
-		{/* [old container element elided in source] */}
+		{/* [new container element elided in source] */}
@@ -48,7 +48,9 @@ export const WorkspaceAppStatus = ({
 			{status.message}
-			{status.state} {/* [old wrapping tag elided in source] */}
+			{/* [new wrapping tag elided in source] */}
+			{status.state}
+			{/* [closing tag elided in source] */}
 	);
 };

diff --git a/site/src/pages/TaskPage/TaskPage.stories.tsx b/site/src/pages/TaskPage/TaskPage.stories.tsx
new file mode 100644
index 0000000000000..1fd9c4b93cfa6
--- /dev/null
+++ b/site/src/pages/TaskPage/TaskPage.stories.tsx
@@ -0,0 +1,150 @@
+import type { Meta, StoryObj } from "@storybook/react";
+import { spyOn } from "@storybook/test";
+import {
+	MockFailedWorkspace,
+	MockStartingWorkspace,
+	MockStoppedWorkspace,
+	MockWorkspace,
+	MockWorkspaceAgent,
+	MockWorkspaceApp,
+	MockWorkspaceAppStatus,
+	MockWorkspaceResource,
+	mockApiError,
+} from "testHelpers/entities";
+import { withProxyProvider } from "testHelpers/storybook";
+import TaskPage, { data } from "./TaskPage";
+
+const meta: Meta<typeof TaskPage> = {
+	title: "pages/TaskPage",
+	component: TaskPage,
+	parameters: {
+		layout: "fullscreen",
+	},
+};
+
+export default meta;
+type Story = StoryObj<typeof TaskPage>;
+
+export const Loading: Story = {
+	beforeEach: () => {
+		spyOn(data, "fetchTask").mockImplementation(
+			// Resolve far in the future so the story stays in its loading state.
+			() => new Promise((res) => setTimeout(res, 1000 * 60 * 60)),
+		);
+	},
+};
+
+export const LoadingError: Story = {
+	beforeEach: () => {
+		spyOn(data, "fetchTask").mockRejectedValue(
+			mockApiError({
+				message: "Failed to load task",
+				detail: "You don't have permission to access this resource.",
+			}),
+		);
+	},
+};
+
+export const WaitingOnBuild: Story = {
+	beforeEach: () => {
+		spyOn(data, "fetchTask").mockResolvedValue({
+			prompt: "Create competitors page",
+			workspace: MockStartingWorkspace,
+		});
+	},
+};
+
+export const WaitingOnStatus: Story = {
+	beforeEach: () => {
+		spyOn(data, "fetchTask").mockResolvedValue({
+			prompt: "Create competitors page",
+			workspace: {
+				...MockWorkspace,
+				latest_app_status: null,
+			},
+		});
+	},
+};
+
+export const FailedBuild: Story = {
+	beforeEach: () => {
+		spyOn(data, "fetchTask").mockResolvedValue({
+			prompt: "Create competitors page",
+			workspace: MockFailedWorkspace,
+		});
+	},
+};
+
+export const TerminatedBuild: Story = {
+	beforeEach: () => {
+		spyOn(data, "fetchTask").mockResolvedValue({
+			prompt: "Create competitors page",
+			workspace: MockStoppedWorkspace,
+		});
+	},
+};
+
+export const TerminatedBuildWithStatus: Story = {
+	beforeEach: () => {
+		spyOn(data, "fetchTask").mockResolvedValue({
+			prompt: "Create competitors page",
+			workspace: {
+				...MockStoppedWorkspace,
+				latest_app_status: MockWorkspaceAppStatus,
+			},
+		});
+	},
+};
+
+export const Active: Story = {
+	decorators: [withProxyProvider()],
+	beforeEach: () => {
+		spyOn(data, "fetchTask").mockResolvedValue({
+			prompt: "Create competitors page",
+			workspace: {
+				...MockWorkspace,
+				latest_build: {
+					...MockWorkspace.latest_build,
+					resources: [
+						{
+							...MockWorkspaceResource,
+							agents: [
+								{
+									...MockWorkspaceAgent,
+									apps: [
+										{
+											...MockWorkspaceApp,
+											id: "claude-code",
+											display_name: "Claude Code",
+											icon: "/icon/claude.svg",
+											url: `${window.location.protocol}/iframe.html?viewMode=story&id=pages-terminal--ready&args=&globals=`,
+											external: true,
+											statuses: [
+												MockWorkspaceAppStatus,
+												{
+													...MockWorkspaceAppStatus,
+													id: "2",
+													message: "Planning changes",
+													state: "working",
+												},
+											],
+										},
+										{
+											...MockWorkspaceApp,
+											id: "vscode",
+											display_name: "VSCode",
+											icon: "/icon/code.svg",
+										},
+									],
+								},
+							],
+						},
+					],
+				},
+				latest_app_status: {
+					...MockWorkspaceAppStatus,
+					app_id: "claude-code",
+				},
+			},
+		});
+	},
+};

diff --git a/site/src/pages/TaskPage/TaskPage.tsx b/site/src/pages/TaskPage/TaskPage.tsx
new file mode 100644
index 0000000000000..692c99db2d63f
--- /dev/null
+++ b/site/src/pages/TaskPage/TaskPage.tsx
@@ -0,0 +1,407 @@
+import { API } from "api/api";
+import { getErrorDetail, getErrorMessage } from "api/errors";
+import type { WorkspaceApp, WorkspaceStatus } from "api/typesGenerated";
+import { Button } from "components/Button/Button";
+import { ExternalImage } from "components/ExternalImage/ExternalImage";
+import { Loader } from "components/Loader/Loader";
+import { Margins } from "components/Margins/Margins";
+import { ScrollArea } from "components/ScrollArea/ScrollArea";
+import { Spinner } from "components/Spinner/Spinner";
+import {
+	Tooltip,
+	TooltipContent,
+	TooltipProvider,
+	TooltipTrigger,
+} from "components/Tooltip/Tooltip";
+import { useProxy } from "contexts/ProxyContext";
+import { ArrowLeftIcon, LayoutGridIcon, RotateCcwIcon } from "lucide-react";
+import { AppStatusIcon } from "modules/apps/AppStatusIcon";
+import { getAppHref } from "modules/apps/apps";
+import { useAppLink } from "modules/apps/useAppLink";
+import { AI_PROMPT_PARAMETER_NAME, type Task } from "modules/tasks/tasks";
+import { WorkspaceAppStatus } from "modules/workspaces/WorkspaceAppStatus/WorkspaceAppStatus";
+import type React from "react";
+import { type FC, type ReactNode, useState } from "react";
+import { Helmet } from "react-helmet-async";
+import { useQuery } from "react-query";
+import { useParams } from "react-router-dom";
+import { Link as RouterLink } from "react-router-dom";
+import { cn } from "utils/cn";
+import { pageTitle } from "utils/page";
+import { timeFrom } from "utils/time";
+
+const TaskPage = () => {
+	const { workspace: workspaceName, username } = useParams() as {
+		workspace: string;
+		username: string;
+	};
+	const {
+		data: task,
+		error,
+		refetch,
+	} = useQuery({
+		queryKey: ["tasks", username, workspaceName],
+		queryFn: () => data.fetchTask(username, workspaceName),
+		refetchInterval: 5_000,
+	});
+
+	if (error) {
+		return (
+			<>
+				<Helmet>
+					<title>{pageTitle("Error loading task")}</title>
+				</Helmet>
+				{/* [error layout elided in source] */}
+				{getErrorMessage(error, "Failed to load task")}
+				{getErrorDetail(error)}
+				{/* [retry control elided in source] */}
+			</>
+		);
+	}
+
+	if (!task) {
+		return (
+			<>
+				<Helmet>
+					<title>{pageTitle("Loading task")}</title>
+				</Helmet>
+				{/* [loader element elided in source] */}
+			</>
+		);
+	}
+
+	let content: ReactNode = null;
+	const waitingStatuses: WorkspaceStatus[] = ["starting", "pending"];
+	const terminatedStatuses: WorkspaceStatus[] = [
+		"canceled",
+		"canceling",
+		"deleted",
+		"deleting",
+		"stopped",
+		"stopping",
+	];
+
+	if (waitingStatuses.includes(task.workspace.latest_build.status)) {
+		content = (
+			{/* [layout and spinner elided in source] */}
+			Building your task
+			Your task is being built and will be ready soon
+		);
+	} else if (task.workspace.latest_build.status === "failed") {
+		content = (
+			{/* [layout elided in source] */}
+			Task build failed
+			Please check the logs for more details.
+		);
+	} else if (terminatedStatuses.includes(task.workspace.latest_build.status)) {
+		content = (
+			{/* [layout elided in source] */}
+			{task.workspace.latest_app_status && (
+				{/* [WorkspaceAppStatus element elided in source] */}
+			)}
+			Task build terminated
+			So apps and previous statuses are not available
+		);
+	} else if (!task.workspace.latest_app_status) {
+		content = (
+			{/* [layout and spinner elided in source] */}
+			Running your task
+			The status should be available soon
+		);
+	} else {
+		const statuses = task.workspace.latest_build.resources
+			.flatMap((r) => r.agents)
+			.flatMap((a) => a?.apps)
+			.flatMap((a) => a?.statuses)
+			.filter((s) => !!s)
+			.sort(
+				(a, b) =>
+					new Date(b.created_at).getTime() - new Date(a.created_at).getTime(),
+			);
+
+		content = (
+			{/* [layout elided in source: the status timeline plus the apps panel] */}
+		);
+	}
+
+	return (
+		<>
+			<Helmet>
+				<title>{pageTitle(task.prompt)}</title>
+			</Helmet>
+
+			{/* [page header elided in source, including a back link:] */}
+			Back to tasks
+			{task.prompt}
+			Created by {task.workspace.owner_name}{" "}
+			{timeFrom(new Date(task.workspace.created_at))}
+
+			{content}
+		</>
+	);
+};
+
+export default TaskPage;
+
+type TaskAppsProps = {
+	task: Task;
+};
+
+const TaskApps: FC<TaskAppsProps> = ({ task }) => {
+	const agents = task.workspace.latest_build.resources
+		.flatMap((r) => r.agents)
+		.filter((a) => !!a);
+
+	const apps = agents.flatMap((a) => a?.apps).filter((a) => !!a);
+
+	const [activeAppId, setActiveAppId] = useState(() => {
+		const appId = task.workspace.latest_app_status?.app_id;
+		if (!appId) {
+			throw new Error("No active app found in task");
+		}
+		return appId;
+	});
+
+	const activeApp = apps.find((app) => app.id === activeAppId);
+	if (!activeApp) {
+		throw new Error(`Active app with ID ${activeAppId} not found in task`);
+	}
+
+	const agent = agents.find((a) =>
+		a.apps.some((app) => app.id === activeAppId),
+	);
+	if (!agent) {
+		throw new Error(`Agent for app ${activeAppId} not found in task workspace`);
+	}
+
+	const { proxy } = useProxy();
+	const [iframeSrc, setIframeSrc] = useState(() => {
+		const src = getAppHref(activeApp, {
+			agent,
+			workspace: task.workspace,
+			path: proxy.preferredPathAppURL,
+			host: proxy.preferredWildcardHostname,
+		});
+		return src;
+	});
+
+	return (
+		{/* [container markup elided in source] */}
+		{apps.map((app) => (
+			{/* [per-app tab element elided in source; its click handler:] */}
+			onClick={(e) => {
+				if (app.external) {
+					return;
+				}
+
+				e.preventDefault();
+				setActiveAppId(app.id);
+				setIframeSrc(e.currentTarget.href);
+			}}
+			/>
+		))}
+		{/* [remaining markup elided in source] */}
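TaskPage builds its timeline by flattening resources to agents to apps to statuses and sorting newest-first; the rule is easy to check in isolation. A minimal, self-contained sketch follows, in which the slimmed-down types are stand-ins for the generated `api/typesGenerated` ones, not the real definitions:

```tsx
// Simplified stand-ins for the generated API types; the real ones carry
// many more fields.
type AppStatus = { id: string; message: string; created_at: string };
type App = { statuses?: AppStatus[] };
type Agent = { apps?: App[] };
type Resource = { agents?: Agent[] };

// Flatten resources -> agents -> apps -> statuses, then order newest-first,
// mirroring how TaskPage derives its status timeline.
function orderedStatuses(resources: Resource[]): AppStatus[] {
	return resources
		.flatMap((r) => r.agents ?? [])
		.flatMap((a) => a.apps ?? [])
		.flatMap((app) => app.statuses ?? [])
		.sort(
			(a, b) =>
				new Date(b.created_at).getTime() - new Date(a.created_at).getTime(),
		);
}

// Example: the most recent status comes first.
const timeline = orderedStatuses([
	{
		agents: [
			{
				apps: [
					{
						statuses: [
							{ id: "1", message: "Cloning repo", created_at: "2025-05-28T10:00:00Z" },
							{ id: "2", message: "Planning changes", created_at: "2025-05-28T10:05:00Z" },
						],
					},
				],
			},
		],
	},
]);
console.log(timeline[0].message); // "Planning changes"
```

Comparing on `Date.getTime()` keeps the sort numeric, so ISO-8601 timestamps order correctly even across timezone offsets where plain string comparison would not.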