diff --git a/pkg/clusterversion/cockroach_versions.go b/pkg/clusterversion/cockroach_versions.go index 6ff4b20a3a77..1ac67054fcf9 100644 --- a/pkg/clusterversion/cockroach_versions.go +++ b/pkg/clusterversion/cockroach_versions.go @@ -191,10 +191,6 @@ const ( // This is a permanent migration which should exist forever. Permanent_V22_2SQLSchemaTelemetryScheduledJobs - // TODO_Delete_V23_1Start demarcates the start of cluster versions stepped - // through during the process of upgrading from 22.2 to 23.1. - TODO_Delete_V23_1Start - // TODO_Delete_V23_1TenantNamesStateAndServiceMode adds columns to // system.tenants. TODO_Delete_V23_1TenantNamesStateAndServiceMode @@ -204,11 +200,6 @@ const ( // for the system tenant. TODO_Delete_V23_1DescIDSequenceForSystemTenant - // TODO_Delete_V23_1AddPartialStatisticsColumns adds two columns: one to store - // the predicate for a partial statistics collection, and another to refer to - // the full statistic it was collected from. - TODO_Delete_V23_1AddPartialStatisticsColumns - // TODO_Delete_V23_1CreateSystemJobInfoTable creates the system.job_info table. TODO_Delete_V23_1CreateSystemJobInfoTable @@ -216,10 +207,6 @@ const ( // role_members system table has columns for ids. TODO_Delete_V23_1RoleMembersTableHasIDColumns - // TODO_Delete_V23_1RoleMembersIDColumnsBackfilled is the version where the - // columns for ids in the role_members system table have been backfilled. - TODO_Delete_V23_1RoleMembersIDColumnsBackfilled - // TODO_Delete_V23_1ScheduledChangefeeds is the version where scheduled changefeeds are // supported through `CREATE SCHEDULE FOR CHANGEFEED` statement. TODO_Delete_V23_1ScheduledChangefeeds @@ -232,15 +219,6 @@ const ( // column in the system.jobs table. TODO_Delete_V23_1BackfillTypeColumnInJobsTable - // TODO_Delete_V23_1_AlterSystemStatementStatisticsAddIndexesUsage creates - // indexes usage virtual column based on (statistics->>'indexes') with - // inverted index on table system.statement_statistics. 
- TODO_Delete_V23_1_AlterSystemStatementStatisticsAddIndexesUsage - - // TODO_Delete_V23_1AlterSystemSQLInstancesAddSQLAddr adds a sql_addr column - // to the system.sql_instances table. - TODO_Delete_V23_1AlterSystemSQLInstancesAddSQLAddr - // TODO_Delete_V23_1_ChangefeedExpressionProductionReady marks changefeed // expressions (transformation) as production ready. This gate functions as a // signal to attempt to upgrade chagnefeeds created prior to this version. @@ -250,12 +228,6 @@ const ( // support the key visualizer. Permanent_V23_1KeyVisualizerTablesAndJobs - // TODO_Delete_V23_1_KVDirectColumnarScans introduces the support of the "direct" - // columnar scans in the KV layer. - TODO_Delete_V23_1_KVDirectColumnarScans - - TODO_Delete_V23_1_DeleteDroppedFunctionDescriptors - // Permanent_V23_1_CreateJobsMetricsPollingJob creates the permanent job // responsible for polling the jobs table for metrics. Permanent_V23_1_CreateJobsMetricsPollingJob @@ -270,40 +242,15 @@ const ( // user_id column has been added to the system.privileges table. TODO_Delete_V23_1SystemPrivilegesTableHasUserIDColumn - // TODO_Delete_V23_1SystemPrivilegesTableUserIDColumnBackfilled is the version - // where the user_id column in the system.privileges table has been - // backfilled. - TODO_Delete_V23_1SystemPrivilegesTableUserIDColumnBackfilled - // TODO_Delete_V23_1WebSessionsTableHasUserIDColumn is the version where the // user_id column has been added to the system.web_sessions table. TODO_Delete_V23_1WebSessionsTableHasUserIDColumn - // TODO_Delete_V23_1WebSessionsTableUserIDColumnBackfilled is the version - // where the user_id column in the system.web_sessions table has been - // backfilled. - TODO_Delete_V23_1WebSessionsTableUserIDColumnBackfilled - - // TODO_Delete_V23_1_SchemaChangerDeprecatedIndexPredicates is the version - // where the declarative schema changer no longer produces - // scpb.SecondaryIndexPartial elements. 
- TODO_Delete_V23_1_SchemaChangerDeprecatedIndexPredicates - - // TODO_Delete_V23_1AlterSystemPrivilegesAddIndexOnPathAndUsername adds a - // covering secondary index to system.privileges, on the path and username - // columns. - TODO_Delete_V23_1AlterSystemPrivilegesAddIndexOnPathAndUsername - // TODO_Delete_V23_1DatabaseRoleSettingsHasRoleIDColumn is the version where // the role_id column has been added to the system.database_role_settings // table. TODO_Delete_V23_1DatabaseRoleSettingsHasRoleIDColumn - // TODO_Delete_V23_1DatabaseRoleSettingsRoleIDColumnBackfilled is the version - // where the role_id column in the system.database_role_settings table has - // been backfilled. - TODO_Delete_V23_1DatabaseRoleSettingsRoleIDColumnBackfilled - // TODO_Delete_V23_1TenantCapabilities is the version where tenant // capabilities can be set. TODO_Delete_V23_1TenantCapabilities @@ -312,66 +259,16 @@ const ( // longer write cluster version keys to engines. TODO_Delete_V23_1DeprecateClusterVersionKey - // TODO_Delete_V23_1_SystemRbrDualWrite indicates regional by row compatible - // system tables should write to the old and new indexes. See - // system_rbr_indexes.go for more details. - TODO_Delete_V23_1_SystemRbrDualWrite - - // TODO_Delete_V23_1_SystemRbrReadNew indicates regional by row compatible - // system tables should read from the new index. See system_rbr_indexes.go for - // more details. - TODO_Delete_V23_1_SystemRbrReadNew - - // TODO_Delete_V23_1_SystemRbrSingleWrite indicates regional by row compatible - // system tables no longer need to write to the old index. See - // system_rbr_indexes.go for more details. - TODO_Delete_V23_1_SystemRbrSingleWrite - - // TODO_Delete_V23_1_SystemRbrCleanup is used to gate an upgrade job that - // cleans up old keys that are not regional by row compatible. 
- TODO_Delete_V23_1_SystemRbrCleanup - // TODO_Delete_V23_1ExternalConnectionsTableHasOwnerIDColumn is the version // where the owner_id column has been added to the system.external_connections // table. TODO_Delete_V23_1ExternalConnectionsTableHasOwnerIDColumn - // TODO_Delete_V23_1ExternalConnectionsTableOwnerIDColumnBackfilled is the - // version where the owner_id column in the system.external_connections table - // has been backfilled. - TODO_Delete_V23_1ExternalConnectionsTableOwnerIDColumnBackfilled - - // TODO_Delete_V23_1AllowNewSystemPrivileges is the version at which we allow - // the new MODIFYSQLCLUSTERSETTING abd VIEWJOB system privileges to be used. - // Note: After v23.1 is released, we won't need to version gate these anymore, - // since we've made mixed-version clusters tolerate new privileges. - TODO_Delete_V23_1AllowNewSystemPrivileges - // TODO_Delete_V23_1JobInfoTableIsBackfilled is a version gate after which the // system.job_info table has been backfilled with rows for the payload and // progress of each job in the system.jobs table. TODO_Delete_V23_1JobInfoTableIsBackfilled - // TODO_Delete_V23_1EnableFlushableIngest upgrades the Pebble format major - // version to FormatFlushableIngest, which enables use of flushable ingestion. - TODO_Delete_V23_1EnableFlushableIngest - - // TODO_Delete_V23_1_UseDelRangeInGCJob enables the use of the DelRange - // operation in the GC job. Before it is enabled, the GC job uses ClearRange - // operations after the job waits out the GC TTL. After it has been enabled, - // the job instead issues DelRange operations at the beginning of the job and - // then waits for the data to be removed automatically before removing the - // descriptor and zone configurations. - TODO_Delete_V23_1_UseDelRangeInGCJob - - // TODO_Delete_V23_1WaitedForDelRangeInGCJob corresponds to the migration - // which waits for the GC jobs to adopt the use of DelRange with tombstones. 
- TODO_Delete_V23_1WaitedForDelRangeInGCJob - - // TODO_Delete_V23_1_TaskSystemTables is the version where the system tables - // task_payloads and tenant_tasks have been created. - TODO_Delete_V23_1_TaskSystemTables - // Permanent_V23_1_CreateAutoConfigRunnerJob is the version where the auto // config runner persistent job has been created. Permanent_V23_1_CreateAutoConfigRunnerJob @@ -397,10 +294,6 @@ const ( // updated to all SQL Stats tables. Permanent_V23_1ChangeSQLStatsTTL - // TODO_Delete_V23_1_TenantIDSequence is the version where - // system.tenant_id_seq was introduced. - TODO_Delete_V23_1_TenantIDSequence - // Permanent_V23_1CreateSystemActivityUpdateJob is the version at which // Cockroach adds a job that periodically updates the statement_activity and // transaction_activity tables. @@ -529,53 +422,30 @@ var versionTable = [numKeys]roachpb.Version{ Permanent_V22_2SQLSchemaTelemetryScheduledJobs: {Major: 22, Minor: 1, Internal: 42}, // v23.1 versions. Internal versions must be even. 
- TODO_Delete_V23_1Start: {Major: 22, Minor: 2, Internal: 2}, - TODO_Delete_V23_1TenantNamesStateAndServiceMode: {Major: 22, Minor: 2, Internal: 4}, - TODO_Delete_V23_1DescIDSequenceForSystemTenant: {Major: 22, Minor: 2, Internal: 6}, - TODO_Delete_V23_1AddPartialStatisticsColumns: {Major: 22, Minor: 2, Internal: 8}, - TODO_Delete_V23_1CreateSystemJobInfoTable: {Major: 22, Minor: 2, Internal: 10}, - TODO_Delete_V23_1RoleMembersTableHasIDColumns: {Major: 22, Minor: 2, Internal: 12}, - TODO_Delete_V23_1RoleMembersIDColumnsBackfilled: {Major: 22, Minor: 2, Internal: 14}, - TODO_Delete_V23_1ScheduledChangefeeds: {Major: 22, Minor: 2, Internal: 16}, - TODO_Delete_V23_1AddTypeColumnToJobsTable: {Major: 22, Minor: 2, Internal: 18}, - TODO_Delete_V23_1BackfillTypeColumnInJobsTable: {Major: 22, Minor: 2, Internal: 20}, - TODO_Delete_V23_1_AlterSystemStatementStatisticsAddIndexesUsage: {Major: 22, Minor: 2, Internal: 22}, - TODO_Delete_V23_1AlterSystemSQLInstancesAddSQLAddr: {Major: 22, Minor: 2, Internal: 28}, - TODO_Delete_V23_1_ChangefeedExpressionProductionReady: {Major: 22, Minor: 2, Internal: 30}, - Permanent_V23_1KeyVisualizerTablesAndJobs: {Major: 22, Minor: 2, Internal: 32}, - TODO_Delete_V23_1_KVDirectColumnarScans: {Major: 22, Minor: 2, Internal: 34}, - TODO_Delete_V23_1_DeleteDroppedFunctionDescriptors: {Major: 22, Minor: 2, Internal: 36}, - Permanent_V23_1_CreateJobsMetricsPollingJob: {Major: 22, Minor: 2, Internal: 38}, - TODO_Delete_V23_1AllocatorCPUBalancing: {Major: 22, Minor: 2, Internal: 40}, - TODO_Delete_V23_1SystemPrivilegesTableHasUserIDColumn: {Major: 22, Minor: 2, Internal: 42}, - TODO_Delete_V23_1SystemPrivilegesTableUserIDColumnBackfilled: {Major: 22, Minor: 2, Internal: 44}, - TODO_Delete_V23_1WebSessionsTableHasUserIDColumn: {Major: 22, Minor: 2, Internal: 46}, - TODO_Delete_V23_1WebSessionsTableUserIDColumnBackfilled: {Major: 22, Minor: 2, Internal: 48}, - TODO_Delete_V23_1_SchemaChangerDeprecatedIndexPredicates: {Major: 22, Minor: 2, Internal: 
50}, - TODO_Delete_V23_1AlterSystemPrivilegesAddIndexOnPathAndUsername: {Major: 22, Minor: 2, Internal: 52}, - TODO_Delete_V23_1DatabaseRoleSettingsHasRoleIDColumn: {Major: 22, Minor: 2, Internal: 54}, - TODO_Delete_V23_1DatabaseRoleSettingsRoleIDColumnBackfilled: {Major: 22, Minor: 2, Internal: 56}, - TODO_Delete_V23_1TenantCapabilities: {Major: 22, Minor: 2, Internal: 60}, - TODO_Delete_V23_1DeprecateClusterVersionKey: {Major: 22, Minor: 2, Internal: 62}, - TODO_Delete_V23_1_SystemRbrDualWrite: {Major: 22, Minor: 2, Internal: 66}, - TODO_Delete_V23_1_SystemRbrReadNew: {Major: 22, Minor: 2, Internal: 68}, - TODO_Delete_V23_1_SystemRbrSingleWrite: {Major: 22, Minor: 2, Internal: 70}, - TODO_Delete_V23_1_SystemRbrCleanup: {Major: 22, Minor: 2, Internal: 72}, - TODO_Delete_V23_1ExternalConnectionsTableHasOwnerIDColumn: {Major: 22, Minor: 2, Internal: 74}, - TODO_Delete_V23_1ExternalConnectionsTableOwnerIDColumnBackfilled: {Major: 22, Minor: 2, Internal: 76}, - TODO_Delete_V23_1AllowNewSystemPrivileges: {Major: 22, Minor: 2, Internal: 78}, - TODO_Delete_V23_1JobInfoTableIsBackfilled: {Major: 22, Minor: 2, Internal: 80}, - TODO_Delete_V23_1EnableFlushableIngest: {Major: 22, Minor: 2, Internal: 82}, - TODO_Delete_V23_1_UseDelRangeInGCJob: {Major: 22, Minor: 2, Internal: 84}, - TODO_Delete_V23_1WaitedForDelRangeInGCJob: {Major: 22, Minor: 2, Internal: 86}, - TODO_Delete_V23_1_TaskSystemTables: {Major: 22, Minor: 2, Internal: 88}, - Permanent_V23_1_CreateAutoConfigRunnerJob: {Major: 22, Minor: 2, Internal: 90}, - TODO_Delete_V23_1AddSQLStatsComputedIndexes: {Major: 22, Minor: 2, Internal: 92}, - TODO_Delete_V23_1AddSystemActivityTables: {Major: 22, Minor: 2, Internal: 94}, - TODO_Delete_V23_1StopWritingPayloadAndProgressToSystemJobs: {Major: 22, Minor: 2, Internal: 96}, - Permanent_V23_1ChangeSQLStatsTTL: {Major: 22, Minor: 2, Internal: 98}, - TODO_Delete_V23_1_TenantIDSequence: {Major: 22, Minor: 2, Internal: 100}, - Permanent_V23_1CreateSystemActivityUpdateJob: {Major: 
22, Minor: 2, Internal: 102}, + TODO_Delete_V23_1TenantNamesStateAndServiceMode: {Major: 22, Minor: 2, Internal: 4}, + TODO_Delete_V23_1DescIDSequenceForSystemTenant: {Major: 22, Minor: 2, Internal: 6}, + TODO_Delete_V23_1CreateSystemJobInfoTable: {Major: 22, Minor: 2, Internal: 10}, + TODO_Delete_V23_1RoleMembersTableHasIDColumns: {Major: 22, Minor: 2, Internal: 12}, + TODO_Delete_V23_1ScheduledChangefeeds: {Major: 22, Minor: 2, Internal: 16}, + TODO_Delete_V23_1AddTypeColumnToJobsTable: {Major: 22, Minor: 2, Internal: 18}, + TODO_Delete_V23_1BackfillTypeColumnInJobsTable: {Major: 22, Minor: 2, Internal: 20}, + TODO_Delete_V23_1_ChangefeedExpressionProductionReady: {Major: 22, Minor: 2, Internal: 30}, + Permanent_V23_1KeyVisualizerTablesAndJobs: {Major: 22, Minor: 2, Internal: 32}, + Permanent_V23_1_CreateJobsMetricsPollingJob: {Major: 22, Minor: 2, Internal: 38}, + TODO_Delete_V23_1AllocatorCPUBalancing: {Major: 22, Minor: 2, Internal: 40}, + TODO_Delete_V23_1SystemPrivilegesTableHasUserIDColumn: {Major: 22, Minor: 2, Internal: 42}, + TODO_Delete_V23_1WebSessionsTableHasUserIDColumn: {Major: 22, Minor: 2, Internal: 46}, + TODO_Delete_V23_1DatabaseRoleSettingsHasRoleIDColumn: {Major: 22, Minor: 2, Internal: 54}, + TODO_Delete_V23_1TenantCapabilities: {Major: 22, Minor: 2, Internal: 60}, + TODO_Delete_V23_1DeprecateClusterVersionKey: {Major: 22, Minor: 2, Internal: 62}, + TODO_Delete_V23_1ExternalConnectionsTableHasOwnerIDColumn: {Major: 22, Minor: 2, Internal: 74}, + TODO_Delete_V23_1JobInfoTableIsBackfilled: {Major: 22, Minor: 2, Internal: 80}, + Permanent_V23_1_CreateAutoConfigRunnerJob: {Major: 22, Minor: 2, Internal: 90}, + TODO_Delete_V23_1AddSQLStatsComputedIndexes: {Major: 22, Minor: 2, Internal: 92}, + TODO_Delete_V23_1AddSystemActivityTables: {Major: 22, Minor: 2, Internal: 94}, + TODO_Delete_V23_1StopWritingPayloadAndProgressToSystemJobs: {Major: 22, Minor: 2, Internal: 96}, + Permanent_V23_1ChangeSQLStatsTTL: {Major: 22, Minor: 2, Internal: 98}, + 
Permanent_V23_1CreateSystemActivityUpdateJob: {Major: 22, Minor: 2, Internal: 102}, V23_1: {Major: 23, Minor: 1, Internal: 0}, diff --git a/pkg/jobs/BUILD.bazel b/pkg/jobs/BUILD.bazel index d9d475bd3a75..af44fba697c1 100644 --- a/pkg/jobs/BUILD.bazel +++ b/pkg/jobs/BUILD.bazel @@ -145,6 +145,7 @@ go_test( "//pkg/sql/sem/tree", "//pkg/sql/sessiondata", "//pkg/sql/sqlliveness", + "//pkg/sql/sqlliveness/slstorage", "//pkg/testutils", "//pkg/testutils/jobutils", "//pkg/testutils/serverutils", diff --git a/pkg/jobs/registry_external_test.go b/pkg/jobs/registry_external_test.go index 1290ea3ef564..f7c69622950e 100644 --- a/pkg/jobs/registry_external_test.go +++ b/pkg/jobs/registry_external_test.go @@ -37,6 +37,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" + "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slstorage" "github.com/cockroachdb/cockroach/pkg/testutils" "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/testutils/skip" @@ -112,12 +113,17 @@ RETURNING id; terminalStatuses := []jobs.Status{jobs.StatusSucceeded, jobs.StatusCanceled, jobs.StatusFailed} terminalIDs := make([]jobspb.JobID, len(terminalStatuses)) terminalClaims := make([][]byte, len(terminalStatuses)) + mkSessionID := func() []byte { + sessionID, err := slstorage.MakeSessionID([]byte("us"), uuid.MakeV4()) + require.NoError(t, err) + return []byte(sessionID) + } for i, s := range terminalStatuses { - terminalClaims[i] = uuid.MakeV4().GetBytes() // bogus claim + terminalClaims[i] = mkSessionID() // bogus claim tdb.QueryRow(t, insertQuery, s, terminalClaims[i], 42).Scan(&terminalIDs[i]) } var nonTerminalID jobspb.JobID - tdb.QueryRow(t, insertQuery, jobs.StatusRunning, uuid.MakeV4().GetBytes(), 42).Scan(&nonTerminalID) + tdb.QueryRow(t, insertQuery, jobs.StatusRunning, mkSessionID(), 42).Scan(&nonTerminalID) checkClaimEqual 
:= func(id jobspb.JobID, exp []byte) error { const getClaimQuery = `SELECT claim_session_id FROM system.jobs WHERE id = $1` diff --git a/pkg/server/settingswatcher/version_guard_test.go b/pkg/server/settingswatcher/version_guard_test.go index e345e59672e4..9a9e6181d919 100644 --- a/pkg/server/settingswatcher/version_guard_test.go +++ b/pkg/server/settingswatcher/version_guard_test.go @@ -109,9 +109,9 @@ func TestVersionGuard(t *testing.T) { storageVersion: &initialVersion, settingsVersion: maxVersion, checkVersions: map[clusterversion.Key]bool{ - initialVersion: true, - maxVersion: true, - clusterversion.TODO_Delete_V23_1Start: true, + initialVersion: true, + startVersion: true, + maxVersion: true, }, }, } diff --git a/pkg/sql/alter_role.go b/pkg/sql/alter_role.go index e0e6933af164..f0ee1ee49667 100644 --- a/pkg/sql/alter_role.go +++ b/pkg/sql/alter_role.go @@ -15,7 +15,6 @@ import ( "fmt" "strings" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -428,9 +427,6 @@ func (p *planner) processSetOrResetClause( } func (n *alterRoleSetNode) startExec(params runParams) error { - databaseRoleSettingsHasRoleIDCol := params.p.ExecCfg().Settings.Version.IsActive(params.ctx, - clusterversion.TODO_Delete_V23_1DatabaseRoleSettingsHasRoleIDColumn) - var opName string if n.isRole { sqltelemetry.IncIAMAlterCounter(sqltelemetry.Role) @@ -454,12 +450,7 @@ func (n *alterRoleSetNode) startExec(params runParams) error { sessioninit.DatabaseRoleSettingsTableName, ) - var upsertQuery = fmt.Sprintf( - `UPSERT INTO %s (database_id, role_name, settings) VALUES ($1, $2, $3)`, - sessioninit.DatabaseRoleSettingsTableName, - ) - if databaseRoleSettingsHasRoleIDCol { - upsertQuery = fmt.Sprintf(` + var upsertQuery = fmt.Sprintf(` UPSERT INTO %s (database_id, role_name, settings, role_id) VALUES ($1, $2, $3, ( SELECT CASE $2 
@@ -467,9 +458,8 @@ VALUES ($1, $2, $3, ( ELSE (SELECT user_id FROM system.users WHERE username = $2) END ))`, - sessioninit.DatabaseRoleSettingsTableName, username.EmptyRole, username.EmptyRoleID, - ) - } + sessioninit.DatabaseRoleSettingsTableName, username.EmptyRole, username.EmptyRoleID, + ) // Instead of inserting an empty settings array, this function will make // sure the row is deleted instead. diff --git a/pkg/sql/alter_table.go b/pkg/sql/alter_table.go index b001433f6313..dcaeae16444e 100644 --- a/pkg/sql/alter_table.go +++ b/pkg/sql/alter_table.go @@ -1341,9 +1341,8 @@ func insertJSONStatistic( histogram interface{}, ) error { var ( - ctx = params.ctx - txn = params.p.InternalSQLTxn() - settings = params.ExecCfg().Settings + ctx = params.ctx + txn = params.p.InternalSQLTxn() ) var name interface{} @@ -1351,39 +1350,6 @@ func insertJSONStatistic( name = s.Name } - if !settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1AddPartialStatisticsColumns) { - - if s.PartialPredicate != "" { - return pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "statistic for columns %v with collection time %s to insert is partial but cluster version is below 23.1", s.Columns, s.CreatedAt) - } - - _ /* rows */, err := txn.Exec( - ctx, - "insert-stats", - txn.KV(), - `INSERT INTO system.table_statistics ( - "tableID", - "name", - "columnIDs", - "createdAt", - "rowCount", - "distinctCount", - "nullCount", - "avgSize", - histogram - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)`, - tableID, - name, - columnIDs, - s.CreatedAt, - s.RowCount, - s.DistinctCount, - s.NullCount, - s.AvgSize, - histogram) - return err - } - var predicateValue interface{} if s.PartialPredicate != "" { predicateValue = s.PartialPredicate diff --git a/pkg/sql/catalog/descidgen/BUILD.bazel b/pkg/sql/catalog/descidgen/BUILD.bazel index 5511aa44d627..294a89ebeae1 100644 --- a/pkg/sql/catalog/descidgen/BUILD.bazel +++ b/pkg/sql/catalog/descidgen/BUILD.bazel @@ -6,7 +6,6 @@ go_library( importpath = 
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descidgen", visibility = ["//visibility:public"], deps = [ - "//pkg/clusterversion", "//pkg/keys", "//pkg/kv", "//pkg/roachpb", diff --git a/pkg/sql/catalog/descidgen/generate_id.go b/pkg/sql/catalog/descidgen/generate_id.go index 8589cfcce842..0b0cab785e18 100644 --- a/pkg/sql/catalog/descidgen/generate_id.go +++ b/pkg/sql/catalog/descidgen/generate_id.go @@ -13,7 +13,6 @@ package descidgen import ( "context" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -65,15 +64,6 @@ func (g *generator) run(ctx context.Context, inc int64) (catid.DescID, error) { return catid.DescID(nextID), err } -// ErrDescIDSequenceMigrationInProgress is the error returned when the -// descriptor ID generator is unavailable due to migrating the system tenant -// counter from keys.LegacyDescIDGenerator to system.descriptor_id_seq. -// -// TODO(postamar): remove along with clusterversion.TODO_Delete_V23_1DescIDSequenceForSystemTenant -var ErrDescIDSequenceMigrationInProgress = errors.New( - "descriptor ID generator unavailable, migration in progress, retry later", -) - // NewGenerator constructs a non-transactional eval.DescIDGenerator. // // In this implementation the value returned by PeekNextUniqueDescID is _not_ @@ -108,16 +98,6 @@ func key( ctx context.Context, codec keys.SQLCodec, settings *cluster.Settings, ) (roachpb.Key, error) { key := codec.SequenceKey(keys.DescIDSequenceID) - if cv := settings.Version; codec.ForSystemTenant() && - !cv.IsActive(ctx, clusterversion.TODO_Delete_V23_1DescIDSequenceForSystemTenant) { - // At this point, the system tenant may still be using a legacy non-SQL key, - // or may be in the process of undergoing the migration away from it, in - // which case descriptor ID generation is made unavailable. 
- if cv.IsActive(ctx, clusterversion.TODO_Delete_V23_1DescIDSequenceForSystemTenant-1) { - return nil, ErrDescIDSequenceMigrationInProgress - } - key = keys.LegacyDescIDGenerator - } return key, nil } diff --git a/pkg/sql/catalog/lease/BUILD.bazel b/pkg/sql/catalog/lease/BUILD.bazel index f9bda14fac84..012bda04fed9 100644 --- a/pkg/sql/catalog/lease/BUILD.bazel +++ b/pkg/sql/catalog/lease/BUILD.bazel @@ -19,7 +19,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/base", - "//pkg/clusterversion", "//pkg/keys", "//pkg/kv", "//pkg/kv/kvclient/rangefeed", diff --git a/pkg/sql/catalog/lease/kv_writer.go b/pkg/sql/catalog/lease/kv_writer.go index b2e9e1868c71..0bfe08a8d383 100644 --- a/pkg/sql/catalog/lease/kv_writer.go +++ b/pkg/sql/catalog/lease/kv_writer.go @@ -13,7 +13,6 @@ package lease import ( "context" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/server/settingswatcher" @@ -58,63 +57,35 @@ func leaseTableWithID(id descpb.ID) catalog.TableDescriptor { return mut.ImmutableCopy().(catalog.TableDescriptor) } -func (w *kvWriter) versionGuard( - ctx context.Context, txn *kv.Txn, -) (settingswatcher.VersionGuard, error) { - return w.settingsWatcher.MakeVersionGuard(ctx, txn, clusterversion.TODO_Delete_V23_1_SystemRbrCleanup) -} - func (w *kvWriter) insertLease(ctx context.Context, txn *kv.Txn, l leaseFields) error { - return w.do(ctx, txn, l, func(guard settingswatcher.VersionGuard, b *kv.Batch) error { - if guard.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrDualWrite) { - err := w.newWriter.Insert(ctx, b, false /*kvTrace */, leaseAsRbrDatum(l)...) - if err != nil { - return err - } - } - if !guard.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrSingleWrite) { - err := w.oldWriter.Insert(ctx, b, false /*kvTrace */, leaseAsRbtDatum(l)...) 
- if err != nil { - return err - } + return w.do(ctx, txn, l, func(b *kv.Batch) error { + err := w.newWriter.Insert(ctx, b, false /*kvTrace */, leaseAsRbrDatum(l)...) + if err != nil { + return err } return nil }) } func (w *kvWriter) deleteLease(ctx context.Context, txn *kv.Txn, l leaseFields) error { - return w.do(ctx, txn, l, func(guard settingswatcher.VersionGuard, b *kv.Batch) error { - if guard.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrDualWrite) { - err := w.newWriter.Delete(ctx, b, false /*kvTrace */, leaseAsRbrDatum(l)...) - if err != nil { - return err - } - } - if !guard.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrSingleWrite) { - err := w.oldWriter.Delete(ctx, b, false /*kvTrace */, leaseAsRbtDatum(l)...) - if err != nil { - return err - } + return w.do(ctx, txn, l, func(b *kv.Batch) error { + err := w.newWriter.Delete(ctx, b, false /*kvTrace */, leaseAsRbrDatum(l)...) + if err != nil { + return err } return nil }) } -type addToBatchFunc = func(settingswatcher.VersionGuard, *kv.Batch) error +type addToBatchFunc = func(*kv.Batch) error func (w *kvWriter) do( ctx context.Context, txn *kv.Txn, lease leaseFields, addToBatch addToBatchFunc, ) error { run := (*kv.Txn).Run do := func(ctx context.Context, txn *kv.Txn) error { - guard, err := w.versionGuard(ctx, txn) - if err != nil { - return err - } - b := txn.NewBatch() - err = addToBatch(guard, b) - if err != nil { + if err := addToBatch(b); err != nil { return errors.NewAssertionErrorWithWrappedErrf(err, "failed to encode lease entry") } return run(txn, ctx, b) @@ -136,12 +107,3 @@ func leaseAsRbrDatum(l leaseFields) []tree.Datum { } } - -func leaseAsRbtDatum(l leaseFields) []tree.Datum { - return []tree.Datum{ - tree.NewDInt(tree.DInt(l.descID)), - tree.NewDInt(tree.DInt(l.version)), - tree.NewDInt(tree.DInt(l.instanceID)), - &l.expiration, - } -} diff --git a/pkg/sql/catalog/lease/lease.go b/pkg/sql/catalog/lease/lease.go index b866c0b3e9b3..cfec80dc2d9e 100644 --- 
a/pkg/sql/catalog/lease/lease.go +++ b/pkg/sql/catalog/lease/lease.go @@ -21,7 +21,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" @@ -1400,18 +1399,9 @@ func (m *Manager) DeleteOrphanedLeases(ctx context.Context, timeThreshold int64) // doesn't implement AS OF SYSTEM TIME. // Read orphaned leases. - const ( - queryWithRegion = ` + query := ` SELECT "descID", version, expiration, crdb_region FROM system.public.lease AS OF SYSTEM TIME %d WHERE "nodeID" = %d ` - queryWithoutRegion = ` -SELECT "descID", version, expiration FROM system.public.lease AS OF SYSTEM TIME %d WHERE "nodeID" = %d -` - ) - query := queryWithRegion - if !m.settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1_SystemRbrReadNew) { - query = queryWithoutRegion - } sqlQuery := fmt.Sprintf(query, timeThreshold, instanceID) var rows []tree.Datums retryOptions := base.DefaultRetryOptions() diff --git a/pkg/sql/colexec/colbuilder/BUILD.bazel b/pkg/sql/colexec/colbuilder/BUILD.bazel index e1e96da08433..8d3cc8004e97 100644 --- a/pkg/sql/colexec/colbuilder/BUILD.bazel +++ b/pkg/sql/colexec/colbuilder/BUILD.bazel @@ -9,7 +9,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/colexec/colbuilder", visibility = ["//visibility:public"], deps = [ - "//pkg/clusterversion", "//pkg/col/coldata", "//pkg/col/coldataext", "//pkg/col/typeconv", diff --git a/pkg/sql/colexec/colbuilder/execplan.go b/pkg/sql/colexec/colbuilder/execplan.go index f9fbd3f49c4f..a1a57c852fd5 100644 --- a/pkg/sql/colexec/colbuilder/execplan.go +++ b/pkg/sql/colexec/colbuilder/execplan.go @@ -15,7 +15,6 @@ import ( "reflect" "strings" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/col/coldata" "github.com/cockroachdb/cockroach/pkg/col/coldataext" 
"github.com/cockroachdb/cockroach/pkg/col/typeconv" @@ -835,9 +834,6 @@ func NewColOperator( var resultTypes []*types.T if flowCtx.EvalCtx.SessionData().DirectColumnarScansEnabled { canUseDirectScan := func() bool { - if !flowCtx.EvalCtx.Settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1_KVDirectColumnarScans) { - return false - } // We currently don't use the direct scans if TraceKV is // enabled (due to not being able to tell the KV server // about it). One idea would be to include this boolean into diff --git a/pkg/sql/conn_executor.go b/pkg/sql/conn_executor.go index 523b9548f12b..fa42f7bd512d 100644 --- a/pkg/sql/conn_executor.go +++ b/pkg/sql/conn_executor.go @@ -3287,10 +3287,6 @@ var retriableMinTimestampBoundUnsatisfiableError = errors.Newf( func errIsRetriable(err error) bool { return errors.HasInterface(err, (*pgerror.ClientVisibleRetryError)(nil)) || errors.Is(err, retriableMinTimestampBoundUnsatisfiableError) || - // Note that this error is not handled internally and can make it to the - // client in implicit transactions. This is not great; it should - // be marked as a client visible retry error. - errors.Is(err, descidgen.ErrDescIDSequenceMigrationInProgress) || descs.IsTwoVersionInvariantViolationError(err) } @@ -3752,13 +3748,6 @@ func (ex *connExecutor) txnStateTransitionsApplyWrapper( return advanceInfo{}, err } } - // Similarly, if the descriptor ID generator is not available because of - // an ongoing migration, wait for the migration to complete first. - if errors.Is(payloadErr, descidgen.ErrDescIDSequenceMigrationInProgress) { - if err := ex.handleWaitingForDescriptorIDGeneratorMigration(ex.Ctx()); err != nil { - return advanceInfo{}, err - } - } } // Handle transaction events which cause updates to txnState. 
@@ -3922,13 +3911,6 @@ func (ex *connExecutor) handleWaitingForConcurrentSchemaChanges( return ex.resetTransactionOnSchemaChangeRetry(ctx) } -func (ex *connExecutor) handleWaitingForDescriptorIDGeneratorMigration(ctx context.Context) error { - if err := ex.planner.waitForDescriptorIDGeneratorMigration(ctx); err != nil { - return err - } - return ex.resetTransactionOnSchemaChangeRetry(ctx) -} - // initStatementResult initializes res according to a query. // // cols represents the columns of the result rows. Should be nil if diff --git a/pkg/sql/crdb_internal.go b/pkg/sql/crdb_internal.go index 2be99d75e773..6e3990d7a9fe 100644 --- a/pkg/sql/crdb_internal.go +++ b/pkg/sql/crdb_internal.go @@ -24,7 +24,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/build" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config/zonepb" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/jobs" @@ -944,25 +943,6 @@ WITH LEFT JOIN latestprogress AS progress ON j.id = progress.job_id ` - // Before clusterversion.TODO_Delete_V23_1JobInfoTableIsBackfilled, the system.job_info - // table has not been fully populated with the payload and progress of jobs in - // the cluster. - systemJobsBaseQuery = ` - SELECT - id, status, created, payload, progress, created_by_type, created_by_id, - claim_session_id, claim_instance_id, num_runs, last_run, job_type - FROM system.jobs` - - // TODO(jayant): remove the version gate in 24.1 - // Before clusterversion.TODO_Delete_V23_1BackfillTypeColumnInJobsTable, the system.jobs table did not have - // a fully populated job_type column, so we must project it manually - // with crdb_internal.job_payload_type. 
- oldSystemJobsBaseQuery = ` - SELECT id, status, created, payload, progress, created_by_type, created_by_id, - claim_session_id, claim_instance_id, num_runs, last_run, - crdb_internal.job_payload_type(payload) as job_type - FROM system.jobs` - systemJobsIDPredicate = ` WHERE id = $1` systemJobsTypePredicate = ` WHERE job_type = $1` systemJobsStatusPredicate = ` WHERE status = $1` @@ -977,30 +957,16 @@ const ( jobStatus ) -func getInternalSystemJobsQueryFromClusterVersion( - ctx context.Context, version clusterversion.Handle, predicate systemJobsPredicate, -) string { - var baseQuery string - if version.IsActive(ctx, clusterversion.TODO_Delete_V23_1JobInfoTableIsBackfilled) { - baseQuery = SystemJobsAndJobInfoBaseQuery - if predicate == jobID { - baseQuery = systemJobsAndJobInfoBaseQueryWithIDPredicate - } - } else if version.IsActive(ctx, clusterversion.TODO_Delete_V23_1BackfillTypeColumnInJobsTable) { - baseQuery = systemJobsBaseQuery - } else { - baseQuery = oldSystemJobsBaseQuery - } - +func getInternalSystemJobsQuery(predicate systemJobsPredicate) string { switch predicate { case noPredicate: - return baseQuery + return SystemJobsAndJobInfoBaseQuery case jobID: - return baseQuery + systemJobsIDPredicate + return systemJobsAndJobInfoBaseQueryWithIDPredicate + systemJobsIDPredicate case jobType: - return baseQuery + systemJobsTypePredicate + return SystemJobsAndJobInfoBaseQuery + systemJobsTypePredicate case jobStatus: - return baseQuery + systemJobsStatusPredicate + return SystemJobsAndJobInfoBaseQuery + systemJobsStatusPredicate } return "" @@ -1030,28 +996,28 @@ CREATE TABLE crdb_internal.system_jobs ( indexes: []virtualIndex{ { populate: func(ctx context.Context, unwrappedConstraint tree.Datum, p *planner, _ catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) (matched bool, err error) { - q := getInternalSystemJobsQueryFromClusterVersion(ctx, p.execCfg.Settings.Version, jobID) + q := getInternalSystemJobsQuery(jobID) targetType := 
tree.MustBeDInt(unwrappedConstraint) return populateSystemJobsTableRows(ctx, p, addRow, q, targetType) }, }, { populate: func(ctx context.Context, unwrappedConstraint tree.Datum, p *planner, _ catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) (matched bool, err error) { - q := getInternalSystemJobsQueryFromClusterVersion(ctx, p.execCfg.Settings.Version, jobType) + q := getInternalSystemJobsQuery(jobType) targetType := tree.MustBeDString(unwrappedConstraint) return populateSystemJobsTableRows(ctx, p, addRow, q, targetType) }, }, { populate: func(ctx context.Context, unwrappedConstraint tree.Datum, p *planner, _ catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) (matched bool, err error) { - q := getInternalSystemJobsQueryFromClusterVersion(ctx, p.execCfg.Settings.Version, jobStatus) + q := getInternalSystemJobsQuery(jobStatus) targetType := tree.MustBeDString(unwrappedConstraint) return populateSystemJobsTableRows(ctx, p, addRow, q, targetType) }, }, }, populate: func(ctx context.Context, p *planner, db catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) error { - _, err := populateSystemJobsTableRows(ctx, p, addRow, getInternalSystemJobsQueryFromClusterVersion(ctx, p.execCfg.Settings.Version, noPredicate)) + _, err := populateSystemJobsTableRows(ctx, p, addRow, getInternalSystemJobsQuery(noPredicate)) return err }, } @@ -1132,22 +1098,14 @@ const ( // Note that we are querying crdb_internal.system_jobs instead of system.jobs directly. // The former has access control built in and will filter out jobs that the // user is not allowed to see. 
- jobsQFrom = ` FROM crdb_internal.system_jobs` - jobsBackoffArgs = `(SELECT $1::FLOAT AS initial_delay, $2::FLOAT AS max_delay) args` - jobsStatusFilter = ` WHERE status = $3` - oldJobsTypeFilter = ` WHERE crdb_internal.job_payload_type(payload) = $3` - jobsTypeFilter = ` WHERE job_type = $3` - jobsQuery = jobsQSelect + `, last_run::timestamptz, COALESCE(num_runs, 0), ` + jobs.NextRunClause + + jobsQFrom = ` FROM crdb_internal.system_jobs` + jobsBackoffArgs = `(SELECT $1::FLOAT AS initial_delay, $2::FLOAT AS max_delay) args` + jobsStatusFilter = ` WHERE status = $3` + jobsTypeFilter = ` WHERE job_type = $3` + jobsQuery = jobsQSelect + `, last_run::timestamptz, COALESCE(num_runs, 0), ` + jobs.NextRunClause + ` as next_run` + jobsQFrom + ", " + jobsBackoffArgs ) -func getCRDBInternalJobsTableTypeFilter(ctx context.Context, version clusterversion.Handle) string { - if !version.IsActive(ctx, clusterversion.TODO_Delete_V23_1BackfillTypeColumnInJobsTable) { - return oldJobsTypeFilter - } - return jobsTypeFilter -} - // TODO(tbg): prefix with kv_. 
var crdbInternalJobsTable = virtualSchemaTable{ schema: ` @@ -1186,7 +1144,7 @@ CREATE TABLE crdb_internal.jobs ( }, }, { populate: func(ctx context.Context, unwrappedConstraint tree.Datum, p *planner, _ catalog.DatabaseDescriptor, addRow func(...tree.Datum) error) (matched bool, err error) { - q := jobsQuery + getCRDBInternalJobsTableTypeFilter(ctx, p.execCfg.Settings.Version) + q := jobsQuery + jobsTypeFilter targetStatus := tree.MustBeDString(unwrappedConstraint) return makeJobsTableRows(ctx, p, addRow, q, p.execCfg.JobRegistry.RetryInitialDelay(), p.execCfg.JobRegistry.RetryMaxDelay(), targetStatus) }, diff --git a/pkg/sql/create_external_connection.go b/pkg/sql/create_external_connection.go index 26f95614343e..78f234515c62 100644 --- a/pkg/sql/create_external_connection.go +++ b/pkg/sql/create_external_connection.go @@ -16,7 +16,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/cloud" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" "github.com/cockroachdb/cockroach/pkg/sql/privilege" @@ -130,18 +129,17 @@ func (p *planner) createExternalConnection( ex.SetConnectionDetails(*exConn.ConnectionProto()) ex.SetConnectionType(exConn.ConnectionType()) ex.SetOwner(p.User()) - if p.ExecCfg().Settings.Version.IsActive(params.ctx, clusterversion.TODO_Delete_V23_1ExternalConnectionsTableHasOwnerIDColumn) { - row, err := txn.QueryRowEx(params.ctx, `get-user-id`, txn.KV(), - sessiondata.NodeUserSessionDataOverride, - `SELECT user_id FROM system.users WHERE username = $1`, - p.User(), - ) - if err != nil { - return errors.Wrap(err, "failed to get owner ID for External Connection") - } - ownerID := tree.MustBeDOid(row[0]).Oid - ex.SetOwnerID(ownerID) + + row, err := txn.QueryRowEx(params.ctx, `get-user-id`, txn.KV(), + sessiondata.NodeUserSessionDataOverride, + `SELECT user_id FROM system.users 
WHERE username = $1`, + p.User(), + ) + if err != nil { + return errors.Wrap(err, "failed to get owner ID for External Connection") } + ownerID := tree.MustBeDOid(row[0]).Oid + ex.SetOwnerID(ownerID) // Create the External Connection and persist it in the // `system.external_connections` table. diff --git a/pkg/sql/delegate/BUILD.bazel b/pkg/sql/delegate/BUILD.bazel index d354fd28b5ab..71890d373d02 100644 --- a/pkg/sql/delegate/BUILD.bazel +++ b/pkg/sql/delegate/BUILD.bazel @@ -40,7 +40,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/delegate", visibility = ["//visibility:public"], deps = [ - "//pkg/clusterversion", "//pkg/jobs", "//pkg/jobs/jobspb", "//pkg/security/username", diff --git a/pkg/sql/delegate/show_changefeed_jobs.go b/pkg/sql/delegate/show_changefeed_jobs.go index d0b18cdf774b..30450f8ba54e 100644 --- a/pkg/sql/delegate/show_changefeed_jobs.go +++ b/pkg/sql/delegate/show_changefeed_jobs.go @@ -13,8 +13,6 @@ package delegate import ( "fmt" - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/sqltelemetry" ) @@ -30,9 +28,6 @@ func (d *delegator) delegateShowChangefeedJobs(n *tree.ShowChangefeedJobs) (tree crdb_internal.system_jobs WHERE job_type = 'CHANGEFEED' ` - queryTargetPre23_1 = ` - system.jobs - ` baseSelectClause = ` WITH payload AS ( SELECT @@ -78,14 +73,7 @@ FROM INNER JOIN payload ON id = job_id` ) - use23_1 := d.evalCtx.Settings.Version.IsActive(d.ctx, clusterversion.TODO_Delete_V23_1BackfillTypeColumnInJobsTable) - - var selectClause string - if use23_1 { - selectClause = fmt.Sprintf(baseSelectClause, queryTarget23_1) - } else { - selectClause = fmt.Sprintf(baseSelectClause, queryTargetPre23_1) - } + selectClause := fmt.Sprintf(baseSelectClause, queryTarget23_1) var whereClause, orderbyClause string if n.Jobs == nil { @@ -96,17 +84,9 @@ FROM // The "ORDER BY" clause 
below exploits the fact that all // running jobs have finished = NULL. orderbyClause = `ORDER BY COALESCE(finished, now()) DESC, started DESC` - if !use23_1 { - whereClause = fmt.Sprintf("WHERE job_type = '%s'", jobspb.TypeChangefeed) - } } else { // Limit the jobs displayed to the select statement in n.Jobs. - if use23_1 { - whereClause = fmt.Sprintf(`WHERE job_id in (%s)`, n.Jobs.String()) - } else { - whereClause = fmt.Sprintf("WHERE job_type = '%s' AND job_id in (%s)", - jobspb.TypeChangefeed, n.Jobs.String()) - } + whereClause = fmt.Sprintf(`WHERE job_id in (%s)`, n.Jobs.String()) } sqlStmt := fmt.Sprintf("%s %s %s", selectClause, whereClause, orderbyClause) diff --git a/pkg/sql/gcjob/gcjobnotifier/BUILD.bazel b/pkg/sql/gcjob/gcjobnotifier/BUILD.bazel index 4aef9ec0be2a..70fdab26f8bf 100644 --- a/pkg/sql/gcjob/gcjobnotifier/BUILD.bazel +++ b/pkg/sql/gcjob/gcjobnotifier/BUILD.bazel @@ -6,7 +6,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/gcjob/gcjobnotifier", visibility = ["//visibility:public"], deps = [ - "//pkg/clusterversion", "//pkg/config", "//pkg/gossip", "//pkg/keys", diff --git a/pkg/sql/gcjob/gcjobnotifier/notifier.go b/pkg/sql/gcjob/gcjobnotifier/notifier.go index 371c9d647e32..7b061dfa1add 100644 --- a/pkg/sql/gcjob/gcjobnotifier/notifier.go +++ b/pkg/sql/gcjob/gcjobnotifier/notifier.go @@ -17,7 +17,6 @@ package gcjobnotifier import ( "context" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/gossip" "github.com/cockroachdb/cockroach/pkg/keys" @@ -70,8 +69,6 @@ func (n *Notifier) SystemConfigProvider() config.SystemConfigProvider { func noopFunc() {} // AddNotifyee should be called prior to the first reading of the system config. -// The returned channel will also receive a notification if the cluster version -// UseDelRangeInGCJob is activated. 
// // TODO(lucy,ajwerner): Currently we're calling refreshTables on every zone // config update to any table. We should really be only updating a cached @@ -144,22 +141,10 @@ func (n *Notifier) Start(ctx context.Context) { func (n *Notifier) run(_ context.Context) { defer n.markStopped() systemConfigUpdateCh, _ := n.provider.RegisterSystemConfigChannel() - var haveNotified syncutil.AtomicBool - versionSettingChanged := make(chan struct{}, 1) - versionBeingWaited := clusterversion.ByKey(clusterversion.TODO_Delete_V23_1_UseDelRangeInGCJob) - n.settings.Version.SetOnChange(func(ctx context.Context, newVersion clusterversion.ClusterVersion) { - if !haveNotified.Get() && - versionBeingWaited.LessEq(newVersion.Version) && - !haveNotified.Swap(true) { - versionSettingChanged <- struct{}{} - } - }) for { select { case <-n.stopper.ShouldQuiesce(): return - case <-versionSettingChanged: - n.notify() case <-systemConfigUpdateCh: n.maybeNotify() } @@ -188,12 +173,6 @@ func (n *Notifier) maybeNotify() { n.notifyLocked() } -func (n *Notifier) notify() { - n.mu.Lock() - defer n.mu.Unlock() - n.notifyLocked() -} - func (n *Notifier) notifyLocked() { for c := range n.mu.notifyees { select { diff --git a/pkg/sql/grant_revoke_system.go b/pkg/sql/grant_revoke_system.go index 1f6b0a66f7cd..8e6f54ea7be8 100644 --- a/pkg/sql/grant_revoke_system.go +++ b/pkg/sql/grant_revoke_system.go @@ -15,14 +15,11 @@ import ( "fmt" "github.com/cockroachdb/cockroach/pkg/cloud/externalconn" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catprivilege" "github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" - "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" 
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice" "github.com/cockroachdb/cockroach/pkg/sql/privilege" "github.com/cockroachdb/cockroach/pkg/sql/sem/catconstants" @@ -38,27 +35,6 @@ import ( func (n *changeNonDescriptorBackedPrivilegesNode) ReadingOwnWrites() {} func (n *changeNonDescriptorBackedPrivilegesNode) startExec(params runParams) error { - privilegesTableHasUserIDCol := params.p.ExecCfg().Settings.Version.IsActive(params.ctx, - clusterversion.TODO_Delete_V23_1SystemPrivilegesTableHasUserIDColumn) - if !params.p.ExecCfg().Settings.Version.IsActive( - params.ctx, - clusterversion.TODO_Delete_V23_1AllowNewSystemPrivileges, - ) { - if n.desiredprivs.Contains(privilege.MODIFYSQLCLUSTERSETTING) { - return pgerror.New(pgcode.FeatureNotSupported, "upgrade must be finalized before using MODIFYSQLCLUSTERSETTING system privilege") - } - if n.desiredprivs.Contains(privilege.VIEWJOB) { - return pgerror.New(pgcode.FeatureNotSupported, "upgrade must be finalized before using VIEWJOB system privilege") - } - if n.desiredprivs.Contains(privilege.REPLICATION) { - return pgerror.New(pgcode.FeatureNotSupported, "upgrade must be finalized before using REPLICATION system privilege") - } - if n.desiredprivs.Contains(privilege.MANAGEVIRTUALCLUSTER) { - return pgerror.New(pgcode.FeatureNotSupported, "upgrade must be finalized before using MANAGEVIRTUALCLUSTER system privilege") - } - - } - if err := params.p.preChangePrivilegesValidation(params.ctx, n.grantees, n.withGrantOption, n.isGrant); err != nil { return err } @@ -84,11 +60,7 @@ func (n *changeNonDescriptorBackedPrivilegesNode) startExec(params runParams) er deleteStmt := fmt.Sprintf( `DELETE FROM system.%s VALUES WHERE username = $1 AND path = $2`, catconstants.SystemPrivilegeTableName) - upsertStmt := fmt.Sprintf( - `UPSERT INTO system.%s (username, path, privileges, grant_options) VALUES ($1, $2, $3, $4)`, - catconstants.SystemPrivilegeTableName) - if privilegesTableHasUserIDCol { - upsertStmt = fmt.Sprintf(` + 
upsertStmt := fmt.Sprintf(` UPSERT INTO system.%s (username, path, privileges, grant_options, user_id) VALUES ($1, $2, $3, $4, ( SELECT CASE $1 @@ -96,8 +68,7 @@ VALUES ($1, $2, $3, $4, ( ELSE (SELECT user_id FROM system.users WHERE username = $1) END ))`, - catconstants.SystemPrivilegeTableName, username.PublicRole, username.PublicRoleID) - } + catconstants.SystemPrivilegeTableName, username.PublicRole, username.PublicRoleID) if n.isGrant { // Privileges are valid, write them to the system.privileges table. diff --git a/pkg/sql/grant_role.go b/pkg/sql/grant_role.go index 78d3a6024594..542b074fcaf4 100644 --- a/pkg/sql/grant_role.go +++ b/pkg/sql/grant_role.go @@ -14,7 +14,6 @@ import ( "context" "strings" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/sql/decodeusername" "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode" @@ -176,17 +175,13 @@ func (p *planner) GrantRoleNode(ctx context.Context, n *tree.GrantRole) (*GrantR func (n *GrantRoleNode) startExec(params runParams) error { var rowsAffected int - roleMembersHasIDs := params.p.ExecCfg().Settings.Version.IsActive(params.ctx, clusterversion.TODO_Delete_V23_1RoleMembersTableHasIDColumns) // Add memberships. Existing memberships are allowed. // If admin option is false, we do not remove it from existing memberships. - memberStmt := `INSERT INTO system.role_members ("role", "member", "isAdmin") VALUES ($1, $2, $3) ON CONFLICT ("role", "member")` - if roleMembersHasIDs { - memberStmt = ` + memberStmt := ` INSERT INTO system.role_members ("role", "member", "isAdmin", role_id, member_id) VALUES ($1, $2, $3, (SELECT user_id FROM system.users WHERE username = $1), (SELECT user_id FROM system.users WHERE username = $2)) ON CONFLICT ("role", "member")` - } if n.adminOption { // admin option: true, set "isAdmin" even if the membership exists. 
memberStmt += ` DO UPDATE SET "isAdmin" = true` diff --git a/pkg/sql/schema_change_plan_node.go b/pkg/sql/schema_change_plan_node.go index 1b94ed21330f..126548587746 100644 --- a/pkg/sql/schema_change_plan_node.go +++ b/pkg/sql/schema_change_plan_node.go @@ -17,10 +17,8 @@ import ( "github.com/cockroachdb/cockroach/pkg/base" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server/telemetry" - "github.com/cockroachdb/cockroach/pkg/sql/catalog/catalogkeys" "github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" @@ -117,64 +115,6 @@ func (p *planner) newSchemaChangeBuilderDependencies(statements []string) scbuil ) } -// waitForDescriptorIDGeneratorMigration polls the system.descriptor table (in -// separate transactions) until the descriptor_id_seq record is present, which -// indicates that the system tenant's descriptor ID generator has successfully -// been migrated. -func (p *planner) waitForDescriptorIDGeneratorMigration(ctx context.Context) error { - // Drop all leases and locks due to the current transaction, and, in the - // process, abort the transaction. - p.Descriptors().ReleaseAll(ctx) - if err := p.txn.Rollback(ctx); err != nil { - return err - } - - // Wait for the system.descriptor_id_gen descriptor to appear. 
- start := timeutil.Now() - logEvery := log.Every(30 * time.Second) - blocked := true - for r := retry.StartWithCtx(ctx, base.DefaultRetryOptions()); blocked && r.Next(); { - if knobs := p.ExecCfg().TenantTestingKnobs; knobs != nil { - if fn := knobs.BeforeCheckingForDescriptorIDSequence; fn != nil { - fn(ctx) - } - } - now := p.ExecCfg().Clock.Now() - if logEvery.ShouldLog() { - log.Infof( - ctx, - "waiting for system tenant descriptor ID generator migration, waited %v so far", - timeutil.Since(start), - ) - } - if err := p.ExecCfg().InternalDB.DescsTxn(ctx, func( - ctx context.Context, txn descs.Txn, - ) error { - kvTxn := txn.KV() - if err := kvTxn.SetFixedTimestamp(ctx, now); err != nil { - return err - } - k := catalogkeys.MakeDescMetadataKey(p.ExecCfg().Codec, keys.DescIDSequenceID) - result, err := txn.KV().Get(ctx, k) - if err != nil { - return err - } - if result.Exists() { - blocked = false - } - return nil - }); err != nil { - return err - } - } - log.Infof( - ctx, - "done waiting for system tenant descriptor ID generator migration after %v", - timeutil.Since(start), - ) - return nil -} - // waitForDescriptorSchemaChanges polls the specified descriptor (in separate // transactions) until all its ongoing schema changes have completed. 
// Internally, this call will restart the planner's underlying transaction and diff --git a/pkg/sql/schema_changer.go b/pkg/sql/schema_changer.go index 6fbd04be4769..88dac57819ab 100644 --- a/pkg/sql/schema_changer.go +++ b/pkg/sql/schema_changer.go @@ -18,7 +18,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/config" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" @@ -1369,7 +1368,7 @@ func (sc *SchemaChanger) createIndexGCJobWithDropTime( gcJobRecord := CreateGCJobRecord( jobDesc, sc.job.Payload().UsernameProto.Decode(), indexGCDetails, - !sc.settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1_UseDelRangeInGCJob), + false, /* useLegacyGCJob */ ) jobID := sc.jobRegistry.MakeJobID() if _, err := sc.jobRegistry.CreateJobWithTxn(ctx, gcJobRecord, jobID, txn); err != nil { diff --git a/pkg/sql/schemachanger/scdecomp/decomp.go b/pkg/sql/schemachanger/scdecomp/decomp.go index ca13c9b2ceb0..4eae531a2aa6 100644 --- a/pkg/sql/schemachanger/scdecomp/decomp.go +++ b/pkg/sql/schemachanger/scdecomp/decomp.go @@ -641,15 +641,7 @@ func (w *walkCtx) walkIndex(tbl catalog.TableDescriptor, idx catalog.Index) { if idx.IsPartial() { pp, err := w.newExpression(idx.GetPredicate()) onErrPanic(err) - if w.clusterVersion.IsActive(clusterversion.TODO_Delete_V23_1_SchemaChangerDeprecatedIndexPredicates) { - sec.EmbeddedExpr = pp - } else { - w.ev(scpb.Status_PUBLIC, &scpb.SecondaryIndexPartial{ - TableID: index.TableID, - IndexID: index.IndexID, - Expression: *pp, - }) - } + sec.EmbeddedExpr = pp } w.ev(idxStatus, sec) } diff --git a/pkg/sql/schemachanger/scexec/gc_jobs.go b/pkg/sql/schemachanger/scexec/gc_jobs.go index 097593adef44..7bf4d18802f5 100644 --- a/pkg/sql/schemachanger/scexec/gc_jobs.go +++ b/pkg/sql/schemachanger/scexec/gc_jobs.go @@ -214,6 +214,8 @@ func (gj gcJobs) sort() { // createGCJobRecord creates the job 
record for a GC job, setting some // properties which are common for all GC jobs. +// +// TODO(radu): we should remove useLegacyJob, it is no longer used. func createGCJobRecord( id jobspb.JobID, description string, diff --git a/pkg/sql/schemachanger/scpb/migration.go b/pkg/sql/schemachanger/scpb/migration.go index 33b13bd3bc43..8dd2202cfa96 100644 --- a/pkg/sql/schemachanger/scpb/migration.go +++ b/pkg/sql/schemachanger/scpb/migration.go @@ -20,11 +20,7 @@ import ( // HasDeprecatedElements returns if the target contains any element marked // for deprecation. func HasDeprecatedElements(version clusterversion.ClusterVersion, target Target) bool { - if version.IsActive(clusterversion.TODO_Delete_V23_1_SchemaChangerDeprecatedIndexPredicates) && - target.GetSecondaryIndexPartial() != nil { - return true - } - return false + return target.GetSecondaryIndexPartial() != nil } // migrateTargetElement migrates an individual target at a given index. diff --git a/pkg/sql/schemachanger/screl/scalars.go b/pkg/sql/schemachanger/screl/scalars.go index a790efb14f44..62329060f518 100644 --- a/pkg/sql/schemachanger/screl/scalars.go +++ b/pkg/sql/schemachanger/screl/scalars.go @@ -142,9 +142,5 @@ func VersionSupportsElementUse(el scpb.Element, version clusterversion.ClusterVe // MaxElementVersion returns the maximum cluster version at which an element // may be used. 
func MaxElementVersion(el scpb.Element) (version clusterversion.Key, exists bool) { - switch el.(type) { - case *scpb.SecondaryIndexPartial: - return clusterversion.TODO_Delete_V23_1_SchemaChangerDeprecatedIndexPredicates, true /* exists */ - } - return version, false /* exists */ + return 0, false /* exists */ } diff --git a/pkg/sql/show_stats.go b/pkg/sql/show_stats.go index daaea1a59582..f44be25e0ff1 100644 --- a/pkg/sql/show_stats.go +++ b/pkg/sql/show_stats.go @@ -13,11 +13,9 @@ package sql import ( "context" encjson "encoding/json" - "fmt" "sort" "time" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/sql/catalog" "github.com/cockroachdb/cockroach/pkg/sql/catalog/colinfo" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" @@ -34,17 +32,6 @@ import ( ) var showTableStatsColumns = colinfo.ResultColumns{ - {Name: "statistics_name", Typ: types.String}, - {Name: "column_names", Typ: types.StringArray}, - {Name: "created", Typ: types.TimestampTZ}, - {Name: "row_count", Typ: types.Int}, - {Name: "distinct_count", Typ: types.Int}, - {Name: "null_count", Typ: types.Int}, - {Name: "avg_size", Typ: types.Int}, - {Name: "histogram_id", Typ: types.Int}, -} - -var showTableStatsColumnsPartialStatisticsVer = colinfo.ResultColumns{ {Name: "statistics_name", Typ: types.String}, {Name: "column_names", Typ: types.StringArray}, {Name: "created", Typ: types.TimestampTZ}, @@ -99,11 +86,7 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p if err := p.CheckAnyPrivilege(ctx, desc); err != nil { return nil, err } - partialStatsVerActive := p.ExtendedEvalContext().ExecCfg.Settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1AddPartialStatisticsColumns) - columns := showTableStatsColumnsPartialStatisticsVer - if !partialStatsVerActive { - columns = showTableStatsColumns - } + columns := showTableStatsColumns if n.UsingJSON { columns = showTableStatsJSONColumns } @@ -118,16 +101,7 @@ func (p 
*planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p // "handle" which can be used with SHOW HISTOGRAM. // TODO(yuzefovich): refactor the code to use the iterator API // (currently it is not possible due to a panic-catcher below). - var partialPredicateCol string - var fullStatisticIDCol string - if partialStatsVerActive { - partialPredicateCol = ` -"partialPredicate",` - fullStatisticIDCol = ` -,"fullStatisticID" -` - } - stmt := fmt.Sprintf(`SELECT + stmt := `SELECT "tableID", "statisticID", name, @@ -137,12 +111,12 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p "distinctCount", "nullCount", "avgSize", - %s - histogram - %s + "partialPredicate", + histogram, + "fullStatisticID" FROM system.table_statistics WHERE "tableID" = $1 - ORDER BY "createdAt", "columnIDs", "statisticID"`, partialPredicateCol, fullStatisticIDCol) + ORDER BY "createdAt", "columnIDs", "statisticID"` // There is a privilege check above to make sure the user has any // privilege on the table being inspected. We use the node user to execute @@ -178,10 +152,6 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p histIdx := histogramIdx nCols := numCols - if !partialStatsVerActive { - histIdx = histogramIdx - 1 - nCols = numCols - 2 - } // Guard against crashes in the code below (e.g. #56356). defer func() { @@ -211,7 +181,7 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p if ignoreStatsRowWithDroppedColumn { continue } - stat, err := stats.NewTableStatisticProto(row, partialStatsVerActive) + stat, err := stats.NewTableStatisticProto(row) if err != nil { return nil, err } @@ -235,7 +205,7 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p statsList = append(merged, statsList...) // Iterate in reverse order to match the ORDER BY "columnIDs". 
for i := len(merged) - 1; i >= 0; i-- { - mergedRow, err := tableStatisticProtoToRow(&merged[i].TableStatisticProto, partialStatsVerActive) + mergedRow, err := tableStatisticProtoToRow(&merged[i].TableStatisticProto) if err != nil { return nil, err } @@ -248,7 +218,7 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p forecastRows := make([]tree.Datums, 0, len(forecasts)) // Iterate in reverse order to match the ORDER BY "columnIDs". for i := len(forecasts) - 1; i >= 0; i-- { - forecastRow, err := tableStatisticProtoToRow(&forecasts[i].TableStatisticProto, partialStatsVerActive) + forecastRow, err := tableStatisticProtoToRow(&forecasts[i].TableStatisticProto) if err != nil { return nil, err } @@ -296,7 +266,7 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p if r[nameIdx] != tree.DNull { statsRow.Name = string(*r[nameIdx].(*tree.DString)) } - if partialStatsVerActive && r[partialPredicateIdx] != tree.DNull && r[fullStatisticIDIdx] != tree.DNull { + if r[partialPredicateIdx] != tree.DNull && r[fullStatisticIDIdx] != tree.DNull { statsRow.PartialPredicate = string(*r[partialPredicateIdx].(*tree.DString)) statsRow.FullStatisticID = (uint64)(*r[fullStatisticIDIdx].(*tree.DInt)) } @@ -358,31 +328,17 @@ func (p *planner) ShowTableStats(ctx context.Context, n *tree.ShowTableStats) (p histogramID = r[statIDIdx] } - var res tree.Datums - if partialStatsVerActive { - res = tree.Datums{ - r[nameIdx], - colNames, - createdAtTZ, - r[rowCountIdx], - r[distinctCountIdx], - r[nullCountIdx], - r[avgSizeIdx], - r[partialPredicateIdx], - histogramID, - r[fullStatisticIDIdx], - } - } else { - res = tree.Datums{ - r[nameIdx], - colNames, - createdAtTZ, - r[rowCountIdx], - r[distinctCountIdx], - r[nullCountIdx], - r[avgSizeIdx], - histogramID, - } + res := tree.Datums{ + r[nameIdx], + colNames, + createdAtTZ, + r[rowCountIdx], + r[distinctCountIdx], + r[nullCountIdx], + r[avgSizeIdx], + r[partialPredicateIdx], + 
histogramID, + r[fullStatisticIDIdx], } if _, err := v.rows.AddRow(ctx, res); err != nil { @@ -405,9 +361,7 @@ func statColumnString(desc catalog.TableDescriptor, colID tree.Datum) (colName s return colDesc.GetName(), nil } -func tableStatisticProtoToRow( - stat *stats.TableStatisticProto, partialStatsVerActive bool, -) (tree.Datums, error) { +func tableStatisticProtoToRow(stat *stats.TableStatisticProto) (tree.Datums, error) { name := tree.DNull if stat.Name != "" { name = tree.NewDString(stat.Name) @@ -436,10 +390,7 @@ func tableStatisticProtoToRow( tree.NewDInt(tree.DInt(stat.DistinctCount)), tree.NewDInt(tree.DInt(stat.NullCount)), tree.NewDInt(tree.DInt(stat.AvgSize)), - } - - if partialStatsVerActive { - row = append(row, partialPredicate) + partialPredicate, } if stat.HistogramData == nil { @@ -451,8 +402,6 @@ func tableStatisticProtoToRow( } row = append(row, tree.NewDBytes(tree.DBytes(histogram))) } - if partialStatsVerActive { - row = append(row, FullStatisticID) - } + row = append(row, FullStatisticID) return row, nil } diff --git a/pkg/sql/sqlinstance/instancestorage/BUILD.bazel b/pkg/sql/sqlinstance/instancestorage/BUILD.bazel index ab21d81a7af6..f5fc7b4f3229 100644 --- a/pkg/sql/sqlinstance/instancestorage/BUILD.bazel +++ b/pkg/sql/sqlinstance/instancestorage/BUILD.bazel @@ -13,7 +13,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/base", - "//pkg/clusterversion", "//pkg/keys", "//pkg/kv", "//pkg/kv/kvclient/rangefeed", @@ -45,7 +44,6 @@ go_library( "//pkg/util/syncutil", "//pkg/util/timeutil", "@com_github_cockroachdb_errors//:errors", - "@com_github_cockroachdb_logtags//:logtags", ], ) diff --git a/pkg/sql/sqlinstance/instancestorage/instancecache.go b/pkg/sql/sqlinstance/instancestorage/instancecache.go index 818de305ecb1..a787f519c774 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancecache.go +++ b/pkg/sql/sqlinstance/instancestorage/instancecache.go @@ -13,20 +13,15 @@ package instancestorage import ( "context" 
"strings" - "sync" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/util/grpcutil" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" - "github.com/cockroachdb/cockroach/pkg/util/stop" "github.com/cockroachdb/cockroach/pkg/util/syncutil" - "github.com/cockroachdb/logtags" ) // instanceCache represents a cache over the contents of sql_instances table. @@ -217,118 +212,6 @@ type migrationCache struct { } } -// onVersionReached installs a callback that runs once the version is reached. -// If the version was reached before installing the callback, the callback is run -// synchronously. -func onVersionReached( - ctx context.Context, settings *cluster.Settings, expect clusterversion.Key, do func(), -) { - var once sync.Once - - onVersionChanged := func(rpcContext context.Context, version clusterversion.ClusterVersion) { - if !version.IsActive(expect) { - return - } - once.Do(do) - } - - settings.Version.SetOnChange(onVersionChanged) - - onVersionChanged(ctx, settings.Version.ActiveVersion(ctx)) -} - -// newMigrationCache uses the oldCache and newCache functions to construct -// instanceCaches. The cache registers a hook with the setting and switches -// from the old implementation to the new implementation when the version -// changes to TODO_Delete_V23_1_SystemRbrReadNew. -func newMigrationCache( - ctx context.Context, - stopper *stop.Stopper, - settings *cluster.Settings, - oldCache, newCache func(ctx context.Context) (instanceCache, error), -) (instanceCache, error) { - c := &migrationCache{} - - // oldReady is signaled when the old cache finishes starting. 
- oldReady := make(chan error, 1) - err := stopper.RunAsyncTask(ctx, "start-old-cache-implementation", func(ctx context.Context) { - cache, err := oldCache(ctx) - if err != nil { - oldReady <- err - } - - onVersionReached(ctx, settings, clusterversion.TODO_Delete_V23_1_SystemRbrReadNew, func() { - // Once the read new version gate is reached, close the original - // cache in order to clean up resources and prevent reading updates - // when the original index is deleted. - cache.Close() - }) - - // If the old cache is already stale, do not return it. - if settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1_SystemRbrReadNew) { - return - } - - c.mu.Lock() - defer c.mu.Unlock() - - // In the case of a race condition, if the new cache initialized first, - // do not ovewrwrite it. - if c.mu.cache != nil { - return - } - - c.mu.cache = cache - oldReady <- nil - }) - if err != nil { - oldReady <- err - } - - // newReady is signaled when the new cache finishes starting. - newReady := make(chan error, 1) - newCacheCtx := logtags.AddTags(context.Background(), logtags.FromContext(ctx)) - onVersionReached(ctx, settings, clusterversion.TODO_Delete_V23_1_SystemRbrReadNew, func() { - err := stopper.RunAsyncTask(newCacheCtx, "start-new-cache-implementation", func(ctx context.Context) { - // Rebuild the cancel signal since the goroutine has a background - // context. 
- ctx, cancel := stopper.WithCancelOnQuiesce(ctx) - defer cancel() - - log.Ops.Info(ctx, "starting new system.sql_instance cache") - - cache, err := newCache(ctx) - if err != nil { - log.Ops.Errorf(ctx, "error starting the new system.sql_instance cache: %s", err) - newReady <- err - return - } - - log.Ops.Info(ctx, "new system.sql_instance cache is ready") - - c.mu.Lock() - defer c.mu.Unlock() - - c.mu.cache = cache - newReady <- nil - }) - if err != nil { - log.Ops.Errorf(ctx, "unable to start new system.sql_instance cache: %s", err) - newReady <- err - } - }) - - // Wait for one of the caches to be ready or fail to start. - select { - case err = <-newReady: - case err = <-oldReady: - } - if err != nil { - return nil, err - } - return c, nil -} - func (c *migrationCache) Close() { c.mu.Lock() defer c.mu.Unlock() diff --git a/pkg/sql/sqlinstance/instancestorage/instancecache_test.go b/pkg/sql/sqlinstance/instancestorage/instancecache_test.go index a9942408aba0..defc27bcd07c 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancecache_test.go +++ b/pkg/sql/sqlinstance/instancestorage/instancecache_test.go @@ -91,7 +91,7 @@ func TestRangeFeed(t *testing.T) { require.NoError(t, storage.generateAvailableInstanceRows(ctx, [][]byte{enum.One}, tenant.Clock().Now().Add(int64(time.Minute), 0))) - feed, err := storage.newInstanceCache(ctx, tenant.AppStopper()) + feed, err := storage.newInstanceCache(ctx) require.NoError(t, err) require.NotNil(t, feed) defer feed.Close() @@ -104,7 +104,7 @@ func TestRangeFeed(t *testing.T) { t.Run("auth_error", func(t *testing.T) { storage := newStorage(t, keys.SystemSQLCodec) - _, err := storage.newInstanceCache(ctx, tenant.AppStopper()) + _, err := storage.newInstanceCache(ctx) require.True(t, grpcutil.IsAuthError(err), "expected %+v to be an auth error", err) }) @@ -114,7 +114,7 @@ func TestRangeFeed(t *testing.T) { ctx, cancel := context.WithCancel(ctx) cancel() - _, err := storage.newInstanceCache(ctx, tenant.AppStopper()) + _, err := 
storage.newInstanceCache(ctx) require.Error(t, err) require.ErrorIs(t, err, ctx.Err()) }) diff --git a/pkg/sql/sqlinstance/instancestorage/instancereader.go b/pkg/sql/sqlinstance/instancestorage/instancereader.go index 8476e4bf034f..85e3e773e8d8 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancereader.go +++ b/pkg/sql/sqlinstance/instancestorage/instancereader.go @@ -85,7 +85,7 @@ func (r *Reader) Start(ctx context.Context, self sqlinstance.InstanceInfo) { // Make sure that the reader shuts down gracefully. ctx, cancel := r.stopper.WithCancelOnQuiesce(ctx) err := r.stopper.RunAsyncTask(ctx, "start-instance-reader", func(ctx context.Context) { - cache, err := r.storage.newInstanceCache(ctx, r.stopper) + cache, err := r.storage.newInstanceCache(ctx) if err != nil { r.setInitialScanDone(err) return diff --git a/pkg/sql/sqlinstance/instancestorage/instancestorage.go b/pkg/sql/sqlinstance/instancestorage/instancestorage.go index b1f6b725f993..68391b188740 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancestorage.go +++ b/pkg/sql/sqlinstance/instancestorage/instancestorage.go @@ -19,7 +19,6 @@ import ( "time" "github.com/cockroachdb/cockroach/pkg/base" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" @@ -79,8 +78,7 @@ var errNoPreallocatedRows = errors.New("no preallocated rows") type Storage struct { db *kv.DB slReader sqlliveness.Reader - oldRowCodec rowCodec - newRowCodec rowCodec + rowCodec rowCodec settings *cluster.Settings settingsWatch *settingswatcher.SettingsWatcher clock *hlc.Clock @@ -125,8 +123,7 @@ func NewTestingStorage( ) *Storage { s := &Storage{ db: db, - newRowCodec: makeRowCodec(codec, table, true), - oldRowCodec: makeRowCodec(codec, table, false), + rowCodec: makeRowCodec(codec, table, true), slReader: slReader, clock: clock, f: f, @@ -186,25 +183,18 @@ func (s *Storage) 
ReleaseInstance( ctx context.Context, sessionID sqlliveness.SessionID, instanceID base.SQLInstanceID, ) error { return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - region, _, err := slstorage.UnsafeDecodeSessionID(sessionID) if err != nil { return errors.Wrap(err, "unable to determine region for sql_instance") } - readCodec := s.getReadCodec(&version) - - key := readCodec.encodeKey(region, instanceID) + key := s.rowCodec.encodeKey(region, instanceID) kv, err := txn.Get(ctx, key) if err != nil { return err } - instance, err := readCodec.decodeRow(kv.Key, kv.Value) + instance, err := s.rowCodec.decodeRow(kv.Key, kv.Value) if err != nil { return err } @@ -217,21 +207,12 @@ func (s *Storage) ReleaseInstance( batch := txn.NewBatch() - value, err := readCodec.encodeAvailableValue() + value, err := s.rowCodec.encodeAvailableValue() if err != nil { return err } batch.Put(key, value) - if dualCodec := s.getDualWriteCodec(&version); dualCodec != nil { - dualKey := dualCodec.encodeKey(region, instanceID) - dualValue, err := dualCodec.encodeAvailableValue() - if err != nil { - return err - } - batch.Put(dualKey, dualValue) - } - return txn.CommitInBatch(ctx, batch) }) } @@ -271,11 +252,6 @@ func (s *Storage) createInstanceRow( return err } - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - // Set the transaction deadline to the session expiration to ensure // transaction commits before the session expires. err = txn.UpdateDeadline(ctx, sessionExpiration) @@ -294,7 +270,7 @@ func (s *Storage) createInstanceRow( } else { // Try to retrieve an available instance ID. This blocks until one // is available. 
- availableID, err = s.getAvailableInstanceIDForRegion(ctx, region, txn, &version) + availableID, err = s.getAvailableInstanceIDForRegion(ctx, region, txn) if err != nil { return err } @@ -302,20 +278,11 @@ func (s *Storage) createInstanceRow( b := txn.NewBatch() - rowCodec := s.getReadCodec(&version) - value, err := rowCodec.encodeValue(rpcAddr, sqlAddr, sessionID, locality, binaryVersion) + value, err := s.rowCodec.encodeValue(rpcAddr, sqlAddr, sessionID, locality, binaryVersion) if err != nil { return err } - b.Put(rowCodec.encodeKey(region, availableID), value) - - if dualCodec := s.getDualWriteCodec(&version); dualCodec != nil { - dualValue, err := dualCodec.encodeValue(rpcAddr, sqlAddr, sessionID, locality, binaryVersion) - if err != nil { - return err - } - b.Put(dualCodec.encodeKey(region, availableID), dualValue) - } + b.Put(s.rowCodec.encodeKey(region, availableID), value) return txn.CommitInBatch(ctx, b) }); err != nil { @@ -372,28 +339,17 @@ func (s *Storage) createInstanceRow( // newInstanceCache constructs an instanceCache backed by a range feed over the // sql_instances table. newInstanceCache blocks until the initial scan is // complete. 
-func (s *Storage) newInstanceCache( - ctx context.Context, stopper *stop.Stopper, -) (instanceCache, error) { - if !s.settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1_SystemRbrReadNew) { - oldCache := func(ctx context.Context) (instanceCache, error) { - return newRangeFeedCache(ctx, s.oldRowCodec, s.clock, s.f) - } - newCache := func(ctx context.Context) (instanceCache, error) { - return newRangeFeedCache(ctx, s.newRowCodec, s.clock, s.f) - } - return newMigrationCache(ctx, stopper, s.settings, oldCache, newCache) - } - return newRangeFeedCache(ctx, s.newRowCodec, s.clock, s.f) +func (s *Storage) newInstanceCache(ctx context.Context) (instanceCache, error) { + return newRangeFeedCache(ctx, s.rowCodec, s.clock, s.f) } // getAvailableInstanceIDForRegion retrieves an available instance ID for the // current region associated with Storage s, and returns errNoPreallocatedRows // if there are no available rows. func (s *Storage) getAvailableInstanceIDForRegion( - ctx context.Context, region []byte, txn *kv.Txn, version *settingswatcher.VersionGuard, + ctx context.Context, region []byte, txn *kv.Txn, ) (base.SQLInstanceID, error) { - rows, err := s.getInstanceRows(ctx, region, version, txn, lock.WaitPolicy_SkipLocked) + rows, err := s.getInstanceRows(ctx, region, txn, lock.WaitPolicy_SkipLocked) if err != nil { return base.SQLInstanceID(0), err } @@ -430,11 +386,8 @@ func (s *Storage) reclaimRegion(ctx context.Context, region []byte) error { // never become active again. 
var instances []instancerow if err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - instances, err = s.getInstanceRows(ctx, region, &version, txn, lock.WaitPolicy_Block) + var err error + instances, err = s.getInstanceRows(ctx, region, txn, lock.WaitPolicy_Block) return err }); err != nil { return err @@ -453,42 +406,23 @@ func (s *Storage) reclaimRegion(ctx context.Context, region []byte) error { // Reclaim and delete rows target := int(PreallocatedCount.Get(&s.settings.SV)) return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - - instances, err := s.getInstanceRows(ctx, region, &version, txn, lock.WaitPolicy_Block) + instances, err := s.getInstanceRows(ctx, region, txn, lock.WaitPolicy_Block) if err != nil { return err } toReclaim, toDelete := idsToReclaim(target, instances, isExpired) - readCodec := s.getReadCodec(&version) - dualCodec := s.getDualWriteCodec(&version) - writeBatch := txn.NewBatch() for _, instance := range toReclaim { - availableValue, err := readCodec.encodeAvailableValue() + availableValue, err := s.rowCodec.encodeAvailableValue() if err != nil { return err } - writeBatch.Put(readCodec.encodeKey(region, instance), availableValue) - - if dualCodec != nil { - dualValue, err := dualCodec.encodeAvailableValue() - if err != nil { - return err - } - writeBatch.Put(dualCodec.encodeKey(region, instance), dualValue) - } + writeBatch.Put(s.rowCodec.encodeKey(region, instance), availableValue) } for _, instance := range toDelete { - writeBatch.Del(readCodec.encodeKey(region, instance)) - if dualCodec != nil { - writeBatch.Del(dualCodec.encodeKey(region, instance)) - } + writeBatch.Del(s.rowCodec.encodeKey(region, instance)) } return txn.CommitInBatch(ctx, writeBatch) @@ -498,11 +432,7 @@ func (s *Storage) reclaimRegion(ctx context.Context, region []byte) error { // 
getAllInstanceRows returns all instance rows, including instance rows that // are pre-allocated. func (s *Storage) getAllInstanceRows(ctx context.Context, txn *kv.Txn) ([]instancerow, error) { - version, err := s.versionGuard(ctx, txn) - if err != nil { - return nil, err - } - return s.getInstanceRows(ctx, nil, &version, txn, lock.WaitPolicy_Block) + return s.getInstanceRows(ctx, nil, txn, lock.WaitPolicy_Block) } // getInstanceRows decodes and returns all instance rows associated @@ -513,19 +443,13 @@ func (s *Storage) getAllInstanceRows(ctx context.Context, txn *kv.Txn) ([]instan // case where multiple instances attempt to initialize their instance IDs // simultaneously. func (s *Storage) getInstanceRows( - ctx context.Context, - region []byte, - version *settingswatcher.VersionGuard, - txn *kv.Txn, - waitPolicy lock.WaitPolicy, + ctx context.Context, region []byte, txn *kv.Txn, waitPolicy lock.WaitPolicy, ) ([]instancerow, error) { - rowCodec := s.getReadCodec(version) - var start roachpb.Key if region == nil { - start = rowCodec.makeIndexPrefix() + start = s.rowCodec.makeIndexPrefix() } else { - start = rowCodec.makeRegionPrefix(region) + start = s.rowCodec.makeRegionPrefix(region) } // Scan the entire range @@ -547,7 +471,7 @@ func (s *Storage) getInstanceRows( instances := make([]instancerow, len(rows)) for i := range rows { var err error - instances[i], err = rowCodec.decodeRow(rows[i].Key, rows[i].Value) + instances[i], err = s.rowCodec.decodeRow(rows[i].Key, rows[i].Value) if err != nil { return nil, err } @@ -638,32 +562,6 @@ func (s *Storage) RunInstanceIDReclaimLoop( }) } -func (s *Storage) getReadCodec(version *settingswatcher.VersionGuard) *rowCodec { - if version.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrReadNew) { - return &s.newRowCodec - } - return &s.oldRowCodec -} - -func (s *Storage) getDualWriteCodec(version *settingswatcher.VersionGuard) *rowCodec { - switch { - case 
version.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrSingleWrite): - return nil - case version.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrReadNew): - return &s.oldRowCodec - case version.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrDualWrite): - return &s.newRowCodec - default: - return nil - } -} - -func (s *Storage) versionGuard( - ctx context.Context, txn *kv.Txn, -) (settingswatcher.VersionGuard, error) { - return s.settingsWatch.MakeVersionGuard(ctx, txn, clusterversion.TODO_Delete_V23_1_SystemRbrCleanup) -} - // generateAvailableInstanceRows allocates available instance IDs, and store // them in the sql_instances table. When instance IDs are pre-allocated, all // other fields in that row will be NULL. @@ -673,33 +571,18 @@ func (s *Storage) generateAvailableInstanceRows( ctx = multitenant.WithTenantCostControlExemption(ctx) target := int(PreallocatedCount.Get(&s.settings.SV)) return s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - - instances, err := s.getInstanceRows(ctx, nil /*global*/, &version, txn, lock.WaitPolicy_Block) + instances, err := s.getInstanceRows(ctx, nil /*global*/, txn, lock.WaitPolicy_Block) if err != nil { return err } - readCodec := s.getReadCodec(&version) - dualCodec := s.getDualWriteCodec(&version) - b := txn.NewBatch() for _, row := range idsToAllocate(target, regions, instances) { - value, err := readCodec.encodeAvailableValue() + value, err := s.rowCodec.encodeAvailableValue() if err != nil { return errors.Wrapf(err, "failed to encode row for instance id %d", row.instanceID) } - b.Put(readCodec.encodeKey(row.region, row.instanceID), value) - if dualCodec != nil { - dualValue, err := dualCodec.encodeAvailableValue() - if err != nil { - return errors.Wrapf(err, "failed to encode dual write row for instance id %d", row.instanceID) - } - b.Put(dualCodec.encodeKey(row.region, row.instanceID), dualValue) - } + 
b.Put(s.rowCodec.encodeKey(row.region, row.instanceID), value) } return txn.CommitInBatch(ctx, b) }) diff --git a/pkg/sql/sqlinstance/instancestorage/instancestorage_internal_test.go b/pkg/sql/sqlinstance/instancestorage/instancestorage_internal_test.go index ece753524faf..504024f8ae13 100644 --- a/pkg/sql/sqlinstance/instancestorage/instancestorage_internal_test.go +++ b/pkg/sql/sqlinstance/instancestorage/instancestorage_internal_test.go @@ -56,12 +56,8 @@ func TestGetAvailableInstanceIDForRegion(t *testing.T) { getAvailableInstanceID := func(storage *Storage, region []byte) (id base.SQLInstanceID, err error) { err = storage.db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { - version, err := storage.versionGuard(ctx, txn) - if err != nil { - return err - } - - id, err = storage.getAvailableInstanceIDForRegion(ctx, region, txn, &version) + var err error + id, err = storage.getAvailableInstanceIDForRegion(ctx, region, txn) return err }) return diff --git a/pkg/sql/sqlinstance/instancestorage/test_helpers.go b/pkg/sql/sqlinstance/instancestorage/test_helpers.go index 525617645c58..ee9e8a1bcace 100644 --- a/pkg/sql/sqlinstance/instancestorage/test_helpers.go +++ b/pkg/sql/sqlinstance/instancestorage/test_helpers.go @@ -97,8 +97,8 @@ func (s *Storage) CreateInstanceDataForTest( return err } - key := s.newRowCodec.encodeKey(region, instanceID) - value, err := s.newRowCodec.encodeValue(rpcAddr, sqlAddr, sessionID, locality, binaryVersion) + key := s.rowCodec.encodeKey(region, instanceID) + value, err := s.rowCodec.encodeValue(rpcAddr, sqlAddr, sessionID, locality, binaryVersion) if err != nil { return err } @@ -113,7 +113,7 @@ func (s *Storage) CreateInstanceDataForTest( func (s *Storage) GetInstanceDataForTest( ctx context.Context, region []byte, instanceID base.SQLInstanceID, ) (sqlinstance.InstanceInfo, error) { - k := s.newRowCodec.encodeKey(region, instanceID) + k := s.rowCodec.encodeKey(region, instanceID) ctx = 
multitenant.WithTenantCostControlExemption(ctx) row, err := s.db.Get(ctx, k) if err != nil { @@ -122,7 +122,7 @@ func (s *Storage) GetInstanceDataForTest( if row.Value == nil { return sqlinstance.InstanceInfo{}, sqlinstance.NonExistentInstanceError } - rpcAddr, sqlAddr, sessionID, locality, binaryVersion, _, err := s.newRowCodec.decodeValue(*row.Value) + rpcAddr, sqlAddr, sessionID, locality, binaryVersion, _, err := s.rowCodec.decodeValue(*row.Value) if err != nil { return sqlinstance.InstanceInfo{}, errors.Wrapf(err, "could not decode data for instance %d", instanceID) } @@ -145,11 +145,8 @@ func (s *Storage) GetAllInstancesDataForTest( var rows []instancerow ctx = multitenant.WithTenantCostControlExemption(ctx) if err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - rows, err = s.getInstanceRows(ctx, nil /*global*/, &version, txn, lock.WaitPolicy_Block) + var err error + rows, err = s.getInstanceRows(ctx, nil /*global*/, txn, lock.WaitPolicy_Block) return err }); err != nil { return nil, err diff --git a/pkg/sql/sqlliveness/slstorage/BUILD.bazel b/pkg/sql/sqlliveness/slstorage/BUILD.bazel index d58ec48adb37..85e255bb479c 100644 --- a/pkg/sql/sqlliveness/slstorage/BUILD.bazel +++ b/pkg/sql/sqlliveness/slstorage/BUILD.bazel @@ -12,7 +12,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness/slstorage", visibility = ["//visibility:public"], deps = [ - "//pkg/clusterversion", "//pkg/keys", "//pkg/kv", "//pkg/kv/kvpb", @@ -23,7 +22,6 @@ go_library( "//pkg/settings/cluster", "//pkg/sql/catalog", "//pkg/sql/catalog/systemschema", - "//pkg/sql/enum", "//pkg/sql/sem/eval", "//pkg/sql/sqlliveness", "//pkg/util/admission/admissionpb", diff --git a/pkg/sql/sqlliveness/slstorage/key_encoder.go b/pkg/sql/sqlliveness/slstorage/key_encoder.go index 2111cfe6e281..3e866a33f3f7 100644 --- a/pkg/sql/sqlliveness/slstorage/key_encoder.go +++ 
b/pkg/sql/sqlliveness/slstorage/key_encoder.go @@ -76,33 +76,3 @@ func (e *rbrEncoder) decode(key roachpb.Key) (sqlliveness.SessionID, error) { func (e *rbrEncoder) indexPrefix() roachpb.Key { return e.rbrIndex.Clone() } - -type rbtEncoder struct { - rbtIndex roachpb.Key -} - -func (e *rbtEncoder) encode(id sqlliveness.SessionID) (roachpb.Key, error) { - const columnFamilyID = 0 - - key := e.indexPrefix() - key = encoding.EncodeBytesAscending(key, id.UnsafeBytes()) - return keys.MakeFamilyKey(key, columnFamilyID), nil -} - -func (e *rbtEncoder) decode(key roachpb.Key) (sqlliveness.SessionID, error) { - if !bytes.HasPrefix(key, e.rbtIndex) { - return "", errors.Newf("sqlliveness table key has an invalid prefix: %v", key) - } - rem := key[len(e.rbtIndex):] - - rem, session, err := encoding.DecodeBytesAscending(rem, nil) - if err != nil { - return "", errors.Wrap(err, "failed to decode region from session key") - } - - return sqlliveness.SessionID(session), nil -} - -func (e *rbtEncoder) indexPrefix() roachpb.Key { - return e.rbtIndex.Clone() -} diff --git a/pkg/sql/sqlliveness/slstorage/key_encoder_test.go b/pkg/sql/sqlliveness/slstorage/key_encoder_test.go index 944a65658716..b5ccfc61eb90 100644 --- a/pkg/sql/sqlliveness/slstorage/key_encoder_test.go +++ b/pkg/sql/sqlliveness/slstorage/key_encoder_test.go @@ -17,7 +17,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/sql/enum" - "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/uuid" @@ -58,14 +57,4 @@ func TestKeyEncoder(t *testing.T) { require.NoError(t, err) require.Equal(t, id, decodedID) }) - - t.Run("EncodeLegacySession", func(t *testing.T) { - id := sqlliveness.SessionID(uuid.MakeV4().GetBytes()) - - key, err := codec.encode(id) - require.NoError(t, err) - decodedID, err 
:= codec.decode(key) - require.NoError(t, err) - require.Equal(t, id, decodedID) - }) } diff --git a/pkg/sql/sqlliveness/slstorage/sessionid.go b/pkg/sql/sqlliveness/slstorage/sessionid.go index d56be21641db..060290ad5e03 100644 --- a/pkg/sql/sqlliveness/slstorage/sessionid.go +++ b/pkg/sql/sqlliveness/slstorage/sessionid.go @@ -11,8 +11,6 @@ package slstorage import ( - "github.com/cockroachdb/cockroach/pkg/clusterversion" - "github.com/cockroachdb/cockroach/pkg/sql/enum" "github.com/cockroachdb/cockroach/pkg/sql/sqlliveness" "github.com/cockroachdb/cockroach/pkg/util/uuid" "github.com/cockroachdb/errors" @@ -70,15 +68,7 @@ func MakeSessionID(region []byte, id uuid.UUID) (sqlliveness.SessionID, error) { func UnsafeDecodeSessionID(session sqlliveness.SessionID) (region, id []byte, err error) { b := session.UnsafeBytes() if len(b) == legacyLen { - // TODO(jeffswenson): once the TODO_Delete_V23_1_SystemRbrCleanup version gate is - // deleted, replace this branch with a validation error. - _ = clusterversion.TODO_Delete_V23_1_SystemRbrCleanup - - // Legacy format of SessionID. Treat the session as if it belongs to - // region enum.One. This may crop up briefly if a session was created - // with an old binary right before the server is upgraded and the - // upgrade is kicked off while the session is still 'live'. 
- return enum.One, b, nil + return nil, nil, errors.Newf("unexpected legacy SessionID format") } if len(b) < minimumNonLegacyLen { // The smallest valid v1 session id is a [version, 1, single_byte_region, uuid...], diff --git a/pkg/sql/sqlliveness/slstorage/sessionid_test.go b/pkg/sql/sqlliveness/slstorage/sessionid_test.go index ba0097bde8a1..3a78062d68cb 100644 --- a/pkg/sql/sqlliveness/slstorage/sessionid_test.go +++ b/pkg/sql/sqlliveness/slstorage/sessionid_test.go @@ -87,12 +87,6 @@ func TestSessionIDEncoding(t *testing.T) { session: "", err: "session id is too short", }, - { - name: "legacy_session", - session: sqlliveness.SessionID(id1.GetBytes()), - region: enum.One, - id: id1, - }, { name: "session_v1", session: must(slstorage.MakeSessionID(enum.One, id1)), diff --git a/pkg/sql/sqlliveness/slstorage/slstorage.go b/pkg/sql/sqlliveness/slstorage/slstorage.go index 3ce6904322fd..7827bad06de1 100644 --- a/pkg/sql/sqlliveness/slstorage/slstorage.go +++ b/pkg/sql/sqlliveness/slstorage/slstorage.go @@ -15,7 +15,6 @@ import ( "math/rand" "time" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" @@ -87,8 +86,7 @@ type Storage struct { metrics Metrics gcInterval func() time.Duration newTimer func() timeutil.TimerI - newKeyCodec keyCodec - oldKeyCodec keyCodec + keyCodec keyCodec mu struct { syncutil.Mutex @@ -122,7 +120,6 @@ func NewTestingStorage( table catalog.TableDescriptor, newTimer func() timeutil.TimerI, ) *Storage { - const rbtIndexID = 1 s := &Storage{ settings: settings, settingsWatcher: settingsWatcher, @@ -130,8 +127,7 @@ func NewTestingStorage( clock: clock, db: db, codec: codec, - newKeyCodec: &rbrEncoder{codec.IndexPrefix(uint32(table.GetID()), uint32(table.GetPrimaryIndexID()))}, - oldKeyCodec: &rbtEncoder{codec.IndexPrefix(uint32(table.GetID()), rbtIndexID)}, + keyCodec: 
&rbrEncoder{codec.IndexPrefix(uint32(table.GetID()), uint32(table.GetPrimaryIndexID()))}, newTimer: newTimer, gcInterval: func() time.Duration { baseInterval := GCInterval.Get(&settings.SV) @@ -246,32 +242,6 @@ func (s *Storage) isAlive( return res.Val.(bool), nil } -func (s *Storage) getReadCodec(version *settingswatcher.VersionGuard) keyCodec { - if version.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrReadNew) { - return s.newKeyCodec - } - return s.oldKeyCodec -} - -func (s *Storage) getDualWriteCodec(version *settingswatcher.VersionGuard) keyCodec { - switch { - case version.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrSingleWrite): - return nil - case version.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrReadNew): - return s.oldKeyCodec - case version.IsActive(clusterversion.TODO_Delete_V23_1_SystemRbrDualWrite): - return s.newKeyCodec - default: - return nil - } -} - -func (s *Storage) versionGuard( - ctx context.Context, txn *kv.Txn, -) (settingswatcher.VersionGuard, error) { - return s.settingsWatcher.MakeVersionGuard(ctx, txn, clusterversion.TODO_Delete_V23_1_SystemRbrCleanup) -} - // This function will launch a singleflight goroutine for the session which // will populate its result into the caches underneath the mutex. The result // value will be a bool. The singleflight goroutine does not cancel its work @@ -343,13 +313,7 @@ func (s *Storage) deleteOrFetchSession( // Reset captured variable in case of retry. 
deleted, expiration, prevExpiration = false, hlc.Timestamp{}, hlc.Timestamp{} - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - - readCodec := s.getReadCodec(&version) - k, err := readCodec.encode(sid) + k, err := s.keyCodec.encode(sid) if err != nil { return err } @@ -376,13 +340,6 @@ func (s *Storage) deleteOrFetchSession( deleted, expiration = true, hlc.Timestamp{} ba := txn.NewBatch() ba.Del(k) - if dualCodec := s.getDualWriteCodec(&version); dualCodec != nil { - dualKey, err := dualCodec.encode(sid) - if err != nil { - return err - } - ba.Del(dualKey) - } return txn.CommitInBatch(ctx, ba) }); err != nil { @@ -490,11 +447,8 @@ func (s *Storage) fetchExpiredSessionIDs(ctx context.Context) ([]sqlliveness.Ses var result []sqlliveness.SessionID if err := s.txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - result, err = findRows(ctx, txn, s.getReadCodec(&version)) + var err error + result, err = findRows(ctx, txn, s.keyCodec) return err }); err != nil { return nil, err @@ -514,27 +468,13 @@ func (s *Storage) Insert( if err := s.txn(ctx, func(ctx context.Context, txn *kv.Txn) error { batch := txn.NewBatch() - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - readCodec := s.getReadCodec(&version) - k, err := readCodec.encode(sid) + k, err := s.keyCodec.encode(sid) if err != nil { return err } v := encodeValue(expiration) batch.InitPut(k, &v, true) - if dualCodec := s.getDualWriteCodec(&version); dualCodec != nil { - dualKey, err := dualCodec.encode(sid) - if err != nil { - return err - } - dualValue := encodeValue(expiration) - batch.InitPut(dualKey, &dualValue, true) - } - return txn.CommitInBatch(ctx, batch) }); err != nil { s.metrics.WriteFailures.Inc(1) @@ -552,14 +492,7 @@ func (s *Storage) Update( ) (sessionExists bool, err error) { ctx = multitenant.WithTenantCostControlExemption(ctx) err = s.txn(ctx, func(ctx 
context.Context, txn *kv.Txn) error { - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - - readCodec := s.getReadCodec(&version) - - k, err := readCodec.encode(sid) + k, err := s.keyCodec.encode(sid) if err != nil { return err } @@ -573,14 +506,6 @@ func (s *Storage) Update( v := encodeValue(expiration) ba := txn.NewBatch() ba.Put(k, &v) - if dualCodec := s.getDualWriteCodec(&version); dualCodec != nil { - dualKey, err := dualCodec.encode(sid) - if err != nil { - return err - } - dualValue := encodeValue(expiration) - ba.Put(dualKey, &dualValue) - } return txn.CommitInBatch(ctx, ba) }) if err != nil || !sessionExists { @@ -598,27 +523,14 @@ func (s *Storage) Update( // tasks using the session have stopped. func (s *Storage) Delete(ctx context.Context, session sqlliveness.SessionID) error { return s.txn(ctx, func(ctx context.Context, txn *kv.Txn) error { - version, err := s.versionGuard(ctx, txn) - if err != nil { - return err - } - batch := txn.NewBatch() - readCodec := s.getReadCodec(&version) - key, err := readCodec.encode(session) + key, err := s.keyCodec.encode(session) if err != nil { return err } batch.Del(key) - if dualCodec := s.getDualWriteCodec(&version); dualCodec != nil { - dualKey, err := dualCodec.encode(session) - if err != nil { - return err - } - batch.Del(dualKey) - } return txn.CommitInBatch(ctx, batch) }) } diff --git a/pkg/sql/sqlliveness/slstorage/slstorage_internal_test.go b/pkg/sql/sqlliveness/slstorage/slstorage_internal_test.go index f62bf27815b2..3cb0270f90f5 100644 --- a/pkg/sql/sqlliveness/slstorage/slstorage_internal_test.go +++ b/pkg/sql/sqlliveness/slstorage/slstorage_internal_test.go @@ -15,7 +15,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" - "github.com/cockroachdb/cockroach/pkg/server/settingswatcher" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" "github.com/cockroachdb/cockroach/pkg/util/leaktest" 
"github.com/cockroachdb/cockroach/pkg/util/log" @@ -30,7 +29,6 @@ func TestGetEncoder(t *testing.T) { const ( isNil codecType = iota isRbr - isRbt ) checkCodec := func(t *testing.T, typ codecType, codec keyCodec) { @@ -42,10 +40,6 @@ func TestGetEncoder(t *testing.T) { require.NotNil(t, codec) _, ok := codec.(*rbrEncoder) require.True(t, ok, "expected %v to be an rbr encoder", codec) - case isRbt: - require.NotNil(t, codec) - _, ok := codec.(*rbtEncoder) - require.True(t, ok, "expected %v to be an rbt encoder", codec) } } @@ -68,11 +62,7 @@ func TestGetEncoder(t *testing.T) { storage := NewTestingStorage( log.AmbientContext{}, nil, nil, nil, keys.SystemSQLCodec, nil, nil, systemschema.SqllivenessTable(), nil) - version := clusterversion.ClusterVersion{Version: clusterversion.ByKey(tc.version)} - guard := settingswatcher.TestMakeVersionGuard(version) - - checkCodec(t, tc.readCodec, storage.getReadCodec(&guard)) - checkCodec(t, tc.dualCodec, storage.getDualWriteCodec(&guard)) + checkCodec(t, tc.readCodec, storage.keyCodec) }) } } diff --git a/pkg/sql/stats/BUILD.bazel b/pkg/sql/stats/BUILD.bazel index 394588835f90..3e609e98342d 100644 --- a/pkg/sql/stats/BUILD.bazel +++ b/pkg/sql/stats/BUILD.bazel @@ -22,7 +22,6 @@ go_library( importpath = "github.com/cockroachdb/cockroach/pkg/sql/stats", visibility = ["//visibility:public"], deps = [ - "//pkg/clusterversion", "//pkg/jobs/jobspb", "//pkg/keys", "//pkg/kv/kvclient/rangefeed", diff --git a/pkg/sql/stats/new_stat.go b/pkg/sql/stats/new_stat.go index b65a395d6730..72a9475866b5 100644 --- a/pkg/sql/stats/new_stat.go +++ b/pkg/sql/stats/new_stat.go @@ -13,14 +13,12 @@ package stats import ( "context" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/isql" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" "github.com/cockroachdb/cockroach/pkg/sql/types" 
"github.com/cockroachdb/cockroach/pkg/util/protoutil" - "github.com/cockroachdb/errors" ) // InsertNewStats inserts a slice of statistics at the current time into the @@ -88,34 +86,6 @@ func InsertNewStat( } } - if !settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1AddPartialStatisticsColumns) { - if partialPredicate != "" { - return errors.New("unable to insert new partial statistic as cluster version is from before V23.1.") - } - _, err := txn.Exec( - ctx, "insert-statistic", txn.KV(), - `INSERT INTO system.table_statistics ( - "tableID", - "name", - "columnIDs", - "rowCount", - "distinctCount", - "nullCount", - "avgSize", - histogram - ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`, - tableID, - nameVal, - columnIDsVal, - rowCount, - distinctCount, - nullCount, - avgSize, - histogramVal, - ) - return err - } - // Need to assign to a nil interface{} to be able // to insert NULL value. var predicateValue interface{} diff --git a/pkg/sql/stats/stats_cache.go b/pkg/sql/stats/stats_cache.go index 20cb617c0e78..354b8b7d7727 100644 --- a/pkg/sql/stats/stats_cache.go +++ b/pkg/sql/stats/stats_cache.go @@ -17,7 +17,6 @@ import ( "sort" "sync" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed" @@ -522,19 +521,13 @@ const ( // NewTableStatisticProto converts a row of datums from system.table_statistics // into a TableStatisticsProto. Note that any user-defined types in the // HistogramData will be unresolved. 
-func NewTableStatisticProto( - datums tree.Datums, partialStatisticsColumnsVerActive bool, -) (*TableStatisticProto, error) { +func NewTableStatisticProto(datums tree.Datums) (*TableStatisticProto, error) { if datums == nil || datums.Len() == 0 { return nil, nil } hgIndex := histogramIndex numStats := statsLen - if !partialStatisticsColumnsVerActive { - hgIndex = histogramIndex - 1 - numStats = statsLen - 2 - } // Validate the input length. if datums.Len() != numStats { return nil, errors.Errorf("%d values returned from table statistics lookup. Expected %d", datums.Len(), numStats) @@ -556,28 +549,9 @@ func NewTableStatisticProto( {"distinctCount", distinctCountIndex, types.Int, false}, {"nullCount", nullCountIndex, types.Int, false}, {"avgSize", avgSizeIndex, types.Int, false}, + {"partialPredicate", partialPredicateIndex, types.String, true}, {"histogram", hgIndex, types.Bytes, true}, - } - - // It's ok for expectedTypes to be in a different order than the input datums - // since we don't rely on a precise order of expectedTypes when we check them - // below. 
- if partialStatisticsColumnsVerActive { - expectedTypes = append(expectedTypes, - []struct { - fieldName string - fieldIndex int - expectedType *types.T - nullable bool - }{ - { - "partialPredicate", partialPredicateIndex, types.String, true, - }, - { - "fullStatisticID", fullStatisticsIdIndex, types.Int, true, - }, - }..., - ) + {"fullStatisticID", fullStatisticsIdIndex, types.Int, true}, } for _, v := range expectedTypes { @@ -606,13 +580,11 @@ func NewTableStatisticProto( if datums[nameIndex] != tree.DNull { res.Name = string(*datums[nameIndex].(*tree.DString)) } - if partialStatisticsColumnsVerActive { - if datums[partialPredicateIndex] != tree.DNull { - res.PartialPredicate = string(*datums[partialPredicateIndex].(*tree.DString)) - } - if datums[fullStatisticsIdIndex] != tree.DNull { - res.FullStatisticID = uint64(*datums[fullStatisticsIdIndex].(*tree.DInt)) - } + if datums[partialPredicateIndex] != tree.DNull { + res.PartialPredicate = string(*datums[partialPredicateIndex].(*tree.DString)) + } + if datums[fullStatisticsIdIndex] != tree.DNull { + res.FullStatisticID = uint64(*datums[fullStatisticsIdIndex].(*tree.DInt)) } if datums[hgIndex] != tree.DNull { res.HistogramData = &HistogramData{} @@ -629,11 +601,11 @@ func NewTableStatisticProto( // parseStats converts the given datums to a TableStatistic object. It might // need to run a query to get user defined type metadata. 
func (sc *TableStatisticsCache) parseStats( - ctx context.Context, datums tree.Datums, partialStatisticsColumnsVerActive bool, + ctx context.Context, datums tree.Datums, ) (*TableStatistic, error) { var tsp *TableStatisticProto var err error - tsp, err = NewTableStatisticProto(datums, partialStatisticsColumnsVerActive) + tsp, err = NewTableStatisticProto(datums) if err != nil { return nil, err } @@ -779,17 +751,7 @@ func (tsp *TableStatisticProto) IsAuto() bool { func (sc *TableStatisticsCache) getTableStatsFromDB( ctx context.Context, tableID descpb.ID, forecast bool, ) ([]*TableStatistic, error) { - partialStatisticsColumnsVerActive := sc.settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1AddPartialStatisticsColumns) - var partialPredicateCol string - var fullStatisticIDCol string - if partialStatisticsColumnsVerActive { - partialPredicateCol = ` -"partialPredicate",` - fullStatisticIDCol = ` -,"fullStatisticID" -` - } - getTableStatisticsStmt := fmt.Sprintf(` + getTableStatisticsStmt := ` SELECT "tableID", "statisticID", @@ -800,13 +762,13 @@ SELECT "distinctCount", "nullCount", "avgSize", - %s - histogram - %s + "partialPredicate", + histogram, + "fullStatisticID" FROM system.table_statistics WHERE "tableID" = $1 ORDER BY "createdAt" DESC, "columnIDs" DESC, "statisticID" DESC -`, partialPredicateCol, fullStatisticIDCol) +` // TODO(michae2): Add an index on system.table_statistics (tableID, createdAt, // columnIDs, statisticID). 
@@ -820,7 +782,7 @@ ORDER BY "createdAt" DESC, "columnIDs" DESC, "statisticID" DESC var statsList []*TableStatistic var ok bool for ok, err = it.Next(ctx); ok; ok, err = it.Next(ctx) { - stats, err := sc.parseStats(ctx, it.Cur(), partialStatisticsColumnsVerActive) + stats, err := sc.parseStats(ctx, it.Cur()) if err != nil { log.Warningf(ctx, "could not decode statistic for table %d: %v", tableID, err) continue diff --git a/pkg/sql/tenant_accessors.go b/pkg/sql/tenant_accessors.go index 108d988f6e64..e0eacca9631f 100644 --- a/pkg/sql/tenant_accessors.go +++ b/pkg/sql/tenant_accessors.go @@ -13,7 +13,6 @@ package sql import ( "context" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv/kvpb" "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfo" @@ -65,11 +64,6 @@ func GetAllNonDropTenantIDs( ) ([]roachpb.TenantID, error) { q := `SELECT id FROM system.tenants WHERE data_state != $1 ORDER BY id` var arg interface{} = mtinfopb.DataStateDrop - if !settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - q = `SELECT id FROM system.tenants -WHERE crdb_internal.pb_to_json('cockroach.multitenant.ProtoInfo', info, true)->>'deprecatedDataState' != $1 ORDER BY id` - arg = "DROP" - } rows, err := txn.QueryBufferedEx(ctx, "get-tenant-ids", txn.KV(), sessiondata.NodeUserSessionDataOverride, q, arg) if err != nil { return nil, err @@ -94,10 +88,6 @@ WHERE crdb_internal.pb_to_json('cockroach.multitenant.ProtoInfo', info, true)->> func GetTenantRecordByName( ctx context.Context, settings *cluster.Settings, txn isql.Txn, tenantName roachpb.TenantName, ) (*mtinfopb.TenantInfo, error) { - if !settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - return nil, errors.Newf("tenant names not supported until upgrade to %s or higher is completed", - 
clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode.String()) - } row, err := txn.QueryRowEx( ctx, "get-tenant", txn.KV(), sessiondata.NodeUserSessionDataOverride, `SELECT id, info, name, data_state, service_mode FROM system.tenants WHERE name = $1`, tenantName, @@ -116,9 +106,6 @@ func GetTenantRecordByID( ctx context.Context, txn isql.Txn, tenID roachpb.TenantID, settings *cluster.Settings, ) (*mtinfopb.TenantInfo, error) { q := `SELECT id, info, name, data_state, service_mode FROM system.tenants WHERE id = $1` - if !settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - q = `SELECT id, info, NULL, NULL, NULL FROM system.tenants WHERE id = $1` - } row, err := txn.QueryRowEx( ctx, "get-tenant", txn.KV(), sessiondata.NodeUserSessionDataOverride, q, tenID.ToUint64(), diff --git a/pkg/sql/tenant_capability.go b/pkg/sql/tenant_capability.go index 27b617ab6ce3..ede65e9ee822 100644 --- a/pkg/sql/tenant_capability.go +++ b/pkg/sql/tenant_capability.go @@ -14,7 +14,6 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities" "github.com/cockroachdb/cockroach/pkg/multitenant/tenantcapabilities/tenantcapabilitiespb" "github.com/cockroachdb/cockroach/pkg/spanconfig/spanconfigbounds" @@ -46,9 +45,6 @@ func (p *planner) AlterTenantCapability( if err := rejectIfCantCoordinateMultiTenancy(p.execCfg.Codec, "grant/revoke capabilities to", p.execCfg.Settings); err != nil { return nil, err } - if !p.ExecCfg().Settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantCapabilities) { - return nil, pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, "cannot alter tenant capabilities until version is finalized") - } tSpec, err := p.planTenantSpec(ctx, n.TenantSpec, alterTenantCapabilityOp) if err != nil { diff --git a/pkg/sql/tenant_creation.go b/pkg/sql/tenant_creation.go index 47ab53f745aa..2dc73219aaff 100644 --- 
a/pkg/sql/tenant_creation.go +++ b/pkg/sql/tenant_creation.go @@ -271,9 +271,6 @@ func CreateTenantRecord( return roachpb.TenantID{}, err } if info.Name != "" { - if !settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - return roachpb.TenantID{}, pgerror.Newf(pgcode.FeatureNotSupported, "cannot use tenant names") - } if err := info.Name.IsValid(); err != nil { return roachpb.TenantID{}, pgerror.WithCandidateCode(err, pgcode.Syntax) } @@ -297,23 +294,20 @@ func CreateTenantRecord( return roachpb.TenantID{}, pgerror.Newf(pgcode.ProgramLimitExceeded, "tenant ID %d out of range", info.ID) } - // Update the ID sequence if available. + // Update the ID sequence. // We only keep the latest ID. - if settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1_TenantIDSequence) { - if err := updateTenantIDSequence(ctx, txn, info.ID); err != nil { - return roachpb.TenantID{}, err - } + if err := updateTenantIDSequence(ctx, txn, info.ID); err != nil { + return roachpb.TenantID{}, err } if info.Name == "" { - // No name: generate one if we are at the appropriate version. - if settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - info.Name = roachpb.TenantName(fmt.Sprintf("cluster-%d", info.ID)) - } + // No name: generate one. + info.Name = roachpb.TenantName(fmt.Sprintf("cluster-%d", info.ID)) } // Populate the deprecated DataState field for compatibility // with pre-v23.1 servers. + // TODO(radu): we can remove this now. switch info.DataState { case mtinfopb.DataStateReady: info.DeprecatedDataState = mtinfopb.ProtoInfo_READY @@ -339,9 +333,6 @@ func CreateTenantRecord( // Insert into the tenant table and detect collisions. 
var name tree.Datum if info.Name != "" { - if !settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - return roachpb.TenantID{}, pgerror.Newf(pgcode.FeatureNotSupported, "cannot use tenant names") - } name = tree.NewDString(string(info.Name)) } else { name = tree.DNull @@ -349,11 +340,6 @@ func CreateTenantRecord( query := `INSERT INTO system.tenants (id, active, info, name, data_state, service_mode) VALUES ($1, $2, $3, $4, $5, $6)` args := []interface{}{tenID, active, infoBytes, name, info.DataState, info.ServiceMode} - if !settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - // Ensure the insert can succeed if the upgrade is not finalized yet. - query = `INSERT INTO system.tenants (id, active, info) VALUES ($1, $2, $3)` - args = args[:3] - } if num, err := txn.ExecEx( ctx, "create-tenant", txn.KV(), sessiondata.NodeUserSessionDataOverride, @@ -606,12 +592,9 @@ HAVING ($1 = '' OR NOT EXISTS (SELECT 1 FROM system.tenants t WHERE t.name = $1) nextIDFromTable := uint64(*row[0].(*tree.DInt)) // Is the sequence available yet? 
- var lastIDFromSequence int64 - if settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1_TenantIDSequence) { - lastIDFromSequence, err = getTenantIDSequenceValue(ctx, txn) - if err != nil { - return roachpb.TenantID{}, err - } + lastIDFromSequence, err := getTenantIDSequenceValue(ctx, txn) + if err != nil { + return roachpb.TenantID{}, err } nextID := nextIDFromTable diff --git a/pkg/sql/tenant_deletion.go b/pkg/sql/tenant_deletion.go index a987816c2cbb..63df95891244 100644 --- a/pkg/sql/tenant_deletion.go +++ b/pkg/sql/tenant_deletion.go @@ -14,7 +14,6 @@ import ( "context" "fmt" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" @@ -86,21 +85,19 @@ func dropTenantInternal( return err } - if settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - if ignoreServiceMode { - // Compatibility with CC serverless use of - // crdb_internal.destroy_tenant(): we want to disable the check - // immediately below, as well as the additional check performed - // inside UpdateTenantRecord() (via validateTenantInfo). - info.ServiceMode = mtinfopb.ServiceModeNone - } - // We can only check the service mode after upgrading to a version - // that supports the service mode column. - if info.ServiceMode != mtinfopb.ServiceModeNone { - return errors.WithHint(pgerror.Newf(pgcode.ObjectNotInPrerequisiteState, - "cannot drop tenant %q (%d) in service mode %v", info.Name, tenID, info.ServiceMode), - "Use ALTER VIRTUAL CLUSTER STOP SERVICE before DROP VIRTUAL CLUSTER.") - } + if ignoreServiceMode { + // Compatibility with CC serverless use of + // crdb_internal.destroy_tenant(): we want to disable the check + // immediately below, as well as the additional check performed + // inside UpdateTenantRecord() (via validateTenantInfo). 
+ info.ServiceMode = mtinfopb.ServiceModeNone
+ }
+ // A tenant must have its service stopped before it can be dropped;
+ // reject the drop while the service mode is still active.
+ if info.ServiceMode != mtinfopb.ServiceModeNone {
+ return errors.WithHint(pgerror.Newf(pgcode.ObjectNotInPrerequisiteState,
+ "cannot drop tenant %q (%d) in service mode %v", info.Name, tenID, info.ServiceMode),
+ "Use ALTER VIRTUAL CLUSTER STOP SERVICE before DROP VIRTUAL CLUSTER.") }

 if info.DataState == mtinfopb.DataStateDrop { diff --git a/pkg/sql/tenant_update.go b/pkg/sql/tenant_update.go index fc645ec9fb76..93529835dff1 100644 --- a/pkg/sql/tenant_update.go +++ b/pkg/sql/tenant_update.go @@ -14,7 +14,6 @@ import ( "context" "time" - "github.com/cockroachdb/cockroach/pkg/clusterversion" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/multitenant" "github.com/cockroachdb/cockroach/pkg/multitenant/mtinfopb" @@ -76,11 +75,6 @@ func UpdateTenantRecord( SET active = $2, info = $3, name = $4, data_state = $5, service_mode = $6 WHERE id = $1` args := []interface{}{info.ID, active, infoBytes, name, info.DataState, info.ServiceMode} - if !settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - // Ensure the update can succeed if the upgrade is not finalized yet. - query = `UPDATE system.tenants SET active = $2, info = $3 WHERE id = $1` - args = args[:3] - } if num, err := txn.ExecEx( ctx, "update-tenant", txn.KV(), sessiondata.NodeUserSessionDataOverride, @@ -106,13 +100,9 @@ func UpdateTenantRecord( return errors.Newf("tenant in data state %v with dropped name %q", info.DataState, info.DroppedName) } - if settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - // We can only check the service mode after upgrading to a version - // that supports the service mode column.
- if info.ServiceMode != mtinfopb.ServiceModeNone && info.DataState != mtinfopb.DataStateReady { - return errors.Newf("cannot use tenant service mode %v with data state %v", - info.ServiceMode, info.DataState) - } + if info.ServiceMode != mtinfopb.ServiceModeNone && info.DataState != mtinfopb.DataStateReady { + return errors.Newf("cannot use tenant service mode %v with data state %v", + info.ServiceMode, info.DataState) } // Sanity check. Note that this interlock is not a guarantee that @@ -261,10 +251,6 @@ func (p *planner) renameTenant( if err := newName.IsValid(); err != nil { return pgerror.WithCandidateCode(err, pgcode.Syntax) } - - if !p.EvalContext().Settings.Version.IsActive(ctx, clusterversion.TODO_Delete_V23_1TenantNamesStateAndServiceMode) { - return pgerror.Newf(pgcode.FeatureNotSupported, "cannot use tenant names") - } } if info.ServiceMode != mtinfopb.ServiceModeNone {