From e5c98671a47e7d85c339118f019b5be1177b3bc7 Mon Sep 17 00:00:00 2001 From: Jiaqiang Huang Date: Wed, 25 Sep 2024 23:25:29 +0800 Subject: [PATCH] ddl: support scatter region in cluster/global level (#56157) ref tikv/pd#8424, ref pingcap/tidb#54886, close pingcap/tidb#55184 --- .../snap_client/systable_restore_test.go | 2 +- pkg/ddl/executor.go | 18 +++--- pkg/ddl/index_modify_test.go | 8 +-- pkg/ddl/partition.go | 5 +- pkg/ddl/split_region.go | 47 ++++++++++------ pkg/ddl/table_split_test.go | 55 ++++++++++++++++++- pkg/ddl/tests/partition/db_partition_test.go | 4 +- pkg/ddl/tests/serial/serial_test.go | 4 +- pkg/executor/executor_failpoint_test.go | 2 +- pkg/executor/sample_test.go | 2 +- pkg/executor/set_test.go | 19 ++++--- pkg/executor/test/showtest/show_test.go | 2 +- .../test/splittest/split_table_test.go | 6 +- pkg/session/bootstrap.go | 16 +++++- pkg/session/session.go | 2 +- pkg/sessionctx/variable/session.go | 12 ++++ pkg/sessionctx/variable/sysvar.go | 16 +++++- pkg/sessionctx/variable/tidb_vars.go | 4 +- .../integrationtest/r/executor/sample.result | 2 +- tests/integrationtest/t/executor/sample.test | 2 +- 20 files changed, 171 insertions(+), 57 deletions(-) diff --git a/br/pkg/restore/snap_client/systable_restore_test.go b/br/pkg/restore/snap_client/systable_restore_test.go index 6dae14d9d6e17..8b1b464023af0 100644 --- a/br/pkg/restore/snap_client/systable_restore_test.go +++ b/br/pkg/restore/snap_client/systable_restore_test.go @@ -116,5 +116,5 @@ func TestCheckSysTableCompatibility(t *testing.T) { // // The above variables are in the file br/pkg/restore/systable_restore.go func TestMonitorTheSystemTableIncremental(t *testing.T) { - require.Equal(t, int64(215), session.CurrentBootstrapVersion) + require.Equal(t, int64(216), session.CurrentBootstrapVersion) } diff --git a/pkg/ddl/executor.go b/pkg/ddl/executor.go index 2c3bcc85b73c8..f84225017b0db 100644 --- a/pkg/ddl/executor.go +++ b/pkg/ddl/executor.go @@ -1390,21 +1390,21 @@ func preSplitAndScatter(ctx sessionctx.Context, store kv.Storage, tbInfo *model. 
return } var ( - preSplit func() - scatterRegion bool + preSplit func() + scatterScope string ) - val, err := ctx.GetSessionVars().GetGlobalSystemVar(context.Background(), variable.TiDBScatterRegion) - if err != nil { - logutil.DDLLogger().Warn("won't scatter region", zap.Error(err)) + val, ok := ctx.GetSessionVars().GetSystemVar(variable.TiDBScatterRegion) + if !ok { + logutil.DDLLogger().Warn("failed to get system variable, won't scatter region") } else { - scatterRegion = variable.TiDBOptOn(val) + scatterScope = val } if len(parts) > 0 { - preSplit = func() { splitPartitionTableRegion(ctx, sp, tbInfo, parts, scatterRegion) } + preSplit = func() { splitPartitionTableRegion(ctx, sp, tbInfo, parts, scatterScope) } } else { - preSplit = func() { splitTableRegion(ctx, sp, tbInfo, scatterRegion) } + preSplit = func() { splitTableRegion(ctx, sp, tbInfo, scatterScope) } } - if scatterRegion { + if scatterScope != variable.ScatterOff { preSplit() } else { go preSplit() diff --git a/pkg/ddl/index_modify_test.go b/pkg/ddl/index_modify_test.go index 333126ca33ba2..7547190229c82 100644 --- a/pkg/ddl/index_modify_test.go +++ b/pkg/ddl/index_modify_test.go @@ -180,10 +180,10 @@ func testAddIndex(t *testing.T, tp testAddIndexType, createTableSQL, idxTp strin isTestPartition := (testPartition & tp) > 0 if isTestShardRowID { atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) - tk.MustExec("set global tidb_scatter_region = 1") + tk.MustExec("set global tidb_scatter_region = 'table'") defer func() { atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0) - tk.MustExec("set global tidb_scatter_region = 0") + tk.MustExec("set global tidb_scatter_region = ''") }() } if (testClusteredIndex & tp) > 0 { @@ -473,10 +473,10 @@ func testAddIndexWithSplitTable(t *testing.T, createSQL, splitTableSQL string) { hasAutoRandomField := len(splitTableSQL) > 0 if !hasAutoRandomField { atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) - tk.MustExec("set global tidb_scatter_region = 1") + tk.MustExec("set global tidb_scatter_region = 'table'") defer func() { atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0) - tk.MustExec("set global tidb_scatter_region = 0") + tk.MustExec("set global tidb_scatter_region = ''") }() } tk.MustExec(createSQL) diff --git a/pkg/ddl/partition.go b/pkg/ddl/partition.go index 29c631ca43646..ff1ea91d93406 100644 --- a/pkg/ddl/partition.go +++ b/pkg/ddl/partition.go @@ -3209,8 +3209,9 @@ func (w *worker) onReorganizePartition(jobCtx *jobContext, t *meta.Meta, job *mo // Doing the preSplitAndScatter here, since all checks are completed, // and we will soon start writing to the new partitions. if s, ok := jobCtx.store.(kv.SplittableStore); ok && s != nil { - // partInfo only contains the AddingPartitions - splitPartitionTableRegion(w.sess.Context, s, tblInfo, partInfo.Definitions, true) + // 1. partInfo only contains the AddingPartitions + // 2. ScatterTable makes all newly split regions wait for the scatter to finish at table level. + splitPartitionTableRegion(w.sess.Context, s, tblInfo, partInfo.Definitions, variable.ScatterTable) } // Assume we cannot have more than MaxUint64 rows, set the progress to 1/10 of that.
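Taken together, the executor.go and partition.go hunks above change the pre-split dispatch: any non-empty scatter scope makes the DDL run the pre-split synchronously and wait for the scatter to finish, while the empty scope keeps the old fire-and-forget behavior. A minimal standalone Go sketch of that control flow (the lower-case scope constants and the runPreSplit/preSplit names are illustrative stand-ins for the real variable.Scatter* constants and split functions):

package main

import (
	"fmt"
	"sync"
)

// Illustrative stand-ins for variable.ScatterOff / ScatterTable / ScatterGlobal.
const (
	scatterOff    = ""
	scatterTable  = "table"
	scatterGlobal = "global"
)

// runPreSplit mirrors the dispatch in preSplitAndScatter: a non-empty scatter
// scope runs the pre-split synchronously so the DDL waits for the scatter to
// finish; the empty scope launches it in the background, as before this patch.
func runPreSplit(scatterScope string, preSplit func()) {
	if scatterScope != scatterOff {
		preSplit()
	} else {
		go preSplit()
	}
}

func main() {
	// Empty scope: the pre-split runs asynchronously; the WaitGroup is only
	// here so the demo prints before main exits.
	var wg sync.WaitGroup
	wg.Add(1)
	runPreSplit(scatterOff, func() { defer wg.Done(); fmt.Println("async pre-split, no scatter") })
	wg.Wait()
	// Non-empty scope: the call blocks until split and scatter are done.
	runPreSplit(scatterGlobal, func() { fmt.Println("sync pre-split, cluster-level scatter") })
}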
diff --git a/pkg/ddl/split_region.go b/pkg/ddl/split_region.go index a921ae1245348..5152197279ff5 100644 --- a/pkg/ddl/split_region.go +++ b/pkg/ddl/split_region.go @@ -24,13 +24,14 @@ import ( "github.com/pingcap/tidb/pkg/meta/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx" + "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/types" tikverr "github.com/tikv/client-go/v2/error" "go.uber.org/zap" ) -func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, parts []model.PartitionDefinition, scatter bool) { +func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, parts []model.PartitionDefinition, scatterScope string) { // Max partition count is 8192, should we sample and just choose some partitions to split? regionIDs := make([]uint64, 0, len(parts)) ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout()) @@ -38,34 +39,47 @@ func splitPartitionTableRegion(ctx sessionctx.Context, store kv.SplittableStore, ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL) if shardingBits(tbInfo) > 0 && tbInfo.PreSplitRegions > 0 { for _, def := range parts { - regionIDs = append(regionIDs, preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, def.ID, scatter)...) + regionIDs = append(regionIDs, preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, def.ID, scatterScope)...) } } else { for _, def := range parts { - regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, def.ID, tbInfo.ID, scatter)) + regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, def.ID, tbInfo.ID, scatterScope)) } } - if scatter { + if scatterScope != variable.ScatterOff { WaitScatterRegionFinish(ctxWithTimeout, store, regionIDs...) } } -func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scatter bool) { +func splitTableRegion(ctx sessionctx.Context, store kv.SplittableStore, tbInfo *model.TableInfo, scatterScope string) { ctxWithTimeout, cancel := context.WithTimeout(context.Background(), ctx.GetSessionVars().GetSplitRegionTimeout()) defer cancel() ctxWithTimeout = kv.WithInternalSourceType(ctxWithTimeout, kv.InternalTxnDDL) var regionIDs []uint64 if shardingBits(tbInfo) > 0 && tbInfo.PreSplitRegions > 0 { - regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scatter) + regionIDs = preSplitPhysicalTableByShardRowID(ctxWithTimeout, store, tbInfo, tbInfo.ID, scatterScope) } else { - regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, tbInfo.ID, tbInfo.ID, scatter)) + regionIDs = append(regionIDs, SplitRecordRegion(ctxWithTimeout, store, tbInfo.ID, tbInfo.ID, scatterScope)) } - if scatter { + if scatterScope != variable.ScatterOff { WaitScatterRegionFinish(ctxWithTimeout, store, regionIDs...) } } -func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, physicalID int64, scatter bool) []uint64 { +// `tID` is used to control the scope of scatter. If it is `ScatterTable`, the corresponding tableID is used.
+func getScatterConfig(scope string, tableID int64) (scatter bool, tID int64) { + switch scope { + case variable.ScatterTable: + return true, tableID + case variable.ScatterGlobal: + return true, -1 + default: + return false, tableID + } +} + +func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableStore, tbInfo *model.TableInfo, physicalID int64, scatterScope string) []uint64 { // Example: // sharding_bits = 4 // PreSplitRegions = 2 @@ -107,20 +121,21 @@ func preSplitPhysicalTableByShardRowID(ctx context.Context, store kv.SplittableS key := tablecodec.EncodeRecordKey(recordPrefix, kv.IntHandle(recordID)) splitTableKeys = append(splitTableKeys, key) } - var err error - regionIDs, err := store.SplitRegions(ctx, splitTableKeys, scatter, &tbInfo.ID) + scatter, tableID := getScatterConfig(scatterScope, tbInfo.ID) + regionIDs, err := store.SplitRegions(ctx, splitTableKeys, scatter, &tableID) if err != nil { logutil.DDLLogger().Warn("pre split some table regions failed", zap.Stringer("table", tbInfo.Name), zap.Int("successful region count", len(regionIDs)), zap.Error(err)) } - regionIDs = append(regionIDs, splitIndexRegion(store, tbInfo, scatter)...) + regionIDs = append(regionIDs, splitIndexRegion(store, tbInfo, scatter, &tableID)...) return regionIDs } // SplitRecordRegion is to split region in store by table prefix. -func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scatter bool) uint64 { +func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTableID, tableID int64, scatterScope string) uint64 { tableStartKey := tablecodec.GenTablePrefix(physicalTableID) - regionIDs, err := store.SplitRegions(ctx, [][]byte{tableStartKey}, scatter, &tableID) + scatter, tID := getScatterConfig(scatterScope, tableID) + regionIDs, err := store.SplitRegions(ctx, [][]byte{tableStartKey}, scatter, &tID) if err != nil { // It will be automatically split by TiKV later. logutil.DDLLogger().Warn("split table region failed", zap.Error(err)) @@ -131,13 +146,13 @@ func SplitRecordRegion(ctx context.Context, store kv.SplittableStore, physicalTa return 0 } -func splitIndexRegion(store kv.SplittableStore, tblInfo *model.TableInfo, scatter bool) []uint64 { +func splitIndexRegion(store kv.SplittableStore, tblInfo *model.TableInfo, scatter bool, tableID *int64) []uint64 { splitKeys := make([][]byte, 0, len(tblInfo.Indices)) for _, idx := range tblInfo.Indices { indexPrefix := tablecodec.EncodeTableIndexPrefix(tblInfo.ID, idx.ID) splitKeys = append(splitKeys, indexPrefix) } - regionIDs, err := store.SplitRegions(context.Background(), splitKeys, scatter, &tblInfo.ID) + regionIDs, err := store.SplitRegions(context.Background(), splitKeys, scatter, tableID) if err != nil { logutil.DDLLogger().Warn("pre split some table index regions failed", zap.Stringer("table", tblInfo.Name), zap.Int("successful region count", len(regionIDs)), zap.Error(err)) diff --git a/pkg/ddl/table_split_test.go b/pkg/ddl/table_split_test.go index c2110d3b6905b..7ee9ebec021c4 100644 --- a/pkg/ddl/table_split_test.go +++ b/pkg/ddl/table_split_test.go @@ -45,11 +45,19 @@ func TestTableSplit(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") // Synced split table region. 
- tk.MustExec("set global tidb_scatter_region = 1") + tk.MustExec("set @@session.tidb_scatter_region = 'table'") tk.MustExec(`create table t_part (a int key) partition by range(a) ( partition p0 values less than (10), partition p1 values less than (20) )`) + tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("")) + tk.MustExec("set @@global.tidb_scatter_region = 'table'") + tk = testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`create table t_part_2 (a int key) partition by range(a) ( + partition p0 values less than (10), + partition p1 values less than (20) + )`) defer dom.Close() atomic.StoreUint32(&ddl.EnableSplitTableRegion, 0) infoSchema := dom.InfoSchema() @@ -65,6 +73,51 @@ func TestTableSplit(t *testing.T) { for _, def := range pi.Definitions { checkRegionStartWithTableID(t, def.ID, store.(kvStore)) } + tbl, err = infoSchema.TableByName(context.Background(), model.NewCIStr("test"), model.NewCIStr("t_part_2")) + require.NoError(t, err) + pi = tbl.Meta().GetPartitionInfo() + require.NotNil(t, pi) + for _, def := range pi.Definitions { + checkRegionStartWithTableID(t, def.ID, store.(kvStore)) + } +} + +// TestScatterRegion test the behavior of the tidb_scatter_region system variable, for verifying: +// 1. The variable can be set and queried correctly at both session and global levels. +// 2. Changes to the global variable affect new sessions but not existing ones. +// 3. The variable only accepts valid values (”, 'table', 'global'). +// 4. Attempts to set invalid values result in appropriate error messages. +func TestScatterRegion(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk2 := testkit.NewTestKit(t, store) + + tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("")) + tk.MustExec("set @@tidb_scatter_region = 'table';") + tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("table")) + tk.MustExec("set @@tidb_scatter_region = 'global';") + tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("global")) + tk.MustExec("set @@tidb_scatter_region = '';") + tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("")) + + tk.MustExec("set global tidb_scatter_region = 'table';") + tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("table")) + tk.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("")) + tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("")) + tk2 = testkit.NewTestKit(t, store) + tk2.MustQuery("select @@tidb_scatter_region;").Check(testkit.Rows("table")) + + tk.MustExec("set global tidb_scatter_region = 'global';") + tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("global")) + tk.MustExec("set global tidb_scatter_region = '';") + tk.MustQuery("select @@global.tidb_scatter_region;").Check(testkit.Rows("")) + + err := tk.ExecToErr("set @@tidb_scatter_region = 'test';") + require.ErrorContains(t, err, "invalid value for 'test', it should be either '', 'table' or 'global'") + err = tk.ExecToErr("set @@tidb_scatter_region = '1';") + require.ErrorContains(t, err, "invalid value for '1', it should be either '', 'table' or 'global'") + err = tk.ExecToErr("set @@tidb_scatter_region = 0;") + require.ErrorContains(t, err, "invalid value for '0', it should be either '', 'table' or 'global'") } type kvStore interface { diff --git a/pkg/ddl/tests/partition/db_partition_test.go b/pkg/ddl/tests/partition/db_partition_test.go index 52d8511096ccf..5499aadb87d46 100644 --- 
a/pkg/ddl/tests/partition/db_partition_test.go +++ b/pkg/ddl/tests/partition/db_partition_test.go @@ -1210,7 +1210,7 @@ func TestAlterTableTruncatePartitionPreSplitRegion(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) - tk.MustExec("set @@global.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region='table';") tk.MustExec("use test;") tk.MustExec("drop table if exists t1;") @@ -1653,7 +1653,7 @@ func TestGlobalIndexShowTableRegions(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists p") - tk.MustExec("set @@global.tidb_scatter_region = on") + tk.MustExec("set @@session.tidb_scatter_region = 'table'") tk.MustExec(`create table p (id int, c int, d int, unique key uidx(c)) partition by range (c) ( partition p0 values less than (4), partition p1 values less than (7), diff --git a/pkg/ddl/tests/serial/serial_test.go b/pkg/ddl/tests/serial/serial_test.go index 2004c65fe6823..f208f188a7450 100644 --- a/pkg/ddl/tests/serial/serial_test.go +++ b/pkg/ddl/tests/serial/serial_test.go @@ -156,7 +156,7 @@ func TestCreateTableWithLike(t *testing.T) { // Test create table like for partition table. atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) tk.MustExec("use test") - tk.MustExec("set @@global.tidb_scatter_region=1") + tk.MustExec("set @@session.tidb_scatter_region='table'") tk.MustExec("drop table if exists partition_t") tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3") tk.MustExec("drop table if exists t1") @@ -1108,7 +1108,7 @@ func TestAutoRandomWithPreSplitRegion(t *testing.T) { origin := atomic.LoadUint32(&ddl.EnableSplitTableRegion) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) defer atomic.StoreUint32(&ddl.EnableSplitTableRegion, origin) - tk.MustExec("set @@global.tidb_scatter_region=1") + tk.MustExec("set @@session.tidb_scatter_region='table'") // Test pre-split table region for auto_random table. tk.MustExec("create table t (a bigint auto_random(2) primary key clustered, b int) pre_split_regions=2") diff --git a/pkg/executor/executor_failpoint_test.go b/pkg/executor/executor_failpoint_test.go index bd823ffa2c14d..dc42b18d0a3fd 100644 --- a/pkg/executor/executor_failpoint_test.go +++ b/pkg/executor/executor_failpoint_test.go @@ -210,7 +210,7 @@ func TestSplitRegionTimeout(t *testing.T) { // Test pre-split with timeout. 
tk.MustExec("drop table if exists t") - tk.MustExec("set @@global.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region='table';") require.NoError(t, failpoint.Enable("tikvclient/mockScatterRegionTimeout", `return(true)`)) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) start := time.Now() diff --git a/pkg/executor/sample_test.go b/pkg/executor/sample_test.go index bd3a477807ec2..83c4ef26900ba 100644 --- a/pkg/executor/sample_test.go +++ b/pkg/executor/sample_test.go @@ -32,7 +32,7 @@ func createSampleTestkit(t *testing.T, store kv.Storage) *testkit.TestKit { tk.MustExec("drop database if exists test_table_sample;") tk.MustExec("create database test_table_sample;") tk.MustExec("use test_table_sample;") - tk.MustExec("set @@global.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region='table';") return tk } diff --git a/pkg/executor/set_test.go b/pkg/executor/set_test.go index 5ad7e37f46ba1..e24f8f1a6b2c7 100644 --- a/pkg/executor/set_test.go +++ b/pkg/executor/set_test.go @@ -368,13 +368,18 @@ func TestSetVar(t *testing.T) { tk.MustQuery(`select @@session.tidb_wait_split_region_finish;`).Check(testkit.Rows("0")) // test for tidb_scatter_region - tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("0")) - tk.MustExec("set global tidb_scatter_region = 1") - tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("1")) - tk.MustExec("set global tidb_scatter_region = 0") - tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("0")) - require.Error(t, tk.ExecToErr("set session tidb_scatter_region = 0")) - require.Error(t, tk.ExecToErr(`select @@session.tidb_scatter_region;`)) + tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("")) + tk.MustExec("set global tidb_scatter_region = 'table'") + tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("table")) + tk.MustExec("set global tidb_scatter_region = 'global'") + tk.MustQuery(`select @@global.tidb_scatter_region;`).Check(testkit.Rows("global")) + tk.MustExec("set session tidb_scatter_region = ''") + tk.MustQuery(`select @@session.tidb_scatter_region;`).Check(testkit.Rows("")) + tk.MustExec("set session tidb_scatter_region = 'table'") + tk.MustQuery(`select @@session.tidb_scatter_region;`).Check(testkit.Rows("table")) + tk.MustExec("set session tidb_scatter_region = 'global'") + tk.MustQuery(`select @@session.tidb_scatter_region;`).Check(testkit.Rows("global")) + require.Error(t, tk.ExecToErr("set session tidb_scatter_region = 'test'")) // test for tidb_wait_split_region_timeout tk.MustQuery(`select @@session.tidb_wait_split_region_timeout;`).Check(testkit.Rows(strconv.Itoa(variable.DefWaitSplitRegionTimeout))) diff --git a/pkg/executor/test/showtest/show_test.go b/pkg/executor/test/showtest/show_test.go index ae5701974ae4b..3dbc72cb1574a 100644 --- a/pkg/executor/test/showtest/show_test.go +++ b/pkg/executor/test/showtest/show_test.go @@ -804,7 +804,7 @@ func TestAutoRandomWithLargeSignedShowTableRegions(t *testing.T) { tk.MustExec("drop table if exists t;") tk.MustExec("create table t (a bigint unsigned auto_random primary key clustered);") - tk.MustExec("set @@global.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region='table';") // 18446744073709541615 is MaxUint64 - 10000. // 18446744073709551615 is the MaxUint64. tk.MustQuery("split table t between (18446744073709541615) and (18446744073709551615) regions 2;"). 
diff --git a/pkg/executor/test/splittest/split_table_test.go b/pkg/executor/test/splittest/split_table_test.go index 938df7a5ca6ed..2a3d577d41f91 100644 --- a/pkg/executor/test/splittest/split_table_test.go +++ b/pkg/executor/test/splittest/split_table_test.go @@ -41,7 +41,7 @@ func TestClusterIndexShowTableRegion(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) - tk.MustExec("set global tidb_scatter_region = 1") + tk.MustExec("set global tidb_scatter_region = 'table'") tk.MustExec("drop database if exists cluster_index_regions;") tk.MustExec("create database cluster_index_regions;") tk.MustExec("use cluster_index_regions;") @@ -75,7 +75,7 @@ func TestShowTableRegion(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("drop table if exists t_regions") - tk.MustExec("set global tidb_scatter_region = 1") + tk.MustExec("set global tidb_scatter_region = 'table'") atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) tk.MustExec("create table t_regions (a int key, b int, c int, index idx(b), index idx2(c))") tk.MustGetErrMsg( @@ -223,7 +223,7 @@ func TestShowTableRegion(t *testing.T) { // Test show table regions for partition table when enable split region when create table. atomic.StoreUint32(&ddl.EnableSplitTableRegion, 1) - tk.MustExec("set @@global.tidb_scatter_region=1;") + tk.MustExec("set @@session.tidb_scatter_region='table';") tk.MustExec("drop table if exists partition_t;") tk.MustExec("create table partition_t (a int, b int,index(a)) partition by hash (a) partitions 3") re = tk.MustQuery("show table partition_t regions") diff --git a/pkg/session/bootstrap.go b/pkg/session/bootstrap.go index 9a527110e941f..633ae102d0bc5 100644 --- a/pkg/session/bootstrap.go +++ b/pkg/session/bootstrap.go @@ -1174,11 +1174,15 @@ const ( // If TiDB is upgrading from a version before v7.0 to a newer version, we keep tidb_enable_inl_join_inner_multi_pattern at 0. version215 = 215 + + // version 216 + // changes the value of variable `tidb_scatter_region` from ON to "table" and from OFF to "". + version216 = 216 ) // currentBootstrapVersion is defined as a variable, so we can modify its value for testing. // please make sure this is the largest version -var currentBootstrapVersion int64 = version215 +var currentBootstrapVersion int64 = version216 // DDL owner key's expired time is ManagerSessionTTL seconds, we should wait the time and give more time to have a chance to finish it. var internalSQLTimeout = owner.ManagerSessionTTL + 15 @@ -1349,6 +1353,7 @@ var ( upgradeToVer213, upgradeToVer214, upgradeToVer215, + upgradeToVer216, } ) @@ -3207,6 +3212,15 @@ func upgradeToVer215(s sessiontypes.Session, ver int64) { initGlobalVariableIfNotExists(s, variable.TiDBEnableINLJoinInnerMultiPattern, variable.Off) } +func upgradeToVer216(s sessiontypes.Session, ver int64) { + if ver >= version216 { + return + } + + mustExecute(s, "UPDATE mysql.global_variables SET VARIABLE_VALUE='' WHERE VARIABLE_NAME = 'tidb_scatter_region' AND VARIABLE_VALUE = 'OFF'") + mustExecute(s, "UPDATE mysql.global_variables SET VARIABLE_VALUE='table' WHERE VARIABLE_NAME = 'tidb_scatter_region' AND VARIABLE_VALUE = 'ON'") +} + // initGlobalVariableIfNotExists initializes a global variable with a specific val if it does not exist.
func initGlobalVariableIfNotExists(s sessiontypes.Session, name string, val any) { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) diff --git a/pkg/session/session.go b/pkg/session/session.go index b9a255d652684..61b6aa8234d98 100644 --- a/pkg/session/session.go +++ b/pkg/session/session.go @@ -3211,7 +3211,7 @@ func splitAndScatterTable(store kv.Storage, tableIDs []int64) { for _, id := range tableIDs { regionIDs = append(regionIDs, ddl.SplitRecordRegion(ctxWithTimeout, s, id, id, variable.DefTiDBScatterRegion)) } - if variable.DefTiDBScatterRegion { + if variable.DefTiDBScatterRegion != variable.ScatterOff { ddl.WaitScatterRegionFinish(ctxWithTimeout, s, regionIDs...) } cancel() diff --git a/pkg/sessionctx/variable/session.go b/pkg/sessionctx/variable/session.go index 98137f1bf7ce1..29d304d133403 100644 --- a/pkg/sessionctx/variable/session.go +++ b/pkg/sessionctx/variable/session.go @@ -1705,6 +1705,9 @@ type SessionVars struct { // SharedLockPromotion indicates whether the `select for lock` statements would be executed as the // `select for update` statements which do acquire pessimistic locks. SharedLockPromotion bool + + // ScatterRegion scatters the regions for DDLs when it is "table" or "global"; "" means scatter is not triggered. + ScatterRegion string } // GetSessionVars implements the `SessionVarsProvider` interface. @@ -4009,3 +4012,12 @@ func (s *SessionVars) PessimisticLockEligible() bool { } return false } + +const ( + // ScatterOff is the default and means regions will not be scattered + ScatterOff string = "" + // ScatterTable means scatter regions at table level + ScatterTable string = "table" + // ScatterGlobal means scatter regions at global level + ScatterGlobal string = "global" +) diff --git a/pkg/sessionctx/variable/sysvar.go b/pkg/sessionctx/variable/sysvar.go index 323c69eb44830..9e407b7eb1870 100644 --- a/pkg/sessionctx/variable/sysvar.go +++ b/pkg/sessionctx/variable/sysvar.go @@ -775,7 +775,21 @@ var defaultSysVars = []*SysVar{ SetMaxDeltaSchemaCount(TidbOptInt64(val, DefTiDBMaxDeltaSchemaCount)) return nil }}, - {Scope: ScopeGlobal, Name: TiDBScatterRegion, Value: BoolToOnOff(DefTiDBScatterRegion), Type: TypeBool}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBScatterRegion, Value: DefTiDBScatterRegion, PossibleValues: []string{ScatterOff, ScatterTable, ScatterGlobal}, Type: TypeStr, + SetSession: func(vars *SessionVars, val string) error { + vars.ScatterRegion = val + return nil + }, + GetSession: func(vars *SessionVars) (string, error) { + return vars.ScatterRegion, nil + }, + Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) { + if normalizedValue != ScatterOff && normalizedValue != ScatterTable && normalizedValue != ScatterGlobal { + return "", fmt.Errorf("invalid value for '%s', it should be either '%s', '%s' or '%s'", normalizedValue, ScatterOff, ScatterTable, ScatterGlobal) + } + return normalizedValue, nil + }, + }, {Scope: ScopeGlobal, Name: TiDBEnableStmtSummary, Value: BoolToOnOff(DefTiDBEnableStmtSummary), Type: TypeBool, AllowEmpty: true, SetGlobal: func(_ context.Context, s *SessionVars, val string) error { return stmtsummaryv2.SetEnabled(TiDBOptOn(val)) diff --git a/pkg/sessionctx/variable/tidb_vars.go b/pkg/sessionctx/variable/tidb_vars.go index 77145e44f9423..9a89085aa8b2b 100644 --- a/pkg/sessionctx/variable/tidb_vars.go +++ b/pkg/sessionctx/variable/tidb_vars.go @@ -536,7 +536,7 @@ const ( // deltaSchemaInfos is a queue that maintains the history of schema
changes. TiDBMaxDeltaSchemaCount = "tidb_max_delta_schema_count" - // TiDBScatterRegion will scatter the regions for DDLs when it is ON. + // TiDBScatterRegion scatters the regions for DDLs when it is "table" or "global"; "" means scatter is not triggered. TiDBScatterRegion = "tidb_scatter_region" // TiDBWaitSplitRegionFinish defines the split region behaviour is sync or async. @@ -1340,7 +1340,7 @@ const ( DefTiDBSkipIsolationLevelCheck = false DefTiDBExpensiveQueryTimeThreshold = 60 // 60s DefTiDBExpensiveTxnTimeThreshold = 60 * 10 // 10 minutes - DefTiDBScatterRegion = false + DefTiDBScatterRegion = ScatterOff DefTiDBWaitSplitRegionFinish = true DefWaitSplitRegionTimeout = 300 // 300s DefTiDBEnableNoopFuncs = Off diff --git a/tests/integrationtest/r/executor/sample.result b/tests/integrationtest/r/executor/sample.result index f7a0bcaae3dae..013d426567e15 100644 --- a/tests/integrationtest/r/executor/sample.result +++ b/tests/integrationtest/r/executor/sample.result @@ -1,4 +1,4 @@ -set @@global.tidb_scatter_region=1 +set @@global.tidb_scatter_region='table' drop table if exists t; set tidb_enable_clustered_index = on; create table t (a varchar(255) primary key, b bigint); diff --git a/tests/integrationtest/t/executor/sample.test b/tests/integrationtest/t/executor/sample.test index 0b73a13be795a..6c270d2ee5b5b 100644 --- a/tests/integrationtest/t/executor/sample.test +++ b/tests/integrationtest/t/executor/sample.test @@ -1,4 +1,4 @@ -set @@global.tidb_scatter_region=1 +set @@global.tidb_scatter_region='table' # TestTableSampleSchema drop table if exists t;
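For reference, the value migration performed by upgradeToVer216 can be viewed as a pure mapping from the old boolean settings to the new scope strings. A hedged sketch of that mapping (the migrateScatterValue name is illustrative; the real upgrade simply runs the two UPDATE statements on mysql.global_variables shown in bootstrap.go):

package main

import "fmt"

// migrateScatterValue mirrors the intent of upgradeToVer216: the old boolean
// values ON/OFF stored in mysql.global_variables become the new scope strings
// "table" and "". Any other stored value is left untouched.
func migrateScatterValue(old string) string {
	switch old {
	case "ON":
		return "table"
	case "OFF":
		return ""
	default:
		return old
	}
}

func main() {
	for _, v := range []string{"ON", "OFF", "table"} {
		fmt.Printf("%q -> %q\n", v, migrateScatterValue(v))
	}
}

Mapping ON to "table" rather than "global" preserves the pre-upgrade behavior: before this patch, an enabled tidb_scatter_region always scattered at table level.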