From 54aea78184824feb5734e8baccdf362f431bc866 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Wed, 23 Jun 2021 18:31:22 +0800 Subject: [PATCH 01/38] planner,executor: fix batch point get for update read panic on partition table (#25652) (#25657) --- executor/partition_table_test.go | 5 +++++ planner/core/find_best_task.go | 6 ++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go index 69b6b64431809..02d4dcbca9466 100644 --- a/executor/partition_table_test.go +++ b/executor/partition_table_test.go @@ -3026,4 +3026,9 @@ func (s *partitionTableSuite) TestIssue25528(c *C) { tk.MustExec("insert into issue25528 (id, balance, created_at) values(1, 100, '2021-06-17 22:35:20')") tk.MustExec("begin pessimistic") tk.MustQuery("select * from issue25528 where id = 1 for update").Check(testkit.Rows("1 100.00 -100.00 2021-06-17 22:35:20")) + + tk.MustExec("drop table if exists issue25528") + tk.MustExec("CREATE TABLE `issue25528` ( `c1` int(11) NOT NULL, `c2` int(11) DEFAULT NULL, `c3` int(11) DEFAULT NULL, `c4` int(11) DEFAULT NULL, PRIMARY KEY (`c1`) /*T![clustered_index] CLUSTERED */, KEY `k2` (`c2`), KEY `k3` (`c3`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( `c1` ) PARTITIONS 10;") + tk.MustExec("INSERT INTO issue25528 (`c1`, `c2`, `c3`, `c4`) VALUES (1, 1, 1, 1) , (3, 3, 3, 3) , (2, 2, 2, 2) , (4, 4, 4, 4);") + tk.MustQuery("select * from issue25528 where c1 in (3, 4) order by c2 for update;").Check(testkit.Rows("3 3 3 3", "4 4 4 4")) } diff --git a/planner/core/find_best_task.go b/planner/core/find_best_task.go index f9b233ff1d7c3..4af175b545561 100644 --- a/planner/core/find_best_task.go +++ b/planner/core/find_best_task.go @@ -723,7 +723,8 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter // We do not build [batch] point get for dynamic table partitions now. This can be optimized. if ds.ctx.GetSessionVars().UseDynamicPartitionPrune() { canConvertPointGet = false - } else if len(path.Ranges) > 1 { + } + if canConvertPointGet && len(path.Ranges) > 1 { // We can only build batch point get for hash partitions on a simple column now. This is // decided by the current implementation of `BatchPointGetExec::initialize()`, specifically, // the `getPhysID()` function. Once we optimize that part, we can come back and enable @@ -732,7 +733,8 @@ func (ds *DataSource) findBestTask(prop *property.PhysicalProperty, planCounter if hashPartColName == nil { canConvertPointGet = false } - } else { + } + if canConvertPointGet { // If the schema contains ExtraPidColID, do not convert to point get. // Because the point get executor can not handle the extra partition ID column now. 
for _, col := range ds.schema.Columns { From 8acd5c88471cb7b4d4c4a8ed73b4d53d6833f13e Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Wed, 23 Jun 2021 19:19:22 +0800 Subject: [PATCH 02/38] executor: fix KVSnapshot missing set txnScope (#25710) (#25713) --- distsql/request_builder.go | 2 +- distsql/request_builder_test.go | 2 +- executor/benchmark_test.go | 2 +- executor/coprocessor.go | 2 +- executor/executor_required_rows_test.go | 2 +- executor/set.go | 4 +++- go.mod | 1 - go.sum | 27 +++++++------------------ planner/core/preprocess.go | 2 +- store/driver/txn/snapshot.go | 2 ++ 10 files changed, 18 insertions(+), 28 deletions(-) diff --git a/distsql/request_builder.go b/distsql/request_builder.go index 423d0a2b1e785..ba48dc94f2803 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -28,13 +28,13 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tipb/go-tipb" - "github.com/tikv/client-go/v2/oracle" ) // RequestBuilder is used to build a "kv.Request". diff --git a/distsql/request_builder_test.go b/distsql/request_builder_test.go index d6a258a6067d7..7c7db2f5ced52 100644 --- a/distsql/request_builder_test.go +++ b/distsql/request_builder_test.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/statistics" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" @@ -35,7 +36,6 @@ import ( "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tipb/go-tipb" - "github.com/tikv/client-go/v2/oracle" ) var _ = Suite(&testSuite{}) diff --git a/executor/benchmark_test.go b/executor/benchmark_test.go index 6ff4022c5a8c8..c9a3d98846b8f 100644 --- a/executor/benchmark_test.go +++ b/executor/benchmark_test.go @@ -38,13 +38,13 @@ import ( "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" "github.com/pingcap/tidb/util/stringutil" - "github.com/tikv/client-go/v2/oracle" "go.uber.org/zap/zapcore" ) diff --git a/executor/coprocessor.go b/executor/coprocessor.go index 93f7dbcf40366..1860eae857362 100644 --- a/executor/coprocessor.go +++ b/executor/coprocessor.go @@ -26,12 +26,12 @@ import ( "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" 
"github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/timeutil" "github.com/pingcap/tipb/go-tipb" - "github.com/tikv/client-go/v2/oracle" ) // CoprocessorDAGHandler uses to handle cop dag request. diff --git a/executor/executor_required_rows_test.go b/executor/executor_required_rows_test.go index 44c9a5d910dfd..7f416dfd6647e 100644 --- a/executor/executor_required_rows_test.go +++ b/executor/executor_required_rows_test.go @@ -30,13 +30,13 @@ import ( "github.com/pingcap/tidb/planner/util" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/types/json" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/disk" "github.com/pingcap/tidb/util/memory" "github.com/pingcap/tidb/util/mock" - "github.com/tikv/client-go/v2/oracle" ) type requiredRowsDataSource struct { diff --git a/executor/set.go b/executor/set.go index 9f9e70157146d..5fee1a114abf1 100644 --- a/executor/set.go +++ b/executor/set.go @@ -159,7 +159,9 @@ func (e *SetExecutor) setSysVariable(name string, v *expression.VarAssignment) e } newSnapshotTS := getSnapshotTSByName() newSnapshotIsSet := newSnapshotTS > 0 && newSnapshotTS != oldSnapshotTS - if newSnapshotIsSet { + // We don't check snapshot with gc safe point for read_ts + // Client-go will automatically check the snapshotTS with gc safe point. It's unnecessary to check gc safe point during set executor. + if newSnapshotIsSet && name != variable.TiDBTxnReadTS { err = gcutil.ValidateSnapshot(e.ctx, newSnapshotTS) if err != nil { fallbackOldSnapshotTS() diff --git a/go.mod b/go.mod index 80a5b9f15121d..8aa71eafad3cd 100644 --- a/go.mod +++ b/go.mod @@ -56,7 +56,6 @@ require ( github.com/shirou/gopsutil v3.21.2+incompatible github.com/soheilhy/cmux v0.1.4 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.0-20210618031827-8c6eba458928 github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d github.com/twmb/murmur3 v1.1.3 github.com/uber-go/atomic v1.4.0 diff --git a/go.sum b/go.sum index 3d7a734f56374..6dcbd8e96ae86 100644 --- a/go.sum +++ b/go.sum @@ -237,7 +237,6 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -325,9 +324,8 @@ github.com/klauspost/compress v1.10.5 h1:7q6vHIqubShURwQz8cQK6yIe/xC3IF0Vm7TGfqj github.com/klauspost/compress v1.10.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod 
h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= @@ -434,7 +432,6 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17Xtb github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20200411081810-b85805c9476c/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/kvproto v0.0.0-20210219064844-c1844a4775d6/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20210531063847-f42e582bf0bb/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/kvproto v0.0.0-20210602120243-804ac0a6ce21 h1:WLViPiCazVwK0wULKpmwLVP/aA8NvyyfOUqQy0bPEPk= github.com/pingcap/kvproto v0.0.0-20210602120243-804ac0a6ce21/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= @@ -442,7 +439,6 @@ github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIf github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 h1:ERrF0fTuIOnwfGbt71Ji3DKbOEaP189tjym50u8gpC8= github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20210525032559-c37778aff307/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw= github.com/pingcap/parser v0.0.0-20210610025415-8d8b6346d3f0 h1:VVYIGdmi1xr+zaL1+rFWKgNATkmjyF3ddcNmPzdN1ko= github.com/pingcap/parser v0.0.0-20210610025415-8d8b6346d3f0/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw= github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= @@ -512,9 +508,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= -github.com/sirupsen/logrus v1.6.0/go.mod 
h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= @@ -558,8 +553,6 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfK github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tikv/client-go/v2 v2.0.0-20210618031827-8c6eba458928 h1:PnJPpWSOaauWVYdwlF0+qiL1MOCTG9ZnstHgR40rNQg= -github.com/tikv/client-go/v2 v2.0.0-20210618031827-8c6eba458928/go.mod h1:2ZWVKSfm6BWBY0dgH0h6o+vAzNuXWD/z0ihkMNv3J6E= github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d h1:K0XnvsnT6ofLDuM8Rt3PuFQO4p8bNraeHYstspD316g= github.com/tikv/pd v1.1.0-beta.0.20210323121136-78679e5e209d/go.mod h1:Jw9KG11C/23Rr7DW4XWQ7H5xOgGZo6DFL1OKAF4+Igw= github.com/tklauser/go-sysconf v0.3.4 h1:HT8SVixZd3IzLdfs/xlpq0jeSfTX57g1v6wB1EuzV7M= @@ -632,16 +625,14 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -655,9 +646,8 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto 
v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -689,9 +679,8 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -926,9 +915,8 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -937,9 +925,8 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.2.0 h1:ws8AfbgTX3oIczLPNPCu5166oBg9ST2vNs0rcht+mDE= -honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= modernc.org/mathutil v1.2.2 h1:+yFk8hBprV+4c0U9GjFtL+dV3N8hOJ8JCituQcMShFY= modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index 
1ac47b8df35bb..4dca6ddb50170 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -32,12 +32,12 @@ import ( "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/domainutil" utilparser "github.com/pingcap/tidb/util/parser" - "github.com/tikv/client-go/v2/oracle" ) // PreprocessOpt presents optional parameters to `Preprocess` method. diff --git a/store/driver/txn/snapshot.go b/store/driver/txn/snapshot.go index 257feb37938af..73f62fd19fa4b 100644 --- a/store/driver/txn/snapshot.go +++ b/store/driver/txn/snapshot.go @@ -94,6 +94,8 @@ func (s *tikvSnapshot) SetOption(opt int, val interface{}) { s.KVSnapshot.SetMatchStoreLabels(val.([]*metapb.StoreLabel)) case kv.ResourceGroupTag: s.KVSnapshot.SetResourceGroupTag(val.([]byte)) + case kv.TxnScope: + s.KVSnapshot.SetTxnScope(val.(string)) } } From 3939e67315b17b18cc2be3255c826b0b1e715934 Mon Sep 17 00:00:00 2001 From: tiancaiamao Date: Tue, 29 Jun 2021 19:59:25 +0800 Subject: [PATCH 03/38] *: Merge global temporary table into release-5.1 (#25724) --- bindinfo/bind_test.go | 16 +++ ddl/ddl_api.go | 46 ++++---- ddl/serial_test.go | 113 +++++++++++++++++++- errno/errcode.go | 1 + errno/errname.go | 1 + errors.toml | 15 +++ executor/adapter.go | 2 + executor/builder.go | 68 +++++++++++- executor/executor_test.go | 144 ++++++++++++++++++++++++- executor/point_get.go | 5 + executor/set_test.go | 4 +- executor/stale_txn_test.go | 140 ++++++++++++------------ kv/option.go | 2 + planner/core/errors.go | 13 ++- planner/core/preprocess.go | 153 +++++++++++++++++++++------ planner/core/preprocess_test.go | 26 +++++ session/session.go | 48 +++------ session/session_test.go | 31 ++++++ sessionctx/variable/noop.go | 1 - sessionctx/variable/session.go | 7 ++ sessionctx/variable/sysvar.go | 10 +- sessionctx/variable/tidb_vars.go | 1 + sessionctx/variable/varsutil_test.go | 1 + store/driver/txn/txn_driver.go | 2 + table/table.go | 2 + table/tables/tables.go | 50 ++++++++- util/tableutil/tableutil.go | 3 + 27 files changed, 734 insertions(+), 171 deletions(-) diff --git a/bindinfo/bind_test.go b/bindinfo/bind_test.go index 4175ddce77eb9..13ea3e8f9d367 100644 --- a/bindinfo/bind_test.go +++ b/bindinfo/bind_test.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/bindinfo" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/metrics" @@ -2098,3 +2099,18 @@ func (s *testSuite) TestBindingWithoutCharset(c *C) { c.Assert(rows[0][0], Equals, "select * from `test` . 
`t` where `a` = ?") c.Assert(rows[0][1], Equals, "SELECT * FROM `test`.`t` WHERE `a` = 'aa'") } + +func (s *testSuite) TestTemporaryTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + s.cleanBindingEnv(tk) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("set tidb_enable_global_temporary_table = true") + tk.MustExec("create global temporary table t(a int, b int, key(a), key(b)) on commit delete rows") + tk.MustExec("create table t2(a int, b int, key(a), key(b))") + tk.MustGetErrCode("create session binding for select * from t where b = 123 using select * from t ignore index(b) where b = 123;", errno.ErrOptOnTemporaryTable) + tk.MustGetErrCode("create binding for insert into t select * from t2 where t2.b = 1 and t2.c > 1 using insert into t select /*+ use_index(t2,c) */ * from t2 where t2.b = 1 and t2.c > 1", errno.ErrOptOnTemporaryTable) + tk.MustGetErrCode("create binding for replace into t select * from t2 where t2.b = 1 and t2.c > 1 using replace into t select /*+ use_index(t2,c) */ * from t2 where t2.b = 1 and t2.c > 1", errno.ErrOptOnTemporaryTable) + tk.MustGetErrCode("create binding for update t set a = 1 where b = 1 and c > 1 using update /*+ use_index(t, c) */ t set a = 1 where b = 1 and c > 1", errno.ErrOptOnTemporaryTable) + tk.MustGetErrCode("create binding for delete from t where b = 1 and c > 1 using delete /*+ use_index(t, c) */ from t where b = 1 and c > 1", errno.ErrOptOnTemporaryTable) +} diff --git a/ddl/ddl_api.go b/ddl/ddl_api.go index 69eefdd9358c6..a1f8ffa807dec 100644 --- a/ddl/ddl_api.go +++ b/ddl/ddl_api.go @@ -1642,12 +1642,15 @@ func checkTableInfoValid(tblInfo *model.TableInfo) error { return checkInvisibleIndexOnPK(tblInfo) } -func buildTableInfoWithLike(ident ast.Ident, referTblInfo *model.TableInfo, s *ast.CreateTableStmt) (*model.TableInfo, error) { +func buildTableInfoWithLike(ctx sessionctx.Context, ident ast.Ident, referTblInfo *model.TableInfo, s *ast.CreateTableStmt) (*model.TableInfo, error) { // Check the referred table is a real table object. if referTblInfo.IsSequence() || referTblInfo.IsView() { return nil, ErrWrongObject.GenWithStackByArgs(ident.Schema, referTblInfo.Name, "BASE TABLE") } tblInfo := *referTblInfo + if err := setTemporaryType(ctx, &tblInfo, s); err != nil { + return nil, errors.Trace(err) + } // Check non-public column and adjust column offset. newColumns := referTblInfo.Cols() newIndices := make([]*model.IndexInfo, 0, len(tblInfo.Indices)) @@ -1735,22 +1738,8 @@ func buildTableInfoWithStmt(ctx sessionctx.Context, s *ast.CreateTableStmt, dbCh if err != nil { return nil, errors.Trace(err) } - switch s.TemporaryKeyword { - case ast.TemporaryGlobal: - tbInfo.TempTableType = model.TempTableGlobal - if !ctx.GetSessionVars().EnableGlobalTemporaryTable { - return nil, errors.New("global temporary table is experimental and it is switched off by tidb_enable_global_temporary_table") - } - // "create global temporary table ... on commit preserve rows" - if !s.OnCommitDelete { - return nil, errors.Trace(errUnsupportedOnCommitPreserve) - } - case ast.TemporaryLocal: - // TODO: set "tbInfo.TempTableType = model.TempTableLocal" after local temporary table is supported. 
- tbInfo.TempTableType = model.TempTableNone - ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New("local TEMPORARY TABLE is not supported yet, TEMPORARY will be parsed but ignored")) - case ast.TemporaryNone: - tbInfo.TempTableType = model.TempTableNone + if err = setTemporaryType(ctx, tbInfo, s); err != nil { + return nil, errors.Trace(err) } if err = setTableAutoRandomBits(ctx, tbInfo, colDefs); err != nil { @@ -1812,7 +1801,7 @@ func (d *ddl) CreateTable(ctx sessionctx.Context, s *ast.CreateTableStmt) (err e // build tableInfo var tbInfo *model.TableInfo if s.ReferTable != nil { - tbInfo, err = buildTableInfoWithLike(ident, referTbl.Meta(), s) + tbInfo, err = buildTableInfoWithLike(ctx, ident, referTbl.Meta(), s) } else { tbInfo, err = buildTableInfoWithStmt(ctx, s, schema.Charset, schema.Collate) } @@ -1832,6 +1821,27 @@ func (d *ddl) CreateTable(ctx sessionctx.Context, s *ast.CreateTableStmt) (err e return d.CreateTableWithInfo(ctx, schema.Name, tbInfo, onExist, false /*tryRetainID*/) } +func setTemporaryType(ctx sessionctx.Context, tbInfo *model.TableInfo, s *ast.CreateTableStmt) error { + switch s.TemporaryKeyword { + case ast.TemporaryGlobal: + tbInfo.TempTableType = model.TempTableGlobal + if !ctx.GetSessionVars().EnableGlobalTemporaryTable { + return errors.New("global temporary table is experimental and it is switched off by tidb_enable_global_temporary_table") + } + // "create global temporary table ... on commit preserve rows" + if !s.OnCommitDelete { + return errors.Trace(errUnsupportedOnCommitPreserve) + } + case ast.TemporaryLocal: + // TODO: set "tbInfo.TempTableType = model.TempTableLocal" after local temporary table is supported. + tbInfo.TempTableType = model.TempTableNone + ctx.GetSessionVars().StmtCtx.AppendWarning(errors.New("local TEMPORARY TABLE is not supported yet, TEMPORARY will be parsed but ignored")) + default: + tbInfo.TempTableType = model.TempTableNone + } + return nil +} + func (d *ddl) CreateTableWithInfo( ctx sessionctx.Context, dbName model.CIStr, diff --git a/ddl/serial_test.go b/ddl/serial_test.go index e6e3164381566..915e9d66f8cef 100644 --- a/ddl/serial_test.go +++ b/ddl/serial_test.go @@ -525,6 +525,10 @@ func (s *testSerialSuite) TestCreateTableWithLike(c *C) { tk.MustExec("drop database ctwl_db") tk.MustExec("drop database ctwl_db1") +} + +func (s *testSerialSuite) TestCreateTableWithLikeAtTemporaryMode(c *C) { + tk := testkit.NewTestKit(c, s.store) // Test create table like at temporary mode. tk.MustExec("set tidb_enable_global_temporary_table=true") @@ -532,9 +536,116 @@ func (s *testSerialSuite) TestCreateTableWithLike(c *C) { tk.MustExec("drop table if exists temporary_table;") tk.MustExec("create global temporary table temporary_table (a int, b int,index(a)) on commit delete rows") tk.MustExec("drop table if exists temporary_table_t1;") - _, err = tk.Exec("create table temporary_table_t1 like temporary_table") + _, err := tk.Exec("create table temporary_table_t1 like temporary_table") c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("create table like").Error()) tk.MustExec("drop table if exists temporary_table;") + + // Test create temporary table like. + // Test auto_random. 
+ tk.MustExec("drop table if exists auto_random_table") + _, err = tk.Exec("create table auto_random_table (a bigint primary key auto_random(3), b varchar(255));") + defer tk.MustExec("drop table if exists auto_random_table") + tk.MustExec("drop table if exists auto_random_temporary_global") + _, err = tk.Exec("create global temporary table auto_random_temporary_global like auto_random_table on commit delete rows;") + c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("auto_random").Error()) + + // Test pre split regions. + tk.MustExec("drop table if exists table_pre_split") + _, err = tk.Exec("create table table_pre_split(id int) shard_row_id_bits = 2 pre_split_regions=2;") + defer tk.MustExec("drop table if exists table_pre_split") + tk.MustExec("drop table if exists temporary_table_pre_split") + _, err = tk.Exec("create global temporary table temporary_table_pre_split like table_pre_split ON COMMIT DELETE ROWS;") + c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("pre split regions").Error()) + + // Test shard_row_id_bits. + tk.MustExec("drop table if exists shard_row_id_table, shard_row_id_temporary_table, shard_row_id_table_plus, shard_row_id_temporary_table_plus") + _, err = tk.Exec("create table shard_row_id_table (a int) shard_row_id_bits = 5;") + _, err = tk.Exec("create global temporary table shard_row_id_temporary_table like shard_row_id_table on commit delete rows;") + c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("shard_row_id_bits").Error()) + tk.MustExec("create table shard_row_id_table_plus (a int);") + tk.MustExec("create global temporary table shard_row_id_temporary_table_plus (a int) on commit delete rows;") + defer tk.MustExec("drop table if exists shard_row_id_table, shard_row_id_temporary_table, shard_row_id_table_plus, shard_row_id_temporary_table_plus") + _, err = tk.Exec("alter table shard_row_id_temporary_table_plus shard_row_id_bits = 4;") + c.Assert(err.Error(), Equals, ddl.ErrOptOnTemporaryTable.GenWithStackByArgs("shard_row_id_bits").Error()) + + // Test partition. + tk.MustExec("drop table if exists global_partition_table;") + tk.MustExec("create table global_partition_table (a int, b int) partition by hash(a) partitions 3;") + defer tk.MustExec("drop table if exists global_partition_table;") + tk.MustGetErrCode("create global temporary table global_partition_temp_table like global_partition_table ON COMMIT DELETE ROWS;", + errno.ErrPartitionNoTemporary) + // Test virtual columns. 
+ tk.MustExec("drop table if exists test_gv_ddl, test_gv_ddl_temp") + tk.MustExec(`create table test_gv_ddl(a int, b int as (a+8) virtual, c int as (b + 2) stored)`) + tk.MustExec(`create global temporary table test_gv_ddl_temp like test_gv_ddl on commit delete rows;`) + defer tk.MustExec("drop table if exists test_gv_ddl_temp, test_gv_ddl") + is := tk.Se.(sessionctx.Context).GetInfoSchema().(infoschema.InfoSchema) + table, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("test_gv_ddl")) + c.Assert(err, IsNil) + testCases := []struct { + generatedExprString string + generatedStored bool + }{ + {"", false}, + {"`a` + 8", false}, + {"`b` + 2", true}, + } + for i, column := range table.Meta().Columns { + c.Assert(column.GeneratedExprString, Equals, testCases[i].generatedExprString) + c.Assert(column.GeneratedStored, Equals, testCases[i].generatedStored) + } + result := tk.MustQuery(`DESC test_gv_ddl_temp`) + result.Check(testkit.Rows(`a int(11) YES `, `b int(11) YES VIRTUAL GENERATED`, `c int(11) YES STORED GENERATED`)) + tk.MustExec("begin;") + tk.MustExec("insert into test_gv_ddl_temp values (1, default, default)") + tk.MustQuery("select * from test_gv_ddl_temp").Check(testkit.Rows("1 9 11")) + _, err = tk.Exec("commit") + c.Assert(err, IsNil) + + // Test foreign key. + tk.MustExec("drop table if exists test_foreign_key, t1") + tk.MustExec("create table t1 (a int, b int);") + tk.MustExec("create table test_foreign_key (c int,d int,foreign key (d) references t1 (b));") + defer tk.MustExec("drop table if exists test_foreign_key, t1;") + tk.MustExec("create global temporary table test_foreign_key_temp like test_foreign_key on commit delete rows;") + is = tk.Se.(sessionctx.Context).GetInfoSchema().(infoschema.InfoSchema) + table, err = is.TableByName(model.NewCIStr("test"), model.NewCIStr("test_foreign_key_temp")) + c.Assert(err, IsNil) + tableInfo := table.Meta() + c.Assert(len(tableInfo.ForeignKeys), Equals, 0) + + // Issue 25613. + // Test from->normal, to->normal. + tk.MustExec("drop table if exists tb1, tb2") + tk.MustExec("create table tb1(id int);") + tk.MustExec("create table tb2 like tb1") + defer tk.MustExec("drop table if exists tb1, tb2") + tk.MustQuery("show create table tb2;").Check(testkit.Rows("tb2 CREATE TABLE `tb2` (\n" + + " `id` int(11) DEFAULT NULL\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) + + // Test from->normal, to->global temporary. + tk.MustExec("drop table if exists tb3, tb4") + tk.MustExec("create table tb3(id int);") + tk.MustExec("create global temporary table tb4 like tb3 on commit delete rows;") + defer tk.MustExec("drop table if exists tb3, tb4") + tk.MustQuery("show create table tb4;").Check(testkit.Rows("tb4 CREATE GLOBAL TEMPORARY TABLE `tb4` (\n" + + " `id` int(11) DEFAULT NULL\n" + + ") ENGINE=memory DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ON COMMIT DELETE ROWS")) + + // Test from->global temporary, to->normal. + tk.MustExec("drop table if exists tb5, tb6") + tk.MustExec("create global temporary table tb5(id int) on commit delete rows;") + _, err = tk.Exec("create table tb6 like tb5;") + c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("create table like").Error()) + defer tk.MustExec("drop table if exists tb5, tb6") + + // Test from->global temporary, to->global temporary. 
+ tk.MustExec("drop table if exists tb7, tb8") + tk.MustExec("create global temporary table tb7(id int) on commit delete rows;") + _, err = tk.Exec("create global temporary table tb8 like tb7 on commit delete rows;") + c.Assert(err.Error(), Equals, core.ErrOptOnTemporaryTable.GenWithStackByArgs("create table like").Error()) + defer tk.MustExec("drop table if exists tb7, tb8") } // TestCancelAddIndex1 tests canceling ddl job when the add index worker is not started. diff --git a/errno/errcode.go b/errno/errcode.go index d6b7c912a15d9..319ddd092bd82 100644 --- a/errno/errcode.go +++ b/errno/errcode.go @@ -923,6 +923,7 @@ const ( ErrTxnTooLarge = 8004 ErrWriteConflictInTiDB = 8005 ErrOptOnTemporaryTable = 8006 + ErrDropTableOnTemporaryTable = 8007 ErrUnsupportedReloadPlugin = 8018 ErrUnsupportedReloadPluginVar = 8019 ErrTableLocked = 8020 diff --git a/errno/errname.go b/errno/errname.go index 6ab983acc3f29..9b40c3c685aaa 100644 --- a/errno/errname.go +++ b/errno/errname.go @@ -925,6 +925,7 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{ ErrForUpdateCantRetry: mysql.Message("[%d] can not retry select for update statement", nil), ErrAdminCheckTable: mysql.Message("TiDB admin check table failed.", nil), ErrOptOnTemporaryTable: mysql.Message("`%s` is unsupported on temporary tables.", nil), + ErrDropTableOnTemporaryTable: mysql.Message("`drop global temporary table` applies on global temporary table only.", nil), ErrTxnTooLarge: mysql.Message("Transaction is too large, size: %d", nil), ErrWriteConflictInTiDB: mysql.Message("Write conflict, txnStartTS %d is stale", nil), ErrInvalidPluginID: mysql.Message("Wrong plugin id: %s, valid plugin id is [name]-[version], both name and version should not contain '-'", nil), diff --git a/errors.toml b/errors.toml index 3ef2ba977a86e..0fb9c2977f386 100644 --- a/errors.toml +++ b/errors.toml @@ -981,6 +981,11 @@ error = ''' `%-.192s`.`%-.192s` contains view recursion ''' +["planner:1562"] +error = ''' +Cannot create temporary table with partitions +''' + ["planner:1706"] error = ''' Primary key/partition key update is not allowed since the table is updated both as '%-.192s' and '%-.192s'. @@ -1156,6 +1161,11 @@ error = ''' `%s` is unsupported on temporary tables. ''' +["planner:8007"] +error = ''' +`drop global temporary table` applies on global temporary table only. 
+''' + ["planner:8108"] error = ''' Unsupported type %T @@ -1386,6 +1396,11 @@ error = ''' Unknown column '%-.192s' in '%-.192s' ''' +["table:1114"] +error = ''' +The table '%-.192s' is full +''' + ["table:1192"] error = ''' Can't execute the given command because you have active locked tables or an active transaction diff --git a/executor/adapter.go b/executor/adapter.go index faadfe267bfdf..77498e35c88dc 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -796,6 +796,8 @@ func (a *ExecStmt) buildExecutor() (Executor, error) { } b := newExecutorBuilder(ctx, a.InfoSchema, a.Ti, a.SnapshotTS, a.ExplicitStaleness, a.TxnScope) + b.snapshotTS = a.SnapshotTS + b.explicitStaleness = a.ExplicitStaleness e := b.build(a.Plan) if b.err != nil { return nil, errors.Trace(b.err) diff --git a/executor/builder.go b/executor/builder.go index fb94a1d8d55b2..1af2b34aebc59 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -2662,6 +2662,10 @@ func buildNoRangeTableReader(b *executorBuilder, v *plannercore.PhysicalTableRea return nil, err } ts := v.GetTableScan() + if err = b.validCanReadTemporaryTable(ts.Table); err != nil { + return nil, err + } + tbl, _ := b.is.TableByID(ts.Table.ID) isPartition, physicalTableID := ts.IsPartition() if isPartition { @@ -2774,6 +2778,11 @@ func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) E } ts := v.GetTableScan() + if err = b.validCanReadTemporaryTable(ts.Table); err != nil { + b.err = err + return nil + } + ret.ranges = ts.Ranges sctx := b.ctx.GetSessionVars().StmtCtx sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID) @@ -2991,13 +3000,18 @@ func buildNoRangeIndexReader(b *executorBuilder, v *plannercore.PhysicalIndexRea } func (b *executorBuilder) buildIndexReader(v *plannercore.PhysicalIndexReader) Executor { + is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) + if err := b.validCanReadTemporaryTable(is.Table); err != nil { + b.err = err + return nil + } + ret, err := buildNoRangeIndexReader(b, v) if err != nil { b.err = err return nil } - is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) ret.ranges = is.Ranges sctx := b.ctx.GetSessionVars().StmtCtx sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O) @@ -3145,13 +3159,18 @@ func buildNoRangeIndexLookUpReader(b *executorBuilder, v *plannercore.PhysicalIn } func (b *executorBuilder) buildIndexLookUpReader(v *plannercore.PhysicalIndexLookUpReader) Executor { + is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) + if err := b.validCanReadTemporaryTable(is.Table); err != nil { + b.err = err + return nil + } + ret, err := buildNoRangeIndexLookUpReader(b, v) if err != nil { b.err = err return nil } - is := v.IndexPlans[0].(*plannercore.PhysicalIndexScan) ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) ret.ranges = is.Ranges @@ -3255,6 +3274,12 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd } func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMergeReader) Executor { + ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) + if err := b.validCanReadTemporaryTable(ts.Table); err != nil { + b.err = err + return nil + } + ret, err := buildNoRangeIndexMergeReader(b, v) if err != nil { b.err = err @@ -3274,7 +3299,6 @@ func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMerg } } } - ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) sctx.TableIDs = append(sctx.TableIDs, ts.Table.ID) executorCounterIndexMergeReaderExecutor.Inc() @@ -4048,6 +4072,11 @@ 
func NewRowDecoder(ctx sessionctx.Context, schema *expression.Schema, tbl *model } func (b *executorBuilder) buildBatchPointGet(plan *plannercore.BatchPointGetPlan) Executor { + if err := b.validCanReadTemporaryTable(plan.TblInfo); err != nil { + b.err = err + return nil + } + startTS, err := b.getSnapshotTS() if err != nil { b.err = err @@ -4181,6 +4210,16 @@ func fullRangePartition(idxArr []int) bool { return len(idxArr) == 1 && idxArr[0] == plannercore.FullRange } +type emptySampler struct{} + +func (s *emptySampler) writeChunk(_ *chunk.Chunk) error { + return nil +} + +func (s *emptySampler) finished() bool { + return true +} + func (b *executorBuilder) buildTableSample(v *plannercore.PhysicalTableSample) *TableSampleExecutor { startTS, err := b.getSnapshotTS() if err != nil { @@ -4192,11 +4231,15 @@ func (b *executorBuilder) buildTableSample(v *plannercore.PhysicalTableSample) * table: v.TableInfo, startTS: startTS, } - if v.TableSampleInfo.AstNode.SampleMethod == ast.SampleMethodTypeTiDBRegion { + + if v.TableInfo.Meta().TempTableType != model.TempTableNone { + e.sampler = &emptySampler{} + } else if v.TableSampleInfo.AstNode.SampleMethod == ast.SampleMethodTypeTiDBRegion { e.sampler = newTableRegionSampler( b.ctx, v.TableInfo, startTS, v.TableSampleInfo.Partitions, v.Schema(), v.TableSampleInfo.FullSchema, e.retFieldTypes, v.Desc) } + return e } @@ -4295,3 +4338,20 @@ func (b *executorBuilder) buildCTETableReader(v *plannercore.PhysicalCTETable) E chkIdx: 0, } } + +func (b *executorBuilder) validCanReadTemporaryTable(tbl *model.TableInfo) error { + if tbl.TempTableType == model.TempTableNone { + return nil + } + + // Some tools like dumpling use history read to dump all table's records and will be fail if we return an error. + // So we do not check SnapshotTS here + + sessionVars := b.ctx.GetSessionVars() + + if sessionVars.TxnCtx.IsStaleness || b.explicitStaleness { + return errors.New("can not stale read temporary table") + } + + return nil +} diff --git a/executor/executor_test.go b/executor/executor_test.go index 0ca710c4a8456..f280f19c9cfbd 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -8404,15 +8404,15 @@ func (s testSerialSuite) TestTemporaryTableNoNetwork(c *C) { tk.MustExec("set tidb_enable_global_temporary_table=true") tk.MustExec("create global temporary table tmp_t (id int, a int, index(a)) on commit delete rows") - tk.MustExec("begin") - tk.MustExec("insert into tmp_t values (1, 1)") - tk.MustExec("insert into tmp_t values (2, 2)") - c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy", "return(true)"), IsNil) defer func() { c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/rpcServerBusy"), IsNil) }() + tk.MustExec("begin") + tk.MustExec("insert into tmp_t values (1, 1)") + tk.MustExec("insert into tmp_t values (2, 2)") + // Make sure the fail point works. // With that failpoint, all requests to the TiKV is discard. rs, err := tk1.Exec("select * from normal") @@ -8559,3 +8559,139 @@ func (s *testResourceTagSuite) TestResourceGroupTag(c *C) { c.Assert(checkCnt > 0, IsTrue, commentf) } } + +func (s *testStaleTxnSuite) TestInvalidReadTemporaryTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + // For mocktikv, safe point is not initialized, we manually insert it for snapshot to use. + safePointName := "tikv_gc_safe_point" + safePointValue := "20160102-15:04:05 -0700" + safePointComment := "All versions after safe point can be accessed. 
(DO NOT EDIT)" + updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s') + ON DUPLICATE KEY + UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment) + tk.MustExec(updateSafePoint) + + tk.MustExec("set @@tidb_enable_global_temporary_table=1") + tk.MustExec("use test") + tk.MustExec("drop table if exists tmp1") + tk.MustExec("create global temporary table tmp1 " + + "(id int not null primary key, code int not null, value int default null, unique key code(code))" + + "on commit delete rows") + + // sleep 1us to make test stale + time.Sleep(time.Microsecond) + + queries := []struct { + sql string + }{ + { + sql: "select * from tmp1 where id=1", + }, + { + sql: "select * from tmp1 where code=1", + }, + { + sql: "select * from tmp1 where id in (1, 2, 3)", + }, + { + sql: "select * from tmp1 where code in (1, 2, 3)", + }, + { + sql: "select * from tmp1 where id > 1", + }, + { + sql: "select /*+use_index(tmp1, code)*/ * from tmp1 where code > 1", + }, + { + sql: "select /*+use_index(tmp1, code)*/ code from tmp1 where code > 1", + }, + { + sql: "select /*+ use_index_merge(tmp1, primary, code) */ * from tmp1 where id > 1 or code > 2", + }, + } + + addStaleReadToSQL := func(sql string) string { + idx := strings.Index(sql, " where ") + if idx < 0 { + return "" + } + return sql[0:idx] + " as of timestamp NOW(6)" + sql[idx:] + } + + for _, query := range queries { + sql := addStaleReadToSQL(query.sql) + if sql != "" { + tk.MustGetErrMsg(sql, "can not stale read temporary table") + } + } + + tk.MustExec("start transaction read only as of timestamp NOW(6)") + for _, query := range queries { + tk.MustGetErrMsg(query.sql, "can not stale read temporary table") + } + tk.MustExec("commit") + + for _, query := range queries { + tk.MustExec(query.sql) + } + + tk.MustExec("set transaction read only as of timestamp NOW(6)") + tk.MustExec("start transaction") + for _, query := range queries { + tk.MustGetErrMsg(query.sql, "can not stale read temporary table") + } + tk.MustExec("commit") + + for _, query := range queries { + tk.MustExec(query.sql) + } + + tk.MustExec("set @@tidb_snapshot=NOW(6)") + for _, query := range queries { + // Will success here for compatibility with some tools like dumping + rs := tk.MustQuery(query.sql) + rs.Check(testkit.Rows()) + } +} + +func (s *testSuite) TestEmptyTableSampleTemporaryTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + // For mocktikv, safe point is not initialized, we manually insert it for snapshot to use. + safePointName := "tikv_gc_safe_point" + safePointValue := "20160102-15:04:05 -0700" + safePointComment := "All versions after safe point can be accessed. 
(DO NOT EDIT)" + updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s') + ON DUPLICATE KEY + UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment) + tk.MustExec(updateSafePoint) + + tk.MustExec("set @@tidb_enable_global_temporary_table=1") + tk.MustExec("use test") + tk.MustExec("drop table if exists tmp1") + tk.MustExec("create global temporary table tmp1 " + + "(id int not null primary key, code int not null, value int default null, unique key code(code))" + + "on commit delete rows") + + // sleep 1us to make test stale + time.Sleep(time.Microsecond) + + // test tablesample return empty + rs := tk.MustQuery("select * from tmp1 tablesample regions()") + rs.Check(testkit.Rows()) + + tk.MustExec("begin") + tk.MustExec("insert into tmp1 values (1, 1, 1)") + rs = tk.MustQuery("select * from tmp1 tablesample regions()") + rs.Check(testkit.Rows()) + tk.MustExec("commit") + + // tablesample should not return error for compatibility of tools like dumpling + tk.MustExec("set @@tidb_snapshot=NOW(6)") + rs = tk.MustQuery("select * from tmp1 tablesample regions()") + rs.Check(testkit.Rows()) + + tk.MustExec("begin") + rs = tk.MustQuery("select * from tmp1 tablesample regions()") + rs.Check(testkit.Rows()) + tk.MustExec("commit") +} diff --git a/executor/point_get.go b/executor/point_get.go index 65200df9e965a..384a8b67722bc 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -42,6 +42,11 @@ import ( ) func (b *executorBuilder) buildPointGet(p *plannercore.PointGetPlan) Executor { + if err := b.validCanReadTemporaryTable(p.TblInfo); err != nil { + b.err = err + return nil + } + startTS, err := b.getSnapshotTS() if err != nil { b.err = err diff --git a/executor/set_test.go b/executor/set_test.go index d0a7967cf1c3b..e0dbb14935521 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -884,9 +884,9 @@ func (s *testSuite5) TestValidateSetVar(c *C) { result = tk.MustQuery("select @@tmp_table_size;") result.Check(testkit.Rows("167772161")) - tk.MustExec("set @@tmp_table_size=18446744073709551615") + tk.MustExec("set @@tmp_table_size=9223372036854775807") result = tk.MustQuery("select @@tmp_table_size;") - result.Check(testkit.Rows("18446744073709551615")) + result.Check(testkit.Rows("9223372036854775807")) _, err = tk.Exec("set @@tmp_table_size=18446744073709551616") c.Assert(terror.ErrorEqual(err, variable.ErrWrongTypeForVar), IsTrue) diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go index 6ca8aac96f0f5..1f313edade59d 100644 --- a/executor/stale_txn_test.go +++ b/executor/stale_txn_test.go @@ -15,6 +15,7 @@ package executor_test import ( "fmt" + "strings" "time" . 
"github.com/pingcap/check" @@ -861,79 +862,88 @@ func (s *testStaleTxnSuite) TestSetTransactionInfoSchema(c *C) { c.Assert(tk.Se.GetInfoSchema().SchemaMetaVersion(), Equals, schemaVer3) } -func (s *testStaleTxnSuite) TestStaleSelect(c *C) { +func (s *testStaleTxnSuite) TestStaleReadTemporaryTable(c *C) { tk := testkit.NewTestKit(c, s.store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - defer tk.MustExec("drop table if exists t") - tk.MustExec("create table t (id int)") - - tolerance := 50 * time.Millisecond - - tk.MustExec("insert into t values (1)") - time.Sleep(tolerance) - time1 := time.Now() - - tk.MustExec("insert into t values (2)") - time.Sleep(tolerance) - time2 := time.Now() - - tk.MustExec("insert into t values (3)") - time.Sleep(tolerance) - - staleRows := testkit.Rows("1") - staleSQL := fmt.Sprintf(`select * from t as of timestamp '%s'`, time1.Format("2006-1-2 15:04:05.000")) + // For mocktikv, safe point is not initialized, we manually insert it for snapshot to use. + safePointName := "tikv_gc_safe_point" + safePointValue := "20160102-15:04:05 -0700" + safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)" + updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s') + ON DUPLICATE KEY + UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment) + tk.MustExec(updateSafePoint) - // test normal stale select - tk.MustQuery(staleSQL).Check(staleRows) + tk.MustExec("set @@tidb_enable_global_temporary_table=1") + tk.MustExec("use test") + tk.MustExec("drop table if exists tmp1") + tk.MustExec("create global temporary table tmp1 " + + "(id int not null primary key, code int not null, value int default null, unique key code(code))" + + "on commit delete rows") + time.Sleep(time.Second) + tk.MustGetErrMsg("select * from tmp1 as of timestamp NOW() where id=1", "can not stale read temporary table") + + queries := []struct { + sql string + }{ + { + sql: "select * from tmp1 where id=1", + }, + { + sql: "select * from tmp1 where code=1", + }, + { + sql: "select * from tmp1 where id in (1, 2, 3)", + }, + { + sql: "select * from tmp1 where code in (1, 2, 3)", + }, + { + sql: "select * from tmp1 where id > 1", + }, + { + sql: "select /*+use_index(tmp1, code)*/ * from tmp1 where code > 1", + }, + { + sql: "select /*+use_index(tmp1, code)*/ code from tmp1 where code > 1", + }, + { + sql: "select /*+ use_index_merge(tmp1, primary, code) */ * from tmp1 where id > 1 or code > 2", + }, + } - // test stale select in txn - tk.MustExec("begin") - c.Assert(tk.ExecToErr(staleSQL), NotNil) - tk.MustExec("commit") + addStaleReadToSQL := func(sql string) string { + idx := strings.Index(sql, " where ") + if idx < 0 { + return "" + } + return sql[0:idx] + " as of timestamp NOW()" + sql[idx:] + } - // test prepared stale select - tk.MustExec(fmt.Sprintf(`prepare s from "%s"`, staleSQL)) - tk.MustQuery("execute s") + for _, query := range queries { + sql := addStaleReadToSQL(query.sql) + if sql != "" { + tk.MustGetErrMsg(sql, "can not stale read temporary table") + } + } - // test prepared stale select in txn - tk.MustExec("begin") - c.Assert(tk.ExecToErr(staleSQL), NotNil) + tk.MustExec("start transaction read only as of timestamp NOW()") + for _, query := range queries { + tk.MustGetErrMsg(query.sql, "can not stale read temporary table") + } tk.MustExec("commit") - // test stale select in stale txn - tk.MustExec(fmt.Sprintf(`start transaction read only as of timestamp '%s'`, 
time2.Format("2006-1-2 15:04:05.000"))) - c.Assert(tk.ExecToErr(staleSQL), NotNil) - tk.MustExec("commit") + for _, query := range queries { + tk.MustExec(query.sql) + } - // test prepared stale select in stale txn - tk.MustExec(fmt.Sprintf(`start transaction read only as of timestamp '%s'`, time2.Format("2006-1-2 15:04:05.000"))) - c.Assert(tk.ExecToErr(staleSQL), NotNil) + tk.MustExec("set transaction read only as of timestamp NOW()") + tk.MustExec("start transaction") + for _, query := range queries { + tk.MustGetErrMsg(query.sql, "can not stale read temporary table") + } tk.MustExec("commit") - // test prepared stale select with schema change - tk.MustExec("alter table t add column c int") - tk.MustExec("insert into t values (4, 5)") - time.Sleep(10 * time.Millisecond) - tk.MustQuery("execute s").Check(staleRows) - - // test dynamic timestamp stale select - time3 := time.Now() - tk.MustExec("alter table t add column d int") - tk.MustExec("insert into t values (4, 4, 4)") - time.Sleep(tolerance) - time4 := time.Now() - staleRows = testkit.Rows("1 ", "2 ", "3 ", "4 5") - tk.MustQuery(fmt.Sprintf("select * from t as of timestamp CURRENT_TIMESTAMP(3) - INTERVAL %d MICROSECOND", time4.Sub(time3).Microseconds())).Check(staleRows) - - // test prepared dynamic timestamp stale select - time5 := time.Now() - tk.MustExec(fmt.Sprintf(`prepare v from "select * from t as of timestamp CURRENT_TIMESTAMP(3) - INTERVAL %d MICROSECOND"`, time5.Sub(time3).Microseconds())) - tk.MustQuery("execute v").Check(staleRows) - - // test point get - time6 := time.Now() - tk.MustExec("insert into t values (5, 5, 5)") - time.Sleep(tolerance) - tk.MustQuery(fmt.Sprintf("select * from t as of timestamp '%s' where c=5", time6.Format("2006-1-2 15:04:05.000"))).Check(testkit.Rows("4 5 ")) + for _, query := range queries { + tk.MustExec(query.sql) + } } diff --git a/kv/option.go b/kv/option.go index de5a1d8834c40..6b1e830e12984 100644 --- a/kv/option.go +++ b/kv/option.go @@ -61,6 +61,8 @@ const ( MatchStoreLabels // ResourceGroupTag indicates the resource group of the kv request. ResourceGroupTag + // KVFilter indicates the filter to ignore key-values in the transaction's memory buffer. 
+ KVFilter ) // ReplicaReadType is the type of replica to read data from diff --git a/planner/core/errors.go b/planner/core/errors.go index 04fa1797d95bd..e501af7676c5f 100644 --- a/planner/core/errors.go +++ b/planner/core/errors.go @@ -94,9 +94,12 @@ var ( ErrCTERecursiveForbiddenJoinOrder = dbterror.ClassOptimizer.NewStd(mysql.ErrCTERecursiveForbiddenJoinOrder) ErrInvalidRequiresSingleReference = dbterror.ClassOptimizer.NewStd(mysql.ErrInvalidRequiresSingleReference) // Since we cannot know if user logged in with a password, use message of ErrAccessDeniedNoPassword instead - ErrAccessDenied = dbterror.ClassOptimizer.NewStdErr(mysql.ErrAccessDenied, mysql.MySQLErrName[mysql.ErrAccessDeniedNoPassword]) - ErrBadNull = dbterror.ClassOptimizer.NewStd(mysql.ErrBadNull) - ErrNotSupportedWithSem = dbterror.ClassOptimizer.NewStd(mysql.ErrNotSupportedWithSem) - ErrAsOf = dbterror.ClassOptimizer.NewStd(mysql.ErrAsOf) - ErrOptOnTemporaryTable = dbterror.ClassOptimizer.NewStd(mysql.ErrOptOnTemporaryTable) + ErrAccessDenied = dbterror.ClassOptimizer.NewStdErr(mysql.ErrAccessDenied, mysql.MySQLErrName[mysql.ErrAccessDeniedNoPassword]) + ErrBadNull = dbterror.ClassOptimizer.NewStd(mysql.ErrBadNull) + ErrNotSupportedWithSem = dbterror.ClassOptimizer.NewStd(mysql.ErrNotSupportedWithSem) + ErrAsOf = dbterror.ClassOptimizer.NewStd(mysql.ErrAsOf) + ErrOptOnTemporaryTable = dbterror.ClassOptimizer.NewStd(mysql.ErrOptOnTemporaryTable) + ErrDropTableOnTemporaryTable = dbterror.ClassOptimizer.NewStd(mysql.ErrDropTableOnTemporaryTable) + // ErrPartitionNoTemporary returns when partition at temporary mode + ErrPartitionNoTemporary = dbterror.ClassOptimizer.NewStd(mysql.ErrPartitionNoTemporary) ) diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index 4dca6ddb50170..5f7190d166d3c 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/parser/format" "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" + "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl" "github.com/pingcap/tidb/domain" @@ -33,6 +34,7 @@ import ( "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/store/tikv/oracle" + "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" driver "github.com/pingcap/tidb/types/parser_driver" "github.com/pingcap/tidb/util" @@ -53,7 +55,7 @@ func InTxnRetry(p *preprocessor) { p.flag |= inTxnRetry } -// WithPreprocessorReturn returns a PreprocessOpt to initialize the PreprocesorReturn. +// WithPreprocessorReturn returns a PreprocessOpt to initialize the PreprocessorReturn. func WithPreprocessorReturn(ret *PreprocessorReturn) PreprocessOpt { return func(p *preprocessor) { p.PreprocessorReturn = ret @@ -91,7 +93,7 @@ func TryAddExtraLimit(ctx sessionctx.Context, node ast.StmtNode) ast.StmtNode { } // Preprocess resolves table names of the node, and checks some statements validation. -// prepreocssReturn used to extract the infoschema for the tableName and the timestamp from the asof clause. +// preprocessReturn used to extract the infoschema for the tableName and the timestamp from the asof clause. 
func Preprocess(ctx sessionctx.Context, node ast.Node, preprocessOpt ...PreprocessOpt) error { v := preprocessor{ctx: ctx, tableAliasInJoin: make([]map[string]interface{}, 0), withName: make(map[string]interface{})} for _, optFn := range preprocessOpt { @@ -339,6 +341,37 @@ func bindableStmtType(node ast.StmtNode) byte { return TypeInvalid } +func (p *preprocessor) tableByName(tn *ast.TableName) (table.Table, error) { + currentDB := p.ctx.GetSessionVars().CurrentDB + if tn.Schema.String() != "" { + currentDB = tn.Schema.L + } + if currentDB == "" { + return nil, errors.Trace(ErrNoDB) + } + sName := model.NewCIStr(currentDB) + tbl, err := p.ensureInfoSchema().TableByName(sName, tn.Name) + if err != nil { + // We should never leak that the table doesn't exist (i.e. attach ErrTableNotExists) + // unless we know that the user has permissions to it, should it exist. + // By checking here, this makes all SELECT/SHOW/INSERT/UPDATE/DELETE statements safe. + currentUser, activeRoles := p.ctx.GetSessionVars().User, p.ctx.GetSessionVars().ActiveRoles + if pm := privilege.GetPrivilegeManager(p.ctx); pm != nil { + if !pm.RequestVerification(activeRoles, sName.L, tn.Name.O, "", mysql.AllPrivMask) { + u := currentUser.Username + h := currentUser.Hostname + if currentUser.AuthHostname != "" { + u = currentUser.AuthUsername + h = currentUser.AuthHostname + } + return nil, ErrTableaccessDenied.GenWithStackByArgs(p.stmtType(), u, h, tn.Name.O) + } + } + return nil, err + } + return tbl, err +} + func (p *preprocessor) checkBindGrammar(originNode, hintedNode ast.StmtNode, defaultDB string) { origTp := bindableStmtType(originNode) hintedTp := bindableStmtType(hintedNode) @@ -357,6 +390,39 @@ func (p *preprocessor) checkBindGrammar(originNode, hintedNode ast.StmtNode, def return } } + + // Check the bind operation is not on any temporary table. + var resNode ast.ResultSetNode + switch n := originNode.(type) { + case *ast.SelectStmt: + resNode = n.From.TableRefs + case *ast.DeleteStmt: + resNode = n.TableRefs.TableRefs + case *ast.UpdateStmt: + resNode = n.TableRefs.TableRefs + case *ast.InsertStmt: + resNode = n.Table.TableRefs + } + if resNode != nil { + tblNames := extractTableList(resNode, nil, false) + for _, tn := range tblNames { + tbl, err := p.tableByName(tn) + if err != nil { + // If the operation is order is: drop table -> drop binding + // The table doesn't exist, it is not an error. 
+ if terror.ErrorEqual(err, infoschema.ErrTableNotExists) { + continue + } + p.err = err + return + } + if tbl.Meta().TempTableType != model.TempTableNone { + p.err = ddl.ErrOptOnTemporaryTable.GenWithStackByArgs("create binding") + return + } + } + } + originSQL := parser.Normalize(utilparser.RestoreWithDefaultDB(originNode, defaultDB, originNode.Text())) hintedSQL := parser.Normalize(utilparser.RestoreWithDefaultDB(hintedNode, defaultDB, hintedNode.Text())) if originSQL != hintedSQL { @@ -600,17 +666,7 @@ func (p *preprocessor) checkDropDatabaseGrammar(stmt *ast.DropDatabaseStmt) { func (p *preprocessor) checkAdminCheckTableGrammar(stmt *ast.AdminStmt) { for _, table := range stmt.Tables { - currentDB := p.ctx.GetSessionVars().CurrentDB - if table.Schema.String() != "" { - currentDB = table.Schema.L - } - if currentDB == "" { - p.err = errors.Trace(ErrNoDB) - return - } - sName := model.NewCIStr(currentDB) - tName := table.Name - tableInfo, err := p.ensureInfoSchema().TableByName(sName, tName) + tableInfo, err := p.tableByName(table) if err != nil { p.err = err return @@ -639,10 +695,19 @@ func (p *preprocessor) checkCreateTableGrammar(stmt *ast.CreateTableStmt) { p.err = err return } - if tableInfo.Meta().TempTableType != model.TempTableNone { + tableMetaInfo := tableInfo.Meta() + if tableMetaInfo.TempTableType != model.TempTableNone { p.err = ErrOptOnTemporaryTable.GenWithStackByArgs("create table like") return } + if stmt.TemporaryKeyword != ast.TemporaryNone { + err := checkReferInfoForTemporaryTable(tableMetaInfo) + if err != nil { + p.err = err + return + } + + } } if stmt.TemporaryKeyword != ast.TemporaryNone { for _, opt := range stmt.Options { @@ -774,6 +839,29 @@ func (p *preprocessor) checkDropTableGrammar(stmt *ast.DropTableStmt) { p.err = expression.ErrFunctionsNoopImpl.GenWithStackByArgs("DROP TEMPORARY TABLE") return } + if stmt.TemporaryKeyword == ast.TemporaryNone { + return + } + currentDB := model.NewCIStr(p.ctx.GetSessionVars().CurrentDB) + for _, t := range stmt.Tables { + if isIncorrectName(t.Name.String()) { + p.err = ddl.ErrWrongTableName.GenWithStackByArgs(t.Name.String()) + return + } + if t.Schema.String() != "" { + currentDB = t.Schema + } + tableInfo, err := p.ensureInfoSchema().TableByName(currentDB, t.Name) + if err != nil { + p.err = err + return + } + if tableInfo.Meta().TempTableType != model.TempTableGlobal { + p.err = ErrDropTableOnTemporaryTable + return + } + } + } func (p *preprocessor) checkDropTableNames(tables []*ast.TableName) { @@ -1023,6 +1111,23 @@ func checkTableEngine(engineName string) error { return nil } +func checkReferInfoForTemporaryTable(tableMetaInfo *model.TableInfo) error { + if tableMetaInfo.AutoRandomBits != 0 { + return ErrOptOnTemporaryTable.GenWithStackByArgs("auto_random") + } + if tableMetaInfo.PreSplitRegions != 0 { + return ErrOptOnTemporaryTable.GenWithStackByArgs("pre split regions") + } + if tableMetaInfo.Partition != nil { + return ErrPartitionNoTemporary + } + if tableMetaInfo.ShardRowIDBits != 0 { + return ErrOptOnTemporaryTable.GenWithStackByArgs("shard_row_id_bits") + } + + return nil +} + // checkColumn checks if the column definition is valid. 
// See https://dev.mysql.com/doc/refman/5.7/en/storage-requirements.html func checkColumn(colDef *ast.ColumnDef) error { @@ -1246,27 +1351,12 @@ func (p *preprocessor) handleTableName(tn *ast.TableName) { return } - table, err := p.ensureInfoSchema().TableByName(tn.Schema, tn.Name) + table, err := p.tableByName(tn) if err != nil { - // We should never leak that the table doesn't exist (i.e. attach ErrTableNotExists) - // unless we know that the user has permissions to it, should it exist. - // By checking here, this makes all SELECT/SHOW/INSERT/UPDATE/DELETE statements safe. - currentUser, activeRoles := p.ctx.GetSessionVars().User, p.ctx.GetSessionVars().ActiveRoles - if pm := privilege.GetPrivilegeManager(p.ctx); pm != nil { - if !pm.RequestVerification(activeRoles, tn.Schema.L, tn.Name.O, "", mysql.AllPrivMask) { - u := currentUser.Username - h := currentUser.Hostname - if currentUser.AuthHostname != "" { - u = currentUser.AuthUsername - h = currentUser.AuthHostname - } - p.err = ErrTableaccessDenied.GenWithStackByArgs(p.stmtType(), u, h, tn.Name.O) - return - } - } p.err = err return } + tableInfo := table.Meta() dbInfo, _ := p.ensureInfoSchema().SchemaByName(tn.Schema) // tableName should be checked as sequence object. @@ -1450,6 +1540,7 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { if p.err != nil { return } + p.ExplicitStaleness = true } p.initedLastSnapshotTS = true } diff --git a/planner/core/preprocess_test.go b/planner/core/preprocess_test.go index d9f053f509e92..e32148c572728 100644 --- a/planner/core/preprocess_test.go +++ b/planner/core/preprocess_test.go @@ -337,3 +337,29 @@ func (s *testValidatorSuite) TestForeignKey(c *C) { s.runSQL(c, "ALTER TABLE test.t1 ADD CONSTRAINT fk FOREIGN KEY (c) REFERENCES test2.t (e)", false, nil) } + +func (s *testValidatorSuite) TestDropGlobalTempTable(c *C) { + defer testleak.AfterTest(c)() + defer func() { + s.dom.Close() + s.store.Close() + }() + + ctx := context.Background() + execSQLList := []string{ + "use test", + "set tidb_enable_global_temporary_table=true", + "create table tb(id int);", + "create global temporary table temp(id int) on commit delete rows;", + "create global temporary table temp1(id int) on commit delete rows;", + } + for _, execSQL := range execSQLList { + _, err := s.se.Execute(ctx, execSQL) + c.Assert(err, IsNil) + } + s.is = s.dom.InfoSchema() + s.runSQL(c, "drop global temporary table tb;", false, core.ErrDropTableOnTemporaryTable) + s.runSQL(c, "drop global temporary table temp", false, nil) + s.runSQL(c, "drop global temporary table test.tb;", false, core.ErrDropTableOnTemporaryTable) + s.runSQL(c, "drop global temporary table test.temp1", false, nil) +} diff --git a/session/session.go b/session/session.go index 880fa13a6671d..7ef59197fb3ca 100644 --- a/session/session.go +++ b/session/session.go @@ -70,6 +70,7 @@ import ( "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/statistics/handle" "github.com/pingcap/tidb/store/tikv" + tikvstore "github.com/pingcap/tidb/store/tikv/kv" tikvutil "github.com/pingcap/tidb/store/tikv/util" "github.com/pingcap/tidb/tablecodec" "github.com/pingcap/tidb/telemetry" @@ -83,6 +84,7 @@ import ( "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/sli" "github.com/pingcap/tidb/util/sqlexec" + "github.com/pingcap/tidb/util/tableutil" "github.com/pingcap/tidb/util/timeutil" ) @@ -474,10 +476,6 @@ func (s *session) 
doCommit(ctx context.Context) error { if err != nil { return err } - if err = s.removeTempTableFromBuffer(); err != nil { - return err - } - // mockCommitError and mockGetTSErrorInRetry use to test PR #8743. failpoint.Inject("mockCommitError", func(val failpoint.Value) { if val.(bool) && tikv.IsMockCommitErrorEnable() { @@ -535,41 +533,23 @@ func (s *session) doCommit(ctx context.Context) error { s.txn.SetOption(kv.GuaranteeLinearizability, s.GetSessionVars().TxnCtx.IsExplicit && s.GetSessionVars().GuaranteeLinearizability) } + if tables := s.GetSessionVars().TxnCtx.GlobalTemporaryTables; len(tables) > 0 { + s.txn.SetOption(kv.KVFilter, temporaryTableKVFilter(tables)) + } return s.txn.Commit(tikvutil.SetSessionID(ctx, s.GetSessionVars().ConnectionID)) } -// removeTempTableFromBuffer filters out the temporary table key-values. -func (s *session) removeTempTableFromBuffer() error { - tables := s.GetSessionVars().TxnCtx.GlobalTemporaryTables - if len(tables) == 0 { - return nil - } - memBuffer := s.txn.GetMemBuffer() - // Reset and new an empty stage buffer. - defer func() { - s.txn.cleanup() - }() - for tid := range tables { - seekKey := tablecodec.EncodeTablePrefix(tid) - endKey := tablecodec.EncodeTablePrefix(tid + 1) - iter, err := memBuffer.Iter(seekKey, endKey) - if err != nil { - return err - } - for iter.Valid() && iter.Key().HasPrefix(seekKey) { - if err = memBuffer.Delete(iter.Key()); err != nil { - return err - } - s.txn.UpdateEntriesCountAndSize() - if err = iter.Next(); err != nil { - return err - } - } +type temporaryTableKVFilter map[int64]tableutil.TempTable + +func (m temporaryTableKVFilter) IsUnnecessaryKeyValue(key, value []byte, flags tikvstore.KeyFlags) bool { + tid := tablecodec.DecodeTableID(key) + if _, ok := m[tid]; ok { + return true } - // Flush to the root membuffer. - s.txn.flushStmtBuf() - return nil + + // This is the default filter for all tables. + return tablecodec.IsUntouchedIndexKValue(key, value) } // errIsNoisy is used to filter DUPLCATE KEY errors. diff --git a/session/session_test.go b/session/session_test.go index 69fc02bcf803a..6cd608f7b5e59 100644 --- a/session/session_test.go +++ b/session/session_test.go @@ -37,6 +37,7 @@ import ( "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/ddl/placement" "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" @@ -4593,6 +4594,36 @@ func (s *testSessionSuite) TestInTxnPSProtoPointGet(c *C) { tk.MustExec("commit") } +func (s *testSessionSuite) TestTMPTableSize(c *C) { + // Test the @@tmp_table_size system variable. + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_global_temporary_table=on") + tk.MustExec("create global temporary table t (c1 int, c2 varchar(512)) on commit delete rows") + + tk.MustQuery("select @@global.tmp_table_size").Check(testkit.Rows(strconv.Itoa(variable.DefTMPTableSize))) + c.Assert(tk.Se.GetSessionVars().TMPTableSize, Equals, int64(variable.DefTMPTableSize)) + + // Min value 1024, so the result is change to 1024, with a warning. + tk.MustExec("set @@global.tmp_table_size = 123") + tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1292 Truncated incorrect tmp_table_size value: '123'")) + + // Change the session scope value. 
+ tk.MustExec("set @@session.tmp_table_size = 2097152") + c.Assert(tk.Se.GetSessionVars().TMPTableSize, Equals, int64(2097152)) + + // Check in another sessin, change session scope value does not affect the global scope. + tk1 := testkit.NewTestKit(c, s.store) + tk1.MustQuery("select @@global.tmp_table_size").Check(testkit.Rows("1024")) + + // The value is now 1024, check the error when table size exceed it. + tk.MustExec("set @@session.tmp_table_size = 1024") + tk.MustExec("begin") + tk.MustExec("insert into t values (1, repeat('x', 512))") + tk.MustExec("insert into t values (1, repeat('x', 512))") + tk.MustGetErrCode("insert into t values (1, repeat('x', 512))", errno.ErrRecordFileFull) +} + func (s *testSessionSuite) TestTiDBEnableGlobalTemporaryTable(c *C) { // Test the @@tidb_enable_global_temporary_table system variable. tk := testkit.NewTestKit(c, s.store) diff --git a/sessionctx/variable/noop.go b/sessionctx/variable/noop.go index 93b3c1c9ce084..fe3af2d33605d 100644 --- a/sessionctx/variable/noop.go +++ b/sessionctx/variable/noop.go @@ -83,7 +83,6 @@ var noopSysVars = []*SysVar{ {Scope: ScopeNone, Name: "performance_schema_max_statement_classes", Value: "168"}, {Scope: ScopeGlobal, Name: "server_id", Value: "0"}, {Scope: ScopeGlobal, Name: "innodb_flushing_avg_loops", Value: "30"}, - {Scope: ScopeGlobal | ScopeSession, Name: TmpTableSize, Value: "16777216", Type: TypeUnsigned, MinValue: 1024, MaxValue: math.MaxUint64, AutoConvertOutOfRange: true, IsHintUpdatable: true}, {Scope: ScopeGlobal, Name: "innodb_max_purge_lag", Value: "0"}, {Scope: ScopeGlobal | ScopeSession, Name: "preload_buffer_size", Value: "32768"}, {Scope: ScopeGlobal, Name: CheckProxyUsers, Value: Off, Type: TypeBool}, diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 813841c6bf051..069297eb837fc 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -844,6 +844,12 @@ type SessionVars struct { // see https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_cte_max_recursion_depth CTEMaxRecursionDepth int + // The temporary table size threshold + // In MySQL, when a temporary table exceed this size, it spills to disk. + // In TiDB, as we do not support spill to disk for now, an error is reported. 
+ // See https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_tmp_table_size + TMPTableSize int64 + // EnableGlobalTemporaryTable indicates whether to enable global temporary table EnableGlobalTemporaryTable bool } @@ -1058,6 +1064,7 @@ func NewSessionVars() *SessionVars { EnableIndexMergeJoin: DefTiDBEnableIndexMergeJoin, AllowFallbackToTiKV: make(map[kv.StoreType]struct{}), CTEMaxRecursionDepth: DefCTEMaxRecursionDepth, + TMPTableSize: DefTMPTableSize, EnableGlobalTemporaryTable: DefTiDBEnableGlobalTemporaryTable, } vars.KVVars = tikvstore.NewVariables(&vars.Killed) diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index e9abf05d1633f..6db3bba0fc146 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -1692,8 +1692,14 @@ var defaultSysVars = []*SysVar{ {Scope: ScopeGlobal, Name: TiDBGCRunInterval, Value: "10m0s", Type: TypeDuration, MinValue: int64(time.Minute * 10), MaxValue: math.MaxInt64}, {Scope: ScopeGlobal, Name: TiDBGCLifetime, Value: "10m0s", Type: TypeDuration, MinValue: int64(time.Minute * 10), MaxValue: math.MaxInt64}, {Scope: ScopeGlobal, Name: TiDBGCConcurrency, Value: "-1", Type: TypeInt, MinValue: 1, MaxValue: 128, AllowAutoValue: true}, + {Scope: ScopeGlobal, Name: TiDBGCScanLockMode, Value: "PHYSICAL", Type: TypeEnum, PossibleValues: []string{"PHYSICAL", "LEGACY"}}, {Scope: ScopeGlobal, Name: TiDBGCScanLockMode, Value: "LEGACY", Type: TypeEnum, PossibleValues: []string{"PHYSICAL", "LEGACY"}}, + // See https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_tmp_table_size + {Scope: ScopeGlobal | ScopeSession, Name: TMPTableSize, Value: strconv.Itoa(DefTMPTableSize), Type: TypeUnsigned, MinValue: 1024, MaxValue: math.MaxInt64, AutoConvertOutOfRange: true, IsHintUpdatable: true, AllowEmpty: true, SetSession: func(s *SessionVars, val string) error { + s.TMPTableSize = tidbOptInt64(val, DefTMPTableSize) + return nil + }}, // variable for top SQL feature. {Scope: ScopeGlobal, Name: TiDBEnableTopSQL, Value: BoolToOnOff(DefTiDBTopSQLEnable), Type: TypeBool, Hidden: true, AllowEmpty: true, GetSession: func(s *SessionVars) (string, error) { return BoolToOnOff(TopSQLVariable.Enable.Load()), nil @@ -1859,8 +1865,8 @@ const ( MaxConnectErrors = "max_connect_errors" // TableDefinitionCache is the name for 'table_definition_cache' system variable. TableDefinitionCache = "table_definition_cache" - // TmpTableSize is the name for 'tmp_table_size' system variable. - TmpTableSize = "tmp_table_size" + // TMPTableSize is the name for 'tmp_table_size' system variable. + TMPTableSize = "tmp_table_size" // Timestamp is the name for 'timestamp' system variable. Timestamp = "timestamp" // ConnectTimeout is the name for 'connect_timeout' system variable. diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 62be28a7d31ef..7d626686728db 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -720,6 +720,7 @@ const ( DefTiDBTopSQLReportIntervalSeconds = 60 DefTiDBEnableGlobalTemporaryTable = false DefTiDBEnableLocalTxn = false + DefTMPTableSize = 16777216 ) // Process global variables. 
diff --git a/sessionctx/variable/varsutil_test.go b/sessionctx/variable/varsutil_test.go index 78a5b5c492378..41f379a9d6353 100644 --- a/sessionctx/variable/varsutil_test.go +++ b/sessionctx/variable/varsutil_test.go @@ -104,6 +104,7 @@ func (s *testVarsutilSuite) TestNewSessionVars(c *C) { c.Assert(vars.ShardAllocateStep, Equals, int64(DefTiDBShardAllocateStep)) c.Assert(vars.AnalyzeVersion, Equals, DefTiDBAnalyzeVersion) c.Assert(vars.CTEMaxRecursionDepth, Equals, DefCTEMaxRecursionDepth) + c.Assert(vars.TMPTableSize, Equals, int64(DefTMPTableSize)) assertFieldsGreaterThanZero(c, reflect.ValueOf(vars.MemQuota)) assertFieldsGreaterThanZero(c, reflect.ValueOf(vars.BatchSize)) diff --git a/store/driver/txn/txn_driver.go b/store/driver/txn/txn_driver.go index 886b0df39900a..93a4f4a9508df 100644 --- a/store/driver/txn/txn_driver.go +++ b/store/driver/txn/txn_driver.go @@ -176,6 +176,8 @@ func (txn *tikvTxn) SetOption(opt int, val interface{}) { txn.KVTxn.GetSnapshot().SetMatchStoreLabels(val.([]*metapb.StoreLabel)) case kv.ResourceGroupTag: txn.KVTxn.SetResourceGroupTag(val.([]byte)) + case kv.KVFilter: + txn.KVTxn.SetKVFilter(val.(tikv.KVFilter)) } } diff --git a/table/table.go b/table/table.go index b39304adcaaa6..f76210a9bdd42 100644 --- a/table/table.go +++ b/table/table.go @@ -97,6 +97,8 @@ var ( ErrSequenceHasRunOut = dbterror.ClassTable.NewStd(mysql.ErrSequenceRunOut) // ErrRowDoesNotMatchGivenPartitionSet returns when the destination partition conflict with the partition selection. ErrRowDoesNotMatchGivenPartitionSet = dbterror.ClassTable.NewStd(mysql.ErrRowDoesNotMatchGivenPartitionSet) + // ErrTempTableFull returns a table is full error, it's used by temporary table now. + ErrTempTableFull = dbterror.ClassTable.NewStd(mysql.ErrRecordFileFull) ) // RecordIterFunc is used for low-level record iteration. diff --git a/table/tables/tables.go b/table/tables/tables.go index 898d5668063c7..326bdd4701213 100644 --- a/table/tables/tables.go +++ b/table/tables/tables.go @@ -324,7 +324,12 @@ func (t *TableCommon) UpdateRecord(ctx context.Context, sctx sessionctx.Context, defer memBuffer.Cleanup(sh) if m := t.Meta(); m.TempTableType == model.TempTableGlobal { - addTemporaryTable(sctx, m) + if tmpTable := addTemporaryTable(sctx, m); tmpTable != nil { + if tmpTable.GetSize() > sctx.GetSessionVars().TMPTableSize { + return table.ErrTempTableFull.GenWithStackByArgs(m.Name.O) + } + defer handleTempTableSize(tmpTable, txn.Size(), txn) + } } var colIDs, binlogColIDs []int64 @@ -590,9 +595,20 @@ func TryGetCommonPkColumns(tbl table.Table) []*table.Column { return pkCols } -func addTemporaryTable(sctx sessionctx.Context, tblInfo *model.TableInfo) { +func addTemporaryTable(sctx sessionctx.Context, tblInfo *model.TableInfo) tableutil.TempTable { tempTable := sctx.GetSessionVars().GetTemporaryTable(tblInfo) tempTable.SetModified(true) + return tempTable +} + +// The size of a temporary table is calculated by accumulating the transaction size delta. +func handleTempTableSize(t tableutil.TempTable, txnSizeBefore int, txn kv.Transaction) { + txnSizeNow := txn.Size() + delta := txnSizeNow - txnSizeBefore + + oldSize := t.GetSize() + newSize := oldSize + int64(delta) + t.SetSize(newSize) } // AddRecord implements table.Table AddRecord interface. @@ -608,7 +624,12 @@ func (t *TableCommon) AddRecord(sctx sessionctx.Context, r []types.Datum, opts . 
} if m := t.Meta(); m.TempTableType == model.TempTableGlobal { - addTemporaryTable(sctx, m) + if tmpTable := addTemporaryTable(sctx, m); tmpTable != nil { + if tmpTable.GetSize() > sctx.GetSessionVars().TMPTableSize { + return nil, table.ErrTempTableFull.GenWithStackByArgs(m.Name.O) + } + defer handleTempTableSize(tmpTable, txn.Size(), txn) + } } var ctx context.Context @@ -1009,8 +1030,17 @@ func (t *TableCommon) RemoveRecord(ctx sessionctx.Context, h kv.Handle, r []type return err } + txn, err := ctx.Txn(true) + if err != nil { + return err + } if m := t.Meta(); m.TempTableType == model.TempTableGlobal { - addTemporaryTable(ctx, m) + if tmpTable := addTemporaryTable(ctx, m); tmpTable != nil { + if tmpTable.GetSize() > ctx.GetSessionVars().TMPTableSize { + return table.ErrTempTableFull.GenWithStackByArgs(m.Name.O) + } + defer handleTempTableSize(tmpTable, txn.Size(), txn) + } } // The table has non-public column and this column is doing the operation of "modify/change column". @@ -1795,6 +1825,8 @@ type TemporaryTable struct { stats *statistics.Table // The autoID allocator of this table. autoIDAllocator autoid.Allocator + // Table size. + size int64 } // TempTableFromMeta builds a TempTable from model.TableInfo. @@ -1825,3 +1857,13 @@ func (t *TemporaryTable) GetModified() bool { func (t *TemporaryTable) GetStats() interface{} { return t.stats } + +// GetSize gets the table size. +func (t *TemporaryTable) GetSize() int64 { + return t.size +} + +// SetSize sets the table size. +func (t *TemporaryTable) SetSize(v int64) { + t.size = v +} diff --git a/util/tableutil/tableutil.go b/util/tableutil/tableutil.go index 11cbe626dcc56..bf5d7caac2732 100644 --- a/util/tableutil/tableutil.go +++ b/util/tableutil/tableutil.go @@ -33,6 +33,9 @@ type TempTable interface { // The stats of this table (*statistics.Table). // Define the return type as interface{} here to avoid cycle imports. GetStats() interface{} + + GetSize() int64 + SetSize(int64) } // TempTableFromMeta builds a TempTable from *model.TableInfo. 
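A minimal, self-contained sketch of the mechanism this patch introduces, under simplified assumptions: the stand-in types tempTable, kvFilter, and writeRow below are hypothetical names, not the real TiDB SessionVars/TempTable/KVFilter APIs. It only shows the two decisions the hunks above add, namely that keys of a global temporary table are filtered out of the commit, and that a write fails once the accumulated transaction-size delta exceeds tmp_table_size.

package main

import (
	"errors"
	"fmt"
)

// tempTable is a simplified stand-in for tableutil.TempTable: it only tracks
// the accumulated byte size written to the table in the current transaction.
type tempTable struct{ size int64 }

func (t *tempTable) GetSize() int64  { return t.size }
func (t *tempTable) SetSize(v int64) { t.size = v }

// kvFilter mirrors the idea of temporaryTableKVFilter: any key that belongs
// to a registered temporary table is "unnecessary" and is skipped at commit.
type kvFilter map[int64]*tempTable

func (m kvFilter) isUnnecessaryKeyValue(tableID int64) bool {
	_, ok := m[tableID]
	return ok
}

var errTempTableFull = errors.New("temporary table is full")

// writeRow mirrors the check added to AddRecord/UpdateRecord/RemoveRecord:
// reject the write once the accumulated size exceeds the tmp_table_size
// threshold, otherwise add this write's size delta to the running total.
func writeRow(t *tempTable, tmpTableSize, rowBytes int64) error {
	if t.GetSize() > tmpTableSize {
		return errTempTableFull
	}
	t.SetSize(t.GetSize() + rowBytes)
	return nil
}

func main() {
	const tmpTableSize = 1024  // pretend @@tmp_table_size sits at its 1024 minimum
	tables := kvFilter{42: {}} // table ID 42 is a global temporary table
	for i := 1; ; i++ {
		if err := writeRow(tables[42], tmpTableSize, 512); err != nil {
			fmt.Printf("insert %d rejected: %v\n", i, err)
			break
		}
	}
	// At commit time, every key of table 42 would be dropped by the filter.
	fmt.Println("skip keys of table 42:", tables.isUnnecessaryKeyValue(42))
}

The real patch wires the same idea through kv.KVFilter set in doCommit and through the size checks in tables.go; the sketch only isolates the accounting and filtering decisions for illustration.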
From 5b08e4c0ca44bedb872097bd4260276ab58da417 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Tue, 29 Jun 2021 20:11:26 +0800 Subject: [PATCH 04/38] telemetry: fix the bug about unnecessary error when run tidb only (#25264) (#25555) --- telemetry/data_slow_query.go | 2 +- telemetry/data_window.go | 10 ++++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/telemetry/data_slow_query.go b/telemetry/data_slow_query.go index dd73097293e6c..74526f96a18f0 100644 --- a/telemetry/data_slow_query.go +++ b/telemetry/data_slow_query.go @@ -143,7 +143,7 @@ func init() { currentSQBInfo["+Inf"] = 0 if mysql.TiDBReleaseVersion != "None" { - logutil.BgLogger().Info("Telemetry slow query stats initialized", zap.String("currentSQBInfo", currentSQBInfo.String()), zap.String("lastSQBInfo", lastSQBInfo.String())) + logutil.BgLogger().Debug("Telemetry slow query stats initialized", zap.String("currentSQBInfo", currentSQBInfo.String()), zap.String("lastSQBInfo", lastSQBInfo.String())) } } diff --git a/telemetry/data_window.go b/telemetry/data_window.go index cb86f23bc09a2..0311f3f02535f 100644 --- a/telemetry/data_window.go +++ b/telemetry/data_window.go @@ -18,7 +18,6 @@ import ( "sync" "time" - "github.com/pingcap/errors" "github.com/pingcap/tidb/domain/infosync" "github.com/pingcap/tidb/store/tikv/logutil" "github.com/prometheus/client_golang/api" @@ -110,12 +109,8 @@ func readSQLMetric(timepoint time.Time, SQLResult *sqlUsageData) error { promQL := "sum(tidb_executor_statement_total{}) by (instance,type)" result, err := querySQLMetric(ctx, timepoint, promQL) if err != nil { - if err1, ok := err.(*promv1.Error); ok { - return errors.Errorf("query metric error, msg: %v, detail: %v", err1.Msg, err1.Detail) - } - return errors.Errorf("query metric error: %v", err.Error()) + logutil.BgLogger().Warn("querySQLMetric got error") } - anylisSQLUsage(result, SQLResult) return nil } @@ -155,6 +150,9 @@ func querySQLMetric(ctx context.Context, queryTime time.Time, promQL string) (re } func anylisSQLUsage(promResult pmodel.Value, SQLResult *sqlUsageData) { + if promResult == nil { + return + } switch promResult.Type() { case pmodel.ValVector: matrix := promResult.(pmodel.Vector) From e6c94d9edeebfc43cadf06348cb158e25f29c762 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Tue, 6 Jul 2021 05:29:28 +0800 Subject: [PATCH 05/38] session: force tidb_multi_statement_mode=OFF when mode=WARN (#25723) (#25751) --- session/bootstrap.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/session/bootstrap.go b/session/bootstrap.go index fd448bd0eeef8..6224443b98902 100644 --- a/session/bootstrap.go +++ b/session/bootstrap.go @@ -492,11 +492,14 @@ const ( version69 = 69 // version70 adds mysql.user.plugin to allow multiple authentication plugins version70 = 70 + // version71 forces tidb_multi_statement_mode=OFF when tidb_multi_statement_mode=WARN + // This affects upgrades from v4.0 where the default was WARN. + version71 = 71 ) // currentBootstrapVersion is defined as a variable, so we can modify its value for testing. 
// please make sure this is the largest version -var currentBootstrapVersion int64 = version70 +var currentBootstrapVersion int64 = version71 var ( bootstrapVersion = []func(Session, int64){ @@ -570,6 +573,7 @@ var ( upgradeToVer68, upgradeToVer69, upgradeToVer70, + upgradeToVer71, } ) @@ -1507,6 +1511,13 @@ func upgradeToVer70(s Session, ver int64) { mustExecute(s, "UPDATE HIGH_PRIORITY mysql.user SET plugin='mysql_native_password'") } +func upgradeToVer71(s Session, ver int64) { + if ver >= version71 { + return + } + mustExecute(s, "UPDATE mysql.global_variables SET VARIABLE_VALUE='OFF' WHERE VARIABLE_NAME = 'tidb_multi_statement_mode' AND VARIABLE_VALUE = 'WARN'") +} + func writeOOMAction(s Session) { comment := "oom-action is `log` by default in v3.0.x, `cancel` by default in v4.0.11+" mustExecute(s, `INSERT HIGH_PRIORITY INTO %n.%n VALUES (%?, %?, %?) ON DUPLICATE KEY UPDATE VARIABLE_VALUE= %?`, From e96c2a6da635923d8964a2eedfb79ca2583d26c1 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Wed, 7 Jul 2021 20:11:29 +0800 Subject: [PATCH 06/38] planner: generate wrong plan when update has subquery (#25660) (#25698) --- planner/core/point_get_plan.go | 6 ++++++ planner/core/point_get_plan_test.go | 13 +++++++++++++ 2 files changed, 19 insertions(+) diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index 6562894eae2db..c3d5c43107062 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -1280,6 +1280,12 @@ func findInPairs(colName string, pairs []nameValuePair) int { } func tryUpdatePointPlan(ctx sessionctx.Context, updateStmt *ast.UpdateStmt) Plan { + // avoid using the point_get when assignment_list contains the subquery in the UPDATE. + for _, list := range updateStmt.List { + if _, ok := list.Expr.(*ast.SubqueryExpr); ok { + return nil + } + } selStmt := &ast.SelectStmt{ Fields: &ast.FieldList{}, From: updateStmt.TableRefs, diff --git a/planner/core/point_get_plan_test.go b/planner/core/point_get_plan_test.go index 1306da4bed42c..b67bcf08cb465 100644 --- a/planner/core/point_get_plan_test.go +++ b/planner/core/point_get_plan_test.go @@ -206,6 +206,19 @@ func (s *testPointGetSuite) TestPointGetForUpdate(c *C) { tk.MustExec("rollback") } +func (s *testPointGetSuite) TestPointGetForUpdateWithSubquery(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("CREATE TABLE users (id bigint(20) unsigned NOT NULL primary key, name longtext DEFAULT NULL, company_id bigint(20) DEFAULT NULL)") + tk.MustExec("create table companies(id bigint primary key, name longtext default null)") + tk.MustExec("insert into companies values(14, 'Company14')") + tk.MustExec("insert into companies values(15, 'Company15')") + tk.MustExec("insert into users(id, company_id, name) values(239, 15, 'xxxx')") + tk.MustExec("UPDATE users SET name=(SELECT name FROM companies WHERE companies.id = users.company_id) WHERE id = 239") + + tk.MustQuery("select * from users").Check(testkit.Rows("239 Company15 15")) +} + func checkUseForUpdate(tk *testkit.TestKit, c *C, expectLock bool) { res := tk.MustQuery("explain format = 'brief' select * from fu where id = 6 for update") // Point_Get_1 1.00 root table:fu, handle:6 From 8d622028300288174695350de5984f1ed2ca6547 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 12 Jul 2021 11:43:32 +0800 Subject: [PATCH 07/38] planner: support stable result mode (#25971) (#25995) --- 
executor/set_test.go | 10 + planner/core/optimizer.go | 11 + planner/core/point_get_plan.go | 5 + planner/core/rule_stabilize_results.go | 125 +++++ planner/core/rule_stabilize_results_test.go | 218 ++++++++ .../testdata/stable_result_mode_suite_in.json | 83 +++ .../stable_result_mode_suite_out.json | 485 ++++++++++++++++++ sessionctx/variable/session.go | 3 + sessionctx/variable/sysvar.go | 4 + sessionctx/variable/tidb_vars.go | 4 + 10 files changed, 948 insertions(+) create mode 100644 planner/core/rule_stabilize_results.go create mode 100644 planner/core/rule_stabilize_results_test.go create mode 100644 planner/core/testdata/stable_result_mode_suite_in.json create mode 100644 planner/core/testdata/stable_result_mode_suite_out.json diff --git a/executor/set_test.go b/executor/set_test.go index e0dbb14935521..b4e5646ae4b02 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -508,6 +508,16 @@ func (s *testSerialSuite1) TestSetVar(c *C) { tk.MustQuery(`show warnings`).Check(testkit.Rows()) tk.MustExec("set @@tidb_enable_clustered_index = 'int_only'") tk.MustQuery(`show warnings`).Check(testkit.Rows("Warning 1287 'INT_ONLY' is deprecated and will be removed in a future release. Please use 'ON' or 'OFF' instead")) + + // test for tidb_enable_stable_result_mode + tk.MustQuery(`select @@tidb_enable_stable_result_mode`).Check(testkit.Rows("0")) + tk.MustExec(`set global tidb_enable_stable_result_mode = 1`) + tk.MustQuery(`select @@global.tidb_enable_stable_result_mode`).Check(testkit.Rows("1")) + tk.MustExec(`set global tidb_enable_stable_result_mode = 0`) + tk.MustQuery(`select @@global.tidb_enable_stable_result_mode`).Check(testkit.Rows("0")) + tk.MustExec(`set tidb_enable_stable_result_mode=1`) + tk.MustQuery(`select @@global.tidb_enable_stable_result_mode`).Check(testkit.Rows("0")) + tk.MustQuery(`select @@tidb_enable_stable_result_mode`).Check(testkit.Rows("1")) } func (s *testSuite5) TestTruncateIncorrectIntSessionVar(c *C) { diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index 59c228767171a..aa9e7752de3b5 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -48,6 +48,7 @@ var IsReadOnly func(node ast.Node, vars *variable.SessionVars) bool const ( flagGcSubstitute uint64 = 1 << iota flagPrunColumns + flagStabilizeResults flagBuildKeyInfo flagDecorrelate flagEliminateAgg @@ -65,6 +66,7 @@ const ( var optRuleList = []logicalOptRule{ &gcSubstituter{}, &columnPruner{}, + &resultsStabilizer{}, &buildKeySolver{}, &decorrelateSolver{}, &aggregationEliminator{}, @@ -132,12 +134,21 @@ func CheckTableLock(ctx sessionctx.Context, is infoschema.InfoSchema, vs []visit return nil } +func checkStableResultMode(sctx sessionctx.Context) bool { + s := sctx.GetSessionVars() + st := s.StmtCtx + return s.EnableStableResultMode && (!st.InInsertStmt && !st.InUpdateStmt && !st.InDeleteStmt && !st.InLoadDataStmt) +} + // DoOptimize optimizes a logical plan to a physical plan. 
func DoOptimize(ctx context.Context, sctx sessionctx.Context, flag uint64, logic LogicalPlan) (PhysicalPlan, float64, error) { // if there is something after flagPrunColumns, do flagPrunColumnsAgain if flag&flagPrunColumns > 0 && flag-flagPrunColumns > flagPrunColumns { flag |= flagPrunColumnsAgain } + if checkStableResultMode(sctx) { + flag |= flagStabilizeResults + } logic, err := logicalOptimize(ctx, flag, logic) if err != nil { return nil, 0, err diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index c3d5c43107062..a15f89a967ba4 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -445,6 +445,11 @@ type PointPlanVal struct { // TryFastPlan tries to use the PointGetPlan for the query. func TryFastPlan(ctx sessionctx.Context, node ast.Node) (p Plan) { + if checkStableResultMode(ctx) { + // the rule of stabilizing results has not taken effect yet, so cannot generate a plan here in this mode + return nil + } + ctx.GetSessionVars().PlanID = 0 ctx.GetSessionVars().PlanColumnID = 0 switch x := node.(type) { diff --git a/planner/core/rule_stabilize_results.go b/planner/core/rule_stabilize_results.go new file mode 100644 index 0000000000000..f327bb70a98f3 --- /dev/null +++ b/planner/core/rule_stabilize_results.go @@ -0,0 +1,125 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import ( + "context" + + "github.com/pingcap/tidb/expression" + "github.com/pingcap/tidb/planner/util" +) + +/* + resultsStabilizer stabilizes query results. + NOTE: it's not a common rule for all queries, it's specially implemented for a few customers. + + Results of some queries are not stable, for example: + create table t (a int); insert into t values (1), (2); select a from t; + In the case above, the result can be `1 2` or `2 1`, which is not stable. + This rule stabilizes results by modifying or injecting a Sort operator: + 1. iterate the plan from the root, and ignore all input-order operators (Sel/Proj/Limit); + 2. when meeting the first non-input-order operator, + 2.1. if it's a Sort, update it by appending all output columns into its order-by list, + 2.2. otherwise, inject a new Sort upon this operator. 
+*/ +type resultsStabilizer struct { +} + +func (rs *resultsStabilizer) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { + stable := rs.completeSort(lp) + if !stable { + lp = rs.injectSort(lp) + } + return lp, nil +} + +func (rs *resultsStabilizer) completeSort(lp LogicalPlan) bool { + if rs.isInputOrderKeeper(lp) { + return rs.completeSort(lp.Children()[0]) + } else if sort, ok := lp.(*LogicalSort); ok { + cols := sort.Schema().Columns // sort results by all output columns + if handleCol := rs.extractHandleCol(sort.Children()[0]); handleCol != nil { + cols = []*expression.Column{handleCol} // sort results by the handle column if we can get it + } + for _, col := range cols { + exist := false + for _, byItem := range sort.ByItems { + if col.Equal(nil, byItem.Expr) { + exist = true + break + } + } + if !exist { + sort.ByItems = append(sort.ByItems, &util.ByItems{Expr: col}) + } + } + return true + } + return false +} + +func (rs *resultsStabilizer) injectSort(lp LogicalPlan) LogicalPlan { + if rs.isInputOrderKeeper(lp) { + lp.SetChildren(rs.injectSort(lp.Children()[0])) + return lp + } + + byItems := make([]*util.ByItems, 0, len(lp.Schema().Columns)) + cols := lp.Schema().Columns + if handleCol := rs.extractHandleCol(lp); handleCol != nil { + cols = []*expression.Column{handleCol} + } + for _, col := range cols { + byItems = append(byItems, &util.ByItems{Expr: col}) + } + sort := LogicalSort{ + ByItems: byItems, + }.Init(lp.SCtx(), lp.SelectBlockOffset()) + sort.SetChildren(lp) + return sort +} + +func (rs *resultsStabilizer) isInputOrderKeeper(lp LogicalPlan) bool { + switch lp.(type) { + case *LogicalSelection, *LogicalProjection, *LogicalLimit: + return true + } + return false +} + +// extractHandleCols does the best effort to get the handle column. +func (rs *resultsStabilizer) extractHandleCol(lp LogicalPlan) *expression.Column { + switch x := lp.(type) { + case *LogicalSelection, *LogicalLimit: + handleCol := rs.extractHandleCol(lp.Children()[0]) + if x.Schema().Contains(handleCol) { + // some Projection Operator might be inlined, so check the column again here + return handleCol + } + case *DataSource: + if x.tableInfo.IsCommonHandle { + // Currently we deliberately don't support common handle case for simplicity. + return nil + } + handleCol := x.getPKIsHandleCol() + if handleCol != nil { + return handleCol + } + } + return nil +} + +func (rs *resultsStabilizer) name() string { + return "stabilize_results" +} diff --git a/planner/core/rule_stabilize_results_test.go b/planner/core/rule_stabilize_results_test.go new file mode 100644 index 0000000000000..00b3cf1fb12e5 --- /dev/null +++ b/planner/core/rule_stabilize_results_test.go @@ -0,0 +1,218 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package core_test + +import ( + "fmt" + "math" + "strings" + + . 
"github.com/pingcap/check" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + plannercore "github.com/pingcap/tidb/planner/core" + "github.com/pingcap/tidb/session" + "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/util/kvcache" + "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/util/testutil" +) + +var _ = Suite(&testRuleStabilizeResults{}) +var _ = SerialSuites(&testRuleStabilizeResultsSerial{}) + +type testRuleStabilizeResultsSerial struct { + store kv.Storage + dom *domain.Domain +} + +func (s *testRuleStabilizeResultsSerial) SetUpTest(c *C) { + var err error + s.store, s.dom, err = newStoreWithBootstrap() + c.Assert(err, IsNil) +} + +func (s *testRuleStabilizeResultsSerial) TestPlanCache(c *C) { + tk := testkit.NewTestKit(c, s.store) + orgEnable := plannercore.PreparedPlanCacheEnabled() + defer func() { + plannercore.SetPreparedPlanCache(orgEnable) + }() + plannercore.SetPreparedPlanCache(true) + var err error + tk.Se, err = session.CreateSession4TestWithOpt(s.store, &session.Opt{ + PreparedPlanCache: kvcache.NewSimpleLRUCache(100, 0.1, math.MaxUint64), + }) + c.Assert(err, IsNil) + + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key, b int, c int, d int, key(b))") + tk.MustExec("prepare s1 from 'select * from t where a > ? limit 10'") + tk.MustExec("set @a = 10") + tk.MustQuery("execute s1 using @a").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) + tk.MustQuery("execute s1 using @a").Check(testkit.Rows()) + tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) // plan cache is still working +} + +func (s *testRuleStabilizeResultsSerial) TestSQLBinding(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key, b int, c int, d int, key(b))") + tk.MustQuery("explain select * from t where a > 0 limit 1").Check(testkit.Rows( + "Limit_12 1.00 root offset:0, count:1", + "└─TableReader_22 1.00 root data:Limit_21", + " └─Limit_21 1.00 cop[tikv] offset:0, count:1", + " └─TableRangeScan_20 1.00 cop[tikv] table:t range:(0,+inf], keep order:true, stats:pseudo")) + + tk.MustExec("create session binding for select * from t where a>0 limit 1 using select * from t use index(b) where a>0 limit 1") + tk.MustQuery("explain select * from t where a > 0 limit 1").Check(testkit.Rows( + "TopN_9 1.00 root test.t.a, offset:0, count:1", + "└─IndexLookUp_19 1.00 root ", + " ├─TopN_18(Build) 1.00 cop[tikv] test.t.a, offset:0, count:1", + " │ └─Selection_17 3333.33 cop[tikv] gt(test.t.a, 0)", + " │ └─IndexFullScan_15 10000.00 cop[tikv] table:t, index:b(b) keep order:false, stats:pseudo", + " └─TableRowIDScan_16(Probe) 1.00 cop[tikv] table:t keep order:false, stats:pseudo")) +} + +func (s *testRuleStabilizeResultsSerial) TestClusteredIndex(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn + tk.MustExec("drop table if exists t") + tk.MustExec("CREATE TABLE t (a int,b int,c int, PRIMARY KEY (a,b))") + tk.MustQuery("explain select * from t limit 10").Check(testkit.Rows( + "TopN_7 10.00 root 
test.t.a, test.t.b, test.t.c, offset:0, count:10", + "└─TableReader_16 10.00 root data:TopN_15", + " └─TopN_15 10.00 cop[tikv] test.t.a, test.t.b, test.t.c, offset:0, count:10", + " └─TableFullScan_14 10000.00 cop[tikv] table:t keep order:false, stats:pseudo")) + tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff +} + +type testRuleStabilizeResults struct { + store kv.Storage + dom *domain.Domain + + testData testutil.TestData +} + +func (s *testRuleStabilizeResults) SetUpSuite(c *C) { + var err error + s.store, s.dom, err = newStoreWithBootstrap() + c.Assert(err, IsNil) + + s.testData, err = testutil.LoadTestSuiteData("testdata", "stable_result_mode_suite") + c.Assert(err, IsNil) +} + +func (s *testRuleStabilizeResults) TearDownSuite(c *C) { + c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) +} + +func (s *testRuleStabilizeResults) runTestData(c *C, tk *testkit.TestKit, name string) { + var input []string + var output []struct { + Plan []string + } + s.testData.GetTestCasesByName(name, c, &input, &output) + c.Assert(len(input), Equals, len(output)) + for i := range input { + s.testData.OnRecord(func() { + output[i].Plan = s.testData.ConvertRowsToStrings(tk.MustQuery("explain " + input[i]).Rows()) + }) + tk.MustQuery("explain " + input[i]).Check(testkit.Rows(output[i].Plan...)) + } +} + +func (s *testRuleStabilizeResults) TestStableResultMode(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key, b int, c int, d int, key(b))") + s.runTestData(c, tk, "TestStableResultMode") +} + +func (s *testRuleStabilizeResults) TestStableResultModeOnDML(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int primary key, b int, c int, key(b))") + s.runTestData(c, tk, "TestStableResultModeOnDML") +} + +func (s *testRuleStabilizeResults) TestStableResultModeOnSubQuery(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1 (a int primary key, b int, c int, d int, key(b))") + tk.MustExec("create table t2 (a int primary key, b int, c int, d int, key(b))") + s.runTestData(c, tk, "TestStableResultModeOnSubQuery") +} + +func (s *testRuleStabilizeResults) TestStableResultModeOnJoin(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1 (a int primary key, b int, c int, d int, key(b))") + tk.MustExec("create table t2 (a int primary key, b int, c int, d int, key(b))") + s.runTestData(c, tk, "TestStableResultModeOnJoin") +} + +func (s *testRuleStabilizeResults) TestStableResultModeOnOtherOperators(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists t1") + tk.MustExec("drop table if exists t2") + tk.MustExec("create table t1 (a int primary key, b int, c int, d int, unique key(b))") + tk.MustExec("create table t2 (a int primary key, b int, c int, d int, unique key(b))") + s.runTestData(c, tk, 
"TestStableResultModeOnOtherOperators") +} + +func (s *testRuleStabilizeResults) TestStableResultModeOnPartitionTable(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec(fmt.Sprintf(`set tidb_partition_prune_mode='%v'`, variable.DefTiDBPartitionPruneMode)) + tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("drop table if exists thash") + tk.MustExec("drop table if exists trange") + tk.MustExec("create table thash (a int primary key, b int, c int, d int) partition by hash(a) partitions 4") + tk.MustExec(`create table trange (a int primary key, b int, c int, d int) partition by range(a) ( + partition p0 values less than (100), + partition p1 values less than (200), + partition p2 values less than (300), + partition p3 values less than (400))`) + tk.MustQuery("select @@tidb_partition_prune_mode").Check(testkit.Rows("static")) + s.runTestData(c, tk, "TestStableResultModeOnPartitionTable") +} + +func (s *testRuleStabilizeResults) TestHideStableResultSwitch(c *C) { + tk := testkit.NewTestKit(c, s.store) + rs := tk.MustQuery("show variables").Rows() + for _, r := range rs { + c.Assert(strings.ToLower(r[0].(string)), Not(Equals), "tidb_enable_stable_result_mode") + } + c.Assert(len(tk.MustQuery("show variables where variable_name like '%tidb_enable_stable_result_mode%'").Rows()), Equals, 0) +} diff --git a/planner/core/testdata/stable_result_mode_suite_in.json b/planner/core/testdata/stable_result_mode_suite_in.json new file mode 100644 index 0000000000000..7629e80fc3630 --- /dev/null +++ b/planner/core/testdata/stable_result_mode_suite_in.json @@ -0,0 +1,83 @@ +[ + { + "name": "TestStableResultMode", + "cases": [ + "select * from t use index(primary)", + "select b from t use index(b)", + "select a, b from t use index(b)", + "select b, c from t use index(b)", + "select b, c from t use index(primary)", + "select min(b), max(c) from t use index(primary) group by d", + "select min(b), max(c) from t use index(primary) group by a", + "select * from t use index(b) limit 10", + "select * from t use index(primary) limit 10", + "select b from t use index(b) order by b", + "select b, c, d from t use index(b) order by b", + "select t1.a, t2.a from t t1, t t2 where t1.a=t2.a", + "select b from t where a>0", + "select b from t where a>0 limit 1" + ] + }, + { + "name": "TestStableResultModeOnDML", + "cases": [ + "insert into t select * from t", + "insert into t select * from t where a>1", + "insert into t select t1.a, t2.b, t1.c+t2.c from t t1, t t2 where t1.a=t2.a", + "insert into t select min(a), max(b), sum(c) from t group by a", + "delete from t", + "delete from t where a>1", + "update t set a=a+1", + "update t set a=a+1 where a>1" + ] + }, + { + "name": "TestStableResultModeOnSubQuery", + "cases": [ + "select * from t1 where t1.a in (select b from t2)", + "select * from t1 where t1.a not in (select b from t2)", + "select * from t1 where t1.a in (select b from t2 where t2.c>t1.c)", + "select * from t1 where t1.a not in (select b from t2 where t2.c>t1.c)", + "select * from t1 where exists (select 1 from t2 where t2.c>t1.c)", + "select * from t1 where not exists (select 1 from t2 where t2.c>t1.c)", + "select * from t1 where exists (select 1 from t2 where t2.c=t1.c)", + "select * from t1 where not exists (select 1 from t2 where t2.c=t1.c)", + "select t1.* from t1, (select b from t2) tb where t1.b=tb.b" + ] + }, + { + "name": "TestStableResultModeOnJoin", + "cases": [ + "select * from t1, t2 where t1.a = t2.a", + "select * from t1, t2 where t1.a > t2.a and 
t1.b = t2.b and t1.c < t2.c", + "select t1.* from t1 left outer join t2 on t1.a=t2.a", + "select t1.* from t1 join t2 on t1.a!=t2.a" + ] + }, + { + "name": "TestStableResultModeOnOtherOperators", + "cases": [ + "select * from t1 where a = 1 or a = 222 or a = 33333", + "select * from t1 where a in (1, 2, 3, 4)", + "select b from t1 where b = 1 or b = 222 or b = 33333", + "select b from t1 where b in (1, 2, 3, 4)", + "select * from t1 where a > 10 union all select * from t2 where b > 20", + "select * from t1 where a > 10 union distinct select * from t2 where b > 20", + "select * from t1 where a > 10 intersect select * from t2 where b > 20", + "select * from t1 where a > 10 except select * from t2 where b > 20", + "select row_number() over(partition by a) as row_no, sum(b) over(partition by a) as sum_b from t1", + "select min(a), max(b), sum(c) from t1 group by d", + "select min(a), max(b), sum(c) from t1 group by d having max(b) < 20", + "select case when a=1 then 'a1' when a=2 then 'a2' else 'ax' end from t1 " + ] + }, + { + "name": "TestStableResultModeOnPartitionTable", + "cases": [ + "select * from thash where a in (1, 200)", + "select * from thash where a >= 50 and a <= 150", + "select * from trange where a in (1, 200)", + "select * from trange where a >= 50 and a <= 150" + ] + } +] diff --git a/planner/core/testdata/stable_result_mode_suite_out.json b/planner/core/testdata/stable_result_mode_suite_out.json new file mode 100644 index 0000000000000..6e00e3e1c65db --- /dev/null +++ b/planner/core/testdata/stable_result_mode_suite_out.json @@ -0,0 +1,485 @@ +[ + { + "Name": "TestStableResultMode", + "Cases": [ + { + "Plan": [ + "TableReader_10 10000.00 root data:TableFullScan_9", + "└─TableFullScan_9 10000.00 cop[tikv] table:t keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "IndexReader_10 10000.00 root index:IndexFullScan_9", + "└─IndexFullScan_9 10000.00 cop[tikv] table:t, index:b(b) keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_5 10000.00 root test.t.a", + "└─IndexReader_8 10000.00 root index:IndexFullScan_7", + " └─IndexFullScan_7 10000.00 cop[tikv] table:t, index:b(b) keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_5 10000.00 root test.t.b, test.t.c", + "└─IndexLookUp_9 10000.00 root ", + " ├─IndexFullScan_7(Build) 10000.00 cop[tikv] table:t, index:b(b) keep order:false, stats:pseudo", + " └─TableRowIDScan_8(Probe) 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_5 10000.00 root test.t.b, test.t.c", + "└─TableReader_8 10000.00 root data:TableFullScan_7", + " └─TableFullScan_7 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_6 8000.00 root Column#5, Column#6", + "└─HashAgg_12 8000.00 root group by:test.t.d, funcs:min(Column#7)->Column#5, funcs:max(Column#8)->Column#6", + " └─TableReader_13 8000.00 root data:HashAgg_8", + " └─HashAgg_8 8000.00 cop[tikv] group by:test.t.d, funcs:min(test.t.b)->Column#7, funcs:max(test.t.c)->Column#8", + " └─TableFullScan_11 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_7 10000.00 root test.t.b, test.t.c", + "└─TableReader_10 10000.00 root data:TableFullScan_9", + " └─TableFullScan_9 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "TopN_8 10.00 root test.t.a, offset:0, count:10", + "└─IndexLookUp_17 10.00 root ", + " ├─TopN_16(Build) 10.00 cop[tikv] test.t.a, offset:0, count:10", + " │ └─IndexFullScan_14 10000.00 cop[tikv] table:t, index:b(b) 
keep order:false, stats:pseudo", + " └─TableRowIDScan_15(Probe) 10.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Limit_11 10.00 root offset:0, count:10", + "└─TableReader_21 10.00 root data:Limit_20", + " └─Limit_20 10.00 cop[tikv] offset:0, count:10", + " └─TableFullScan_19 10.00 cop[tikv] table:t keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "IndexReader_11 10000.00 root index:IndexFullScan_10", + "└─IndexFullScan_10 10000.00 cop[tikv] table:t, index:b(b) keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_4 10000.00 root test.t.b, test.t.c, test.t.d", + "└─IndexLookUp_9 10000.00 root ", + " ├─IndexFullScan_7(Build) 10000.00 cop[tikv] table:t, index:b(b) keep order:false, stats:pseudo", + " └─TableRowIDScan_8(Probe) 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_9 12500.00 root test.t.a, test.t.a", + "└─HashJoin_30 12500.00 root inner join, equal:[eq(test.t.a, test.t.a)]", + " ├─IndexReader_43(Build) 10000.00 root index:IndexFullScan_42", + " │ └─IndexFullScan_42 10000.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─IndexReader_39(Probe) 10000.00 root index:IndexFullScan_38", + " └─IndexFullScan_38 10000.00 cop[tikv] table:t1, index:b(b) keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_5 3333.33 root test.t.b", + "└─TableReader_11 3333.33 root data:TableRangeScan_10", + " └─TableRangeScan_10 3333.33 cop[tikv] table:t range:(0,+inf], keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_7 1.00 root test.t.b", + "└─Limit_12 1.00 root offset:0, count:1", + " └─TableReader_22 1.00 root data:Limit_21", + " └─Limit_21 1.00 cop[tikv] offset:0, count:1", + " └─TableRangeScan_20 1.00 cop[tikv] table:t range:(0,+inf], keep order:true, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestStableResultModeOnDML", + "Cases": [ + { + "Plan": [ + "Insert_1 N/A root N/A", + "└─TableReader_7 10000.00 root data:TableFullScan_6", + " └─TableFullScan_6 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Insert_1 N/A root N/A", + "└─TableReader_8 3333.33 root data:TableRangeScan_7", + " └─TableRangeScan_7 3333.33 cop[tikv] table:t range:(1,+inf], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Insert_1 N/A root N/A", + "└─Projection_9 12500.00 root test.t.a, test.t.b, plus(test.t.c, test.t.c)->Column#10", + " └─MergeJoin_10 12500.00 root inner join, left key:test.t.a, right key:test.t.a", + " ├─TableReader_34(Build) 10000.00 root data:TableFullScan_33", + " │ └─TableFullScan_33 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo", + " └─TableReader_32(Probe) 10000.00 root data:TableFullScan_31", + " └─TableFullScan_31 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Insert_1 N/A root N/A", + "└─Projection_7 10000.00 root test.t.a, test.t.b, cast(test.t.c, decimal(32,0) BINARY)->Column#9", + " └─TableReader_9 10000.00 root data:TableFullScan_8", + " └─TableFullScan_8 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Delete_3 N/A root N/A", + "└─TableReader_6 10000.00 root data:TableFullScan_5", + " └─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Delete_4 N/A root N/A", + "└─TableReader_7 3333.33 root data:TableRangeScan_6", + " └─TableRangeScan_6 3333.33 cop[tikv] table:t range:(1,+inf], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Update_3 N/A root 
N/A", + "└─TableReader_6 10000.00 root data:TableFullScan_5", + " └─TableFullScan_5 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Update_4 N/A root N/A", + "└─TableReader_7 3333.33 root data:TableRangeScan_6", + " └─TableRangeScan_6 3333.33 cop[tikv] table:t range:(1,+inf], keep order:false, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestStableResultModeOnSubQuery", + "Cases": [ + { + "Plan": [ + "Sort_11 9990.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_23 9990.00 root inner join, equal:[eq(test.t1.a, test.t2.b)]", + " ├─HashAgg_36(Build) 7992.00 root group by:test.t2.b, funcs:firstrow(test.t2.b)->test.t2.b", + " │ └─IndexReader_43 9990.00 root index:IndexFullScan_42", + " │ └─IndexFullScan_42 9990.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─TableReader_47(Probe) 10000.00 root data:TableFullScan_46", + " └─TableFullScan_46 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_9 8000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_11 8000.00 root CARTESIAN anti semi join, other cond:eq(test.t1.a, test.t2.b)", + " ├─IndexReader_17(Build) 10000.00 root index:IndexFullScan_16", + " │ └─IndexFullScan_16 10000.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─TableReader_13(Probe) 10000.00 root data:TableFullScan_12", + " └─TableFullScan_12 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 7992.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_29 7992.00 root semi join, equal:[eq(test.t1.a, test.t2.b)], other cond:gt(test.t2.c, test.t1.c)", + " ├─TableReader_43(Build) 9980.01 root data:Selection_42", + " │ └─Selection_42 9980.01 cop[tikv] not(isnull(test.t2.b)), not(isnull(test.t2.c))", + " │ └─TableFullScan_41 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_40(Probe) 9990.00 root data:Selection_39", + " └─Selection_39 9990.00 cop[tikv] not(isnull(test.t1.c))", + " └─TableFullScan_38 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 8000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_12 8000.00 root CARTESIAN anti semi join, other cond:eq(test.t1.a, test.t2.b), gt(test.t2.c, test.t1.c)", + " ├─TableReader_16(Build) 10000.00 root data:TableFullScan_15", + " │ └─TableFullScan_15 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_14(Probe) 10000.00 root data:TableFullScan_13", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 7992.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_12 7992.00 root CARTESIAN semi join, other cond:gt(test.t2.c, test.t1.c)", + " ├─TableReader_18(Build) 9990.00 root data:Selection_17", + " │ └─Selection_17 9990.00 cop[tikv] not(isnull(test.t2.c))", + " │ └─TableFullScan_16 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_15(Probe) 9990.00 root data:Selection_14", + " └─Selection_14 9990.00 cop[tikv] not(isnull(test.t1.c))", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 8000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_12 8000.00 root CARTESIAN anti semi join, other cond:gt(test.t2.c, test.t1.c)", + " ├─TableReader_16(Build) 10000.00 root data:TableFullScan_15", + " │ └─TableFullScan_15 10000.00 cop[tikv] 
table:t2 keep order:false, stats:pseudo", + " └─TableReader_14(Probe) 10000.00 root data:TableFullScan_13", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 7992.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_12 7992.00 root semi join, equal:[eq(test.t1.c, test.t2.c)]", + " ├─TableReader_18(Build) 9990.00 root data:Selection_17", + " │ └─Selection_17 9990.00 cop[tikv] not(isnull(test.t2.c))", + " │ └─TableFullScan_16 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_15(Probe) 9990.00 root data:Selection_14", + " └─Selection_14 9990.00 cop[tikv] not(isnull(test.t1.c))", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 8000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_12 8000.00 root anti semi join, equal:[eq(test.t1.c, test.t2.c)]", + " ├─TableReader_16(Build) 10000.00 root data:TableFullScan_15", + " │ └─TableFullScan_15 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_14(Probe) 10000.00 root data:TableFullScan_13", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_9 12487.50 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─Sort_10 12487.50 root test.t1.a, test.t1.b, test.t1.c, test.t1.d, test.t2.b", + " └─HashJoin_37 12487.50 root inner join, equal:[eq(test.t1.b, test.t2.b)]", + " ├─IndexReader_51(Build) 9990.00 root index:IndexFullScan_50", + " │ └─IndexFullScan_50 9990.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─TableReader_46(Probe) 9990.00 root data:Selection_45", + " └─Selection_45 9990.00 cop[tikv] not(isnull(test.t1.b))", + " └─TableFullScan_44 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestStableResultModeOnJoin", + "Cases": [ + { + "Plan": [ + "Sort_9 12500.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d, test.t2.a, test.t2.b, test.t2.c, test.t2.d", + "└─MergeJoin_11 12500.00 root inner join, left key:test.t1.a, right key:test.t2.a", + " ├─TableReader_35(Build) 10000.00 root data:TableFullScan_34", + " │ └─TableFullScan_34 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo", + " └─TableReader_33(Probe) 10000.00 root data:TableFullScan_32", + " └─TableFullScan_32 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_9 12475.01 root test.t1.a, test.t1.b, test.t1.c, test.t1.d, test.t2.a, test.t2.b, test.t2.c, test.t2.d", + "└─HashJoin_42 12475.01 root inner join, equal:[eq(test.t1.b, test.t2.b)], other cond:gt(test.t1.a, test.t2.a), lt(test.t1.c, test.t2.c)", + " ├─TableReader_61(Build) 9980.01 root data:Selection_60", + " │ └─Selection_60 9980.01 cop[tikv] not(isnull(test.t2.b)), not(isnull(test.t2.c))", + " │ └─TableFullScan_59 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_54(Probe) 9980.01 root data:Selection_53", + " └─Selection_53 9980.01 cop[tikv] not(isnull(test.t1.b)), not(isnull(test.t1.c))", + " └─TableFullScan_52 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_7 12500.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_19 12500.00 root left outer join, equal:[eq(test.t1.a, test.t2.a)]", + " ├─IndexReader_30(Build) 10000.00 root index:IndexFullScan_29", + " │ └─IndexFullScan_29 10000.00 cop[tikv] table:t2, index:b(b) keep order:false, 
stats:pseudo", + " └─TableReader_26(Probe) 10000.00 root data:TableFullScan_25", + " └─TableFullScan_25 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_8 100000000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─Sort_9 100000000.00 root test.t1.a, test.t1.b, test.t1.c, test.t1.d, test.t2.a", + " └─HashJoin_11 100000000.00 root CARTESIAN inner join, other cond:ne(test.t1.a, test.t2.a)", + " ├─IndexReader_18(Build) 10000.00 root index:IndexFullScan_17", + " │ └─IndexFullScan_17 10000.00 cop[tikv] table:t2, index:b(b) keep order:false, stats:pseudo", + " └─TableReader_14(Probe) 10000.00 root data:TableFullScan_13", + " └─TableFullScan_13 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestStableResultModeOnOtherOperators", + "Cases": [ + { + "Plan": [ + "Batch_Point_Get_9 3.00 root table:t1 handle:[1 222 33333], keep order:true, desc:false" + ] + }, + { + "Plan": [ + "Batch_Point_Get_9 4.00 root table:t1 handle:[1 2 3 4], keep order:true, desc:false" + ] + }, + { + "Plan": [ + "Batch_Point_Get_9 3.00 root table:t1, index:b(b) keep order:true, desc:false" + ] + }, + { + "Plan": [ + "Batch_Point_Get_9 4.00 root table:t1, index:b(b) keep order:true, desc:false" + ] + }, + { + "Plan": [ + "Sort_11 6666.67 root Column#9, Column#10, Column#11, Column#12", + "└─Union_13 6666.67 root ", + " ├─TableReader_16 3333.33 root data:TableRangeScan_15", + " │ └─TableRangeScan_15 3333.33 cop[tikv] table:t1 range:(10,+inf], keep order:false, stats:pseudo", + " └─TableReader_20 3333.33 root data:Selection_19", + " └─Selection_19 3333.33 cop[tikv] gt(test.t2.b, 20)", + " └─TableFullScan_18 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_12 5333.33 root Column#9, Column#10, Column#11, Column#12", + "└─HashAgg_14 5333.33 root group by:Column#10, Column#11, Column#12, Column#9, funcs:firstrow(Column#9)->Column#9, funcs:firstrow(Column#10)->Column#10, funcs:firstrow(Column#11)->Column#11, funcs:firstrow(Column#12)->Column#12", + " └─Union_15 6666.67 root ", + " ├─TableReader_18 3333.33 root data:TableRangeScan_17", + " │ └─TableRangeScan_17 3333.33 cop[tikv] table:t1 range:(10,+inf], keep order:false, stats:pseudo", + " └─TableReader_22 3333.33 root data:Selection_21", + " └─Selection_21 3333.33 cop[tikv] gt(test.t2.b, 20)", + " └─TableFullScan_20 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_11 2666.67 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_13 2666.67 root CARTESIAN semi join, other cond:nulleq(test.t1.a, test.t2.a), nulleq(test.t1.b, test.t2.b), nulleq(test.t1.c, test.t2.c), nulleq(test.t1.d, test.t2.d)", + " ├─TableReader_20(Build) 3333.33 root data:Selection_19", + " │ └─Selection_19 3333.33 cop[tikv] gt(test.t2.b, 20)", + " │ └─TableFullScan_18 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", + " └─TableReader_16(Probe) 3333.33 root data:TableRangeScan_15", + " └─TableRangeScan_15 3333.33 cop[tikv] table:t1 range:(10,+inf], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_11 2666.67 root test.t1.a, test.t1.b, test.t1.c, test.t1.d", + "└─HashJoin_13 2666.67 root anti semi join, equal:[nulleq(test.t1.a, test.t2.a) nulleq(test.t1.b, test.t2.b) nulleq(test.t1.c, test.t2.c) nulleq(test.t1.d, test.t2.d)]", + " ├─TableReader_20(Build) 3333.33 root data:Selection_19", + " │ └─Selection_19 3333.33 cop[tikv] gt(test.t2.b, 20)", + " │ └─TableFullScan_18 10000.00 cop[tikv] 
table:t2 keep order:false, stats:pseudo", + " └─TableReader_16(Probe) 3333.33 root data:TableRangeScan_15", + " └─TableRangeScan_15 3333.33 cop[tikv] table:t1 range:(10,+inf], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_10 10000.00 root Column#8, Column#7", + "└─Sort_11 10000.00 root test.t1.a, Column#7, Column#8", + " └─Window_13 10000.00 root row_number()->Column#8 over(partition by test.t1.a rows between current row and current row)", + " └─Window_14 10000.00 root sum(cast(test.t1.b, decimal(32,0) BINARY))->Column#7 over(partition by test.t1.a)", + " └─TableReader_17 10000.00 root data:TableFullScan_16", + " └─TableFullScan_16 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_6 8000.00 root Column#5, Column#6, Column#7", + "└─HashAgg_12 8000.00 root group by:test.t1.d, funcs:min(Column#8)->Column#5, funcs:max(Column#9)->Column#6, funcs:sum(Column#10)->Column#7", + " └─TableReader_13 8000.00 root data:HashAgg_8", + " └─HashAgg_8 8000.00 cop[tikv] group by:test.t1.d, funcs:min(test.t1.a)->Column#8, funcs:max(test.t1.b)->Column#9, funcs:sum(test.t1.c)->Column#10", + " └─TableFullScan_11 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_9 6400.00 root Column#5, Column#6, Column#7", + "└─Selection_11 6400.00 root lt(Column#6, 20)", + " └─HashAgg_16 8000.00 root group by:test.t1.d, funcs:min(Column#11)->Column#5, funcs:max(Column#12)->Column#6, funcs:sum(Column#13)->Column#7", + " └─TableReader_17 8000.00 root data:HashAgg_12", + " └─HashAgg_12 8000.00 cop[tikv] group by:test.t1.d, funcs:min(test.t1.a)->Column#11, funcs:max(test.t1.b)->Column#12, funcs:sum(test.t1.c)->Column#13", + " └─TableFullScan_15 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Projection_4 10000.00 root case(eq(test.t1.a, 1), a1, eq(test.t1.a, 2), a2, ax)->Column#5", + "└─TableReader_12 10000.00 root data:TableFullScan_11", + " └─TableFullScan_11 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo" + ] + } + ] + }, + { + "Name": "TestStableResultModeOnPartitionTable", + "Cases": [ + { + "Plan": [ + "Sort_10 4.00 root test.thash.a", + "└─PartitionUnion_12 4.00 root ", + " ├─Batch_Point_Get_13 2.00 root table:thash handle:[1 200], keep order:false, desc:false", + " └─Batch_Point_Get_14 2.00 root table:thash handle:[1 200], keep order:false, desc:false" + ] + }, + { + "Plan": [ + "Sort_12 400.00 root test.thash.a", + "└─PartitionUnion_14 400.00 root ", + " ├─TableReader_16 100.00 root data:TableRangeScan_15", + " │ └─TableRangeScan_15 100.00 cop[tikv] table:thash, partition:p0 range:[50,150], keep order:false, stats:pseudo", + " ├─TableReader_18 100.00 root data:TableRangeScan_17", + " │ └─TableRangeScan_17 100.00 cop[tikv] table:thash, partition:p1 range:[50,150], keep order:false, stats:pseudo", + " ├─TableReader_20 100.00 root data:TableRangeScan_19", + " │ └─TableRangeScan_19 100.00 cop[tikv] table:thash, partition:p2 range:[50,150], keep order:false, stats:pseudo", + " └─TableReader_22 100.00 root data:TableRangeScan_21", + " └─TableRangeScan_21 100.00 cop[tikv] table:thash, partition:p3 range:[50,150], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 4.00 root test.trange.a", + "└─PartitionUnion_12 4.00 root ", + " ├─TableReader_14 2.00 root data:TableRangeScan_13", + " │ └─TableRangeScan_13 2.00 cop[tikv] table:trange, partition:p0 range:[1,1], [200,200], keep order:false, stats:pseudo", + " └─TableReader_16 2.00 root data:TableRangeScan_15", 
+ " └─TableRangeScan_15 2.00 cop[tikv] table:trange, partition:p2 range:[1,1], [200,200], keep order:false, stats:pseudo" + ] + }, + { + "Plan": [ + "Sort_10 200.00 root test.trange.a", + "└─PartitionUnion_12 200.00 root ", + " ├─TableReader_14 100.00 root data:TableRangeScan_13", + " │ └─TableRangeScan_13 100.00 cop[tikv] table:trange, partition:p0 range:[50,150], keep order:false, stats:pseudo", + " └─TableReader_16 100.00 root data:TableRangeScan_15", + " └─TableRangeScan_15 100.00 cop[tikv] table:trange, partition:p1 range:[50,150], keep order:false, stats:pseudo" + ] + } + ] + } +] diff --git a/sessionctx/variable/session.go b/sessionctx/variable/session.go index 069297eb837fc..b8ee35d0fcc40 100644 --- a/sessionctx/variable/session.go +++ b/sessionctx/variable/session.go @@ -852,6 +852,9 @@ type SessionVars struct { // EnableGlobalTemporaryTable indicates whether to enable global temporary table EnableGlobalTemporaryTable bool + + // EnableStableResultMode if stabilize query results. + EnableStableResultMode bool } // AllocMPPTaskID allocates task id for mpp tasks. It will reset the task id if the query's diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index 6db3bba0fc146..dcc81e80d5db0 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -1759,6 +1759,10 @@ var defaultSysVars = []*SysVar{ s.EnableGlobalTemporaryTable = TiDBOptOn(val) return nil }}, + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableStableResultMode, Value: BoolToOnOff(DefTiDBEnableStableResultMode), Hidden: true, Type: TypeBool, SetSession: func(s *SessionVars, val string) error { + s.EnableStableResultMode = TiDBOptOn(val) + return nil + }}, } // FeedbackProbability points to the FeedbackProbability in statistics package. diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 7d626686728db..42fd63adf7cae 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -567,6 +567,9 @@ const ( TiDBEnableGlobalTemporaryTable = "tidb_enable_global_temporary_table" // TiDBEnableLocalTxn indicates whether to enable Local Txn. TiDBEnableLocalTxn = "tidb_enable_local_txn" + + // TiDBEnableStableResultMode indicates if stabilize query results. + TiDBEnableStableResultMode = "tidb_enable_stable_result_mode" ) // TiDB vars that have only global scope @@ -721,6 +724,7 @@ const ( DefTiDBEnableGlobalTemporaryTable = false DefTiDBEnableLocalTxn = false DefTMPTableSize = 16777216 + DefTiDBEnableStableResultMode = false ) // Process global variables. From fca0cb309893af5362a12ce759c2c6b2f74438cf Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Thu, 15 Jul 2021 11:03:33 +0800 Subject: [PATCH 08/38] load: fix load data with non-utf8 will succeed (#26054) (#26144) --- executor/executor.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/executor/executor.go b/executor/executor.go index 978983010b9e7..5ee89ea2c1c9e 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -1741,7 +1741,10 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { case *ast.LoadDataStmt: sc.DupKeyAsWarning = true sc.BadNullAsWarning = true - sc.TruncateAsWarning = !vars.StrictSQLMode + // With IGNORE or LOCAL, data-interpretation errors become warnings and the load operation continues, + // even if the SQL mode is restrictive. 
For details: https://dev.mysql.com/doc/refman/8.0/en/load-data.html + // TODO: since TiDB only support the LOCAL by now, so the TruncateAsWarning are always true here. + sc.TruncateAsWarning = true sc.InLoadDataStmt = true // return warning instead of error when load data meet no partition for value sc.IgnoreNoPartition = true From 59f73184b00116704e6e567ff92e3cbedd81be7b Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Thu, 15 Jul 2021 11:29:33 +0800 Subject: [PATCH 09/38] util/stmtsummary: discard the plan if it is too long and enlarge the tidb_stmt_summary_max_stmt_count value to 3000 (#25843) (#25874) --- config/config.go | 2 +- config/config.toml.example | 2 +- util/plancodec/codec.go | 9 ++++++++ util/plancodec/codec_test.go | 6 +++++ util/stmtsummary/statement_summary.go | 5 +++++ util/stmtsummary/statement_summary_test.go | 26 ++++++++++++++++++++++ 6 files changed, 48 insertions(+), 2 deletions(-) diff --git a/config/config.go b/config/config.go index 90725ff40ed88..cb56c55feff2d 100644 --- a/config/config.go +++ b/config/config.go @@ -653,7 +653,7 @@ var defaultConf = Config{ StmtSummary: StmtSummary{ Enable: true, EnableInternalQuery: false, - MaxStmtCount: 200, + MaxStmtCount: 3000, MaxSQLLength: 4096, RefreshInterval: 1800, HistorySize: 24, diff --git a/config/config.toml.example b/config/config.toml.example index 1cc2674d01727..fb7890d07c63f 100644 --- a/config/config.toml.example +++ b/config/config.toml.example @@ -448,7 +448,7 @@ enable = true enable-internal-query = false # max number of statements kept in memory. -max-stmt-count = 200 +max-stmt-count = 3000 # max length of displayed normalized sql and sample sql. max-sql-length = 4096 diff --git a/util/plancodec/codec.go b/util/plancodec/codec.go index e7568493fed94..65fd2c64b899f 100644 --- a/util/plancodec/codec.go +++ b/util/plancodec/codec.go @@ -40,6 +40,12 @@ const ( separatorStr = "\t" ) +var ( + // PlanDiscardedEncoded indicates the discard plan because it is too long + PlanDiscardedEncoded = "[discard]" + planDiscardedDecoded = "(plan discarded because too long)" +) + var decoderPool = sync.Pool{ New: func() interface{} { return &planDecoder{} @@ -87,6 +93,9 @@ type planInfo struct { func (pd *planDecoder) decode(planString string) (string, error) { str, err := decompress(planString) if err != nil { + if planString == PlanDiscardedEncoded { + return planDiscardedDecoded, nil + } return "", err } return pd.buildPlanTree(str) diff --git a/util/plancodec/codec_test.go b/util/plancodec/codec_test.go index 1f98adda4cf99..a3375673c95d4 100644 --- a/util/plancodec/codec_test.go +++ b/util/plancodec/codec_test.go @@ -50,3 +50,9 @@ func (s *testPlanCodecSuite) TestEncodeTaskType(c *C) { _, err = decodeTaskType("1_x") c.Assert(err, NotNil) } + +func (s *testPlanCodecSuite) TestDecodeDiscardPlan(c *C) { + plan, err := DecodePlan(PlanDiscardedEncoded) + c.Assert(err, IsNil) + c.Assert(plan, DeepEquals, planDiscardedDecoded) +} diff --git a/util/stmtsummary/statement_summary.go b/util/stmtsummary/statement_summary.go index 149196e0a4c31..8d0598b86aa0f 100644 --- a/util/stmtsummary/statement_summary.go +++ b/util/stmtsummary/statement_summary.go @@ -629,10 +629,15 @@ func (ssbd *stmtSummaryByDigest) collectHistorySummaries(historySize int) []*stm return ssElements } +var maxEncodedPlanSizeInBytes = 1024 * 1024 + func newStmtSummaryByDigestElement(sei *StmtExecInfo, beginTime int64, intervalSeconds int64) *stmtSummaryByDigestElement { // sampleSQL / authUsers(sampleUser) / 
samplePlan / prevSQL / indexNames store the values shown at the first time, // because it compacts performance to update every time. samplePlan, planHint := sei.PlanGenerator() + if len(samplePlan) > maxEncodedPlanSizeInBytes { + samplePlan = plancodec.PlanDiscardedEncoded + } ssElement := &stmtSummaryByDigestElement{ beginTime: beginTime, sampleSQL: formatSQL(sei.OriginalSQL), diff --git a/util/stmtsummary/statement_summary_test.go b/util/stmtsummary/statement_summary_test.go index f09398df68423..7782d23e1f7c3 100644 --- a/util/stmtsummary/statement_summary_test.go +++ b/util/stmtsummary/statement_summary_test.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/tidb/store/tikv/util" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/execdetails" + "github.com/pingcap/tidb/util/plancodec" ) var _ = Suite(&testStmtSummarySuite{}) @@ -435,6 +436,31 @@ func (s *testStmtSummarySuite) TestAddStatement(c *C) { c.Assert(s.ssMap.summaryMap.Size(), Equals, 4) _, ok = s.ssMap.summaryMap.Get(key) c.Assert(ok, IsTrue) + + // Test for plan too large + stmtExecInfo7 := stmtExecInfo1 + stmtExecInfo7.PlanDigest = "plan_digest7" + stmtExecInfo7.PlanGenerator = func() (string, string) { + buf := make([]byte, maxEncodedPlanSizeInBytes+1) + for i := range buf { + buf[i] = 'a' + } + return string(buf), "" + } + key = &stmtSummaryByDigestKey{ + schemaName: stmtExecInfo7.SchemaName, + digest: stmtExecInfo7.Digest, + planDigest: stmtExecInfo7.PlanDigest, + } + s.ssMap.AddStatement(stmtExecInfo7) + c.Assert(s.ssMap.summaryMap.Size(), Equals, 5) + v, ok := s.ssMap.summaryMap.Get(key) + c.Assert(ok, IsTrue) + stmt := v.(*stmtSummaryByDigest) + c.Assert(stmt.digest, DeepEquals, key.digest) + e := stmt.history.Back() + ssElement := e.Value.(*stmtSummaryByDigestElement) + c.Assert(ssElement.samplePlan, Equals, plancodec.PlanDiscardedEncoded) } func matchStmtSummaryByDigest(first, second *stmtSummaryByDigest) bool { From c60975a4c778d77c04cb62986e5408ab06613876 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Thu, 15 Jul 2021 11:43:33 +0800 Subject: [PATCH 10/38] server: try to make test TestTopSQLAgent stable (#25399) (#25604) --- executor/set_test.go | 7 +++++++ server/tidb_test.go | 23 +++++++++++++++++++---- util/topsql/reporter/client.go | 6 ++++++ 3 files changed, 32 insertions(+), 4 deletions(-) diff --git a/executor/set_test.go b/executor/set_test.go index b4e5646ae4b02..339e3a7ae3d7d 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -23,6 +23,7 @@ import ( "strconv" . 
"github.com/pingcap/check" + "github.com/pingcap/failpoint" "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" "github.com/pingcap/tidb/config" @@ -1376,6 +1377,12 @@ func (s *testSuite5) TestSetClusterConfigJSONData(c *C) { } func (s *testSerialSuite) TestSetTopSQLVariables(c *C) { + c.Assert(failpoint.Enable("github.com/pingcap/tidb/domain/skipLoadSysVarCacheLoop", `return(true)`), IsNil) + defer func() { + err := failpoint.Disable("github.com/pingcap/tidb/domain/skipLoadSysVarCacheLoop") + c.Assert(err, IsNil) + }() + tk := testkit.NewTestKit(c, s.store) tk.MustExec("set @@global.tidb_enable_top_sql='On';") tk.MustQuery("select @@global.tidb_enable_top_sql;").Check(testkit.Rows("1")) diff --git a/server/tidb_test.go b/server/tidb_test.go index a28bce7a52bbd..cbf49eb8686fb 100644 --- a/server/tidb_test.go +++ b/server/tidb_test.go @@ -94,6 +94,20 @@ func (ts *tidbTestSuite) SetUpSuite(c *C) { func (ts *tidbTestTopSQLSuite) SetUpSuite(c *C) { ts.tidbTestSuiteBase.SetUpSuite(c) + + // Initialize global variable for top-sql test. + db, err := sql.Open("mysql", ts.getDSN()) + c.Assert(err, IsNil, Commentf("Error connecting")) + defer func() { + err := db.Close() + c.Assert(err, IsNil) + }() + + dbt := &DBTest{c, db} + dbt.mustExec("set @@global.tidb_top_sql_precision_seconds=1;") + dbt.mustExec("set @@global.tidb_top_sql_report_interval_seconds=2;") + dbt.mustExec("set @@global.tidb_top_sql_max_statement_count=5;") + tracecpu.GlobalSQLCPUProfiler.Run() } @@ -1498,19 +1512,20 @@ func (ts *tidbTestTopSQLSuite) TestTopSQLAgent(c *C) { checkFn(0) // set correct address, should collect records dbt.mustExec(fmt.Sprintf("set @@tidb_top_sql_agent_address='%v';", agentServer.Address())) - agentServer.WaitCollectCnt(1, time.Second*4) + agentServer.WaitCollectCnt(1, time.Second*8) checkFn(5) - // agent server shutdown - agentServer.Stop() // run another set of SQL queries cancel() + cancel = runWorkload(11, 20) + // agent server shutdown + agentServer.Stop() // agent server restart agentServer, err = mockTopSQLReporter.StartMockAgentServer() c.Assert(err, IsNil) dbt.mustExec(fmt.Sprintf("set @@tidb_top_sql_agent_address='%v';", agentServer.Address())) // check result - agentServer.WaitCollectCnt(1, time.Second*4) + agentServer.WaitCollectCnt(2, time.Second*8) checkFn(5) } diff --git a/util/topsql/reporter/client.go b/util/topsql/reporter/client.go index 8a06f3bc49b04..87504d646902b 100644 --- a/util/topsql/reporter/client.go +++ b/util/topsql/reporter/client.go @@ -185,6 +185,12 @@ func (r *GRPCReportClient) tryEstablishConnection(ctx context.Context, targetRPC // Address is not changed, skip. 
return nil } + + if r.conn != nil { + err := r.conn.Close() + logutil.BgLogger().Warn("[top-sql] grpc client close connection failed", zap.Error(err)) + } + r.conn, err = r.dial(ctx, targetRPCAddr) if err != nil { return err From 90d79ba0020cf61bdcbe361fe7f83d5d8693361a Mon Sep 17 00:00:00 2001 From: Yilin Chen Date: Thu, 15 Jul 2021 14:37:33 +0800 Subject: [PATCH 11/38] store/tikv: clear RPC error only if prewrite succeeds (#25803) --- store/tikv/2pc.go | 10 ++--- store/tikv/prewrite.go | 11 ++--- store/tikv/region_request.go | 2 - store/tikv/region_request_test.go | 2 - store/tikv/tests/async_commit_fail_test.go | 48 ---------------------- 5 files changed, 7 insertions(+), 66 deletions(-) diff --git a/store/tikv/2pc.go b/store/tikv/2pc.go index 49096254743dc..f8be95e741a99 100644 --- a/store/tikv/2pc.go +++ b/store/tikv/2pc.go @@ -101,7 +101,6 @@ type twoPhaseCommitter struct { maxCommitTS uint64 prewriteStarted bool prewriteCancelled uint32 - prewriteFailed uint32 useOnePC uint32 onePCCommitTS uint64 @@ -1061,13 +1060,10 @@ func (c *twoPhaseCommitter) execute(ctx context.Context) (err error) { start := time.Now() err = c.prewriteMutations(bo, c.mutations) - // Return an undetermined error only if we don't know the transaction fails. - // If it fails due to a write conflict or a already existed unique key, we - // needn't return an undetermined error even if such an error is set. - if atomic.LoadUint32(&c.prewriteFailed) == 1 { - c.setUndeterminedErr(nil) - } if err != nil { + // TODO: Now we return an undetermined error as long as one of the prewrite + // RPCs fails. However, if there are multiple errors and some of the errors + // are not RPC failures, we can return the actual error instead of undetermined. if undeterminedErr := c.getUndeterminedErr(); undeterminedErr != nil { logutil.Logger(ctx).Error("2PC commit result undetermined", zap.Error(err), diff --git a/store/tikv/prewrite.go b/store/tikv/prewrite.go index 0e6afa96010b2..ae1a5d9bf5c81 100644 --- a/store/tikv/prewrite.go +++ b/store/tikv/prewrite.go @@ -203,7 +203,6 @@ func (action actionPrewrite) handleSingleBatch(c *twoPhaseCommitter, bo *Backoff if err != nil { return errors.Trace(err) } - failpoint.Inject("forceRecursion", func() { same = false }) if same { continue } @@ -217,6 +216,9 @@ func (action actionPrewrite) handleSingleBatch(c *twoPhaseCommitter, bo *Backoff prewriteResp := resp.Resp.(*pb.PrewriteResponse) keyErrs := prewriteResp.GetErrors() if len(keyErrs) == 0 { + // Clear the RPC Error since the request is evaluated successfully. + sender.rpcError = nil + if batch.isPrimary { // After writing the primary key, if the size of the transaction is larger than 32M, // start the ttlManager. The ttlManager will be closed in tikvTxn.Commit(). 
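The combined effect of the 2pc.go and prewrite.go hunks in this patch is that a pending RPC error now survives unless the prewrite response was actually evaluated with no key errors, and any prewrite failure with such an error still pending is surfaced as an undetermined commit result. A minimal standalone sketch of that classification follows; commitResult and its parameters are illustrative only, not the real twoPhaseCommitter API:

package main

import (
	"errors"
	"fmt"
)

// commitResult sketches the error classification: if prewrite failed while a
// transport-level error is still pending, the true outcome is unknown and the
// error is reported as undetermined; otherwise the concrete prewrite error wins.
func commitResult(prewriteErr, rpcErr error) error {
	if prewriteErr == nil {
		return nil
	}
	if rpcErr != nil {
		return fmt.Errorf("commit result undetermined: %w", prewriteErr)
	}
	return prewriteErr
}

func main() {
	writeConflict := errors.New("write conflict")
	fmt.Println(commitResult(writeConflict, nil))                       // determined failure
	fmt.Println(commitResult(writeConflict, errors.New("rpc timeout"))) // undetermined
}

Under this rule a determined failure such as a write conflict no longer clears a pending timeout, which is also why the TestAsyncCommitRPCErrorThenWriteConflict cases are deleted further down in this patch.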
@@ -277,17 +279,12 @@ func (action actionPrewrite) handleSingleBatch(c *twoPhaseCommitter, bo *Backoff // Check already exists error if alreadyExist := keyErr.GetAlreadyExist(); alreadyExist != nil { e := &tikverr.ErrKeyExist{AlreadyExist: alreadyExist} - err = c.extractKeyExistsErr(e) - if err != nil { - atomic.StoreUint32(&c.prewriteFailed, 1) - } - return err + return c.extractKeyExistsErr(e) } // Extract lock from key error lock, err1 := extractLockFromKeyErr(keyErr) if err1 != nil { - atomic.StoreUint32(&c.prewriteFailed, 1) return errors.Trace(err1) } logutil.BgLogger().Info("prewrite encounters lock", diff --git a/store/tikv/region_request.go b/store/tikv/region_request.go index 26381c926b386..b3ebb095d8fea 100644 --- a/store/tikv/region_request.go +++ b/store/tikv/region_request.go @@ -561,8 +561,6 @@ func (s *RegionRequestSender) SendReqCtx( continue } } else { - // Clear the RPC Error since the request is evaluated successfully on a store. - s.rpcError = nil if s.leaderReplicaSelector != nil { s.leaderReplicaSelector.OnSendSuccess() } diff --git a/store/tikv/region_request_test.go b/store/tikv/region_request_test.go index 5f3a4a130af0d..3dd9d37ed330a 100644 --- a/store/tikv/region_request_test.go +++ b/store/tikv/region_request_test.go @@ -218,8 +218,6 @@ func (s *testRegionRequestToSingleStoreSuite) TestOnSendFailedWithStoreRestart(c resp, err = s.regionRequestSender.SendReq(s.bo, req, region.Region, time.Second) c.Assert(err, IsNil) c.Assert(resp.Resp, NotNil) - // The RPC error should be nil since it's evaluated successfully. - c.Assert(s.regionRequestSender.rpcError, IsNil) } func (s *testRegionRequestToSingleStoreSuite) TestOnSendFailedWithCloseKnownStoreThenUseNewOne(c *C) { diff --git a/store/tikv/tests/async_commit_fail_test.go b/store/tikv/tests/async_commit_fail_test.go index cfe1ad0595eb1..a791f16c54e86 100644 --- a/store/tikv/tests/async_commit_fail_test.go +++ b/store/tikv/tests/async_commit_fail_test.go @@ -233,51 +233,3 @@ func (s *testAsyncCommitFailSuite) TestAsyncCommitContextCancelCausingUndetermin c.Assert(err, NotNil) c.Assert(txn.GetCommitter().GetUndeterminedErr(), NotNil) } - -// TestAsyncCommitRPCErrorThenWriteConflict verifies that the determined failure error overwrites undetermined error. -func (s *testAsyncCommitFailSuite) TestAsyncCommitRPCErrorThenWriteConflict(c *C) { - // This test doesn't support tikv mode because it needs setting failpoint in unistore. - if *WithTiKV { - return - } - - txn := s.beginAsyncCommit(c) - err := txn.Set([]byte("a"), []byte("va")) - c.Assert(err, IsNil) - - c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/rpcPrewriteResult", `1*return("timeout")->return("writeConflict")`), IsNil) - defer func() { - c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/rpcPrewriteResult"), IsNil) - }() - - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err = txn.Commit(ctx) - c.Assert(err, NotNil) - c.Assert(txn.GetCommitter().GetUndeterminedErr(), IsNil) -} - -// TestAsyncCommitRPCErrorThenWriteConflictInChild verifies that the determined failure error in a child recursion -// overwrites the undetermined error in the parent. -func (s *testAsyncCommitFailSuite) TestAsyncCommitRPCErrorThenWriteConflictInChild(c *C) { - // This test doesn't support tikv mode because it needs setting failpoint in unistore. 
- if *WithTiKV { - return - } - - txn := s.beginAsyncCommit(c) - err := txn.Set([]byte("a"), []byte("va")) - c.Assert(err, IsNil) - - c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/mockstore/unistore/rpcPrewriteResult", `1*return("timeout")->return("writeConflict")`), IsNil) - c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/forceRecursion", `return`), IsNil) - - defer func() { - c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/rpcPrewriteResult"), IsNil) - c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/forceRecursion"), IsNil) - }() - - ctx := context.WithValue(context.Background(), util.SessionID, uint64(1)) - err = txn.Commit(ctx) - c.Assert(err, NotNil) - c.Assert(txn.GetCommitter().GetUndeterminedErr(), IsNil) -} From bc1d3ea8c6a266c89e264adfb590fa10d6cf22f8 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Thu, 15 Jul 2021 16:39:33 +0800 Subject: [PATCH 12/38] planner/core: thoroughly push down count-distinct agg in the MPP mode. (#25662) (#26194) --- planner/core/exhaust_physical_plans.go | 6 ++- planner/core/fragment.go | 7 +++ planner/core/physical_plans.go | 2 + planner/core/rule_eliminate_projection.go | 4 +- planner/core/task.go | 28 +++++++++++- .../testdata/integration_serial_suite_in.json | 5 ++- .../integration_serial_suite_out.json | 44 +++++++++++++++++++ 7 files changed, 90 insertions(+), 6 deletions(-) diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index 37dbd04c10792..59b635eada379 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -2424,7 +2424,11 @@ func (la *LogicalAggregation) tryToGetMppHashAggs(prop *property.PhysicalPropert childProp := &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64} agg := NewPhysicalHashAgg(la, la.stats.ScaleByExpectCnt(prop.ExpectedCnt), childProp) agg.SetSchema(la.schema.Clone()) - agg.MppRunMode = MppTiDB + if la.HasDistinct() { + agg.MppRunMode = MppScalar + } else { + agg.MppRunMode = MppTiDB + } hashAggs = append(hashAggs, agg) } return diff --git a/planner/core/fragment.go b/planner/core/fragment.go index 7315da176e6b8..c4464c1d19f9b 100644 --- a/planner/core/fragment.go +++ b/planner/core/fragment.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/logutil" + "github.com/pingcap/tipb/go-tipb" "go.uber.org/zap" ) @@ -40,6 +41,8 @@ type Fragment struct { ExchangeSender *PhysicalExchangeSender // data exporter IsRoot bool + + singleton bool // indicates if this is a task running on a single node. } type tasksAndFrags struct { @@ -121,6 +124,7 @@ func (f *Fragment) init(p PhysicalPlan) error { } f.TableScan = x case *PhysicalExchangeReceiver: + f.singleton = x.children[0].(*PhysicalExchangeSender).ExchangeType == tipb.ExchangeType_PassThrough f.ExchangeReceivers = append(f.ExchangeReceivers, x) case *PhysicalUnionAll: return errors.New("unexpected union all detected") @@ -246,6 +250,9 @@ func (e *mppTaskGenerator) generateMPPTasksForFragment(f *Fragment) (tasks []*kv for _, r := range f.ExchangeReceivers { childrenTasks = append(childrenTasks, r.Tasks...) 
} + if f.singleton { + childrenTasks = childrenTasks[0:1] + } tasks = e.constructMPPTasksByChildrenTasks(childrenTasks) } if err != nil { diff --git a/planner/core/physical_plans.go b/planner/core/physical_plans.go index 4e5c6cbfd58e5..e58aa1f760823 100644 --- a/planner/core/physical_plans.go +++ b/planner/core/physical_plans.go @@ -1008,6 +1008,8 @@ const ( Mpp2Phase // MppTiDB runs agg on TiDB (and a partial agg on TiFlash if in 2 phase agg) MppTiDB + // MppScalar also has 2 phases. The second phase runs in a single task. + MppScalar ) type basePhysicalAgg struct { diff --git a/planner/core/rule_eliminate_projection.go b/planner/core/rule_eliminate_projection.go index 5731495f9c2d2..ebc6b23d2b57d 100644 --- a/planner/core/rule_eliminate_projection.go +++ b/planner/core/rule_eliminate_projection.go @@ -43,14 +43,14 @@ func canProjectionBeEliminatedStrict(p *PhysicalProjection) bool { // the align the output schema. In the future, we can solve this in-compatibility by // passing down the aggregation mode to TiFlash. if physicalAgg, ok := p.Children()[0].(*PhysicalHashAgg); ok { - if physicalAgg.MppRunMode == Mpp1Phase || physicalAgg.MppRunMode == Mpp2Phase { + if physicalAgg.MppRunMode == Mpp1Phase || physicalAgg.MppRunMode == Mpp2Phase || physicalAgg.MppRunMode == MppScalar { if physicalAgg.isFinalAgg() { return false } } } if physicalAgg, ok := p.Children()[0].(*PhysicalStreamAgg); ok { - if physicalAgg.MppRunMode == Mpp1Phase || physicalAgg.MppRunMode == Mpp2Phase { + if physicalAgg.MppRunMode == Mpp1Phase || physicalAgg.MppRunMode == Mpp2Phase || physicalAgg.MppRunMode == MppScalar { if physicalAgg.isFinalAgg() { return false } diff --git a/planner/core/task.go b/planner/core/task.go index 26261836f5e07..827a9df78717e 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1926,9 +1926,9 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { } // TODO: how to set 2-phase cost? 
newMpp.addCost(p.GetCost(inputRows, false, true)) - finalAgg.SetCost(mpp.cost()) + finalAgg.SetCost(newMpp.cost()) if proj != nil { - proj.SetCost(mpp.cost()) + proj.SetCost(newMpp.cost()) } return newMpp case MppTiDB: @@ -1946,6 +1946,30 @@ func (p *PhysicalHashAgg) attach2TaskForMpp(tasks ...task) task { t.addCost(p.GetCost(inputRows, true, false)) finalAgg.SetCost(t.cost()) return t + case MppScalar: + proj := p.convertAvgForMPP() + partialAgg, finalAgg := p.newPartialAggregate(kv.TiFlash, true) + if partialAgg == nil || finalAgg == nil { + return invalidTask + } + attachPlan2Task(partialAgg, mpp) + prop := &property.PhysicalProperty{TaskTp: property.MppTaskType, ExpectedCnt: math.MaxFloat64, MPPPartitionTp: property.AnyType} + newMpp := mpp.enforceExchangerImpl(prop) + attachPlan2Task(finalAgg, newMpp) + if proj == nil { + proj = PhysicalProjection{ + Exprs: make([]expression.Expression, 0, len(p.Schema().Columns)), + }.Init(p.ctx, p.statsInfo(), p.SelectBlockOffset()) + for _, col := range p.Schema().Columns { + proj.Exprs = append(proj.Exprs, col) + } + proj.SetSchema(p.schema) + } + attachPlan2Task(proj, newMpp) + newMpp.addCost(p.GetCost(inputRows, false, true)) + finalAgg.SetCost(newMpp.cost()) + proj.SetCost(newMpp.cost()) + return newMpp default: return invalidTask } diff --git a/planner/core/testdata/integration_serial_suite_in.json b/planner/core/testdata/integration_serial_suite_in.json index 267f5e532ddc6..95476da8172c8 100644 --- a/planner/core/testdata/integration_serial_suite_in.json +++ b/planner/core/testdata/integration_serial_suite_in.json @@ -276,7 +276,10 @@ "desc format = 'brief' select * from t join ( select count(distinct value), id from t group by id) as A on A.id = t.id", "desc format = 'brief' select * from t join ( select count(1/value), id from t group by id) as A on A.id = t.id", "desc format = 'brief' select /*+hash_agg()*/ sum(id) from (select value, id from t where id > value group by id, value)A group by value /*the exchange should have only one partition column: test.t.value*/", - "desc format = 'brief' select /*+hash_agg()*/ sum(B.value) from t as B where B.id+1 > (select count(*) from t where t.id= B.id and t.value=B.value) group by B.id /*the exchange should have only one partition column: test.t.id*/" + "desc format = 'brief' select /*+hash_agg()*/ sum(B.value) from t as B where B.id+1 > (select count(*) from t where t.id= B.id and t.value=B.value) group by B.id /*the exchange should have only one partition column: test.t.id*/", + "desc format = 'brief' select count(distinct value) from t", + "desc format = 'brief' select count(distinct x ) from (select count(distinct value) x from t) t", + "desc format = 'brief' select count(distinct value), count(value), avg(value) from t" ] }, { diff --git a/planner/core/testdata/integration_serial_suite_out.json b/planner/core/testdata/integration_serial_suite_out.json index 3a300466d7a19..7675994769ac5 100644 --- a/planner/core/testdata/integration_serial_suite_out.json +++ b/planner/core/testdata/integration_serial_suite_out.json @@ -2339,6 +2339,50 @@ " └─ExchangeSender 10000.00 batchCop[tiflash] ExchangeType: HashPartition, Hash Cols: test.t.id", " └─TableFullScan 10000.00 batchCop[tiflash] table:B keep order:false, stats:pseudo" ] + }, + { + "SQL": "desc format = 'brief' select count(distinct value) from t", + "Plan": [ + "TableReader 1.00 root data:ExchangeSender", + "└─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─Projection 1.00 batchCop[tiflash] Column#4", + " └─HashAgg 1.00 
batchCop[tiflash] funcs:count(distinct test.t.value)->Column#4", + " └─ExchangeReceiver 1.00 batchCop[tiflash] ", + " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 batchCop[tiflash] group by:test.t.value, ", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + ] + }, + { + "SQL": "desc format = 'brief' select count(distinct x ) from (select count(distinct value) x from t) t", + "Plan": [ + "TableReader 1.00 root data:ExchangeSender", + "└─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─Projection 1.00 batchCop[tiflash] Column#5", + " └─HashAgg 1.00 batchCop[tiflash] funcs:count(distinct Column#4)->Column#5", + " └─ExchangeReceiver 1.00 batchCop[tiflash] ", + " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 batchCop[tiflash] group by:Column#4, ", + " └─Projection 1.00 batchCop[tiflash] Column#4", + " └─HashAgg 1.00 batchCop[tiflash] funcs:count(distinct test.t.value)->Column#4", + " └─ExchangeReceiver 1.00 batchCop[tiflash] ", + " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 batchCop[tiflash] group by:test.t.value, ", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + ] + }, + { + "SQL": "desc format = 'brief' select count(distinct value), count(value), avg(value) from t", + "Plan": [ + "TableReader 1.00 root data:ExchangeSender", + "└─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─Projection 1.00 batchCop[tiflash] Column#4, Column#5, div(Column#6, cast(case(eq(Column#7, 0), 1, Column#7), decimal(20,0) BINARY))->Column#6", + " └─HashAgg 1.00 batchCop[tiflash] funcs:count(distinct test.t.value)->Column#4, funcs:sum(Column#8)->Column#5, funcs:sum(Column#9)->Column#7, funcs:sum(Column#10)->Column#6", + " └─ExchangeReceiver 1.00 batchCop[tiflash] ", + " └─ExchangeSender 1.00 batchCop[tiflash] ExchangeType: PassThrough", + " └─HashAgg 1.00 batchCop[tiflash] group by:test.t.value, funcs:count(test.t.value)->Column#8, funcs:count(test.t.value)->Column#9, funcs:sum(test.t.value)->Column#10", + " └─TableFullScan 10000.00 batchCop[tiflash] table:t keep order:false, stats:pseudo" + ] } ] }, From ce98048d034ba31abff4e03f83f7d0954549b9c3 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Thu, 15 Jul 2021 16:51:32 +0800 Subject: [PATCH 13/38] planner/core: fix duplicate enum items (#26145) (#26202) --- planner/core/plan_to_pb.go | 4 ---- planner/core/plan_to_pb_test.go | 9 +++++++++ 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/planner/core/plan_to_pb.go b/planner/core/plan_to_pb.go index 1b6ef79bbc6bc..985fb0a72878d 100644 --- a/planner/core/plan_to_pb.go +++ b/planner/core/plan_to_pb.go @@ -16,7 +16,6 @@ package core import ( "github.com/pingcap/errors" "github.com/pingcap/parser/model" - "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/distsql" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/expression/aggregation" @@ -279,9 +278,6 @@ func (e *PhysicalExchangeReceiver) ToPB(ctx sessionctx.Context, storeType kv.Sto fieldTypes := make([]*tipb.FieldType, 0, len(e.Schema().Columns)) for _, column := range e.Schema().Columns { pbType := expression.ToPBFieldType(column.RetType) - if column.RetType.Tp == mysql.TypeEnum { - pbType.Elems = append(pbType.Elems, column.RetType.Elems...) 
- } fieldTypes = append(fieldTypes, pbType) } ecExec := &tipb.ExchangeReceiver{ diff --git a/planner/core/plan_to_pb_test.go b/planner/core/plan_to_pb_test.go index e16e4aaf826c8..0162cce7397b5 100644 --- a/planner/core/plan_to_pb_test.go +++ b/planner/core/plan_to_pb_test.go @@ -73,4 +73,13 @@ func (s *testDistsqlSuite) TestColumnToProto(c *C) { } pc = util.ColumnToProto(col1) c.Assert(pc.Collation, Equals, int32(-8)) + + tp = types.NewFieldType(mysql.TypeEnum) + tp.Flag = 10 + tp.Elems = []string{"a", "b"} + col2 := &model.ColumnInfo{ + FieldType: *tp, + } + pc = util.ColumnToProto(col2) + c.Assert(len(pc.Elems), Equals, 2) } From 6ccbd0b9e9d5676010ef4dde4f4d952965ec0b96 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Thu, 15 Jul 2021 17:09:33 +0800 Subject: [PATCH 14/38] session, ddl: amend txn correctly when "modify column" needs reorg data with tidb_enable_amend_pessimistic_txn=on. (#26269) (#26273) --- ddl/db_test.go | 69 +++++++++++++++++++++++++++++++++++++++ session/schema_amender.go | 4 ++- 2 files changed, 72 insertions(+), 1 deletion(-) diff --git a/ddl/db_test.go b/ddl/db_test.go index af671f025927c..e5c944e99c833 100644 --- a/ddl/db_test.go +++ b/ddl/db_test.go @@ -317,6 +317,75 @@ func (s *testDBSuite2) TestAddUniqueIndexRollback(c *C) { testAddIndexRollback(c, s.store, s.lease, idxName, addIdxSQL, errMsg, hasNullValsInKey) } +func (s *testSerialDBSuite) TestWriteReorgForColumnTypeChangeOnAmendTxn(c *C) { + tk2 := testkit.NewTestKit(c, s.store) + tk2.MustExec("use test_db") + tk2.MustExec("set global tidb_enable_amend_pessimistic_txn = ON;") + defer func() { + tk2.MustExec("set global tidb_enable_amend_pessimistic_txn = OFF;") + }() + + d := s.dom.DDL() + originalHook := d.GetHook() + defer d.(ddl.DDLForTest).SetHook(originalHook) + testInsertOnModifyColumn := func(sql string, startColState, commitColState model.SchemaState, retStrs []string, retErr error) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test_db") + tk.MustExec("drop table if exists t1") + tk.MustExec("create table t1 (c1 int, c2 int, c3 int, unique key(c1))") + tk.MustExec("insert into t1 values (20, 20, 20);") + + var checkErr error + tk1 := testkit.NewTestKit(c, s.store) + hook := &ddl.TestDDLCallback{Do: s.dom} + times := 0 + hook.OnJobUpdatedExported = func(job *model.Job) { + if job.Type != model.ActionModifyColumn || checkErr != nil || + (job.SchemaState != startColState && job.SchemaState != commitColState) { + return + } + + if job.SchemaState == startColState { + tk1.MustExec("use test_db") + tk1.MustExec("begin pessimistic;") + tk1.MustExec("insert into t1 values(101, 102, 103)") + return + } + if times == 0 { + _, checkErr = tk1.Exec("commit;") + } + times++ + } + d.(ddl.DDLForTest).SetHook(hook) + + tk.MustExec(sql) + if retErr == nil { + c.Assert(checkErr, IsNil) + } else { + c.Assert(strings.Contains(checkErr.Error(), retErr.Error()), IsTrue) + } + tk.MustQuery("select * from t1;").Check(testkit.Rows(retStrs...)) + + tk.MustExec("admin check table t1") + } + + // Testing it needs reorg data. 
+ ddlStatement := "alter table t1 change column c2 cc smallint;" + testInsertOnModifyColumn(ddlStatement, model.StateNone, model.StateWriteReorganization, []string{"20 20 20"}, domain.ErrInfoSchemaChanged) + testInsertOnModifyColumn(ddlStatement, model.StateDeleteOnly, model.StateWriteReorganization, []string{"20 20 20"}, domain.ErrInfoSchemaChanged) + testInsertOnModifyColumn(ddlStatement, model.StateWriteOnly, model.StateWriteReorganization, []string{"20 20 20"}, domain.ErrInfoSchemaChanged) + testInsertOnModifyColumn(ddlStatement, model.StateNone, model.StatePublic, []string{"20 20 20"}, domain.ErrInfoSchemaChanged) + testInsertOnModifyColumn(ddlStatement, model.StateDeleteOnly, model.StatePublic, []string{"20 20 20"}, domain.ErrInfoSchemaChanged) + testInsertOnModifyColumn(ddlStatement, model.StateWriteOnly, model.StatePublic, []string{"20 20 20"}, domain.ErrInfoSchemaChanged) + + // Testing it needs not reorg data. This case only have two state: none, public. + ddlStatement = "alter table t1 change column c2 cc bigint;" + testInsertOnModifyColumn(ddlStatement, model.StateNone, model.StateWriteReorganization, []string{"20 20 20"}, nil) + testInsertOnModifyColumn(ddlStatement, model.StateWriteOnly, model.StateWriteReorganization, []string{"20 20 20"}, nil) + testInsertOnModifyColumn(ddlStatement, model.StateNone, model.StatePublic, []string{"20 20 20", "101 102 103"}, nil) + testInsertOnModifyColumn(ddlStatement, model.StateWriteOnly, model.StatePublic, []string{"20 20 20"}, nil) +} + func (s *testSerialDBSuite) TestAddExpressionIndexRollback(c *C) { config.UpdateGlobal(func(conf *config.Config) { conf.Experimental.AllowsExpressionIndex = true diff --git a/session/schema_amender.go b/session/schema_amender.go index 4d00b38873b37..bee9055dd9070 100644 --- a/session/schema_amender.go +++ b/session/schema_amender.go @@ -194,8 +194,9 @@ func colChangeAmendable(colAtStart *model.ColumnInfo, colAtCommit *model.ColumnI // collectModifyColAmendOps is used to check if there is only column size increasing change.Other column type changes // such as column change from nullable to not null or column type change are not supported by now. func (a *amendCollector) collectModifyColAmendOps(tblAtStart, tblAtCommit table.Table) ([]amendOp, error) { - for _, colAtCommit := range tblAtCommit.Cols() { + for _, colAtCommit := range tblAtCommit.WritableCols() { colAtStart := findColByID(tblAtStart, colAtCommit.ID) + // It can't find colAtCommit's ID from tblAtStart's public columns when "modify/change column" needs reorg data. if colAtStart != nil { err := colChangeAmendable(colAtStart.ColumnInfo, colAtCommit.ColumnInfo) if err != nil { @@ -206,6 +207,7 @@ func (a *amendCollector) collectModifyColAmendOps(tblAtStart, tblAtCommit table. // is newly added or modified from an original column.Report error to solve the issue // https://github.com/pingcap/tidb/issues/21470. This change will make amend fail for adding column // and modifying columns at the same time. + // In addition, amended operations are not currently supported and it goes to this logic when "modify/change column" needs reorg data. 
return nil, errors.Errorf("column=%v id=%v is not found for table=%v checking column modify", colAtCommit.Name, colAtCommit.ID, tblAtCommit.Meta().Name.String()) } From 253045aa51a222eadb72afc0303aac2a2c2092e3 Mon Sep 17 00:00:00 2001 From: Lei Zhao Date: Thu, 15 Jul 2021 17:39:33 +0800 Subject: [PATCH 15/38] lock_resolver: avoid pessimistic transactions using resolveLocksForWrite (#25973) --- store/tikv/lock_resolver.go | 6 ++++-- store/tikv/prewrite.go | 2 +- store/tikv/tests/async_commit_test.go | 23 +++++++++++++++++++++++ 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/store/tikv/lock_resolver.go b/store/tikv/lock_resolver.go index 7ff963113bb3b..6fdcab23c32df 100644 --- a/store/tikv/lock_resolver.go +++ b/store/tikv/lock_resolver.go @@ -433,8 +433,10 @@ func (lr *LockResolver) resolveLocks(bo *Backoffer, callerStartTS uint64, locks return msBeforeTxnExpired.value(), pushed, nil } -func (lr *LockResolver) resolveLocksForWrite(bo *Backoffer, callerStartTS uint64, locks []*Lock) (int64, error) { - msBeforeTxnExpired, _, err := lr.resolveLocks(bo, callerStartTS, locks, true, false) +func (lr *LockResolver) resolveLocksForWrite(bo *Backoffer, callerStartTS, callerForUpdateTS uint64, locks []*Lock) (int64, error) { + // The forWrite parameter is only useful for optimistic transactions which can avoid deadlock between large transactions, + // so only use forWrite if the callerForUpdateTS is zero. + msBeforeTxnExpired, _, err := lr.resolveLocks(bo, callerStartTS, locks, callerForUpdateTS == 0, false) return msBeforeTxnExpired, err } diff --git a/store/tikv/prewrite.go b/store/tikv/prewrite.go index ae1a5d9bf5c81..0a599b303b464 100644 --- a/store/tikv/prewrite.go +++ b/store/tikv/prewrite.go @@ -293,7 +293,7 @@ func (action actionPrewrite) handleSingleBatch(c *twoPhaseCommitter, bo *Backoff locks = append(locks, lock) } start := time.Now() - msBeforeExpired, err := c.store.lockResolver.resolveLocksForWrite(bo, c.startTS, locks) + msBeforeExpired, err := c.store.lockResolver.resolveLocksForWrite(bo, c.startTS, c.forUpdateTS, locks) if err != nil { return errors.Trace(err) } diff --git a/store/tikv/tests/async_commit_test.go b/store/tikv/tests/async_commit_test.go index f67482e69a44e..93e0961a5f4ab 100644 --- a/store/tikv/tests/async_commit_test.go +++ b/store/tikv/tests/async_commit_test.go @@ -28,6 +28,7 @@ import ( "github.com/pingcap/tidb/store/mockstore/unistore" "github.com/pingcap/tidb/store/tikv" tikverr "github.com/pingcap/tidb/store/tikv/error" + "github.com/pingcap/tidb/store/tikv/kv" "github.com/pingcap/tidb/store/tikv/mockstore/cluster" "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/store/tikv/tikvrpc" @@ -178,6 +179,7 @@ func (s *testAsyncCommitSuite) lockKeysWithAsyncCommit(c *C, keys, values [][]by tpc, err := txnProbe.NewCommitter(0) c.Assert(err, IsNil) tpc.SetPrimaryKey(primaryKey) + tpc.SetUseAsyncCommit() ctx := context.Background() err = tpc.PrewriteAllMutations(ctx) @@ -539,3 +541,24 @@ func (m *mockResolveClient) SendRequest(ctx context.Context, addr string, req *t func (m *mockResolveClient) Close() error { return m.inner.Close() } + +// TestPessimisticTxnResolveAsyncCommitLock tests that pessimistic transactions resolve non-expired async-commit locks during the prewrite phase. +// Pessimistic transactions will resolve locks immediately during the prewrite phase because of the special logic for handling non-pessimistic lock conflict. 
+// However, async-commit locks can't be resolved until they expire. This test covers it. +func (s *testAsyncCommitSuite) TestPessimisticTxnResolveAsyncCommitLock(c *C) { + ctx := context.Background() + k := []byte("k") + + txn, err := s.store.Begin() + c.Assert(err, IsNil) + txn.SetPessimistic(true) + err = txn.LockKeys(ctx, &kv.LockCtx{ForUpdateTS: txn.StartTS()}, []byte("k1")) + c.Assert(err, IsNil) + + // Lock the key with a async-commit lock. + s.lockKeysWithAsyncCommit(c, [][]byte{}, [][]byte{}, k, k, false) + + txn.Set(k, k) + err = txn.Commit(context.Background()) + c.Assert(err, IsNil) +} From baa3b7232689883fbc76208a016a9fb72aa309eb Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Fri, 16 Jul 2021 12:29:34 +0800 Subject: [PATCH 16/38] mpp: check the tiflash availabilities before launching mpp queries. (#26130) (#26192) --- go.mod | 2 +- go.sum | 4 +- store/copr/batch_coprocessor.go | 98 ++++++++++++++++++------- store/copr/mpp.go | 6 +- store/mockstore/unistore/tikv/server.go | 7 +- store/tikv/region_cache.go | 15 ++-- store/tikv/region_request_test.go | 3 + store/tikv/tikvrpc/tikvrpc.go | 10 +++ 8 files changed, 108 insertions(+), 37 deletions(-) diff --git a/go.mod b/go.mod index 8aa71eafad3cd..0b4c2cb7e9cf2 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/pingcap/failpoint v0.0.0-20210316064728-7acb0f0a3dfd github.com/pingcap/fn v0.0.0-20200306044125-d5540d389059 github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 - github.com/pingcap/kvproto v0.0.0-20210602120243-804ac0a6ce21 + github.com/pingcap/kvproto v0.0.0-20210712050333-b66fdbd6bfd5 github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 github.com/pingcap/parser v0.0.0-20210610025415-8d8b6346d3f0 github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3 diff --git a/go.sum b/go.sum index 6dcbd8e96ae86..a8f9b1a76031c 100644 --- a/go.sum +++ b/go.sum @@ -432,8 +432,8 @@ github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989/go.mod h1:O17Xtb github.com/pingcap/kvproto v0.0.0-20191211054548-3c6b38ea5107/go.mod h1:WWLmULLO7l8IOcQG+t+ItJ3fEcrL5FxF0Wu+HrMy26w= github.com/pingcap/kvproto v0.0.0-20200411081810-b85805c9476c/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/kvproto v0.0.0-20210219064844-c1844a4775d6/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= -github.com/pingcap/kvproto v0.0.0-20210602120243-804ac0a6ce21 h1:WLViPiCazVwK0wULKpmwLVP/aA8NvyyfOUqQy0bPEPk= -github.com/pingcap/kvproto v0.0.0-20210602120243-804ac0a6ce21/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= +github.com/pingcap/kvproto v0.0.0-20210712050333-b66fdbd6bfd5 h1:LN/ml4lm5+AYdn+N/CJ102wFUph2OIpo8hHqi8QxKiQ= +github.com/pingcap/kvproto v0.0.0-20210712050333-b66fdbd6bfd5/go.mod h1:IOdRDPLyda8GX2hE/jO7gqaCV/PNFh8BZQCQZXfIOqI= github.com/pingcap/log v0.0.0-20191012051959-b742a5d432e9/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go index ade644f411897..1e8aadda2b8a2 100644 --- a/store/copr/batch_coprocessor.go +++ 
b/store/copr/batch_coprocessor.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/kvproto/pkg/coprocessor" "github.com/pingcap/kvproto/pkg/kvrpcpb" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/kvproto/pkg/mpp" "github.com/pingcap/log" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/store/driver/backoff" @@ -102,46 +103,90 @@ func (rs *batchCopResponse) RespTime() time.Duration { // 2. for the remaining regions: // if there is only 1 available store, then put the region to the related store // otherwise, use a greedy algorithm to put it into the store with highest weight -func balanceBatchCopTask(originalTasks []*batchCopTask) []*batchCopTask { +func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []*batchCopTask, isMPP bool) []*batchCopTask { if len(originalTasks) <= 1 { return originalTasks } + cache := kvStore.GetRegionCache() storeTaskMap := make(map[uint64]*batchCopTask) + // storeCandidateRegionMap stores all the possible store->region map. Its content is + // store id -> region signature -> region info. We can see it as store id -> region lists. storeCandidateRegionMap := make(map[uint64]map[string]RegionInfo) totalRegionCandidateNum := 0 totalRemainingRegionNum := 0 - for _, task := range originalTasks { - taskStoreID := task.regionInfos[0].AllStores[0] - batchTask := &batchCopTask{ - storeAddr: task.storeAddr, - cmdType: task.cmdType, - ctx: task.ctx, - regionInfos: []RegionInfo{task.regionInfos[0]}, + if !isMPP { + for _, task := range originalTasks { + taskStoreID := task.regionInfos[0].AllStores[0] + batchTask := &batchCopTask{ + storeAddr: task.storeAddr, + cmdType: task.cmdType, + ctx: task.ctx, + regionInfos: []RegionInfo{task.regionInfos[0]}, + } + storeTaskMap[taskStoreID] = batchTask + } + } else { + // decide the available stores + stores := cache.RegionCache.GetTiFlashStores() + var wg sync.WaitGroup + var mu sync.Mutex + wg.Add(len(stores)) + for i := range stores { + go func(idx int) { + defer wg.Done() + s := stores[idx] + aliveReq := tikvrpc.NewRequest(tikvrpc.CmdMPPAlive, &mpp.IsAliveRequest{}, kvrpcpb.Context{}) + aliveReq.StoreTp = tikvrpc.TiFlash + alive := false + resp, err := kvStore.GetTiKVClient().SendRequest(ctx, s.GetAddr(), aliveReq, tikv.ReadTimeoutMedium) + if err != nil { + logutil.BgLogger().Warn("Cannot detect store's availablity", zap.String("store address", s.GetAddr()), zap.String("err message", err.Error())) + } else { + rpcResp := resp.Resp.(*mpp.IsAliveResponse) + if rpcResp.Available { + alive = true + } else { + logutil.BgLogger().Warn("Cannot detect store's availablity", zap.String("store address", s.GetAddr())) + } + } + if !alive { + return + } + + mu.Lock() + defer mu.Unlock() + storeTaskMap[s.StoreID()] = &batchCopTask{ + storeAddr: s.GetAddr(), + cmdType: originalTasks[0].cmdType, + ctx: &tikv.RPCContext{Addr: s.GetAddr(), Store: s}, + } + }(i) } - storeTaskMap[taskStoreID] = batchTask + wg.Wait() } for _, task := range originalTasks { - taskStoreID := task.regionInfos[0].AllStores[0] for index, ri := range task.regionInfos { // for each region, figure out the valid store num validStoreNum := 0 - if index == 0 { + if index == 0 && !isMPP { continue } - if len(ri.AllStores) <= 1 { - validStoreNum = 1 - } else { - for _, storeID := range ri.AllStores { - if _, ok := storeTaskMap[storeID]; ok { - validStoreNum++ - } + var validStoreID uint64 + for _, storeID := range ri.AllStores { + if _, ok := storeTaskMap[storeID]; ok { + 
validStoreNum++ + // original store id might be invalid, so we have to set it again. + validStoreID = storeID } } - if validStoreNum == 1 { + if validStoreNum == 0 { + logutil.BgLogger().Warn("Meet regions that don't have an available store. Give up balancing") + return originalTasks + } else if validStoreNum == 1 { // if only one store is valid, just put it to storeTaskMap - storeTaskMap[taskStoreID].regionInfos = append(storeTaskMap[taskStoreID].regionInfos, ri) + storeTaskMap[validStoreID].regionInfos = append(storeTaskMap[validStoreID].regionInfos, ri) } else { // if more than one store is valid, put the region // to store candidate map @@ -239,12 +284,15 @@ func balanceBatchCopTask(originalTasks []*batchCopTask) []*batchCopTask { var ret []*batchCopTask for _, task := range storeTaskMap { - ret = append(ret, task) + if len(task.regionInfos) > 0 { + ret = append(ret, task) + } } return ret } -func buildBatchCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, storeType kv.StoreType) ([]*batchCopTask, error) { +func buildBatchCopTasks(bo *Backoffer, store *kvStore, ranges *KeyRanges, storeType kv.StoreType, isMPP bool) ([]*batchCopTask, error) { + cache := store.GetRegionCache() start := time.Now() const cmdType = tikvrpc.CmdBatchCop rangesLen := ranges.Len() @@ -318,7 +366,7 @@ func buildBatchCopTasks(bo *Backoffer, cache *RegionCache, ranges *KeyRanges, st } logutil.BgLogger().Debug(msg) } - batchTasks = balanceBatchCopTask(batchTasks) + batchTasks = balanceBatchCopTask(bo.GetCtx(), store, batchTasks, isMPP) if log.GetLevel() <= zap.DebugLevel { msg := "After region balance:" for _, task := range batchTasks { @@ -345,7 +393,7 @@ func (c *CopClient) sendBatch(ctx context.Context, req *kv.Request, vars *tikv.V ctx = context.WithValue(ctx, tikv.TxnStartKey(), req.StartTs) bo := backoff.NewBackofferWithVars(ctx, copBuildTaskMaxBackoff, vars) ranges := NewKeyRanges(req.KeyRanges) - tasks, err := buildBatchCopTasks(bo, c.store.GetRegionCache(), ranges, req.StoreType) + tasks, err := buildBatchCopTasks(bo, c.store.kvStore, ranges, req.StoreType, false) if err != nil { return copErrorResponse{err} } @@ -486,7 +534,7 @@ func (b *batchCopIterator) retryBatchCopTask(ctx context.Context, bo *Backoffer, ranges = append(ranges, *ran) }) } - return buildBatchCopTasks(bo, b.store.GetRegionCache(), NewKeyRanges(ranges), b.req.StoreType) + return buildBatchCopTasks(bo, b.store, NewKeyRanges(ranges), b.req.StoreType, false) } const readTimeoutUltraLong = 3600 * time.Second // For requests that may scan many regions for tiflash. 
diff --git a/store/copr/mpp.go b/store/copr/mpp.go index 4be45c3288e23..e0915765629c6 100644 --- a/store/copr/mpp.go +++ b/store/copr/mpp.go @@ -47,8 +47,8 @@ func (c *batchCopTask) GetAddress() string { func (c *MPPClient) selectAllTiFlashStore() []kv.MPPTaskMeta { resultTasks := make([]kv.MPPTaskMeta, 0) - for _, addr := range c.store.GetRegionCache().GetTiFlashStoreAddrs() { - task := &batchCopTask{storeAddr: addr, cmdType: tikvrpc.CmdMPPTask} + for _, s := range c.store.GetRegionCache().GetTiFlashStores() { + task := &batchCopTask{storeAddr: s.GetAddr(), cmdType: tikvrpc.CmdMPPTask} resultTasks = append(resultTasks, task) } return resultTasks @@ -62,7 +62,7 @@ func (c *MPPClient) ConstructMPPTasks(ctx context.Context, req *kv.MPPBuildTasks return c.selectAllTiFlashStore(), nil } ranges := NewKeyRanges(req.KeyRanges) - tasks, err := buildBatchCopTasks(bo, c.store.GetRegionCache(), ranges, kv.TiFlash) + tasks, err := buildBatchCopTasks(bo, c.store, ranges, kv.TiFlash, true) if err != nil { return nil, errors.Trace(err) } diff --git a/store/mockstore/unistore/tikv/server.go b/store/mockstore/unistore/tikv/server.go index 8d8d2553fc823..03c8d5252a6fa 100644 --- a/store/mockstore/unistore/tikv/server.go +++ b/store/mockstore/unistore/tikv/server.go @@ -636,7 +636,12 @@ func (mrm *MockRegionManager) removeMPPTaskHandler(taskID int64, storeID uint64) return errors.New("cannot find mpp task") } -// DispatchMPPTask implements implements the tikvpb.TikvServer interface. +// IsAlive implements the tikvpb.TikvServer interface. +func (svr *Server) IsAlive(_ context.Context, _ *mpp.IsAliveRequest) (*mpp.IsAliveResponse, error) { + panic("todo") +} + +// DispatchMPPTask implements the tikvpb.TikvServer interface. func (svr *Server) DispatchMPPTask(_ context.Context, _ *mpp.DispatchTaskRequest) (*mpp.DispatchTaskResponse, error) { panic("todo") } diff --git a/store/tikv/region_cache.go b/store/tikv/region_cache.go index 12295947b2448..3b287679efced 100644 --- a/store/tikv/region_cache.go +++ b/store/tikv/region_cache.go @@ -1614,17 +1614,17 @@ func (c *RegionCache) PDClient() pd.Client { return c.pdClient } -// GetTiFlashStoreAddrs returns addresses of all tiflash nodes. -func (c *RegionCache) GetTiFlashStoreAddrs() []string { +// GetTiFlashStores returns information of all tiflash nodes. +func (c *RegionCache) GetTiFlashStores() []*Store { c.storeMu.RLock() defer c.storeMu.RUnlock() - var addrs []string + var stores []*Store for _, s := range c.storeMu.stores { if s.storeType == tikvrpc.TiFlash { - addrs = append(addrs, s.addr) + stores = append(stores, s) } } - return addrs + return stores } // btreeItem is BTree's Item that uses []byte to compare. @@ -1946,6 +1946,11 @@ func (s *Store) IsTiFlash() bool { return s.storeType == tikvrpc.TiFlash } +// StoreID returns storeID. +func (s *Store) StoreID() uint64 { + return s.storeID +} + // initResolve resolves the address of the store that never resolved and returns an // empty string if it's a tombstone. 
func (s *Store) initResolve(bo *Backoffer, c *RegionCache) (addr string, err error) { diff --git a/store/tikv/region_request_test.go b/store/tikv/region_request_test.go index 3dd9d37ed330a..a517bdfe76eaa 100644 --- a/store/tikv/region_request_test.go +++ b/store/tikv/region_request_test.go @@ -392,6 +392,9 @@ func (s *mockTikvGrpcServer) KvDeleteRange(context.Context, *kvrpcpb.DeleteRange func (s *mockTikvGrpcServer) RawGet(context.Context, *kvrpcpb.RawGetRequest) (*kvrpcpb.RawGetResponse, error) { return nil, errors.New("unreachable") } +func (s *mockTikvGrpcServer) IsAlive(context.Context, *mpp.IsAliveRequest) (*mpp.IsAliveResponse, error) { + return nil, errors.New("unreachable") +} func (s *mockTikvGrpcServer) RawBatchGet(context.Context, *kvrpcpb.RawBatchGetRequest) (*kvrpcpb.RawBatchGetResponse, error) { return nil, errors.New("unreachable") } diff --git a/store/tikv/tikvrpc/tikvrpc.go b/store/tikv/tikvrpc/tikvrpc.go index ae450b6019799..c2087376b59b1 100644 --- a/store/tikv/tikvrpc/tikvrpc.go +++ b/store/tikv/tikvrpc/tikvrpc.go @@ -77,6 +77,7 @@ const ( CmdMPPTask CmdMPPConn CmdMPPCancel + CmdMPPAlive CmdMvccGetByKey CmdType = 1024 + iota CmdMvccGetByStartTs @@ -153,6 +154,8 @@ func (t CmdType) String() string { return "EstablishMPPConnection" case CmdMPPCancel: return "CancelMPPTask" + case CmdMPPAlive: + return "MPPAlive" case CmdMvccGetByKey: return "MvccGetByKey" case CmdMvccGetByStartTs: @@ -384,6 +387,11 @@ func (req *Request) CancelMPPTask() *mpp.CancelTaskRequest { return req.Req.(*mpp.CancelTaskRequest) } +// IsMPPAlive returns IsAlive task in request +func (req *Request) IsMPPAlive() *mpp.IsAliveRequest { + return req.Req.(*mpp.IsAliveRequest) +} + // MvccGetByKey returns MvccGetByKeyRequest in request. func (req *Request) MvccGetByKey() *kvrpcpb.MvccGetByKeyRequest { return req.Req.(*kvrpcpb.MvccGetByKeyRequest) @@ -913,6 +921,8 @@ func CallRPC(ctx context.Context, client tikvpb.TikvClient, req *Request) (*Resp case CmdMPPCancel: // it cannot use the ctx with cancel(), otherwise this cmd will fail. 
resp.Resp, err = client.CancelMPPTask(ctx, req.CancelMPPTask()) + case CmdMPPAlive: + resp.Resp, err = client.IsAlive(ctx, req.IsMPPAlive()) case CmdCopStream: var streamClient tikvpb.Tikv_CoprocessorStreamClient streamClient, err = client.CoprocessorStream(ctx, req.Cop()) From 56fd6199e58cab97a2d468b32a781e7e6115e452 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=99=8E?= Date: Fri, 16 Jul 2021 16:55:33 +0800 Subject: [PATCH 17/38] ci: fix check errors (#26295) --- ddl/column_type_change_test.go | 4 ++-- executor/builder.go | 4 ++-- go.mod | 1 + go.sum | 3 ++- planner/core/logical_plan_builder.go | 1 - planner/core/preprocess.go | 2 +- server/http_handler_test.go | 2 ++ server/tidb_test.go | 4 ++-- store/copr/batch_coprocessor.go | 4 ++-- store/tikv/backoff.go | 1 - store/tikv/region_request_test.go | 2 ++ 11 files changed, 16 insertions(+), 12 deletions(-) diff --git a/ddl/column_type_change_test.go b/ddl/column_type_change_test.go index 4abbab1b7a29c..04092c0c0e8d6 100644 --- a/ddl/column_type_change_test.go +++ b/ddl/column_type_change_test.go @@ -1678,7 +1678,7 @@ func (s *testColumnTypeChangeSuite) TestChangingColOriginDefaultValueAfterAddCol tk1 := testkit.NewTestKit(c, s.store) tk1.MustExec("use test") - tk.MustExec(fmt.Sprintf("set time_zone = 'UTC'")) + tk.MustExec("set time_zone = 'UTC'") tk.MustExec("drop table if exists t") tk.MustExec("create table t(a int, b int not null, unique key(a))") tk.MustExec("insert into t values(1, 1)") @@ -1763,7 +1763,7 @@ func (s *testColumnTypeChangeSuite) TestChangingColOriginDefaultValueAfterAddCol tk1 := testkit.NewTestKit(c, s.store) tk1.MustExec("use test") - tk.MustExec(fmt.Sprintf("set time_zone = 'UTC'")) + tk.MustExec("set time_zone = 'UTC'") tk.MustExec("drop table if exists t") tk.MustExec("create table t(a VARCHAR(31) NULL DEFAULT 'wwrzfwzb01j6ddj', b DECIMAL(12,0) NULL DEFAULT '-729850476163')") tk.MustExec("ALTER TABLE t ADD COLUMN x CHAR(218) NULL DEFAULT 'lkittuae'") diff --git a/executor/builder.go b/executor/builder.go index 1af2b34aebc59..5e69418656385 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -2790,7 +2790,7 @@ func (b *executorBuilder) buildTableReader(v *plannercore.PhysicalTableReader) E if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() { return ret } - // When isPartition is set, it means the union rewriting is done, so a partition reader is prefered. + // When isPartition is set, it means the union rewriting is done, so a partition reader is preferred. if ok, _ := ts.IsPartition(); ok { return ret } @@ -3019,7 +3019,7 @@ func (b *executorBuilder) buildIndexReader(v *plannercore.PhysicalIndexReader) E if !b.ctx.GetSessionVars().UseDynamicPartitionPrune() { return ret } - // When isPartition is set, it means the union rewriting is done, so a partition reader is prefered. + // When isPartition is set, it means the union rewriting is done, so a partition reader is preferred. 
if ok, _ := is.IsPartition(); ok { return ret } diff --git a/go.mod b/go.mod index 0b4c2cb7e9cf2..d565743c3dd1b 100644 --- a/go.mod +++ b/go.mod @@ -74,6 +74,7 @@ require ( golang.org/x/tools v0.1.0 google.golang.org/grpc v1.27.1 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + honnef.co/go/tools v0.2.0 // indirect modernc.org/mathutil v1.2.2 // indirect sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 diff --git a/go.sum b/go.sum index a8f9b1a76031c..69b937805e7c8 100644 --- a/go.sum +++ b/go.sum @@ -925,8 +925,9 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.2.0 h1:ws8AfbgTX3oIczLPNPCu5166oBg9ST2vNs0rcht+mDE= +honnef.co/go/tools v0.2.0/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= modernc.org/mathutil v1.2.2 h1:+yFk8hBprV+4c0U9GjFtL+dV3N8hOJ8JCituQcMShFY= modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 1be83d87786ab..c0ffdc9921edb 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -3634,7 +3634,6 @@ func (ds *DataSource) addExtraPIDColumn(info *extraPIDInfo) { info.Columns = append(info.Columns, pidCol) info.TblIDs = append(info.TblIDs, ds.TableInfo().ID) - return } var ( diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index 5f7190d166d3c..31a43b0c3da06 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -129,7 +129,7 @@ const ( // PreprocessorReturn is used to retain information obtained in the preprocessor. 
type PreprocessorReturn struct { - initedLastSnapshotTS bool + initedLastSnapshotTS bool //nolint ExplicitStaleness bool SnapshotTSEvaluator func(sessionctx.Context) (uint64, error) // LastSnapshotTS is the last evaluated snapshotTS if any diff --git a/server/http_handler_test.go b/server/http_handler_test.go index abb466f897080..fbfce82624021 100644 --- a/server/http_handler_test.go +++ b/server/http_handler_test.go @@ -1238,6 +1238,7 @@ func (ts *HTTPHandlerTestSerialSuite) TestPostSettings(c *C) { form = make(url.Values) form.Set("tidb_deadlock_history_capacity", "5") resp, err = ts.formStatus("/settings", form) + c.Assert(err, IsNil) c.Assert(len(deadlockhistory.GlobalDeadlockHistory.GetAll()), Equals, 5) c.Assert(deadlockhistory.GlobalDeadlockHistory.GetAll()[0].ID, Equals, uint64(6)) c.Assert(deadlockhistory.GlobalDeadlockHistory.GetAll()[4].ID, Equals, uint64(10)) @@ -1248,6 +1249,7 @@ func (ts *HTTPHandlerTestSerialSuite) TestPostSettings(c *C) { form = make(url.Values) form.Set("tidb_deadlock_history_capacity", "6") resp, err = ts.formStatus("/settings", form) + c.Assert(err, IsNil) deadlockhistory.GlobalDeadlockHistory.Push(dummyRecord()) c.Assert(len(deadlockhistory.GlobalDeadlockHistory.GetAll()), Equals, 6) c.Assert(deadlockhistory.GlobalDeadlockHistory.GetAll()[0].ID, Equals, uint64(7)) diff --git a/server/tidb_test.go b/server/tidb_test.go index cbf49eb8686fb..faaf5352560a2 100644 --- a/server/tidb_test.go +++ b/server/tidb_test.go @@ -1500,7 +1500,7 @@ func (ts *tidbTestTopSQLSuite) TestTopSQLAgent(c *C) { agentServer.HangFromNow(time.Second * 6) // run another set of SQL queries cancel() - cancel = runWorkload(11, 20) + _ = runWorkload(11, 20) agentServer.WaitCollectCnt(1, time.Second*8) checkFn(5) @@ -1517,7 +1517,7 @@ func (ts *tidbTestTopSQLSuite) TestTopSQLAgent(c *C) { // run another set of SQL queries cancel() - cancel = runWorkload(11, 20) + _ = runWorkload(11, 20) // agent server shutdown agentServer.Stop() // agent server restart diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go index 1e8aadda2b8a2..d964007206899 100644 --- a/store/copr/batch_coprocessor.go +++ b/store/copr/batch_coprocessor.go @@ -141,13 +141,13 @@ func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks [] alive := false resp, err := kvStore.GetTiKVClient().SendRequest(ctx, s.GetAddr(), aliveReq, tikv.ReadTimeoutMedium) if err != nil { - logutil.BgLogger().Warn("Cannot detect store's availablity", zap.String("store address", s.GetAddr()), zap.String("err message", err.Error())) + logutil.BgLogger().Warn("Cannot detect store's availability", zap.String("store address", s.GetAddr()), zap.String("err message", err.Error())) } else { rpcResp := resp.Resp.(*mpp.IsAliveResponse) if rpcResp.Available { alive = true } else { - logutil.BgLogger().Warn("Cannot detect store's availablity", zap.String("store address", s.GetAddr())) + logutil.BgLogger().Warn("Cannot detect store's availability", zap.String("store address", s.GetAddr())) } } if !alive { diff --git a/store/tikv/backoff.go b/store/tikv/backoff.go index 9dd4c3d8b0e04..34285a8a8d204 100644 --- a/store/tikv/backoff.go +++ b/store/tikv/backoff.go @@ -29,7 +29,6 @@ type BackoffConfig = retry.Config // Maximum total sleep time(in ms) for kv/cop commands. 
const ( gcResolveLockMaxBackoff = 100000 - pdRPCMaxBackoff = 20000 // CommitSecondaryMaxBackoff is max sleep time of the 'commit' command CommitSecondaryMaxBackoff = 41000 ) diff --git a/store/tikv/region_request_test.go b/store/tikv/region_request_test.go index a517bdfe76eaa..f35dc217a7313 100644 --- a/store/tikv/region_request_test.go +++ b/store/tikv/region_request_test.go @@ -863,6 +863,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector(c *C) { region.lastAccess = time.Now().Unix() replicaSelector, err = newReplicaSelector(cache, regionLoc.Region) + c.Assert(err, IsNil) c.Assert(replicaSelector, NotNil) cache.testingKnobs.mockRequestLiveness = func(s *Store, bo *Backoffer) livenessState { return reachable @@ -944,6 +945,7 @@ func (s *testRegionRequestToThreeStoresSuite) TestReplicaSelector(c *C) { replicaSelector, _ = newReplicaSelector(cache, regionLoc.Region) replicaSelector.next(s.bo) rpcCtx, err = replicaSelector.next(s.bo) + c.Assert(err, IsNil) replicaSelector.OnSendSuccess() // Verify the regionStore is updated and the workTiKVIdx points to the leader. leaderStore, leaderPeer, _, _ = region.WorkStorePeer(region.getStore()) From 7a4c020a2d0d5b64ead528e568671315783d9a57 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Fri, 16 Jul 2021 17:07:33 +0800 Subject: [PATCH 18/38] expression: uncomment pushdown for JSONUnquote expression (#24504) (#26265) --- expression/expression.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/expression/expression.go b/expression/expression.go index daa3be56a9959..6a456329bd0ce 100644 --- a/expression/expression.go +++ b/expression/expression.go @@ -961,7 +961,7 @@ func scalarExprSupportedByTiKV(sf *ScalarFunction) bool { ast.JSONType, ast.JSONExtract, ast.JSONObject, ast.JSONArray, ast.JSONMerge, ast.JSONSet, ast.JSONInsert /*ast.JSONReplace,*/, ast.JSONRemove, ast.JSONLength, // FIXME: JSONUnquote is incompatible with Coprocessor - // ast.JSONUnquote, + ast.JSONUnquote, // date functions. 
ast.DateFormat, ast.FromDays /*ast.ToDays,*/, ast.DayOfYear, ast.DayOfMonth, ast.Year, ast.Month, From 415021632298c5cfbf5492bbae407d665dcf9fe2 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Fri, 16 Jul 2021 17:19:33 +0800 Subject: [PATCH 19/38] executor, privilege: require CONFIG privilege for is.cluster_config (#26071) (#26150) --- executor/memtable_reader.go | 4 +++ privilege/privileges/privileges_test.go | 40 +++++++++++++++++-------- 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/executor/memtable_reader.go b/executor/memtable_reader.go index f98306b5da51d..84abc029367c4 100644 --- a/executor/memtable_reader.go +++ b/executor/memtable_reader.go @@ -30,6 +30,7 @@ import ( "github.com/pingcap/kvproto/pkg/diagnosticspb" "github.com/pingcap/log" "github.com/pingcap/parser/model" + "github.com/pingcap/parser/mysql" "github.com/pingcap/parser/terror" "github.com/pingcap/sysutil" "github.com/pingcap/tidb/config" @@ -158,6 +159,9 @@ func fetchClusterConfig(sctx sessionctx.Context, nodeTypes, nodeAddrs set.String rows [][]types.Datum err error } + if !hasPriv(sctx, mysql.ConfigPriv) { + return nil, plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("CONFIG") + } serversInfo, err := infoschema.GetClusterServerInfo(sctx) failpoint.Inject("mockClusterConfigServerInfo", func(val failpoint.Value) { if s := val.(string); len(s) > 0 { diff --git a/privilege/privileges/privileges_test.go b/privilege/privileges/privileges_test.go index 96fe0a9c593fe..3cc81da2a4ab4 100644 --- a/privilege/privileges/privileges_test.go +++ b/privilege/privileges/privileges_test.go @@ -1449,10 +1449,8 @@ func (s *testPrivilegeSuite) TestSecurityEnhancedModeInfoschema(c *C) { tk.MustExec("GRANT SUPER ON *.* to uroot1 WITH GRANT OPTION") // super not process tk.MustExec("GRANT SUPER, PROCESS, RESTRICTED_TABLES_ADMIN ON *.* to uroot2 WITH GRANT OPTION") tk.Se.Auth(&auth.UserIdentity{ - Username: "uroot1", - Hostname: "localhost", - AuthUsername: "uroot", - AuthHostname: "%", + Username: "uroot1", + Hostname: "localhost", }, nil, nil) sem.Enable() @@ -1466,10 +1464,8 @@ func (s *testPrivilegeSuite) TestSecurityEnhancedModeInfoschema(c *C) { // That is unless we have the RESTRICTED_TABLES_ADMIN privilege tk.Se.Auth(&auth.UserIdentity{ - Username: "uroot2", - Hostname: "localhost", - AuthUsername: "uroot", - AuthHostname: "%", + Username: "uroot2", + Hostname: "localhost", }, nil, nil) // flip from is NOT NULL etc @@ -1478,6 +1474,28 @@ func (s *testPrivilegeSuite) TestSecurityEnhancedModeInfoschema(c *C) { tk.MustQuery(`SELECT COUNT(*) FROM information_schema.CLUSTER_STATEMENTS_SUMMARY WHERE length(instance) = 36`).Check(testkit.Rows("0")) } +func (s *testPrivilegeSuite) TestClusterConfigInfoschema(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("CREATE USER ccnobody, ccconfig") + tk.MustExec("GRANT CONFIG ON *.* TO ccconfig") + + // incorrect permissions + tk.Se.Auth(&auth.UserIdentity{ + Username: "ccnobody", + Hostname: "localhost", + }, nil, nil) + + err := tk.QueryToErr("SELECT * FROM information_schema.cluster_config") + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the CONFIG privilege(s) for this operation") + + // With correct permissions + tk.Se.Auth(&auth.UserIdentity{ + Username: "ccconfig", + Hostname: "localhost", + }, nil, nil) + tk.MustQuery("SELECT * FROM information_schema.cluster_config") +} + func (s 
*testPrivilegeSuite) TestSecurityEnhancedModeStatusVars(c *C) { // Without TiKV the status var list does not include tidb_gc_leader_desc // So we can only test that the dynamic privilege is grantable. @@ -1487,10 +1505,8 @@ func (s *testPrivilegeSuite) TestSecurityEnhancedModeStatusVars(c *C) { tk.MustExec("CREATE USER unostatus, ustatus") tk.MustExec("GRANT RESTRICTED_STATUS_ADMIN ON *.* to ustatus") tk.Se.Auth(&auth.UserIdentity{ - Username: "unostatus", - Hostname: "localhost", - AuthUsername: "uroot", - AuthHostname: "%", + Username: "unostatus", + Hostname: "localhost", }, nil, nil) } From a82339821ccff507b08bb73338b6f8d4256476c5 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 15:35:34 +0800 Subject: [PATCH 20/38] session: Fix txn.onStmtEnd is not called when some special error occurs in ExecutePreparedStmt (#26056) (#26080) --- session/session.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/session/session.go b/session/session.go index 7ef59197fb3ca..610380b71151d 100644 --- a/session/session.go +++ b/session/session.go @@ -1908,6 +1908,7 @@ func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args [ return nil, err } s.txn.onStmtStart(preparedStmt.SQLDigest.String()) + defer s.txn.onStmtEnd() var is infoschema.InfoSchema var snapshotTS uint64 if preparedStmt.ForUpdateRead { @@ -1924,14 +1925,10 @@ func (s *session) ExecutePreparedStmt(ctx context.Context, stmtID uint32, args [ } else { is = s.GetInfoSchema().(infoschema.InfoSchema) } - var rs sqlexec.RecordSet if ok { - rs, err = s.cachedPlanExec(ctx, is, snapshotTS, stmtID, preparedStmt, args) - } else { - rs, err = s.preparedStmtExec(ctx, is, snapshotTS, stmtID, preparedStmt, args) + return s.cachedPlanExec(ctx, is, snapshotTS, stmtID, preparedStmt, args) } - s.txn.onStmtEnd() - return rs, err + return s.preparedStmtExec(ctx, is, snapshotTS, stmtID, preparedStmt, args) } func (s *session) DropPreparedStmt(stmtID uint32) error { From d9e61d5a0b4c9e4e9ad7af88bd80b6dbafc6da50 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 15:51:35 +0800 Subject: [PATCH 21/38] planner: fix incorrect result of set type for merge join (#25672) (#25695) --- executor/index_lookup_merge_join_test.go | 18 +++++++++++++----- planner/core/exhaust_physical_plans.go | 14 ++++++++------ 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/executor/index_lookup_merge_join_test.go b/executor/index_lookup_merge_join_test.go index 2158026f1966f..1817dca79a3ae 100644 --- a/executor/index_lookup_merge_join_test.go +++ b/executor/index_lookup_merge_join_test.go @@ -159,13 +159,21 @@ func (s *testSuite9) TestIssue20549(c *C) { testkit.Rows("1")) } -func (s *testSuite9) TestIssue24473(c *C) { +func (s *testSuite9) TestIssue24473AndIssue25669(c *C) { tk := testkit.NewTestKitWithInit(c, s.store) tk.MustExec("drop table if exists x, t2, t3") tk.MustExec("CREATE TABLE `x` ( `a` enum('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`));") tk.MustExec("insert into x values(\"x\"),(\"x\"),(\"b\"),(\"y\");") - tk.MustQuery("SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Check( - testkit.Rows("y y", "b b", "x x", "x x", "x x", "x x")) - tk.MustQuery("SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Check( - testkit.Rows("y y", "b b", "x x", "x x", "x x", "x x")) + tk.MustQuery("SELECT /*+ 
merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( + testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) + tk.MustQuery("SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( + testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) + + tk.MustExec("drop table if exists x, t2, t3") + tk.MustExec("CREATE TABLE `x` ( `a` set('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`));") + tk.MustExec("insert into x values(\"x\"),(\"x\"),(\"b\"),(\"y\");") + tk.MustQuery("SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( + testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) + tk.MustQuery("SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( + testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) } diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index 59b635eada379..9dffea4e21c7c 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -150,14 +150,15 @@ func (p *LogicalJoin) GetMergeJoin(prop *property.PhysicalProperty, schema *expr // The leftProperties caches all the possible properties that are provided by its children. leftJoinKeys, rightJoinKeys, isNullEQ, hasNullEQ := p.GetJoinKeys() - // EnumType Unsupported: merge join conflicts with index order. ref: https://github.com/pingcap/tidb/issues/24473 + // EnumType/SetType Unsupported: merge join conflicts with index order. + // ref: https://github.com/pingcap/tidb/issues/24473, https://github.com/pingcap/tidb/issues/25669 for _, leftKey := range leftJoinKeys { - if leftKey.RetType.Tp == mysql.TypeEnum { + if leftKey.RetType.Tp == mysql.TypeEnum || leftKey.RetType.Tp == mysql.TypeSet { return nil } } for _, rightKey := range rightJoinKeys { - if rightKey.RetType.Tp == mysql.TypeEnum { + if rightKey.RetType.Tp == mysql.TypeEnum || rightKey.RetType.Tp == mysql.TypeSet { return nil } } @@ -529,14 +530,15 @@ func (p *LogicalJoin) constructIndexMergeJoin( return nil } - // EnumType Unsupported: merge join conflicts with index order. ref: https://github.com/pingcap/tidb/issues/24473 + // EnumType/SetType Unsupported: merge join conflicts with index order. 
+ // ref: https://github.com/pingcap/tidb/issues/24473, https://github.com/pingcap/tidb/issues/25669 for _, innerKey := range join.InnerJoinKeys { - if innerKey.RetType.Tp == mysql.TypeEnum { + if innerKey.RetType.Tp == mysql.TypeEnum || innerKey.RetType.Tp == mysql.TypeSet { return nil } } for _, outerKey := range join.OuterJoinKeys { - if outerKey.RetType.Tp == mysql.TypeEnum { + if outerKey.RetType.Tp == mysql.TypeEnum || outerKey.RetType.Tp == mysql.TypeSet { return nil } } From 344c5bfc7cde8302a741ac89437107fdb5aa5b70 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 16:21:35 +0800 Subject: [PATCH 22/38] planner: make sure limit outputs no more columns than its child (#25345) (#25980) --- planner/core/integration_test.go | 9 +++++++++ planner/core/rule_column_pruning.go | 10 ++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 394d5c02b0c1e..9b80c58b61d18 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -3772,6 +3772,15 @@ func (s *testIntegrationSuite) TestIssue24281(c *C) { "UNION select 1 as v1, 2 as v2") } +func (s *testIntegrationSuite) TestLimitWindowColPrune(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int)") + tk.MustExec("insert into t values(1)") + tk.MustQuery("select count(a) f1, row_number() over (order by count(a)) as f2 from t limit 1").Check(testkit.Rows("1 1")) +} + func (s *testIntegrationSuite) TestIncrementalAnalyzeStatsVer2(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") diff --git a/planner/core/rule_column_pruning.go b/planner/core/rule_column_pruning.go index a6b16603b1585..212e9b6f4d242 100644 --- a/planner/core/rule_column_pruning.go +++ b/planner/core/rule_column_pruning.go @@ -443,8 +443,14 @@ func (p *LogicalLimit) PruneColumns(parentUsedCols []*expression.Column) error { return nil } - p.inlineProjection(parentUsedCols) - return p.children[0].PruneColumns(parentUsedCols) + savedUsedCols := make([]*expression.Column, len(parentUsedCols)) + copy(savedUsedCols, parentUsedCols) + if err := p.children[0].PruneColumns(parentUsedCols); err != nil { + return err + } + p.schema = nil + p.inlineProjection(savedUsedCols) + return nil } func (*columnPruner) name() string { From 6cae8aed85542b149af2ff56c63eaebb6f5f3288 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 16:35:34 +0800 Subject: [PATCH 23/38] executor: fix prepared stale read statement not work (#25746) (#25800) --- distsql/request_builder.go | 2 +- executor/builder.go | 12 ++++++ executor/stale_txn_test.go | 42 ++++++++++++++++++ planner/core/common_plans.go | 83 ++++++++++++++++++++++++++++++------ 4 files changed, 124 insertions(+), 15 deletions(-) diff --git a/distsql/request_builder.go b/distsql/request_builder.go index ba48dc94f2803..ed97014d6154b 100644 --- a/distsql/request_builder.go +++ b/distsql/request_builder.go @@ -62,7 +62,7 @@ func (builder *RequestBuilder) Build() (*kv.Request, error) { assertScope := val.(string) if len(assertScope) > 0 { if builder.IsStaleness && assertScope != builder.TxnScope { - panic("batch point get staleness option fail") + panic("request builder get staleness option fail") } } }) diff --git a/executor/builder.go b/executor/builder.go index 
5e69418656385..cfd4b461f2b8d 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -17,6 +17,7 @@ import ( "bytes" "context" "sort" + "strconv" "strings" "sync" "time" @@ -678,6 +679,8 @@ func (b *executorBuilder) buildPrepare(v *plannercore.Prepare) Executor { func (b *executorBuilder) buildExecute(v *plannercore.Execute) Executor { b.snapshotTS = v.SnapshotTS + b.explicitStaleness = v.IsStaleness + b.txnScope = v.TxnScope if b.snapshotTS != 0 { b.is, b.err = domain.GetDomain(b.ctx).GetSnapshotInfoSchema(b.snapshotTS) } @@ -691,6 +694,15 @@ func (b *executorBuilder) buildExecute(v *plannercore.Execute) Executor { plan: v.Plan, outputNames: v.OutputNames(), } + failpoint.Inject("assertExecutePrepareStatementStalenessOption", func(val failpoint.Value) { + vs := strings.Split(val.(string), "_") + assertTS, assertTxnScope := vs[0], vs[1] + if strconv.FormatUint(b.snapshotTS, 10) != assertTS || + assertTxnScope != b.txnScope { + panic("execute prepare statement have wrong staleness option") + } + }) + return e } diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go index 1f313edade59d..ebf966c5e3183 100644 --- a/executor/stale_txn_test.go +++ b/executor/stale_txn_test.go @@ -947,3 +947,45 @@ func (s *testStaleTxnSuite) TestStaleReadTemporaryTable(c *C) { tk.MustExec(query.sql) } } + +func (s *testStaleTxnSerialSuite) TestStaleReadPrepare(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + defer tk.MustExec("drop table if exists t") + tk.MustExec("create table t (id int)") + time.Sleep(2 * time.Second) + conf := *config.GetGlobalConfig() + oldConf := conf + defer config.StoreGlobalConfig(&oldConf) + conf.Labels = map[string]string{ + placement.DCLabelKey: "sh", + } + config.StoreGlobalConfig(&conf) + time1 := time.Now() + tso := oracle.ComposeTS(time1.Unix()*1000, 0) + time.Sleep(200 * time.Millisecond) + failpoint.Enable("github.com/pingcap/tidb/executor/assertExecutePrepareStatementStalenessOption", + fmt.Sprintf(`return("%v_%v")`, tso, "sh")) + tk.MustExec(fmt.Sprintf(`prepare p1 from "select * from t as of timestamp '%v'"`, time1.Format("2006-1-2 15:04:05"))) + tk.MustExec("execute p1") + // assert execute prepared statement in stale read txn + tk.MustExec(`prepare p2 from "select * from t"`) + tk.MustExec(fmt.Sprintf("start transaction read only as of timestamp '%v'", time1.Format("2006-1-2 15:04:05"))) + tk.MustExec("execute p2") + tk.MustExec("commit") + + // assert execute prepared statement in stale read txn + tk.MustExec(fmt.Sprintf("set transaction read only as of timestamp '%v'", time1.Format("2006-1-2 15:04:05"))) + tk.MustExec("execute p2") + failpoint.Disable("github.com/pingcap/tidb/executor/assertExecutePrepareStatementStalenessOption") + + // test prepared stale select in stale txn + tk.MustExec(fmt.Sprintf(`start transaction read only as of timestamp '%s'`, time1.Format("2006-1-2 15:04:05.000"))) + c.Assert("execute p1", NotNil) + tk.MustExec("commit") + + // assert execute prepared statement should be error after set transaction read only as of + tk.MustExec(fmt.Sprintf(`set transaction read only as of timestamp '%s'`, time1.Format("2006-1-2 15:04:05.000"))) + c.Assert("execute p1", NotNil) +} diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go index ec5833be9973c..a213aa7afa104 100644 --- a/planner/core/common_plans.go +++ b/planner/core/common_plans.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/parser/ast" 
"github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" + "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/infoschema" @@ -32,6 +33,7 @@ import ( "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/table" "github.com/pingcap/tidb/table/tables" "github.com/pingcap/tidb/types" @@ -184,6 +186,8 @@ type Execute struct { PrepareParams []types.Datum ExecID uint32 SnapshotTS uint64 + IsStaleness bool + TxnScope string Stmt ast.StmtNode StmtType string Plan Plan @@ -257,19 +261,11 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont vars.PreparedParams = append(vars.PreparedParams, val) } } - - var snapshotTS uint64 - if preparedObj.SnapshotTSEvaluator != nil { - if vars.InTxn() { - return ErrAsOf.FastGenWithCause("as of timestamp can't be set in transaction.") - } - // if preparedObj.SnapshotTSEvaluator != nil, it is a stale read SQL: - // which means its infoschema is specified by the SQL, not the current/latest infoschema - var err error - snapshotTS, err = preparedObj.SnapshotTSEvaluator(sctx) - if err != nil { - return errors.Trace(err) - } + snapshotTS, txnScope, isStaleness, err := e.handleExecuteBuilderOption(sctx, preparedObj) + if err != nil { + return err + } + if isStaleness { is, err = domain.GetDomain(sctx).GetSnapshotInfoSchema(snapshotTS) if err != nil { return errors.Trace(err) @@ -291,15 +287,74 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont } prepared.SchemaVersion = is.SchemaMetaVersion() } - err := e.getPhysicalPlan(ctx, sctx, is, preparedObj) + err = e.getPhysicalPlan(ctx, sctx, is, preparedObj) if err != nil { return err } e.SnapshotTS = snapshotTS + e.TxnScope = txnScope + e.IsStaleness = isStaleness e.Stmt = prepared.Stmt return nil } +func (e *Execute) handleExecuteBuilderOption(sctx sessionctx.Context, + preparedObj *CachedPrepareStmt) (snapshotTS uint64, txnScope string, isStaleness bool, err error) { + snapshotTS = 0 + txnScope = oracle.GlobalTxnScope + isStaleness = false + err = nil + vars := sctx.GetSessionVars() + readTS := vars.TxnReadTS.PeakTxnReadTS() + if readTS > 0 { + // It means we meet following case: + // 1. prepare p from 'select * from t as of timestamp now() - x seconds' + // 1. set transaction read only as of timestamp ts2 + // 2. execute prepare p + // The execute statement would be refused due to timestamp conflict + if preparedObj.SnapshotTSEvaluator != nil { + err = ErrAsOf.FastGenWithCause("as of timestamp can't be set after set transaction read only as of.") + return + } + snapshotTS = vars.TxnReadTS.UseTxnReadTS() + isStaleness = true + txnScope = config.GetTxnScopeFromConfig() + return + } + // It means we meet following case: + // 1. prepare p from 'select * from t as of timestamp ts1' + // 1. begin + // 2. 
execute prepare p + // The execute statement would be refused due to timestamp conflict + if preparedObj.SnapshotTSEvaluator != nil { + if vars.InTxn() { + err = ErrAsOf.FastGenWithCause("as of timestamp can't be set in transaction.") + return + } + // if preparedObj.SnapshotTSEvaluator != nil, it is a stale read SQL: + // which means its infoschema is specified by the SQL, not the current/latest infoschema + snapshotTS, err = preparedObj.SnapshotTSEvaluator(sctx) + if err != nil { + err = errors.Trace(err) + return + } + isStaleness = true + txnScope = config.GetTxnScopeFromConfig() + return + } + // It means we meet following case: + // 1. prepare p from 'select * from t' + // 1. start transaction read only as of timestamp ts1 + // 2. execute prepare p + if vars.InTxn() && vars.TxnCtx.IsStaleness { + isStaleness = true + snapshotTS = vars.TxnCtx.StartTS + txnScope = vars.TxnCtx.TxnScope + return + } + return +} + func (e *Execute) checkPreparedPriv(ctx context.Context, sctx sessionctx.Context, preparedObj *CachedPrepareStmt, is infoschema.InfoSchema) error { if pm := privilege.GetPrivilegeManager(sctx); pm != nil { From d8d457862e502f45984f7f37080f5ccaf58cd7dd Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 16:49:34 +0800 Subject: [PATCH 24/38] executor: fix a bug that cte.iterOutTbl did not close correctly (#26129) (#26148) --- executor/builder.go | 20 +++++++++++++------- executor/cte.go | 19 ++++++++++++------- executor/cte_test.go | 3 +++ 3 files changed, 28 insertions(+), 14 deletions(-) diff --git a/executor/builder.go b/executor/builder.go index cfd4b461f2b8d..c1fca43893c4f 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -4265,17 +4265,23 @@ func (b *executorBuilder) buildCTE(v *plannercore.PhysicalCTE) Executor { return nil } - // 2. Build iterInTbl. + // 2. Build tables to store intermediate results. chkSize := b.ctx.GetSessionVars().MaxChunkSize tps := seedExec.base().retFieldTypes - iterOutTbl := cteutil.NewStorageRowContainer(tps, chkSize) - if err := iterOutTbl.OpenAndRef(); err != nil { - b.err = err - return nil - } - var resTbl cteutil.Storage var iterInTbl cteutil.Storage + var iterOutTbl cteutil.Storage + + if v.RecurPlan != nil { + // For non-recursive CTE, the result will be put into resTbl directly. + // So no need to build iterOutTbl. 
+		iterOutTbl = cteutil.NewStorageRowContainer(tps, chkSize) + if err := iterOutTbl.OpenAndRef(); err != nil { + b.err = err + return nil + } + } + storageMap, ok := b.ctx.GetSessionVars().StmtCtx.CTEStorageMap.(map[int]*CTEStorages) if !ok { b.err = errors.New("type assertion for CTEStorageMap failed") diff --git a/executor/cte.go b/executor/cte.go index 6ac26a50541b0..4fe3414b97154 100644 --- a/executor/cte.go +++ b/executor/cte.go @@ -142,13 +142,18 @@ func (e *CTEExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { if !e.resTbl.Done() { resAction := setupCTEStorageTracker(e.resTbl, e.ctx, e.memTracker, e.diskTracker) iterInAction := setupCTEStorageTracker(e.iterInTbl, e.ctx, e.memTracker, e.diskTracker) - iterOutAction := setupCTEStorageTracker(e.iterOutTbl, e.ctx, e.memTracker, e.diskTracker) + var iterOutAction *chunk.SpillDiskAction + if e.iterOutTbl != nil { + iterOutAction = setupCTEStorageTracker(e.iterOutTbl, e.ctx, e.memTracker, e.diskTracker) + } failpoint.Inject("testCTEStorageSpill", func(val failpoint.Value) { if val.(bool) && config.GetGlobalConfig().OOMUseTmpStorage { defer resAction.WaitForTest() defer iterInAction.WaitForTest() - defer iterOutAction.WaitForTest() + if iterOutAction != nil { + defer iterOutAction.WaitForTest() + } } }) @@ -196,13 +201,13 @@ func (e *CTEExec) Close() (err error) { if err = e.recursiveExec.Close(); err != nil { return err } + // `iterInTbl` and `resTbl` are shared by multiple operators, + // so will be closed when the SQL finishes. + if err = e.iterOutTbl.DerefAndClose(); err != nil { + return err + } } - // `iterInTbl` and `resTbl` are shared by multiple operators, - // so will be closed when the SQL finishes. - if err = e.iterOutTbl.DerefAndClose(); err != nil { - return err - } return e.baseExecutor.Close() } diff --git a/executor/cte_test.go b/executor/cte_test.go index 4c6e1cf993975..6892a991c71a3 100644 --- a/executor/cte_test.go +++ b/executor/cte_test.go @@ -115,6 +115,9 @@ func (test *CTETestSuite) TestBasicCTE(c *check.C) { rows.Check(testkit.Rows("1")) rows = tk.MustQuery("SELECT * FROM t1 dt WHERE EXISTS( WITH RECURSIVE qn AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0 or b = 1) SELECT * FROM qn WHERE b=a );") rows.Check(testkit.Rows("1", "2")) + + rows = tk.MustQuery("with recursive c(p) as (select 1), cte(a, b) as (select 1, 1 union select a+1, 1 from cte, c where a < 5) select * from cte order by 1, 2;") + rows.Check(testkit.Rows("1 1", "2 1", "3 1", "4 1", "5 1")) } func (test *CTESerialTestSuite) TestSpillToDisk(c *check.C) { From e8f8ae597d87d61dfde641a00003a9312f2e637a Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 17:03:35 +0800 Subject: [PATCH 25/38] txn: change lock into put record for unique index key lock (#25730) (#26225) --- executor/batch_point_get.go | 35 ++++++++++++++++++++++++ executor/point_get.go | 11 ++++++++ session/pessimistic_test.go | 54 +++++++++++++++++++++++++++++++++++++ 3 files changed, 100 insertions(+) diff --git a/executor/batch_point_get.go b/executor/batch_point_get.go index 7bbb9de394c0a..5df8e21012d69 100644 --- a/executor/batch_point_get.go +++ b/executor/batch_point_get.go @@ -409,6 +409,23 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { if err != nil { return err } + // Change the unique index LOCK into PUT record.
+ if len(indexKeys) > 0 { + if !e.txn.Valid() { + return kv.ErrInvalidTxn + } + membuf := e.txn.GetMemBuffer() + for _, idxKey := range indexKeys { + handleVal := handleVals[string(idxKey)] + if len(handleVal) == 0 { + continue + } + err = membuf.Set(idxKey, handleVal) + if err != nil { + return err + } + } + } } // Fetch all values. values, err = batchGetter.BatchGet(ctx, keys) @@ -420,6 +437,7 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { if e.lock && rc { existKeys = make([]kv.Key, 0, 2*len(values)) } + changeLockToPutIdxKeys := make([]kv.Key, 0, len(indexKeys)) e.values = make([][]byte, 0, len(values)) for i, key := range keys { val := values[string(key)] @@ -439,6 +457,7 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { // lock primary key for clustered index table is redundant if len(indexKeys) != 0 { existKeys = append(existKeys, indexKeys[i]) + changeLockToPutIdxKeys = append(changeLockToPutIdxKeys, indexKeys[i]) } } } @@ -448,6 +467,22 @@ func (e *BatchPointGetExec) initialize(ctx context.Context) error { if err != nil { return err } + if len(changeLockToPutIdxKeys) > 0 { + if !e.txn.Valid() { + return kv.ErrInvalidTxn + } + for _, idxKey := range changeLockToPutIdxKeys { + membuf := e.txn.GetMemBuffer() + handleVal := handleVals[string(idxKey)] + if len(handleVal) == 0 { + return kv.ErrNotExist + } + err = membuf.Set(idxKey, handleVal) + if err != nil { + return err + } + } + } } e.handles = handles return nil diff --git a/executor/point_get.go b/executor/point_get.go index 384a8b67722bc..51d61b25fe36c 100644 --- a/executor/point_get.go +++ b/executor/point_get.go @@ -247,6 +247,17 @@ func (e *PointGetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { if err != nil { return err } + // Change the unique index LOCK into PUT record. + if e.lock && len(e.handleVal) > 0 { + if !e.txn.Valid() { + return kv.ErrInvalidTxn + } + memBuffer := e.txn.GetMemBuffer() + err = memBuffer.Set(e.idxKey, e.handleVal) + if err != nil { + return err + } + } } if len(e.handleVal) == 0 { return nil diff --git a/session/pessimistic_test.go b/session/pessimistic_test.go index 5e8e0ffd42415..07ea9e72b6d6b 100644 --- a/session/pessimistic_test.go +++ b/session/pessimistic_test.go @@ -2594,3 +2594,57 @@ func (s *testPessimisticSuite) TestAsyncCommitCalTSFail(c *C) { tk2.MustExec("update tk set c2 = c2 + 1") tk2.MustExec("commit") } + +func (s *testPessimisticSuite) TestChangeLockToPut(c *C) { + tk := testkit.NewTestKitWithInit(c, s.store) + tk2 := testkit.NewTestKitWithInit(c, s.store) + + tk.MustExec("use test") + tk2.MustExec("use test") + tk.MustExec("drop table if exists tk") + tk.MustExec("create table t1(c1 varchar(20) key, c2 int, c3 int, unique key k1(c2), key k2(c3))") + tk.MustExec(`insert into t1 values ("1", 1, 1), ("2", 2, 2), ("3", 3, 3)`) + + // Test point get change lock to put. 
+ for _, mode := range []string{"REPEATABLE-READ", "READ-COMMITTED"} { + tk.MustExec(fmt.Sprintf(`set tx_isolation = "%s"`, mode)) + tk.MustExec("begin pessimistic") + tk.MustQuery(`select * from t1 where c1 = "1" for update`).Check(testkit.Rows("1 1 1")) + tk.MustExec("commit") + tk.MustExec("begin pessimistic") + tk.MustQuery(`select * from t1 where c1 = "1" for update`).Check(testkit.Rows("1 1 1")) + tk.MustExec("commit") + tk.MustExec("admin check table t1") + tk2.MustExec("begin") + tk2.MustQuery(`select * from t1 use index(k1) where c2 = "1" for update`).Check(testkit.Rows("1 1 1")) + tk2.MustQuery(`select * from t1 use index(k1) where c2 = "3" for update`).Check(testkit.Rows("3 3 3")) + tk2.MustExec("commit") + tk2.MustExec("begin") + tk2.MustQuery(`select * from t1 use index(k2) where c3 = 1`).Check(testkit.Rows("1 1 1")) + tk2.MustQuery("select * from t1 use index(k2) where c3 > 1").Check(testkit.Rows("2 2 2", "3 3 3")) + tk2.MustExec("commit") + } + + // Test batch point get change lock to put. + for _, mode := range []string{"REPEATABLE-READ", "READ-COMMITTED"} { + tk.MustExec(fmt.Sprintf(`set tx_isolation = "%s"`, mode)) + tk.MustExec("begin pessimistic") + tk.MustQuery(`select * from t1 where c1 in ("1", "5", "3") for update`).Check(testkit.Rows("1 1 1", "3 3 3")) + tk.MustExec("commit") + tk.MustExec("begin pessimistic") + tk.MustQuery(`select * from t1 where c1 in ("1", "2", "8") for update`).Check(testkit.Rows("1 1 1", "2 2 2")) + tk.MustExec("commit") + tk.MustExec("admin check table t1") + tk2.MustExec("begin") + tk2.MustQuery(`select * from t1 use index(k1) where c2 in ("1", "2", "3") for update`).Check(testkit.Rows("1 1 1", "2 2 2", "3 3 3")) + tk2.MustQuery(`select * from t1 use index(k2) where c2 in ("2") for update`).Check(testkit.Rows("2 2 2")) + tk2.MustExec("commit") + tk2.MustExec("begin") + tk2.MustQuery(`select * from t1 use index(k2) where c3 in (5, 8)`).Check(testkit.Rows()) + tk2.MustQuery(`select * from t1 use index(k2) where c3 in (1, 8) for update`).Check(testkit.Rows("1 1 1")) + tk2.MustQuery(`select * from t1 use index(k2) where c3 > 1`).Check(testkit.Rows("2 2 2", "3 3 3")) + tk2.MustExec("commit") + } + + tk.MustExec("admin check table t1") +} From 09bba1a45e6a6712dec54645207724976f76325c Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 20:47:34 +0800 Subject: [PATCH 26/38] exec: access the table_storage_stats need privilege (#26298) (#26352) --- executor/infoschema_reader.go | 18 ++++++++++++++-- executor/infoschema_reader_test.go | 33 ++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 2 deletions(-) diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index b01972726991a..c3f1b4d8dce4d 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -1653,19 +1653,33 @@ func (e *tableStorageStatsRetriever) initialize(sctx sessionctx.Context) error { } } + // Privilege checker. + checker := func(db, table string) bool { + if pm := privilege.GetPrivilegeManager(sctx); pm != nil { + return pm.RequestVerification(sctx.GetSessionVars().ActiveRoles, db, table, "", mysql.AllPrivMask) + } + return true + } + // Extract the tables to the initialTable. for _, DB := range databases { // The user didn't specified the table, extract all tables of this db to initialTable. 
if len(tables) == 0 { tbs := is.SchemaTables(model.NewCIStr(DB)) for _, tb := range tbs { - e.initialTables = append(e.initialTables, &initialTable{DB, tb.Meta()}) + // For every db.table, check it's privileges. + if checker(DB, tb.Meta().Name.L) { + e.initialTables = append(e.initialTables, &initialTable{DB, tb.Meta()}) + } } } else { // The user specified the table, extract the specified tables of this db to initialTable. for tb := range tables { if tb, err := is.TableByName(model.NewCIStr(DB), model.NewCIStr(tb)); err == nil { - e.initialTables = append(e.initialTables, &initialTable{DB, tb.Meta()}) + // For every db.table, check it's privileges. + if checker(DB, tb.Meta().Name.L) { + e.initialTables = append(e.initialTables, &initialTable{DB, tb.Meta()}) + } } } } diff --git a/executor/infoschema_reader_test.go b/executor/infoschema_reader_test.go index 1170453d368b0..29f44f8a6e8b1 100644 --- a/executor/infoschema_reader_test.go +++ b/executor/infoschema_reader_test.go @@ -873,6 +873,39 @@ func (s *testInfoschemaClusterTableSuite) TestTableStorageStats(c *C) { "test 2", )) c.Assert(len(tk.MustQuery("select TABLE_NAME from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'mysql';").Rows()), Equals, 24) + + // More tests about the privileges. + tk.MustExec("create user 'testuser'@'localhost'") + tk.MustExec("create user 'testuser2'@'localhost'") + tk.MustExec("create user 'testuser3'@'localhost'") + tk1 := testkit.NewTestKit(c, store) + defer tk1.MustExec("drop user 'testuser'@'localhost'") + defer tk1.MustExec("drop user 'testuser2'@'localhost'") + defer tk1.MustExec("drop user 'testuser3'@'localhost'") + + tk.MustExec("grant all privileges on *.* to 'testuser2'@'localhost'") + tk.MustExec("grant select on *.* to 'testuser3'@'localhost'") + c.Assert(tk.Se.Auth(&auth.UserIdentity{ + Username: "testuser", + Hostname: "localhost", + }, nil, nil), Equals, true) + + // User has no access to this schema, so the result set is empty. 
+ tk.MustQuery("select count(1) from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'mysql'").Check(testkit.Rows("0")) + + c.Assert(tk.Se.Auth(&auth.UserIdentity{ + Username: "testuser2", + Hostname: "localhost", + }, nil, nil), Equals, true) + + tk.MustQuery("select count(1) from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'mysql'").Check(testkit.Rows("24")) + + c.Assert(tk.Se.Auth(&auth.UserIdentity{ + Username: "testuser3", + Hostname: "localhost", + }, nil, nil), Equals, true) + + tk.MustQuery("select count(1) from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'mysql'").Check(testkit.Rows("24")) } func (s *testInfoschemaTableSuite) TestSequences(c *C) { From bb40d0e829ea5d1ca3a7132209ef75774c64583e Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 21:03:34 +0800 Subject: [PATCH 27/38] *: update parser to fix ODBC-styled literal (#25570) (#25578) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d565743c3dd1b..376c3a8ba9099 100644 --- a/go.mod +++ b/go.mod @@ -45,7 +45,7 @@ require ( github.com/pingcap/goleveldb v0.0.0-20191226122134-f82aafb29989 github.com/pingcap/kvproto v0.0.0-20210712050333-b66fdbd6bfd5 github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 - github.com/pingcap/parser v0.0.0-20210610025415-8d8b6346d3f0 + github.com/pingcap/parser v0.0.0-20210618053735-57843e8185c4 github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3 github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible github.com/pingcap/tipb v0.0.0-20210603161937-cfb5a9225f95 diff --git a/go.sum b/go.sum index 69b937805e7c8..bde20ee3e2244 100644 --- a/go.sum +++ b/go.sum @@ -439,8 +439,8 @@ github.com/pingcap/log v0.0.0-20200511115504-543df19646ad/go.mod h1:4rbK1p9ILyIf github.com/pingcap/log v0.0.0-20201112100606-8f1e84a3abc8/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 h1:ERrF0fTuIOnwfGbt71Ji3DKbOEaP189tjym50u8gpC8= github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4/go.mod h1:4rbK1p9ILyIfb6hU7OG2CiWSqMXnp3JMbiaVJ6mvoY8= -github.com/pingcap/parser v0.0.0-20210610025415-8d8b6346d3f0 h1:VVYIGdmi1xr+zaL1+rFWKgNATkmjyF3ddcNmPzdN1ko= -github.com/pingcap/parser v0.0.0-20210610025415-8d8b6346d3f0/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw= +github.com/pingcap/parser v0.0.0-20210618053735-57843e8185c4 h1:NASsbyMTNW8pbYfoO/YTykO6MQJiNRa094lwCPU6R2Q= +github.com/pingcap/parser v0.0.0-20210618053735-57843e8185c4/go.mod h1:xZC8I7bug4GJ5KtHhgAikjTfU4kBv1Sbo3Pf1MZ6lVw= github.com/pingcap/sysutil v0.0.0-20200206130906-2bfa6dc40bcd/go.mod h1:EB/852NMQ+aRKioCpToQ94Wl7fktV+FNnxf3CX/TTXI= github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3 h1:A9KL9R+lWSVPH8IqUuH1QSTRJ5FGoY1bT2IcfPKsWD8= github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3/go.mod h1:tckvA041UWP+NqYzrJ3fMgC/Hw9wnmQ/tUkp/JaHly8= From 2f01a5c96479e5490e4b18f73aeef5c6a78594ee Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 21:43:34 +0800 Subject: [PATCH 28/38] executor, privileges: fix infoschema.user_privileges privilege requirements (#26070) (#26311) --- executor/infoschema_reader.go | 3 +- 
executor/infoschema_reader_test.go | 12 ++++-- privilege/privilege.go | 2 +- privilege/privileges/cache.go | 18 +++++--- privilege/privileges/privileges.go | 4 +- privilege/privileges/privileges_test.go | 56 +++++++++++++++++++++++++ 6 files changed, 83 insertions(+), 12 deletions(-) diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index c3f1b4d8dce4d..672790feae2b3 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -1251,7 +1251,8 @@ func (e *memtableRetriever) setDataForProcessList(ctx sessionctx.Context) { func (e *memtableRetriever) setDataFromUserPrivileges(ctx sessionctx.Context) { pm := privilege.GetPrivilegeManager(ctx) - e.rows = pm.UserPrivilegesTable() + // The results depend on the user querying the information. + e.rows = pm.UserPrivilegesTable(ctx.GetSessionVars().ActiveRoles, ctx.GetSessionVars().User.Username, ctx.GetSessionVars().User.Hostname) } func (e *memtableRetriever) setDataForMetricTables(ctx sessionctx.Context) { diff --git a/executor/infoschema_reader_test.go b/executor/infoschema_reader_test.go index 29f44f8a6e8b1..90d6e11f14eda 100644 --- a/executor/infoschema_reader_test.go +++ b/executor/infoschema_reader_test.go @@ -360,17 +360,23 @@ func (s *testInfoschemaTableSuite) TestUserPrivileges(c *C) { func (s *testInfoschemaTableSuite) TestUserPrivilegesTable(c *C) { tk := testkit.NewTestKit(c, s.store) + tk1 := testkit.NewTestKit(c, s.store) + // test the privilege of new user for information_schema.user_privileges tk.MustExec("create user usageuser") + c.Assert(tk.Se.Auth(&auth.UserIdentity{ + Username: "usageuser", + Hostname: "127.0.0.1", + }, nil, nil), IsTrue) tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"`).Check(testkit.Rows("'usageuser'@'%' def USAGE NO")) // the usage row disappears when there is a non-dynamic privilege added - tk.MustExec("GRANT SELECT ON *.* to usageuser") + tk1.MustExec("GRANT SELECT ON *.* to usageuser") tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"`).Check(testkit.Rows("'usageuser'@'%' def Select NO")) // test grant privilege - tk.MustExec("GRANT SELECT ON *.* to usageuser WITH GRANT OPTION") + tk1.MustExec("GRANT SELECT ON *.* to usageuser WITH GRANT OPTION") tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"`).Check(testkit.Rows("'usageuser'@'%' def Select YES")) // test DYNAMIC privs - tk.MustExec("GRANT BACKUP_ADMIN ON *.* to usageuser") + tk1.MustExec("GRANT BACKUP_ADMIN ON *.* to usageuser") tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'" ORDER BY privilege_type`).Check(testkit.Rows("'usageuser'@'%' def BACKUP_ADMIN NO", "'usageuser'@'%' def Select YES")) } diff --git a/privilege/privilege.go b/privilege/privilege.go index f732d9da1199b..2d2c5e0b99f43 100644 --- a/privilege/privilege.go +++ b/privilege/privilege.go @@ -63,7 +63,7 @@ type Manager interface { DBIsVisible(activeRole []*auth.RoleIdentity, db string) bool // UserPrivilegesTable provide data for INFORMATION_SCHEMA.USER_PRIVILEGES table. - UserPrivilegesTable() [][]types.Datum + UserPrivilegesTable(activeRoles []*auth.RoleIdentity, user, host string) [][]types.Datum // ActiveRoles active roles for current session. // The first illegal role will be returned. 
diff --git a/privilege/privileges/cache.go b/privilege/privileges/cache.go index caf25df1eeaaf..c3fd231ef9c44 100644 --- a/privilege/privileges/cache.go +++ b/privilege/privileges/cache.go @@ -1413,15 +1413,23 @@ func privToString(priv mysql.PrivilegeType, allPrivs []mysql.PrivilegeType, allP return strings.Join(pstrs, ",") } -// UserPrivilegesTable provide data for INFORMATION_SCHEMA.USERS_PRIVILEGE table. -func (p *MySQLPrivilege) UserPrivilegesTable() [][]types.Datum { +// UserPrivilegesTable provides data for the INFORMATION_SCHEMA.USER_PRIVILEGES table. +func (p *MySQLPrivilege) UserPrivilegesTable(activeRoles []*auth.RoleIdentity, user, host string) [][]types.Datum { + // Seeing all users requires SELECT on mysql.* + // The SUPER privilege (or any other dynamic privilege) doesn't help here. + // This is verified against MySQL. + showOtherUsers := p.RequestVerification(activeRoles, user, host, mysql.SystemDB, "", "", mysql.SelectPriv) var rows [][]types.Datum - for _, user := range p.User { - rows = appendUserPrivilegesTableRow(rows, user) + for _, u := range p.User { + if showOtherUsers || u.match(user, host) { + rows = appendUserPrivilegesTableRow(rows, u) + } } for _, dynamicPrivs := range p.Dynamic { for _, dynamicPriv := range dynamicPrivs { - rows = appendDynamicPrivRecord(rows, dynamicPriv) + if showOtherUsers || dynamicPriv.match(user, host) { + rows = appendDynamicPrivRecord(rows, dynamicPriv) + } } } return rows diff --git a/privilege/privileges/privileges.go b/privilege/privileges/privileges.go index 744c4882c319a..a993f133c7a59 100644 --- a/privilege/privileges/privileges.go +++ b/privilege/privileges/privileges.go @@ -460,9 +460,9 @@ func (p *UserPrivileges) DBIsVisible(activeRoles []*auth.RoleIdentity, db string } // UserPrivilegesTable implements the Manager interface. -func (p *UserPrivileges) UserPrivilegesTable() [][]types.Datum { +func (p *UserPrivileges) UserPrivilegesTable(activeRoles []*auth.RoleIdentity, user, host string) [][]types.Datum { mysqlPriv := p.Handle.Get() - return mysqlPriv.UserPrivilegesTable() + return mysqlPriv.UserPrivilegesTable(activeRoles, user, host) } // ShowGrants implements privilege.Manager ShowGrants interface. diff --git a/privilege/privileges/privileges_test.go b/privilege/privileges/privileges_test.go index 3cc81da2a4ab4..a274f74a9375b 100644 --- a/privilege/privileges/privileges_test.go +++ b/privilege/privileges/privileges_test.go @@ -1745,3 +1745,59 @@ func (s *testPrivilegeSuite) TestDynamicPrivsRegistration(c *C) { tk.MustExec(sqlGrant) } } + +func (s *testPrivilegeSuite) TestInfoschemaUserPrivileges(c *C) { + // Being able to read all privileges from information_schema.user_privileges requires a very specific set of permissions. + // SUPER user is not sufficient.
It was observed in MySQL to require SELECT on mysql.* + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("CREATE USER isnobody, isroot, isselectonmysqluser, isselectonmysql") + tk.MustExec("GRANT SUPER ON *.* TO isroot") + tk.MustExec("GRANT SELECT ON mysql.user TO isselectonmysqluser") + tk.MustExec("GRANT SELECT ON mysql.* TO isselectonmysql") + + // First as Nobody + tk.Se.Auth(&auth.UserIdentity{ + Username: "isnobody", + Hostname: "localhost", + }, nil, nil) + + // I can see myself, but I can not see other users + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isnobody'@'%'"`).Check(testkit.Rows("'isnobody'@'%' def USAGE NO")) + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isroot'@'%'"`).Check(testkit.Rows()) + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isselectonmysqluser'@'%'"`).Check(testkit.Rows()) + + // Basically the same result as as isselectonmysqluser + tk.Se.Auth(&auth.UserIdentity{ + Username: "isselectonmysqluser", + Hostname: "localhost", + }, nil, nil) + + // Now as isselectonmysqluser + // Tests discovered issue that SELECT on mysql.user is not sufficient. It must be on mysql.* + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isnobody'@'%'"`).Check(testkit.Rows()) + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isroot'@'%'"`).Check(testkit.Rows()) + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isselectonmysqluser'@'%'"`).Check(testkit.Rows("'isselectonmysqluser'@'%' def USAGE NO")) + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isselectonmysql'@'%'"`).Check(testkit.Rows()) + + // Now as root + tk.Se.Auth(&auth.UserIdentity{ + Username: "isroot", + Hostname: "localhost", + }, nil, nil) + + // I can see myself, but I can not see other users + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isnobody'@'%'"`).Check(testkit.Rows()) + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isroot'@'%'"`).Check(testkit.Rows("'isroot'@'%' def Super NO")) + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isselectonmysqluser'@'%'"`).Check(testkit.Rows()) + + // Now as isselectonmysqluser + tk.Se.Auth(&auth.UserIdentity{ + Username: "isselectonmysql", + Hostname: "localhost", + }, nil, nil) + + // Now as isselectonmysqluser + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isnobody'@'%'"`).Check(testkit.Rows("'isnobody'@'%' def USAGE NO")) + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isroot'@'%'"`).Check(testkit.Rows("'isroot'@'%' def Super NO")) + tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee = "'isselectonmysqluser'@'%'"`).Check(testkit.Rows("'isselectonmysqluser'@'%' def USAGE NO")) +} From 4ed39214441c4afcd7c1e87c02b257651d6666fd Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 22:03:34 +0800 Subject: [PATCH 29/38] planner: handle other-conditions from subqueries correctly when constructing IndexJoin (#25817) (#25819) --- planner/core/exhaust_physical_plans.go | 5 +++++ planner/core/integration_test.go | 13 ++++++++----- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index 
9dffea4e21c7c..fd2f94a98a2a1 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -468,6 +468,11 @@ func (p *LogicalJoin) constructIndexJoin( lhs, ok1 := c.GetArgs()[0].(*expression.Column) rhs, ok2 := c.GetArgs()[1].(*expression.Column) if ok1 && ok2 { + if lhs.InOperand || rhs.InOperand { + // if this other-cond is from a `[not] in` sub-query, do not convert it into eq-cond since + // IndexJoin cannot deal with NULL correctly in this case; please see #25799 for more details. + continue + } outerSchema, innerSchema := p.Children()[outerIdx].Schema(), p.Children()[1-outerIdx].Schema() if outerSchema.Contains(lhs) && innerSchema.Contains(rhs) { outerHashKeys = append(outerHashKeys, lhs) diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index 9b80c58b61d18..85bd81fcad7fc 100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -3772,13 +3772,16 @@ func (s *testIntegrationSuite) TestIssue24281(c *C) { "UNION select 1 as v1, 2 as v2") } -func (s *testIntegrationSuite) TestLimitWindowColPrune(c *C) { +func (s *testIntegrationSuite) TestIssue25799(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t values(1)") - tk.MustQuery("select count(a) f1, row_number() over (order by count(a)) as f2 from t limit 1").Check(testkit.Rows("1 1")) + tk.MustExec("drop table if exists t1, t2") + tk.MustExec(`create table t1 (a float default null, b smallint(6) DEFAULT NULL)`) + tk.MustExec(`insert into t1 values (1, 1)`) + tk.MustExec(`create table t2 (a float default null, b tinyint(4) DEFAULT NULL, key b (b))`) + tk.MustExec(`insert into t2 values (null, 1)`) + tk.HasPlan(`select /*+ TIDB_INLJ(t2@sel_2) */ t1.a, t1.b from t1 where t1.a not in (select t2.a from t2 where t1.b=t2.b)`, `IndexJoin`) + tk.MustQuery(`select /*+ TIDB_INLJ(t2@sel_2) */ t1.a, t1.b from t1 where t1.a not in (select t2.a from t2 where t1.b=t2.b)`).Check(testkit.Rows()) } func (s *testIntegrationSuite) TestIncrementalAnalyzeStatsVer2(c *C) { From 39d41b1fa6dfc972f06dd3a7869bdc502b1c4709 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 22:21:34 +0800 Subject: [PATCH 30/38] executor, privilege: require CONFIG or Process privilege for is.cluster_* (#26220) (#26297) --- executor/infoschema_reader.go | 20 +++++-- executor/memtable_reader.go | 14 +++++ privilege/privileges/privileges_test.go | 69 +++++++++++++++++++++++-- 3 files changed, 96 insertions(+), 7 deletions(-) diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go index 672790feae2b3..8da8313acf84b 100644 --- a/executor/infoschema_reader.go +++ b/executor/infoschema_reader.go @@ -78,6 +78,9 @@ type memtableRetriever struct { // retrieve implements the infoschemaRetriever interface func (e *memtableRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) { + if e.table.Name.O == infoschema.TableClusterInfo && !hasPriv(sctx, mysql.ProcessPriv) { + return nil, plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("PROCESS") + } if e.retrieved { return nil, nil } @@ -1014,10 +1017,21 @@ func (e *memtableRetriever) dataForTiKVStoreStatus(ctx sessionctx.Context) (err } func hasPriv(ctx sessionctx.Context, priv mysql.PrivilegeType) bool { - if pm := privilege.GetPrivilegeManager(ctx); pm != nil { - return 
pm.RequestVerification(ctx.GetSessionVars().ActiveRoles, "", "", "", priv) + pm := privilege.GetPrivilegeManager(ctx) + if pm == nil { + // An internal session created with createSession doesn't have a PrivilegeManager. In most existing callers, + // it is used like this: + // ``` + // checker := privilege.GetPrivilegeManager(ctx) + // if checker != nil && !checker.RequestVerification(ctx.GetSessionVars().ActiveRoles, schema.Name.L, table.Name.L, "", mysql.AllPrivMask) { + // continue + // } + // do something. + // ``` + // So when the privilege manager is nil, it indicates an internal SQL statement, and the privilege check is simply skipped. + return true } - return false + return pm.RequestVerification(ctx.GetSessionVars().ActiveRoles, "", "", "", priv) } func (e *memtableRetriever) setDataForTableDataLockWaits(ctx sessionctx.Context) error { diff --git a/executor/memtable_reader.go b/executor/memtable_reader.go index 84abc029367c4..656bf1a900ef9 100644 --- a/executor/memtable_reader.go +++ b/executor/memtable_reader.go @@ -293,6 +293,17 @@ type clusterServerInfoRetriever struct { // retrieve implements the memTableRetriever interface func (e *clusterServerInfoRetriever) retrieve(ctx context.Context, sctx sessionctx.Context) ([][]types.Datum, error) { + switch e.serverInfoType { + case diagnosticspb.ServerInfoType_LoadInfo, + diagnosticspb.ServerInfoType_SystemInfo: + if !hasPriv(sctx, mysql.ProcessPriv) { + return nil, plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("PROCESS") + } + case diagnosticspb.ServerInfoType_HardwareInfo: + if !hasPriv(sctx, mysql.ConfigPriv) { + return nil, plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("CONFIG") + } + } if e.extractor.SkipRequest || e.retrieved { return nil, nil } @@ -485,6 +496,9 @@ func (h *logResponseHeap) Pop() interface{} { } func (e *clusterLogRetriever) initialize(ctx context.Context, sctx sessionctx.Context) ([]chan logStreamResult, error) { + if !hasPriv(sctx, mysql.ProcessPriv) { + return nil, plannercore.ErrSpecificAccessDenied.GenWithStackByArgs("PROCESS") + } serversInfo, err := infoschema.GetClusterServerInfo(sctx) failpoint.Inject("mockClusterLogServerInfo", func(val failpoint.Value) { // erase the error diff --git a/privilege/privileges/privileges_test.go b/privilege/privileges/privileges_test.go index a274f74a9375b..b8ac28c74e991 100644 --- a/privilege/privileges/privileges_test.go +++ b/privilege/privileges/privileges_test.go @@ -1458,7 +1458,9 @@ func (s *testPrivilegeSuite) TestSecurityEnhancedModeInfoschema(c *C) { // Even though we have super, we still can't read protected information from tidb_servers_info, cluster_* tables tk.MustQuery(`SELECT COUNT(*) FROM information_schema.tidb_servers_info WHERE ip IS NOT NULL`).Check(testkit.Rows("0")) - tk.MustQuery(`SELECT COUNT(*) FROM information_schema.cluster_info WHERE status_address IS NOT NULL`).Check(testkit.Rows("0")) + err := tk.QueryToErr(`SELECT COUNT(*) FROM information_schema.cluster_info WHERE status_address IS NOT NULL`) + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") // 36 = a UUID. Normally it is an IP address.
tk.MustQuery(`SELECT COUNT(*) FROM information_schema.CLUSTER_STATEMENTS_SUMMARY WHERE length(instance) != 36`).Check(testkit.Rows("0")) @@ -1476,24 +1478,83 @@ func (s *testPrivilegeSuite) TestSecurityEnhancedModeInfoschema(c *C) { func (s *testPrivilegeSuite) TestClusterConfigInfoschema(c *C) { tk := testkit.NewTestKit(c, s.store) - tk.MustExec("CREATE USER ccnobody, ccconfig") + tk.MustExec("CREATE USER ccnobody, ccconfig, ccprocess") tk.MustExec("GRANT CONFIG ON *.* TO ccconfig") + tk.MustExec("GRANT Process ON *.* TO ccprocess") - // incorrect permissions + // incorrect/no permissions tk.Se.Auth(&auth.UserIdentity{ Username: "ccnobody", Hostname: "localhost", }, nil, nil) + tk.MustQuery("SHOW GRANTS").Check(testkit.Rows("GRANT USAGE ON *.* TO 'ccnobody'@'%'")) err := tk.QueryToErr("SELECT * FROM information_schema.cluster_config") + c.Assert(err, NotNil) c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the CONFIG privilege(s) for this operation") - // With correct permissions + err = tk.QueryToErr("SELECT * FROM information_schema.cluster_hardware") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the CONFIG privilege(s) for this operation") + + err = tk.QueryToErr("SELECT * FROM information_schema.cluster_info") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") + + err = tk.QueryToErr("SELECT * FROM information_schema.cluster_load") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") + + err = tk.QueryToErr("SELECT * FROM information_schema.cluster_systeminfo") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") + + err = tk.QueryToErr("SELECT * FROM information_schema.cluster_log WHERE time BETWEEN '2021-07-13 00:00:00' AND '2021-07-13 02:00:00' AND message like '%'") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") + + // With correct/CONFIG permissions tk.Se.Auth(&auth.UserIdentity{ Username: "ccconfig", Hostname: "localhost", }, nil, nil) + + tk.MustQuery("SHOW GRANTS").Check(testkit.Rows("GRANT CONFIG ON *.* TO 'ccconfig'@'%'")) + // Needs CONFIG privilege tk.MustQuery("SELECT * FROM information_schema.cluster_config") + tk.MustQuery("SELECT * FROM information_schema.cluster_HARDWARE") + // Missing Process privilege + err = tk.QueryToErr("SELECT * FROM information_schema.cluster_INFO") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") + err = tk.QueryToErr("SELECT * FROM information_schema.cluster_LOAD") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") + err = tk.QueryToErr("SELECT * FROM information_schema.cluster_SYSTEMINFO") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") + err = tk.QueryToErr("SELECT * FROM information_schema.cluster_LOG WHERE time BETWEEN '2021-07-13 00:00:00' AND '2021-07-13 02:00:00' AND message like '%'") 
+ c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") + + // With correct/Process permissions + tk.Se.Auth(&auth.UserIdentity{ + Username: "ccprocess", + Hostname: "localhost", + }, nil, nil) + tk.MustQuery("SHOW GRANTS").Check(testkit.Rows("GRANT Process ON *.* TO 'ccprocess'@'%'")) + // Needs Process privilege + tk.MustQuery("SELECT * FROM information_schema.CLUSTER_info") + tk.MustQuery("SELECT * FROM information_schema.CLUSTER_load") + tk.MustQuery("SELECT * FROM information_schema.CLUSTER_systeminfo") + tk.MustQuery("SELECT * FROM information_schema.CLUSTER_log WHERE time BETWEEN '1970-07-13 00:00:00' AND '1970-07-13 02:00:00' AND message like '%'") + // Missing CONFIG privilege + err = tk.QueryToErr("SELECT * FROM information_schema.CLUSTER_config") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the CONFIG privilege(s) for this operation") + err = tk.QueryToErr("SELECT * FROM information_schema.CLUSTER_hardware") + c.Assert(err, NotNil) + c.Assert(err.Error(), Equals, "[planner:1227]Access denied; you need (at least one of) the CONFIG privilege(s) for this operation") } func (s *testPrivilegeSuite) TestSecurityEnhancedModeStatusVars(c *C) { From afc6b16af7330e25a25728945d5b191f1d72f220 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Mon, 19 Jul 2021 22:33:34 +0800 Subject: [PATCH 31/38] test: make test TestTopSQLCPUProfile stable (#25651) (#25656) --- server/tidb_test.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/server/tidb_test.go b/server/tidb_test.go index faaf5352560a2..7af40f7e62587 100644 --- a/server/tidb_test.go +++ b/server/tidb_test.go @@ -1220,6 +1220,7 @@ func (ts *tidbTestTopSQLSuite) TestTopSQLCPUProfile(c *C) { dbt.mustExec("set @@global.tidb_enable_top_sql='On';") dbt.mustExec("set @@tidb_top_sql_agent_address='127.0.0.1:4001';") dbt.mustExec("set @@global.tidb_top_sql_precision_seconds=1;") + dbt.mustExec("set @@global.tidb_txn_mode = 'pessimistic'") // Test case 1: DML query: insert/update/replace/delete/select cases1 := []struct { @@ -1278,9 +1279,12 @@ func (ts *tidbTestTopSQLSuite) TestTopSQLCPUProfile(c *C) { ctx, cancel := context.WithCancel(context.Background()) cases2[i].cancel = cancel prepare, args := ca.prepare, ca.args + var stmt *sql.Stmt go ts.loopExec(ctx, c, func(db *sql.DB) { - stmt, err := db.Prepare(prepare) - c.Assert(err, IsNil) + if stmt == nil { + stmt, err = db.Prepare(prepare) + c.Assert(err, IsNil) + } if strings.HasPrefix(prepare, "select") { rows, err := stmt.Query(args...) 
c.Assert(err, IsNil) @@ -1315,9 +1319,13 @@ func (ts *tidbTestTopSQLSuite) TestTopSQLCPUProfile(c *C) { ctx, cancel := context.WithCancel(context.Background()) cases3[i].cancel = cancel prepare, args := ca.prepare, ca.args + doPrepare := true go ts.loopExec(ctx, c, func(db *sql.DB) { - _, err := db.Exec(fmt.Sprintf("prepare stmt from '%v'", prepare)) - c.Assert(err, IsNil) + if doPrepare { + doPrepare = false + _, err := db.Exec(fmt.Sprintf("prepare stmt from '%v'", prepare)) + c.Assert(err, IsNil) + } sqlBuf := bytes.NewBuffer(nil) sqlBuf.WriteString("execute stmt ") for i := range args { From 7277e6b5af13668af6423fe865de5a66d4852bc6 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Tue, 20 Jul 2021 10:09:34 +0800 Subject: [PATCH 32/38] planner: rename stable-result-mode to ordered-result-mode (#26093) (#26135) --- executor/set_test.go | 18 ++--- planner/core/optimizer.go | 2 +- ...lize_results.go => rule_result_reorder.go} | 28 ++++---- ...ts_test.go => rule_result_reorder_test.go} | 72 +++++++++---------- ...json => ordered_result_mode_suite_in.json} | 12 ++-- ...son => ordered_result_mode_suite_out.json} | 12 ++-- sessionctx/variable/sysvar.go | 2 +- sessionctx/variable/tidb_vars.go | 6 +- 8 files changed, 76 insertions(+), 76 deletions(-) rename planner/core/{rule_stabilize_results.go => rule_result_reorder.go} (81%) rename planner/core/{rule_stabilize_results_test.go => rule_result_reorder_test.go} (76%) rename planner/core/testdata/{stable_result_mode_suite_in.json => ordered_result_mode_suite_in.json} (92%) rename planner/core/testdata/{stable_result_mode_suite_out.json => ordered_result_mode_suite_out.json} (98%) diff --git a/executor/set_test.go b/executor/set_test.go index 339e3a7ae3d7d..a2cd9bdd605c8 100644 --- a/executor/set_test.go +++ b/executor/set_test.go @@ -510,15 +510,15 @@ func (s *testSerialSuite1) TestSetVar(c *C) { tk.MustExec("set @@tidb_enable_clustered_index = 'int_only'") tk.MustQuery(`show warnings`).Check(testkit.Rows("Warning 1287 'INT_ONLY' is deprecated and will be removed in a future release. 
Please use 'ON' or 'OFF' instead")) - // test for tidb_enable_stable_result_mode - tk.MustQuery(`select @@tidb_enable_stable_result_mode`).Check(testkit.Rows("0")) - tk.MustExec(`set global tidb_enable_stable_result_mode = 1`) - tk.MustQuery(`select @@global.tidb_enable_stable_result_mode`).Check(testkit.Rows("1")) - tk.MustExec(`set global tidb_enable_stable_result_mode = 0`) - tk.MustQuery(`select @@global.tidb_enable_stable_result_mode`).Check(testkit.Rows("0")) - tk.MustExec(`set tidb_enable_stable_result_mode=1`) - tk.MustQuery(`select @@global.tidb_enable_stable_result_mode`).Check(testkit.Rows("0")) - tk.MustQuery(`select @@tidb_enable_stable_result_mode`).Check(testkit.Rows("1")) + // test for tidb_enable_ordered_result_mode + tk.MustQuery(`select @@tidb_enable_ordered_result_mode`).Check(testkit.Rows("0")) + tk.MustExec(`set global tidb_enable_ordered_result_mode = 1`) + tk.MustQuery(`select @@global.tidb_enable_ordered_result_mode`).Check(testkit.Rows("1")) + tk.MustExec(`set global tidb_enable_ordered_result_mode = 0`) + tk.MustQuery(`select @@global.tidb_enable_ordered_result_mode`).Check(testkit.Rows("0")) + tk.MustExec(`set tidb_enable_ordered_result_mode=1`) + tk.MustQuery(`select @@global.tidb_enable_ordered_result_mode`).Check(testkit.Rows("0")) + tk.MustQuery(`select @@tidb_enable_ordered_result_mode`).Check(testkit.Rows("1")) } func (s *testSuite5) TestTruncateIncorrectIntSessionVar(c *C) { diff --git a/planner/core/optimizer.go b/planner/core/optimizer.go index aa9e7752de3b5..c8d34f5f70416 100644 --- a/planner/core/optimizer.go +++ b/planner/core/optimizer.go @@ -66,7 +66,7 @@ const ( var optRuleList = []logicalOptRule{ &gcSubstituter{}, &columnPruner{}, - &resultsStabilizer{}, + &resultReorder{}, &buildKeySolver{}, &decorrelateSolver{}, &aggregationEliminator{}, diff --git a/planner/core/rule_stabilize_results.go b/planner/core/rule_result_reorder.go similarity index 81% rename from planner/core/rule_stabilize_results.go rename to planner/core/rule_result_reorder.go index f327bb70a98f3..d97820740dbc1 100644 --- a/planner/core/rule_stabilize_results.go +++ b/planner/core/rule_result_reorder.go @@ -21,30 +21,30 @@ import ( ) /* - resultsStabilizer stabilizes query results. + resultReorder reorder query results. NOTE: it's not a common rule for all queries, it's specially implemented for a few customers. - Results of some queries are not stable, for example: + Results of some queries are not ordered, for example: create table t (a int); insert into t values (1), (2); select a from t; - In the case above, the result can be `1 2` or `2 1`, which is not stable. - This rule stabilizes results by modifying or injecting a Sort operator: + In the case above, the result can be `1 2` or `2 1`, which is not ordered. + This rule reorders results by modifying or injecting a Sort operator: 1. iterate the plan from the root, and ignore all input-order operators (Sel/Proj/Limit); 2. when meeting the first non-input-order operator, 2.1. if it's a Sort, update it by appending all output columns into its order-by list, 2.2. otherwise, inject a new Sort upon this operator. 
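+	For example, under this rule the query `select a from t` above behaves roughly like
+	`select a from t order by a`: a Sort over all output columns is injected on top of the first
+	non-input-order operator (or, if a handle column can be extracted, the handle may be used as
+	the sort key instead; see extractHandleCol below).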
*/ -type resultsStabilizer struct { +type resultReorder struct { } -func (rs *resultsStabilizer) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { - stable := rs.completeSort(lp) - if !stable { +func (rs *resultReorder) optimize(ctx context.Context, lp LogicalPlan) (LogicalPlan, error) { + ordered := rs.completeSort(lp) + if !ordered { lp = rs.injectSort(lp) } return lp, nil } -func (rs *resultsStabilizer) completeSort(lp LogicalPlan) bool { +func (rs *resultReorder) completeSort(lp LogicalPlan) bool { if rs.isInputOrderKeeper(lp) { return rs.completeSort(lp.Children()[0]) } else if sort, ok := lp.(*LogicalSort); ok { @@ -69,7 +69,7 @@ func (rs *resultsStabilizer) completeSort(lp LogicalPlan) bool { return false } -func (rs *resultsStabilizer) injectSort(lp LogicalPlan) LogicalPlan { +func (rs *resultReorder) injectSort(lp LogicalPlan) LogicalPlan { if rs.isInputOrderKeeper(lp) { lp.SetChildren(rs.injectSort(lp.Children()[0])) return lp @@ -90,7 +90,7 @@ func (rs *resultsStabilizer) injectSort(lp LogicalPlan) LogicalPlan { return sort } -func (rs *resultsStabilizer) isInputOrderKeeper(lp LogicalPlan) bool { +func (rs *resultReorder) isInputOrderKeeper(lp LogicalPlan) bool { switch lp.(type) { case *LogicalSelection, *LogicalProjection, *LogicalLimit: return true @@ -99,7 +99,7 @@ func (rs *resultsStabilizer) isInputOrderKeeper(lp LogicalPlan) bool { } // extractHandleCols does the best effort to get the handle column. -func (rs *resultsStabilizer) extractHandleCol(lp LogicalPlan) *expression.Column { +func (rs *resultReorder) extractHandleCol(lp LogicalPlan) *expression.Column { switch x := lp.(type) { case *LogicalSelection, *LogicalLimit: handleCol := rs.extractHandleCol(lp.Children()[0]) @@ -120,6 +120,6 @@ func (rs *resultsStabilizer) extractHandleCol(lp LogicalPlan) *expression.Column return nil } -func (rs *resultsStabilizer) name() string { - return "stabilize_results" +func (rs *resultReorder) name() string { + return "result_reorder" } diff --git a/planner/core/rule_stabilize_results_test.go b/planner/core/rule_result_reorder_test.go similarity index 76% rename from planner/core/rule_stabilize_results_test.go rename to planner/core/rule_result_reorder_test.go index 00b3cf1fb12e5..9702a81620b1a 100644 --- a/planner/core/rule_stabilize_results_test.go +++ b/planner/core/rule_result_reorder_test.go @@ -29,21 +29,21 @@ import ( "github.com/pingcap/tidb/util/testutil" ) -var _ = Suite(&testRuleStabilizeResults{}) -var _ = SerialSuites(&testRuleStabilizeResultsSerial{}) +var _ = Suite(&testRuleReorderResults{}) +var _ = SerialSuites(&testRuleReorderResultsSerial{}) -type testRuleStabilizeResultsSerial struct { +type testRuleReorderResultsSerial struct { store kv.Storage dom *domain.Domain } -func (s *testRuleStabilizeResultsSerial) SetUpTest(c *C) { +func (s *testRuleReorderResultsSerial) SetUpTest(c *C) { var err error s.store, s.dom, err = newStoreWithBootstrap() c.Assert(err, IsNil) } -func (s *testRuleStabilizeResultsSerial) TestPlanCache(c *C) { +func (s *testRuleReorderResultsSerial) TestPlanCache(c *C) { tk := testkit.NewTestKit(c, s.store) orgEnable := plannercore.PreparedPlanCacheEnabled() defer func() { @@ -57,7 +57,7 @@ func (s *testRuleStabilizeResultsSerial) TestPlanCache(c *C) { c.Assert(err, IsNil) tk.MustExec("use test") - tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("set tidb_enable_ordered_result_mode=1") tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int primary key, b int, c int, d int, key(b))") 
tk.MustExec("prepare s1 from 'select * from t where a > ? limit 10'") @@ -68,10 +68,10 @@ func (s *testRuleStabilizeResultsSerial) TestPlanCache(c *C) { tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) // plan cache is still working } -func (s *testRuleStabilizeResultsSerial) TestSQLBinding(c *C) { +func (s *testRuleReorderResultsSerial) TestSQLBinding(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") - tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("set tidb_enable_ordered_result_mode=1") tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int primary key, b int, c int, d int, key(b))") tk.MustQuery("explain select * from t where a > 0 limit 1").Check(testkit.Rows( @@ -90,10 +90,10 @@ func (s *testRuleStabilizeResultsSerial) TestSQLBinding(c *C) { " └─TableRowIDScan_16(Probe) 1.00 cop[tikv] table:t keep order:false, stats:pseudo")) } -func (s *testRuleStabilizeResultsSerial) TestClusteredIndex(c *C) { +func (s *testRuleReorderResultsSerial) TestClusteredIndex(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") - tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("set tidb_enable_ordered_result_mode=1") tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn tk.MustExec("drop table if exists t") tk.MustExec("CREATE TABLE t (a int,b int,c int, PRIMARY KEY (a,b))") @@ -105,27 +105,27 @@ func (s *testRuleStabilizeResultsSerial) TestClusteredIndex(c *C) { tk.Se.GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOff } -type testRuleStabilizeResults struct { +type testRuleReorderResults struct { store kv.Storage dom *domain.Domain testData testutil.TestData } -func (s *testRuleStabilizeResults) SetUpSuite(c *C) { +func (s *testRuleReorderResults) SetUpSuite(c *C) { var err error s.store, s.dom, err = newStoreWithBootstrap() c.Assert(err, IsNil) - s.testData, err = testutil.LoadTestSuiteData("testdata", "stable_result_mode_suite") + s.testData, err = testutil.LoadTestSuiteData("testdata", "ordered_result_mode_suite") c.Assert(err, IsNil) } -func (s *testRuleStabilizeResults) TearDownSuite(c *C) { +func (s *testRuleReorderResults) TearDownSuite(c *C) { c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) } -func (s *testRuleStabilizeResults) runTestData(c *C, tk *testkit.TestKit, name string) { +func (s *testRuleReorderResults) runTestData(c *C, tk *testkit.TestKit, name string) { var input []string var output []struct { Plan []string @@ -140,62 +140,62 @@ func (s *testRuleStabilizeResults) runTestData(c *C, tk *testkit.TestKit, name s } } -func (s *testRuleStabilizeResults) TestStableResultMode(c *C) { +func (s *testRuleReorderResults) TestOrderedResultMode(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") - tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("set tidb_enable_ordered_result_mode=1") tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int primary key, b int, c int, d int, key(b))") - s.runTestData(c, tk, "TestStableResultMode") + s.runTestData(c, tk, "TestOrderedResultMode") } -func (s *testRuleStabilizeResults) TestStableResultModeOnDML(c *C) { +func (s *testRuleReorderResults) TestOrderedResultModeOnDML(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") - tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("set tidb_enable_ordered_result_mode=1") tk.MustExec("drop table if exists t") tk.MustExec("create table t (a int primary key, b int, c int, 
key(b))") - s.runTestData(c, tk, "TestStableResultModeOnDML") + s.runTestData(c, tk, "TestOrderedResultModeOnDML") } -func (s *testRuleStabilizeResults) TestStableResultModeOnSubQuery(c *C) { +func (s *testRuleReorderResults) TestOrderedResultModeOnSubQuery(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") - tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("set tidb_enable_ordered_result_mode=1") tk.MustExec("drop table if exists t1") tk.MustExec("drop table if exists t2") tk.MustExec("create table t1 (a int primary key, b int, c int, d int, key(b))") tk.MustExec("create table t2 (a int primary key, b int, c int, d int, key(b))") - s.runTestData(c, tk, "TestStableResultModeOnSubQuery") + s.runTestData(c, tk, "TestOrderedResultModeOnSubQuery") } -func (s *testRuleStabilizeResults) TestStableResultModeOnJoin(c *C) { +func (s *testRuleReorderResults) TestOrderedResultModeOnJoin(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") - tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("set tidb_enable_ordered_result_mode=1") tk.MustExec("drop table if exists t1") tk.MustExec("drop table if exists t2") tk.MustExec("create table t1 (a int primary key, b int, c int, d int, key(b))") tk.MustExec("create table t2 (a int primary key, b int, c int, d int, key(b))") - s.runTestData(c, tk, "TestStableResultModeOnJoin") + s.runTestData(c, tk, "TestOrderedResultModeOnJoin") } -func (s *testRuleStabilizeResults) TestStableResultModeOnOtherOperators(c *C) { +func (s *testRuleReorderResults) TestOrderedResultModeOnOtherOperators(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") - tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("set tidb_enable_ordered_result_mode=1") tk.MustExec("drop table if exists t1") tk.MustExec("drop table if exists t2") tk.MustExec("create table t1 (a int primary key, b int, c int, d int, unique key(b))") tk.MustExec("create table t2 (a int primary key, b int, c int, d int, unique key(b))") - s.runTestData(c, tk, "TestStableResultModeOnOtherOperators") + s.runTestData(c, tk, "TestOrderedResultModeOnOtherOperators") } -func (s *testRuleStabilizeResults) TestStableResultModeOnPartitionTable(c *C) { +func (s *testRuleReorderResults) TestOrderedResultModeOnPartitionTable(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") tk.MustExec(fmt.Sprintf(`set tidb_partition_prune_mode='%v'`, variable.DefTiDBPartitionPruneMode)) - tk.MustExec("set tidb_enable_stable_result_mode=1") + tk.MustExec("set tidb_enable_ordered_result_mode=1") tk.MustExec("drop table if exists thash") tk.MustExec("drop table if exists trange") tk.MustExec("create table thash (a int primary key, b int, c int, d int) partition by hash(a) partitions 4") @@ -205,14 +205,14 @@ func (s *testRuleStabilizeResults) TestStableResultModeOnPartitionTable(c *C) { partition p2 values less than (300), partition p3 values less than (400))`) tk.MustQuery("select @@tidb_partition_prune_mode").Check(testkit.Rows("static")) - s.runTestData(c, tk, "TestStableResultModeOnPartitionTable") + s.runTestData(c, tk, "TestOrderedResultModeOnPartitionTable") } -func (s *testRuleStabilizeResults) TestHideStableResultSwitch(c *C) { +func (s *testRuleReorderResults) TestHideStableResultSwitch(c *C) { tk := testkit.NewTestKit(c, s.store) rs := tk.MustQuery("show variables").Rows() for _, r := range rs { - c.Assert(strings.ToLower(r[0].(string)), Not(Equals), "tidb_enable_stable_result_mode") + c.Assert(strings.ToLower(r[0].(string)), 
Not(Equals), "tidb_enable_ordered_result_mode") } - c.Assert(len(tk.MustQuery("show variables where variable_name like '%tidb_enable_stable_result_mode%'").Rows()), Equals, 0) + c.Assert(len(tk.MustQuery("show variables where variable_name like '%tidb_enable_ordered_result_mode%'").Rows()), Equals, 0) } diff --git a/planner/core/testdata/stable_result_mode_suite_in.json b/planner/core/testdata/ordered_result_mode_suite_in.json similarity index 92% rename from planner/core/testdata/stable_result_mode_suite_in.json rename to planner/core/testdata/ordered_result_mode_suite_in.json index 7629e80fc3630..beabb1e713c3a 100644 --- a/planner/core/testdata/stable_result_mode_suite_in.json +++ b/planner/core/testdata/ordered_result_mode_suite_in.json @@ -1,6 +1,6 @@ [ { - "name": "TestStableResultMode", + "name": "TestOrderedResultMode", "cases": [ "select * from t use index(primary)", "select b from t use index(b)", @@ -19,7 +19,7 @@ ] }, { - "name": "TestStableResultModeOnDML", + "name": "TestOrderedResultModeOnDML", "cases": [ "insert into t select * from t", "insert into t select * from t where a>1", @@ -32,7 +32,7 @@ ] }, { - "name": "TestStableResultModeOnSubQuery", + "name": "TestOrderedResultModeOnSubQuery", "cases": [ "select * from t1 where t1.a in (select b from t2)", "select * from t1 where t1.a not in (select b from t2)", @@ -46,7 +46,7 @@ ] }, { - "name": "TestStableResultModeOnJoin", + "name": "TestOrderedResultModeOnJoin", "cases": [ "select * from t1, t2 where t1.a = t2.a", "select * from t1, t2 where t1.a > t2.a and t1.b = t2.b and t1.c < t2.c", @@ -55,7 +55,7 @@ ] }, { - "name": "TestStableResultModeOnOtherOperators", + "name": "TestOrderedResultModeOnOtherOperators", "cases": [ "select * from t1 where a = 1 or a = 222 or a = 33333", "select * from t1 where a in (1, 2, 3, 4)", @@ -72,7 +72,7 @@ ] }, { - "name": "TestStableResultModeOnPartitionTable", + "name": "TestOrderedResultModeOnPartitionTable", "cases": [ "select * from thash where a in (1, 200)", "select * from thash where a >= 50 and a <= 150", diff --git a/planner/core/testdata/stable_result_mode_suite_out.json b/planner/core/testdata/ordered_result_mode_suite_out.json similarity index 98% rename from planner/core/testdata/stable_result_mode_suite_out.json rename to planner/core/testdata/ordered_result_mode_suite_out.json index 6e00e3e1c65db..b4d8cc32cba43 100644 --- a/planner/core/testdata/stable_result_mode_suite_out.json +++ b/planner/core/testdata/ordered_result_mode_suite_out.json @@ -1,6 +1,6 @@ [ { - "Name": "TestStableResultMode", + "Name": "TestOrderedResultMode", "Cases": [ { "Plan": [ @@ -112,7 +112,7 @@ ] }, { - "Name": "TestStableResultModeOnDML", + "Name": "TestOrderedResultModeOnDML", "Cases": [ { "Plan": [ @@ -178,7 +178,7 @@ ] }, { - "Name": "TestStableResultModeOnSubQuery", + "Name": "TestOrderedResultModeOnSubQuery", "Cases": [ { "Plan": [ @@ -282,7 +282,7 @@ ] }, { - "Name": "TestStableResultModeOnJoin", + "Name": "TestOrderedResultModeOnJoin", "Cases": [ { "Plan": [ @@ -330,7 +330,7 @@ ] }, { - "Name": "TestStableResultModeOnOtherOperators", + "Name": "TestOrderedResultModeOnOtherOperators", "Cases": [ { "Plan": [ @@ -436,7 +436,7 @@ ] }, { - "Name": "TestStableResultModeOnPartitionTable", + "Name": "TestOrderedResultModeOnPartitionTable", "Cases": [ { "Plan": [ diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index dcc81e80d5db0..05da31bea6143 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -1759,7 +1759,7 @@ var defaultSysVars = []*SysVar{ 
s.EnableGlobalTemporaryTable = TiDBOptOn(val) return nil }}, - {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableStableResultMode, Value: BoolToOnOff(DefTiDBEnableStableResultMode), Hidden: true, Type: TypeBool, SetSession: func(s *SessionVars, val string) error { + {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableOrderedResultMode, Value: BoolToOnOff(DefTiDBEnableOrderedResultMode), Hidden: true, Type: TypeBool, SetSession: func(s *SessionVars, val string) error { s.EnableStableResultMode = TiDBOptOn(val) return nil }}, diff --git a/sessionctx/variable/tidb_vars.go b/sessionctx/variable/tidb_vars.go index 42fd63adf7cae..00c445ada3de6 100644 --- a/sessionctx/variable/tidb_vars.go +++ b/sessionctx/variable/tidb_vars.go @@ -568,8 +568,8 @@ const ( // TiDBEnableLocalTxn indicates whether to enable Local Txn. TiDBEnableLocalTxn = "tidb_enable_local_txn" - // TiDBEnableStableResultMode indicates if stabilize query results. - TiDBEnableStableResultMode = "tidb_enable_stable_result_mode" + // TiDBEnableOrderedResultMode indicates if stabilize query results. + TiDBEnableOrderedResultMode = "tidb_enable_ordered_result_mode" ) // TiDB vars that have only global scope @@ -724,7 +724,7 @@ const ( DefTiDBEnableGlobalTemporaryTable = false DefTiDBEnableLocalTxn = false DefTMPTableSize = 16777216 - DefTiDBEnableStableResultMode = false + DefTiDBEnableOrderedResultMode = false ) // Process global variables. From 3e01fd6ffb4a8fb9954441e2b4d0fba40f3584aa Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Tue, 20 Jul 2021 10:23:34 +0800 Subject: [PATCH 33/38] util/ranger: fix wrong range calculation of prefix index when appending ranges to point ranges (#26066) (#26262) --- util/ranger/detacher.go | 18 ++++++++-- util/ranger/ranger_test.go | 38 ++++++++++++++++++++ util/ranger/testdata/ranger_suite_in.json | 8 +++++ util/ranger/testdata/ranger_suite_out.json | 41 ++++++++++++++++++++++ 4 files changed, 103 insertions(+), 2 deletions(-) diff --git a/util/ranger/detacher.go b/util/ranger/detacher.go index 6b1efe5433f99..f26e96c42d7f8 100644 --- a/util/ranger/detacher.go +++ b/util/ranger/detacher.go @@ -268,8 +268,21 @@ func (d *rangeDetacher) detachCNFCondAndBuildRangeForIndex(conditions []expressi res.EqOrInCount = eqOrInCount ranges, err = d.buildCNFIndexRange(tpSlice, eqOrInCount, accessConds) if err != nil { - return res, err + return nil, err + } + + // Though ranges are built from equal/in conditions, some range may not be a single point after UnionRanges in buildCNFIndexRange. + // In order to prepare for the following appendRanges2PointRanges, we set d.mergeConsecutive to false and call buildCNFIndexRange + // again to get pointRanges, in which each range must be a single point. If we use ranges rather than pointRanges when calling + // appendRanges2PointRanges, wrong ranges would be calculated as issue https://github.com/pingcap/tidb/issues/26029 describes. 
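+	// For a concrete example, see TestPrefixIndexAppendPointRanges added below: with the prefix index
+	// u_m_col(COL1(10), COL2, COL3) and conditions such as `col1 = "xxxxxxxxxxxxxxx" and col2 in (72, 73) and col3 != ...`,
+	// the ranges over (col1, col2) may no longer all be single points after UnionRanges, so the col3 ranges must be
+	// appended to pointRanges rather than to the merged ranges.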
+ mergeConsecutive := d.mergeConsecutive + d.mergeConsecutive = false + pointRanges, err := d.buildCNFIndexRange(tpSlice, eqOrInCount, accessConds) + if err != nil { + return nil, err } + d.mergeConsecutive = mergeConsecutive + res.Ranges = ranges res.AccessConds = accessConds res.RemainedConds = filterConds @@ -293,6 +306,7 @@ func (d *rangeDetacher) detachCNFCondAndBuildRangeForIndex(conditions []expressi } if len(pointRes.Ranges[0].LowVal) > eqOrInCount { res = pointRes + pointRanges = pointRes.Ranges eqOrInCount = len(res.Ranges[0].LowVal) newConditions = newConditions[:0] newConditions = append(newConditions, conditions[:offset]...) @@ -314,7 +328,7 @@ func (d *rangeDetacher) detachCNFCondAndBuildRangeForIndex(conditions []expressi return &DetachRangeResult{}, nil } if len(tailRes.AccessConds) > 0 { - res.Ranges = appendRanges2PointRanges(res.Ranges, tailRes.Ranges) + res.Ranges = appendRanges2PointRanges(pointRanges, tailRes.Ranges) res.AccessConds = append(res.AccessConds, tailRes.AccessConds...) } res.RemainedConds = append(res.RemainedConds, tailRes.RemainedConds...) diff --git a/util/ranger/ranger_test.go b/util/ranger/ranger_test.go index 3f4e59848b044..29e1c3fc4a7bf 100644 --- a/util/ranger/ranger_test.go +++ b/util/ranger/ranger_test.go @@ -1754,3 +1754,41 @@ func (s *testRangerSuite) TestIndexRangeForDecimal(c *C) { testKit.MustQuery(tt).Check(testkit.Rows(output[i].Result...)) } } + +func (s *testRangerSuite) TestPrefixIndexAppendPointRanges(c *C) { + defer testleak.AfterTest(c)() + dom, store, err := newDomainStoreWithBootstrap(c) + defer func() { + dom.Close() + store.Close() + }() + c.Assert(err, IsNil) + testKit := testkit.NewTestKit(c, store) + testKit.MustExec("USE test") + testKit.MustExec("DROP TABLE IF EXISTS IDT_20755") + testKit.MustExec("CREATE TABLE `IDT_20755` (\n" + + " `COL1` varchar(20) DEFAULT NULL,\n" + + " `COL2` tinyint(16) DEFAULT NULL,\n" + + " `COL3` timestamp NULL DEFAULT NULL,\n" + + " KEY `u_m_col` (`COL1`(10),`COL2`,`COL3`)\n" + + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin") + testKit.MustExec("INSERT INTO IDT_20755 VALUES(\"牾窓螎刳闌蜹瑦詬鍖湪槢壿玟瞏膍敗特森撇縆\", 73, \"2010-06-03 07:29:05\")") + testKit.MustExec("INSERT INTO IDT_20755 VALUES(\"xxxxxxxxxxxxxxx\", 73, \"2010-06-03 07:29:05\")") + + var input []string + var output []struct { + SQL string + Plan []string + Result []string + } + s.testData.GetTestCases(c, &input, &output) + for i, tt := range input { + s.testData.OnRecord(func() { + output[i].SQL = tt + output[i].Plan = s.testData.ConvertRowsToStrings(testKit.MustQuery("explain format = 'brief' " + tt).Rows()) + output[i].Result = s.testData.ConvertRowsToStrings(testKit.MustQuery(tt).Rows()) + }) + testKit.MustQuery("explain format = 'brief' " + tt).Check(testkit.Rows(output[i].Plan...)) + testKit.MustQuery(tt).Check(testkit.Rows(output[i].Result...)) + } +} diff --git a/util/ranger/testdata/ranger_suite_in.json b/util/ranger/testdata/ranger_suite_in.json index 857ad1af8be86..c1641c0f251f7 100644 --- a/util/ranger/testdata/ranger_suite_in.json +++ b/util/ranger/testdata/ranger_suite_in.json @@ -101,5 +101,13 @@ "select * from t2 use index(idx) where a = 1 and b <= -1;", "select * from t2 use index(idx) where a = 1 and b >= -1;" ] + }, + { + "name": "TestPrefixIndexAppendPointRanges", + "cases": [ + "select * from IDT_20755 use index (u_m_col) where col1 in (\"牾窓螎刳闌蜹瑦詬鍖湪槢壿玟瞏膍敗特森撇縆\", \"物碃貞枕騫摨聫嚣蜻禼担堋黕詖蝒毎槒阆畒郒\", \"剮毵樍穋摻瀽鬦擀钟鷫產冖悄乮曙枱诠鑡轰砠\") and col2 in (72, 39, 73) and col3 != \"2024-10-19 08:55:32\"", + "select * from IDT_20755 use index 
(u_m_col) where col1 = \"xxxxxxxxxxxxxxx\" and col2 in (72, 73) and col3 != \"2024-10-19 08:55:32\"", + "select * from IDT_20755 use index (u_m_col) where col1 = \"xxxxxxxxxxxxxxx\" and col2 in (72, 73, 74) and col3 != \"2024-10-19 08:55:32\"" + ] } ] diff --git a/util/ranger/testdata/ranger_suite_out.json b/util/ranger/testdata/ranger_suite_out.json index 9def6a398c661..fb40fffb6fe48 100644 --- a/util/ranger/testdata/ranger_suite_out.json +++ b/util/ranger/testdata/ranger_suite_out.json @@ -648,5 +648,46 @@ ] } ] + }, + { + "Name": "TestPrefixIndexAppendPointRanges", + "Cases": [ + { + "SQL": "select * from IDT_20755 use index (u_m_col) where col1 in (\"牾窓螎刳闌蜹瑦詬鍖湪槢壿玟瞏膍敗特森撇縆\", \"物碃貞枕騫摨聫嚣蜻禼担堋黕詖蝒毎槒阆畒郒\", \"剮毵樍穋摻瀽鬦擀钟鷫產冖悄乮曙枱诠鑡轰砠\") and col2 in (72, 39, 73) and col3 != \"2024-10-19 08:55:32\"", + "Plan": [ + "IndexLookUp 5.99 root ", + "├─IndexRangeScan(Build) 5.99 cop[tikv] table:IDT_20755, index:u_m_col(COL1, COL2, COL3) range:[\"剮毵樍穋摻瀽鬦擀钟鷫\" 39 -inf,\"剮毵樍穋摻瀽鬦擀钟鷫\" 39 2024-10-19 08:55:32), (\"剮毵樍穋摻瀽鬦擀钟鷫\" 39 2024-10-19 08:55:32,\"剮毵樍穋摻瀽鬦擀钟鷫\" 39 +inf], [\"剮毵樍穋摻瀽鬦擀钟鷫\" 72 -inf,\"剮毵樍穋摻瀽鬦擀钟鷫\" 72 2024-10-19 08:55:32), (\"剮毵樍穋摻瀽鬦擀钟鷫\" 72 2024-10-19 08:55:32,\"剮毵樍穋摻瀽鬦擀钟鷫\" 72 +inf], [\"剮毵樍穋摻瀽鬦擀钟鷫\" 73 -inf,\"剮毵樍穋摻瀽鬦擀钟鷫\" 73 2024-10-19 08:55:32), (\"剮毵樍穋摻瀽鬦擀钟鷫\" 73 2024-10-19 08:55:32,\"剮毵樍穋摻瀽鬦擀钟鷫\" 73 +inf], [\"物碃貞枕騫摨聫嚣蜻禼\" 39 -inf,\"物碃貞枕騫摨聫嚣蜻禼\" 39 2024-10-19 08:55:32), (\"物碃貞枕騫摨聫嚣蜻禼\" 39 2024-10-19 08:55:32,\"物碃貞枕騫摨聫嚣蜻禼\" 39 +inf], [\"物碃貞枕騫摨聫嚣蜻禼\" 72 -inf,\"物碃貞枕騫摨聫嚣蜻禼\" 72 2024-10-19 08:55:32), (\"物碃貞枕騫摨聫嚣蜻禼\" 72 2024-10-19 08:55:32,\"物碃貞枕騫摨聫嚣蜻禼\" 72 +inf], [\"物碃貞枕騫摨聫嚣蜻禼\" 73 -inf,\"物碃貞枕騫摨聫嚣蜻禼\" 73 2024-10-19 08:55:32), (\"物碃貞枕騫摨聫嚣蜻禼\" 73 2024-10-19 08:55:32,\"物碃貞枕騫摨聫嚣蜻禼\" 73 +inf], [\"牾窓螎刳闌蜹瑦詬鍖湪\" 39 -inf,\"牾窓螎刳闌蜹瑦詬鍖湪\" 39 2024-10-19 08:55:32), (\"牾窓螎刳闌蜹瑦詬鍖湪\" 39 2024-10-19 08:55:32,\"牾窓螎刳闌蜹瑦詬鍖湪\" 39 +inf], [\"牾窓螎刳闌蜹瑦詬鍖湪\" 72 -inf,\"牾窓螎刳闌蜹瑦詬鍖湪\" 72 2024-10-19 08:55:32), (\"牾窓螎刳闌蜹瑦詬鍖湪\" 72 2024-10-19 08:55:32,\"牾窓螎刳闌蜹瑦詬鍖湪\" 72 +inf], [\"牾窓螎刳闌蜹瑦詬鍖湪\" 73 -inf,\"牾窓螎刳闌蜹瑦詬鍖湪\" 73 2024-10-19 08:55:32), (\"牾窓螎刳闌蜹瑦詬鍖湪\" 73 2024-10-19 08:55:32,\"牾窓螎刳闌蜹瑦詬鍖湪\" 73 +inf], keep order:false, stats:pseudo", + "└─Selection(Probe) 5.99 cop[tikv] in(test.idt_20755.col1, \"牾窓螎刳闌蜹瑦詬鍖湪槢壿玟瞏膍敗特森撇縆\", \"物碃貞枕騫摨聫嚣蜻禼担堋黕詖蝒毎槒阆畒郒\", \"剮毵樍穋摻瀽鬦擀钟鷫產冖悄乮曙枱诠鑡轰砠\")", + " └─TableRowIDScan 5.99 cop[tikv] table:IDT_20755 keep order:false, stats:pseudo" + ], + "Result": [ + "牾窓螎刳闌蜹瑦詬鍖湪槢壿玟瞏膍敗特森撇縆 73 2010-06-03 07:29:05" + ] + }, + { + "SQL": "select * from IDT_20755 use index (u_m_col) where col1 = \"xxxxxxxxxxxxxxx\" and col2 in (72, 73) and col3 != \"2024-10-19 08:55:32\"", + "Plan": [ + "IndexLookUp 1.33 root ", + "├─IndexRangeScan(Build) 1.33 cop[tikv] table:IDT_20755, index:u_m_col(COL1, COL2, COL3) range:[\"xxxxxxxxxx\" 72 -inf,\"xxxxxxxxxx\" 72 2024-10-19 08:55:32), (\"xxxxxxxxxx\" 72 2024-10-19 08:55:32,\"xxxxxxxxxx\" 72 +inf], [\"xxxxxxxxxx\" 73 -inf,\"xxxxxxxxxx\" 73 2024-10-19 08:55:32), (\"xxxxxxxxxx\" 73 2024-10-19 08:55:32,\"xxxxxxxxxx\" 73 +inf], keep order:false, stats:pseudo", + "└─Selection(Probe) 1.33 cop[tikv] eq(test.idt_20755.col1, \"xxxxxxxxxxxxxxx\")", + " └─TableRowIDScan 1.33 cop[tikv] table:IDT_20755 keep order:false, stats:pseudo" + ], + "Result": [ + "xxxxxxxxxxxxxxx 73 2010-06-03 07:29:05" + ] + }, + { + "SQL": "select * from IDT_20755 use index (u_m_col) where col1 = \"xxxxxxxxxxxxxxx\" and col2 in (72, 73, 74) and col3 != \"2024-10-19 08:55:32\"", + "Plan": [ + "IndexLookUp 2.00 root ", + "├─IndexRangeScan(Build) 2.00 cop[tikv] table:IDT_20755, index:u_m_col(COL1, COL2, COL3) range:[\"xxxxxxxxxx\" 72 
-inf,\"xxxxxxxxxx\" 72 2024-10-19 08:55:32), (\"xxxxxxxxxx\" 72 2024-10-19 08:55:32,\"xxxxxxxxxx\" 72 +inf], [\"xxxxxxxxxx\" 73 -inf,\"xxxxxxxxxx\" 73 2024-10-19 08:55:32), (\"xxxxxxxxxx\" 73 2024-10-19 08:55:32,\"xxxxxxxxxx\" 73 +inf], [\"xxxxxxxxxx\" 74 -inf,\"xxxxxxxxxx\" 74 2024-10-19 08:55:32), (\"xxxxxxxxxx\" 74 2024-10-19 08:55:32,\"xxxxxxxxxx\" 74 +inf], keep order:false, stats:pseudo", + "└─Selection(Probe) 2.00 cop[tikv] eq(test.idt_20755.col1, \"xxxxxxxxxxxxxxx\")", + " └─TableRowIDScan 2.00 cop[tikv] table:IDT_20755 keep order:false, stats:pseudo" + ], + "Result": [ + "xxxxxxxxxxxxxxx 73 2010-06-03 07:29:05" + ] + } + ] } ] From 413468abd9db7153709c79078487a5edd5fed443 Mon Sep 17 00:00:00 2001 From: Lei Zhao Date: Tue, 20 Jul 2021 10:45:35 +0800 Subject: [PATCH 34/38] store/tikv: fix backoff panic when resolving async-commit locks (#25862) --- store/tikv/lock_resolver.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/store/tikv/lock_resolver.go b/store/tikv/lock_resolver.go index 6fdcab23c32df..20b4964a48cd4 100644 --- a/store/tikv/lock_resolver.go +++ b/store/tikv/lock_resolver.go @@ -787,8 +787,10 @@ func (lr *LockResolver) resolveLockAsync(bo *Backoffer, l *Lock, status TxnStatu for region, locks := range keysByRegion { curLocks := locks curRegion := region + resolveBo, cancel := bo.Fork() + defer cancel() go func() { - errChan <- lr.resolveRegionLocks(bo, l, curRegion, curLocks, status) + errChan <- lr.resolveRegionLocks(resolveBo, l, curRegion, curLocks, status) }() } @@ -823,11 +825,11 @@ func (lr *LockResolver) checkAllSecondaries(bo *Backoffer, l *Lock, status *TxnS } errChan := make(chan error, len(regions)) - checkBo, cancel := bo.Fork() - defer cancel() for regionID, keys := range regions { curRegionID := regionID curKeys := keys + checkBo, cancel := bo.Fork() + defer cancel() go func() { errChan <- lr.checkSecondaries(checkBo, l.TxnID, curKeys, curRegionID, &shared) From 40a9ad712e005e40a9c31c6f2d75e4af1723e324 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Tue, 20 Jul 2021 14:27:35 +0800 Subject: [PATCH 35/38] executor: support forbid tiflash for stale read (#25828) (#25985) --- executor/adapter.go | 2 + executor/compiler.go | 10 ++++ executor/executor.go | 1 + executor/stale_txn_test.go | 95 +++++++++++++++++++++++++++++++++++ executor/tiflash_test.go | 36 +++++++++++++ planner/core/common_plans.go | 1 + planner/core/planbuilder.go | 15 ++++++ planner/core/preprocess.go | 11 ++++ sessionctx/stmtctx/stmtctx.go | 1 + 9 files changed, 172 insertions(+) diff --git a/executor/adapter.go b/executor/adapter.go index 77498e35c88dc..f94091026321c 100644 --- a/executor/adapter.go +++ b/executor/adapter.go @@ -908,6 +908,8 @@ func (a *ExecStmt) FinishExecuteStmt(txnTS uint64, err error, hasMoreResults boo } // Reset DurationParse due to the next statement may not need to be parsed (not a text protocol query). 
sessVars.DurationParse = 0 + // Clean the stale read flag when statement execution finish + sessVars.StmtCtx.IsStaleness = false } // CloseRecordSet will finish the execution of current statement and do some record work diff --git a/executor/compiler.go b/executor/compiler.go index e5a6f9b0c6d69..c8c1c8087ad36 100644 --- a/executor/compiler.go +++ b/executor/compiler.go @@ -15,8 +15,10 @@ package executor import ( "context" + "fmt" "github.com/opentracing/opentracing-go" + "github.com/pingcap/failpoint" "github.com/pingcap/parser/ast" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/config" @@ -63,6 +65,14 @@ func (c *Compiler) Compile(ctx context.Context, stmtNode ast.StmtNode) (*ExecStm return nil, err } + failpoint.Inject("assertStmtCtxIsStaleness", func(val failpoint.Value) { + expected := val.(bool) + got := c.Ctx.GetSessionVars().StmtCtx.IsStaleness + if got != expected { + panic(fmt.Sprintf("stmtctx isStaleness wrong, expected:%v, got:%v", expected, got)) + } + }) + CountStmtNode(stmtNode, c.Ctx.GetSessionVars().InRestrictedSQL) var lowerPriority bool if c.Ctx.GetSessionVars().StmtCtx.Priority == mysql.NoPriority { diff --git a/executor/executor.go b/executor/executor.go index 5ee89ea2c1c9e..8dab312b09f1a 100644 --- a/executor/executor.go +++ b/executor/executor.go @@ -1660,6 +1660,7 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { DiskTracker: disk.NewTracker(memory.LabelForSQLText, -1), TaskID: stmtctx.AllocateTaskID(), CTEStorageMap: map[int]*CTEStorages{}, + IsStaleness: false, } sc.MemTracker.AttachToGlobalTracker(GlobalMemoryUsageTracker) globalConfig := config.GetGlobalConfig() diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go index ebf966c5e3183..79413097b78c5 100644 --- a/executor/stale_txn_test.go +++ b/executor/stale_txn_test.go @@ -227,6 +227,8 @@ func (s *testStaleTxnSerialSuite) TestSelectAsOf(c *C) { c.Assert(tk.Se.GetSessionVars().TxnReadTS.PeakTxnReadTS(), Equals, uint64(0)) } } + failpoint.Disable("github.com/pingcap/tidb/executor/assertStaleTSO") + failpoint.Disable("github.com/pingcap/tidb/executor/assertStaleTSOWithTolerance") } func (s *testStaleTxnSerialSuite) TestStaleReadKVRequest(c *C) { @@ -989,3 +991,96 @@ func (s *testStaleTxnSerialSuite) TestStaleReadPrepare(c *C) { tk.MustExec(fmt.Sprintf(`set transaction read only as of timestamp '%s'`, time1.Format("2006-1-2 15:04:05.000"))) c.Assert("execute p1", NotNil) } + +func (s *testStaleTxnSuite) TestStmtCtxStaleFlag(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + defer tk.MustExec("drop table if exists t") + tk.MustExec("create table t (id int)") + time.Sleep(2 * time.Second) + time1 := time.Now().Format("2006-1-2 15:04:05") + testcases := []struct { + sql string + hasStaleFlag bool + }{ + // assert select as of statement + { + sql: fmt.Sprintf("select * from t as of timestamp '%v'", time1), + hasStaleFlag: true, + }, + // assert select statement + { + sql: "select * from t", + hasStaleFlag: false, + }, + // assert select statement in stale transaction + { + sql: fmt.Sprintf("start transaction read only as of timestamp '%v'", time1), + hasStaleFlag: false, + }, + { + sql: "select * from t", + hasStaleFlag: true, + }, + { + sql: "commit", + hasStaleFlag: false, + }, + // assert select statement after set transaction + { + sql: fmt.Sprintf("set transaction read only as of timestamp '%v'", time1), + hasStaleFlag: false, + 
},
+ {
+ sql: "select * from t",
+ hasStaleFlag: true,
+ },
+ // assert select statement after consumed set transaction
+ {
+ sql: "select * from t",
+ hasStaleFlag: false,
+ },
+ // assert prepare statement with select as of statement
+ {
+ sql: fmt.Sprintf(`prepare p from 'select * from t as of timestamp "%v"'`, time1),
+ hasStaleFlag: false,
+ },
+ // assert execute statement with select as of statement
+ {
+ sql: "execute p",
+ hasStaleFlag: true,
+ },
+ // assert prepare common select statement
+ {
+ sql: "prepare p1 from 'select * from t'",
+ hasStaleFlag: false,
+ },
+ {
+ sql: "execute p1",
+ hasStaleFlag: false,
+ },
+ // assert execute select statement in stale transaction
+ {
+ sql: fmt.Sprintf("start transaction read only as of timestamp '%v'", time1),
+ hasStaleFlag: false,
+ },
+ {
+ sql: "execute p1",
+ hasStaleFlag: true,
+ },
+ {
+ sql: "commit",
+ hasStaleFlag: false,
+ },
+ }
+
+ for _, testcase := range testcases {
+ failpoint.Enable("github.com/pingcap/tidb/executor/assertStmtCtxIsStaleness",
+ fmt.Sprintf("return(%v)", testcase.hasStaleFlag))
+ tk.MustExec(testcase.sql)
+ failpoint.Disable("github.com/pingcap/tidb/executor/assertStmtCtxIsStaleness")
+ // assert stale read flag should be false after each statement execution
+ c.Assert(tk.Se.GetSessionVars().StmtCtx.IsStaleness, IsFalse)
+ }
+}
diff --git a/executor/tiflash_test.go b/executor/tiflash_test.go
index 1c5d092021db4..ce63c2df527f2 100644
--- a/executor/tiflash_test.go
+++ b/executor/tiflash_test.go
@@ -14,6 +14,7 @@ package executor_test
 import (
+ "bytes"
 "fmt"
 "math/rand"
 "strings"
@@ -855,3 +856,38 @@ func (s *tiflashTestSuite) TestTiFlashPartitionTableBroadcastJoin(c *C) {
 }
 }
 }
+
+func (s *tiflashTestSuite) TestForbidTiflashDuringStaleRead(c *C) {
+ tk := testkit.NewTestKit(c, s.store)
+ tk.MustExec("use test")
+ tk.MustExec("drop table if exists t")
+ tk.MustExec("create table t(a bigint(20))")
+ tk.MustExec("alter table t set tiflash replica 1")
+ tb := testGetTableByName(c, tk.Se, "test", "t")
+ err := domain.GetDomain(tk.Se).DDL().UpdateTableReplicaInfo(tk.Se, tb.Meta().ID, true)
+ c.Assert(err, IsNil)
+ time.Sleep(2 * time.Second)
+ tk.MustExec("insert into t values (9223372036854775807)")
+ tk.MustExec("insert into t values (9223372036854775807)")
+ tk.MustExec("insert into t values (9223372036854775807)")
+ tk.MustExec("insert into t values (9223372036854775807)")
+ tk.MustExec("insert into t values (9223372036854775807)")
+ tk.MustExec("insert into t values (9223372036854775807)")
+ rows := tk.MustQuery("explain select avg(a) from t").Rows()
+ resBuff := bytes.NewBufferString("")
+ for _, row := range rows {
+ fmt.Fprintf(resBuff, "%s\n", row)
+ }
+ res := resBuff.String()
+ c.Assert(strings.Contains(res, "tiflash"), IsTrue)
+ c.Assert(strings.Contains(res, "tikv"), IsFalse)
+ tk.MustExec("set transaction read only as of timestamp now(1)")
+ rows = tk.MustQuery("explain select avg(a) from t").Rows()
+ resBuff = bytes.NewBufferString("")
+ for _, row := range rows {
+ fmt.Fprintf(resBuff, "%s\n", row)
+ }
+ res = resBuff.String()
+ c.Assert(strings.Contains(res, "tiflash"), IsFalse)
+ c.Assert(strings.Contains(res, "tikv"), IsTrue)
+}
diff --git a/planner/core/common_plans.go b/planner/core/common_plans.go
index a213aa7afa104..7747f751a407e 100644
--- a/planner/core/common_plans.go
+++ b/planner/core/common_plans.go
@@ -270,6 +270,7 @@ func (e *Execute) OptimizePreparedPlan(ctx context.Context, sctx sessionctx.Cont
 if err != nil {
 return errors.Trace(err)
 }
+
sctx.GetSessionVars().StmtCtx.IsStaleness = true } if prepared.SchemaVersion != is.SchemaMetaVersion() { // In order to avoid some correctness issues, we have to clear the diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 6a7e5caf7d392..379acb99e9a2c 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -1086,6 +1086,10 @@ func getPossibleAccessPaths(ctx sessionctx.Context, tableHints *tableHintInfo, i } available = removeIgnoredPaths(available, ignored, tblInfo) + if ctx.GetSessionVars().StmtCtx.IsStaleness { + // skip tiflash if the statement is for stale read until tiflash support stale read + available = removeTiflashDuringStaleRead(available) + } // If we have got "FORCE" or "USE" index hint but got no available index, // we have to use table scan. @@ -1141,6 +1145,17 @@ func removeIgnoredPaths(paths, ignoredPaths []*util.AccessPath, tblInfo *model.T return remainedPaths } +func removeTiflashDuringStaleRead(paths []*util.AccessPath) []*util.AccessPath { + n := 0 + for _, path := range paths { + if path.StoreType != kv.TiFlash { + paths[n] = path + n++ + } + } + return paths[:n] +} + func (b *PlanBuilder) buildSelectLock(src LogicalPlan, lock *ast.SelectLockInfo) (*LogicalLock, error) { selectLock := LogicalLock{ Lock: lock, diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index 31a43b0c3da06..179103a746408 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -1493,6 +1493,9 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { } txnCtx := p.ctx.GetSessionVars().TxnCtx p.TxnScope = txnCtx.TxnScope + // It means we meet following case: + // 1. start transaction read only as of timestamp ts + // 2. select statement if txnCtx.IsStaleness { p.LastSnapshotTS = txnCtx.StartTS p.ExplicitStaleness = txnCtx.IsStaleness @@ -1509,6 +1512,9 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { p.err = ErrAsOf.FastGenWithCause("can't use select as of while already set transaction as of") return } + // it means we meet following case: + // 1. set transaction read only as of timestamp ts + // 2. select statement if !p.initedLastSnapshotTS { p.SnapshotTSEvaluator = func(sessionctx.Context) (uint64, error) { return ts, nil @@ -1522,6 +1528,8 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { if p.err != nil { return } + // It means we meet following case: + // select statement with as of timestamp if !p.initedLastSnapshotTS { p.SnapshotTSEvaluator = func(ctx sessionctx.Context) (uint64, error) { return calculateTsExpr(ctx, node) @@ -1542,6 +1550,9 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { } p.ExplicitStaleness = true } + if p.flag&inPrepare == 0 { + p.ctx.GetSessionVars().StmtCtx.IsStaleness = p.ExplicitStaleness + } p.initedLastSnapshotTS = true } diff --git a/sessionctx/stmtctx/stmtctx.go b/sessionctx/stmtctx/stmtctx.go index 23fc0f52664f6..23031506cfc3e 100644 --- a/sessionctx/stmtctx/stmtctx.go +++ b/sessionctx/stmtctx/stmtctx.go @@ -86,6 +86,7 @@ type StatementContext struct { IgnoreNoPartition bool OptimDependOnMutableConst bool IgnoreExplainIDSuffix bool + IsStaleness bool // mu struct holds variables that change during execution. 
mu struct { From 8f54e03c9d21cfd7dbeda31bee098e71fa0e46ec Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Tue, 20 Jul 2021 15:15:35 +0800 Subject: [PATCH 36/38] =?UTF-8?q?expression:=20prevent=20function=20of=20c?= =?UTF-8?q?astYearAsTime=20from=20pushing=20down=20to=20Ti=E2=80=A6=20(#25?= =?UTF-8?q?975)=20(#26370)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- expression/expression.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/expression/expression.go b/expression/expression.go index 6a456329bd0ce..795661b98df62 100644 --- a/expression/expression.go +++ b/expression/expression.go @@ -1026,7 +1026,10 @@ func scalarExprSupportedByFlash(function *ScalarFunction) bool { } case ast.Cast: switch function.Function.PbCode() { - case tipb.ScalarFuncSig_CastIntAsInt, tipb.ScalarFuncSig_CastIntAsReal, tipb.ScalarFuncSig_CastIntAsDecimal, tipb.ScalarFuncSig_CastIntAsString, tipb.ScalarFuncSig_CastIntAsTime, + case tipb.ScalarFuncSig_CastIntAsTime: + // ban the function of casting year type as time type pushing down to tiflash because of https://github.com/pingcap/tidb/issues/26215 + return function.GetArgs()[0].GetType().Tp != mysql.TypeYear + case tipb.ScalarFuncSig_CastIntAsInt, tipb.ScalarFuncSig_CastIntAsReal, tipb.ScalarFuncSig_CastIntAsDecimal, tipb.ScalarFuncSig_CastIntAsString, tipb.ScalarFuncSig_CastRealAsInt, tipb.ScalarFuncSig_CastRealAsReal, tipb.ScalarFuncSig_CastRealAsDecimal, tipb.ScalarFuncSig_CastRealAsString, tipb.ScalarFuncSig_CastRealAsTime, tipb.ScalarFuncSig_CastStringAsInt, tipb.ScalarFuncSig_CastStringAsReal, tipb.ScalarFuncSig_CastStringAsDecimal, tipb.ScalarFuncSig_CastStringAsString, tipb.ScalarFuncSig_CastStringAsTime, tipb.ScalarFuncSig_CastDecimalAsInt /*, tipb.ScalarFuncSig_CastDecimalAsReal*/, tipb.ScalarFuncSig_CastDecimalAsDecimal, tipb.ScalarFuncSig_CastDecimalAsString, tipb.ScalarFuncSig_CastDecimalAsTime, From b1f47e741868853642b55cef215a3b42eb2e0380 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Tue, 20 Jul 2021 16:39:35 +0800 Subject: [PATCH 37/38] executor: reject setting read ts to a future time (#25732) (#25763) --- executor/executor_test.go | 15 +++++++++++++ executor/set.go | 22 +++++++++++++------ executor/stale_txn_test.go | 23 ++++++++++++++++++++ metrics/metrics.go | 1 + metrics/session.go | 8 +++++++ planner/core/planbuilder.go | 3 +++ planner/core/preprocess.go | 5 +++++ sessionctx/context.go | 42 +++++++++++++++++++++++++++++++++++++ 8 files changed, 113 insertions(+), 6 deletions(-) diff --git a/executor/executor_test.go b/executor/executor_test.go index f280f19c9cfbd..b5c75f917f40a 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -2692,6 +2692,12 @@ func (s *testSuiteP2) TestHistoryRead(c *C) { // SnapshotTS Is not updated if check failed. c.Assert(tk.Se.GetSessionVars().SnapshotTS, Equals, uint64(0)) + // Setting snapshot to a time in the future will fail. (One day before the 2038 problem) + _, err = tk.Exec("set @@tidb_snapshot = '2038-01-18 03:14:07'") + c.Assert(err, ErrorMatches, "cannot set read timestamp to a future time") + // SnapshotTS Is not updated if check failed. 
+ c.Assert(tk.Se.GetSessionVars().SnapshotTS, Equals, uint64(0)) + curVer1, _ := s.store.CurrentVersion(kv.GlobalTxnScope) time.Sleep(time.Millisecond) snapshotTime := time.Now() @@ -2757,6 +2763,15 @@ func (s *testSuite2) TestLowResolutionTSORead(c *C) { tk.MustQuery("select * from low_resolution_tso").Check(testkit.Rows("2")) } +func (s *testSuite2) TestStaleReadFutureTime(c *C) { + tk := testkit.NewTestKit(c, s.store) + // Setting tx_read_ts to a time in the future will fail. (One day before the 2038 problem) + _, err := tk.Exec("set @@tx_read_ts = '2038-01-18 03:14:07'") + c.Assert(err, ErrorMatches, "cannot set read timestamp to a future time") + // TxnReadTS Is not updated if check failed. + c.Assert(tk.Se.GetSessionVars().TxnReadTS.PeakTxnReadTS(), Equals, uint64(0)) +} + func (s *testSuite) TestScanControlSelection(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") diff --git a/executor/set.go b/executor/set.go index 5fee1a114abf1..4490bfaba5177 100644 --- a/executor/set.go +++ b/executor/set.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/expression" "github.com/pingcap/tidb/plugin" + "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/variable" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/collate" @@ -93,14 +94,14 @@ func (e *SetExecutor) Next(ctx context.Context, req *chunk.Chunk) error { continue } - if err := e.setSysVariable(name, v); err != nil { + if err := e.setSysVariable(ctx, name, v); err != nil { return err } } return nil } -func (e *SetExecutor) setSysVariable(name string, v *expression.VarAssignment) error { +func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expression.VarAssignment) error { sessionVars := e.ctx.GetSessionVars() sysVar := variable.GetSysVar(name) if sysVar == nil { @@ -159,15 +160,24 @@ func (e *SetExecutor) setSysVariable(name string, v *expression.VarAssignment) e } newSnapshotTS := getSnapshotTSByName() newSnapshotIsSet := newSnapshotTS > 0 && newSnapshotTS != oldSnapshotTS - // We don't check snapshot with gc safe point for read_ts - // Client-go will automatically check the snapshotTS with gc safe point. It's unnecessary to check gc safe point during set executor. - if newSnapshotIsSet && name != variable.TiDBTxnReadTS { - err = gcutil.ValidateSnapshot(e.ctx, newSnapshotTS) + if newSnapshotIsSet { + if name == variable.TiDBTxnReadTS { + err = sessionctx.ValidateStaleReadTS(ctx, e.ctx, newSnapshotTS) + } else { + err = sessionctx.ValidateSnapshotReadTS(ctx, e.ctx, newSnapshotTS) + // Also check gc safe point for snapshot read. + // We don't check snapshot with gc safe point for read_ts + // Client-go will automatically check the snapshotTS with gc safe point. It's unnecessary to check gc safe point during set executor. 
+ if err == nil { + err = gcutil.ValidateSnapshot(e.ctx, newSnapshotTS) + } + } if err != nil { fallbackOldSnapshotTS() return err } } + err = e.loadSnapshotInfoSchemaIfNeeded(newSnapshotTS) if err != nil { fallbackOldSnapshotTS() diff --git a/executor/stale_txn_test.go b/executor/stale_txn_test.go index 79413097b78c5..7742cef8dfd22 100644 --- a/executor/stale_txn_test.go +++ b/executor/stale_txn_test.go @@ -950,12 +950,35 @@ func (s *testStaleTxnSuite) TestStaleReadTemporaryTable(c *C) { } } +func (s *testStaleTxnSuite) TestStaleReadFutureTime(c *C) { + tk := testkit.NewTestKit(c, s.store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + defer tk.MustExec("drop table if exists t") + tk.MustExec("create table t (id int)") + + // Setting tx_read_ts to a time in the future will fail. (One day before the 2038 problem) + _, err := tk.Exec("start transaction read only as of timestamp '2038-01-18 03:14:07'") + c.Assert(err, ErrorMatches, "cannot set read timestamp to a future time") + // Transaction should not be started and read ts should not be set if check fails + c.Assert(tk.Se.GetSessionVars().InTxn(), IsFalse) + c.Assert(tk.Se.GetSessionVars().TxnReadTS.PeakTxnReadTS(), Equals, uint64(0)) + + _, err = tk.Exec("set transaction read only as of timestamp '2038-01-18 03:14:07'") + c.Assert(err, ErrorMatches, "cannot set read timestamp to a future time") + c.Assert(tk.Se.GetSessionVars().TxnReadTS.PeakTxnReadTS(), Equals, uint64(0)) + + _, err = tk.Exec("select * from t as of timestamp '2038-01-18 03:14:07'") + c.Assert(err, ErrorMatches, "cannot set read timestamp to a future time") +} + func (s *testStaleTxnSerialSuite) TestStaleReadPrepare(c *C) { tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") tk.MustExec("drop table if exists t") defer tk.MustExec("drop table if exists t") tk.MustExec("create table t (id int)") + time.Sleep(2 * time.Second) conf := *config.GetGlobalConfig() oldConf := conf diff --git a/metrics/metrics.go b/metrics/metrics.go index 8cbca511c078e..efbb1229d5e32 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -128,6 +128,7 @@ func RegisterMetrics() { prometheus.MustRegister(StatementDeadlockDetectDuration) prometheus.MustRegister(StatementPessimisticRetryCount) prometheus.MustRegister(StatementLockKeysCount) + prometheus.MustRegister(ValidateReadTSFromPDCount) prometheus.MustRegister(UpdateSelfVersionHistogram) prometheus.MustRegister(UpdateStatsCounter) prometheus.MustRegister(WatchOwnerCounter) diff --git a/metrics/session.go b/metrics/session.go index 775ffc0f5c630..500dd17981efa 100644 --- a/metrics/session.go +++ b/metrics/session.go @@ -118,6 +118,14 @@ var ( Help: "Keys locking for a single statement", Buckets: prometheus.ExponentialBuckets(1, 2, 21), // 1 ~ 1048576 }) + + ValidateReadTSFromPDCount = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "tidb", + Subsystem: "session", + Name: "validate_read_ts_from_pd_count", + Help: "Counter of validating read ts by getting a timestamp from PD", + }) ) // Label constants. 
diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index 379acb99e9a2c..76619310a4272 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -2468,6 +2468,9 @@ func (b *PlanBuilder) buildSimple(ctx context.Context, node ast.StmtNode) (Plan, if err != nil { return nil, err } + if err := sessionctx.ValidateStaleReadTS(ctx, b.ctx, startTS); err != nil { + return nil, err + } p.StaleTxnStartTS = startTS } else if readTS > 0 { p.StaleTxnStartTS = readTS diff --git a/planner/core/preprocess.go b/planner/core/preprocess.go index 179103a746408..ab47a47d5f207 100644 --- a/planner/core/preprocess.go +++ b/planner/core/preprocess.go @@ -14,6 +14,7 @@ package core import ( + "context" "fmt" "math" "strings" @@ -1528,6 +1529,10 @@ func (p *preprocessor) handleAsOfAndReadTS(node *ast.AsOfClause) { if p.err != nil { return } + if err := sessionctx.ValidateStaleReadTS(context.Background(), p.ctx, ts); err != nil { + p.err = errors.Trace(err) + return + } // It means we meet following case: // select statement with as of timestamp if !p.initedLastSnapshotTS { diff --git a/sessionctx/context.go b/sessionctx/context.go index bd568c7e85f4b..b5b6ad3e5db94 100644 --- a/sessionctx/context.go +++ b/sessionctx/context.go @@ -16,11 +16,15 @@ package sessionctx import ( "context" "fmt" + "time" + "github.com/pingcap/errors" "github.com/pingcap/parser/model" "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/metrics" "github.com/pingcap/tidb/owner" "github.com/pingcap/tidb/sessionctx/variable" + "github.com/pingcap/tidb/store/tikv/oracle" "github.com/pingcap/tidb/util" "github.com/pingcap/tidb/util/kvcache" "github.com/pingcap/tidb/util/sli" @@ -149,3 +153,41 @@ const ( // LastExecuteDDL is the key for whether the session execute a ddl command last time. LastExecuteDDL basicCtxType = 3 ) + +// ValidateSnapshotReadTS strictly validates that readTS does not exceed the PD timestamp +func ValidateSnapshotReadTS(ctx context.Context, sctx Context, readTS uint64) error { + latestTS, err := sctx.GetStore().GetOracle().GetLowResolutionTimestamp(ctx, &oracle.Option{TxnScope: oracle.GlobalTxnScope}) + // If we fail to get latestTS or the readTS exceeds it, get a timestamp from PD to double check + if err != nil || readTS > latestTS { + metrics.ValidateReadTSFromPDCount.Inc() + currentVer, err := sctx.GetStore().CurrentVersion(oracle.GlobalTxnScope) + if err != nil { + return errors.Errorf("fail to validate read timestamp: %v", err) + } + if readTS > currentVer.Ver { + return errors.Errorf("cannot set read timestamp to a future time") + } + } + return nil +} + +// How far future from now ValidateStaleReadTS allows at most +const allowedTimeFromNow = 100 * time.Millisecond + +// ValidateStaleReadTS validates that readTS does not exceed the current time not strictly. 
+func ValidateStaleReadTS(ctx context.Context, sctx Context, readTS uint64) error { + currentTS, err := sctx.GetStore().GetOracle().GetStaleTimestamp(ctx, oracle.GlobalTxnScope, 0) + // If we fail to calculate currentTS from local time, fallback to get a timestamp from PD + if err != nil { + metrics.ValidateReadTSFromPDCount.Inc() + currentVer, err := sctx.GetStore().CurrentVersion(oracle.GlobalTxnScope) + if err != nil { + return errors.Errorf("fail to validate read timestamp: %v", err) + } + currentTS = currentVer.Ver + } + if oracle.GetTimeFromTS(readTS).After(oracle.GetTimeFromTS(currentTS).Add(allowedTimeFromNow)) { + return errors.Errorf("cannot set read timestamp to a future time") + } + return nil +} From 858fec55c7aa8e173810b303cd28e7e17f87d462 Mon Sep 17 00:00:00 2001 From: ti-srebot <66930949+ti-srebot@users.noreply.github.com> Date: Tue, 20 Jul 2021 17:29:35 +0800 Subject: [PATCH 38/38] topsql: update tipb for topsql (#25801) (#25811) --- go.mod | 2 +- go.sum | 4 ++-- util/topsql/reporter/client.go | 8 ++++---- util/topsql/reporter/reporter_test.go | 24 ++++++++++++------------ util/topsql/topsql_test.go | 6 +++--- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index 376c3a8ba9099..fb478da70ad28 100644 --- a/go.mod +++ b/go.mod @@ -48,7 +48,7 @@ require ( github.com/pingcap/parser v0.0.0-20210618053735-57843e8185c4 github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3 github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible - github.com/pingcap/tipb v0.0.0-20210603161937-cfb5a9225f95 + github.com/pingcap/tipb v0.0.0-20210628060001-1793e022b962 github.com/prometheus/client_golang v1.5.1 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.9.1 diff --git a/go.sum b/go.sum index bde20ee3e2244..f4172e9e02faf 100644 --- a/go.sum +++ b/go.sum @@ -447,8 +447,8 @@ github.com/pingcap/sysutil v0.0.0-20210315073920-cc0985d983a3/go.mod h1:tckvA041 github.com/pingcap/tidb-dashboard v0.0.0-20210312062513-eef5d6404638/go.mod h1:OzFN8H0EDMMqeulPhPMw2i2JaiZWOKFQ7zdRPhENNgo= github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible h1:ceznmu/lLseGHP/jKyOa/3u/5H3wtLLLqkH2V3ssSjg= github.com/pingcap/tidb-tools v4.0.9-0.20201127090955-2707c97b3853+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM= -github.com/pingcap/tipb v0.0.0-20210603161937-cfb5a9225f95 h1:Cj7FhGvYn8hrXDNcaHi0aTl0KdV67KTL+P5gBp3vqT4= -github.com/pingcap/tipb v0.0.0-20210603161937-cfb5a9225f95/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs= +github.com/pingcap/tipb v0.0.0-20210628060001-1793e022b962 h1:9Y9Eci9LwAEhyXAlAU0bSix7Nemm3G267oyN3GVK+j0= +github.com/pingcap/tipb v0.0.0-20210628060001-1793e022b962/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= diff --git a/util/topsql/reporter/client.go b/util/topsql/reporter/client.go index 87504d646902b..41d71400c3959 100644 --- a/util/topsql/reporter/client.go +++ b/util/topsql/reporter/client.go @@ -110,10 +110,10 @@ func (r *GRPCReportClient) sendBatchCPUTimeRecord(ctx context.Context, records m } for _, record := range records { 
record := &tipb.CPUTimeRecord{
- TimestampList: record.TimestampList,
- CpuTimeMsList: record.CPUTimeMsList,
- SqlDigest: record.SQLDigest,
- PlanDigest: record.PlanDigest,
+ RecordListTimestampSec: record.TimestampList,
+ RecordListCpuTimeMs: record.CPUTimeMsList,
+ SqlDigest: record.SQLDigest,
+ PlanDigest: record.PlanDigest,
 }
 if err := stream.Send(record); err != nil {
 return err
diff --git a/util/topsql/reporter/reporter_test.go b/util/topsql/reporter/reporter_test.go
index 7e4d2b2ddc9b5..8daa6997cdb2d 100644
--- a/util/topsql/reporter/reporter_test.go
+++ b/util/topsql/reporter/reporter_test.go
@@ -111,13 +111,13 @@ func (s *testTopSQLReporter) TestCollectAndSendBatch(c *C) {
 c.Assert(err, IsNil)
 id = n
 }
- c.Assert(req.CpuTimeMsList, HasLen, 1)
- for i := range req.CpuTimeMsList {
- c.Assert(req.CpuTimeMsList[i], Equals, uint32(id))
+ c.Assert(req.RecordListCpuTimeMs, HasLen, 1)
+ for i := range req.RecordListCpuTimeMs {
+ c.Assert(req.RecordListCpuTimeMs[i], Equals, uint32(id))
 }
- c.Assert(req.TimestampList, HasLen, 1)
- for i := range req.TimestampList {
- c.Assert(req.TimestampList[i], Equals, uint64(1))
+ c.Assert(req.RecordListTimestampSec, HasLen, 1)
+ for i := range req.RecordListTimestampSec {
+ c.Assert(req.RecordListTimestampSec[i], Equals, uint64(1))
 }
 normalizedSQL, exist := agentServer.GetSQLMetaByDigestBlocking(req.SqlDigest, time.Second)
 c.Assert(exist, IsTrue)
@@ -152,13 +152,13 @@ func (s *testTopSQLReporter) TestCollectAndEvicted(c *C) {
 id = n
 }
 c.Assert(id >= maxSQLNum, IsTrue)
- c.Assert(req.CpuTimeMsList, HasLen, 1)
- for i := range req.CpuTimeMsList {
- c.Assert(req.CpuTimeMsList[i], Equals, uint32(id))
+ c.Assert(req.RecordListCpuTimeMs, HasLen, 1)
+ for i := range req.RecordListCpuTimeMs {
+ c.Assert(req.RecordListCpuTimeMs[i], Equals, uint32(id))
 }
- c.Assert(req.TimestampList, HasLen, 1)
- for i := range req.TimestampList {
- c.Assert(req.TimestampList[i], Equals, uint64(2))
+ c.Assert(req.RecordListTimestampSec, HasLen, 1)
+ for i := range req.RecordListTimestampSec {
+ c.Assert(req.RecordListTimestampSec[i], Equals, uint64(2))
 }
 normalizedSQL, exist := agentServer.GetSQLMetaByDigestBlocking(req.SqlDigest, time.Second)
 c.Assert(exist, IsTrue)
diff --git a/util/topsql/topsql_test.go b/util/topsql/topsql_test.go
index 0663a4f307b95..88748e110cff9 100644
--- a/util/topsql/topsql_test.go
+++ b/util/topsql/topsql_test.go
@@ -167,9 +167,9 @@ func (s *testSuite) TestTopSQLReporter(c *C) {
 records := server.GetLatestRecords()
 checkSQLPlanMap := map[string]struct{}{}
 for _, req := range records {
- c.Assert(len(req.CpuTimeMsList) > 0, IsTrue)
- c.Assert(req.CpuTimeMsList[0] > 0, IsTrue)
- c.Assert(req.TimestampList[0] > 0, IsTrue)
+ c.Assert(len(req.RecordListCpuTimeMs) > 0, IsTrue)
+ c.Assert(req.RecordListCpuTimeMs[0] > 0, IsTrue)
+ c.Assert(req.RecordListTimestampSec[0] > 0, IsTrue)
 normalizedSQL, exist := server.GetSQLMetaByDigestBlocking(req.SqlDigest, time.Second)
 c.Assert(exist, IsTrue)
 expectedNormalizedSQL, exist := sqlMap[string(req.SqlDigest)]