From b66a85cf14792f2455145a93e6608d4c4bcc3b8d Mon Sep 17 00:00:00 2001 From: Hangjie Mo Date: Tue, 7 May 2024 12:43:06 +0800 Subject: [PATCH] *: global index support `index_merge` and `mem_index_merge` (#52971) close pingcap/tidb#43013 --- pkg/executor/builder.go | 16 ++ pkg/executor/index_merge_reader.go | 79 +++--- pkg/executor/mem_reader.go | 183 ++++++-------- pkg/kv/key.go | 4 +- pkg/planner/core/find_best_task.go | 22 +- pkg/planner/core/indexmerge_path.go | 29 +-- .../r/executor/partition/global_index.result | 7 +- .../r/globalindex/mem_index_merge.result | 237 ++++++++++++++++++ .../t/globalindex/mem_index_merge.test | 134 ++++++++++ tests/realtikvtest/flashbacktest/BUILD.bazel | 2 +- 10 files changed, 538 insertions(+), 175 deletions(-) create mode 100644 tests/integrationtest/r/globalindex/mem_index_merge.result create mode 100644 tests/integrationtest/t/globalindex/mem_index_merge.test diff --git a/pkg/executor/builder.go b/pkg/executor/builder.go index cf0b14538666b..49428a19c2ba1 100644 --- a/pkg/executor/builder.go +++ b/pkg/executor/builder.go @@ -1355,6 +1355,7 @@ func (b *executorBuilder) buildUnionScanFromReader(reader exec.Executor, v *plan } } } + us.partitionIDMap = x.partitionIDMap us.conditions, us.conditionsWithVirCol = plannercore.SplitSelCondsWithVirtualColumn(v.Conditions) us.columns = x.columns us.table = x.table @@ -3848,6 +3849,7 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd ts := v.TablePlans[0].(*plannercore.PhysicalTableScan) isCorColInPartialFilters := make([]bool, 0, partialPlanCount) isCorColInPartialAccess := make([]bool, 0, partialPlanCount) + hasGlobalIndex := false for i := 0; i < partialPlanCount; i++ { var tempReq *tipb.DAGRequest var err error @@ -3856,6 +3858,9 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd tempReq, err = buildIndexReq(b.ctx, is.Index.Columns, ts.HandleCols.NumCols(), v.PartialPlans[i]) descs = append(descs, is.Desc) indexes = append(indexes, is.Index) + if is.Index.Global { + hasGlobalIndex = true + } } else { ts := v.PartialPlans[i][0].(*plannercore.PhysicalTableScan) tempReq, _, err = buildTableReq(b, len(ts.Columns), v.PartialPlans[i]) @@ -3913,6 +3918,7 @@ func buildNoRangeIndexMergeReader(b *executorBuilder, v *plannercore.PhysicalInd byItems: v.ByItems, pushedLimit: v.PushedLimit, keepOrder: v.KeepOrder, + hasGlobalIndex: hasGlobalIndex, } collectTable := false e.tableRequest.CollectRangeCounts = &collectTable @@ -3954,10 +3960,14 @@ func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMerg } ret.ranges = make([][]*ranger.Range, 0, len(v.PartialPlans)) sctx := b.ctx.GetSessionVars().StmtCtx + hasGlobalIndex := false for i := 0; i < len(v.PartialPlans); i++ { if is, ok := v.PartialPlans[i][0].(*plannercore.PhysicalIndexScan); ok { ret.ranges = append(ret.ranges, is.Ranges) sctx.IndexNames = append(sctx.IndexNames, is.Table.Name.O+":"+is.Index.Name.O) + if is.Index.Global { + hasGlobalIndex = true + } } else { ret.ranges = append(ret.ranges, v.PartialPlans[i][0].(*plannercore.PhysicalTableScan).Ranges) if ret.table.Meta().IsCommonHandle { @@ -3984,6 +3994,12 @@ func (b *executorBuilder) buildIndexMergeReader(v *plannercore.PhysicalIndexMerg return nil } ret.partitionTableMode, ret.prunedPartitions = true, partitions + if hasGlobalIndex { + ret.partitionIDMap = make(map[int64]struct{}) + for _, p := range partitions { + ret.partitionIDMap[p.GetPhysicalID()] = struct{}{} + } + } return ret } diff --git 
a/pkg/executor/index_merge_reader.go b/pkg/executor/index_merge_reader.go
index b5013cf60f25f..7930f53dd8738 100644
--- a/pkg/executor/index_merge_reader.go
+++ b/pkg/executor/index_merge_reader.go
@@ -103,12 +103,14 @@ type IndexMergeReaderExecutor struct {
 	// columns are only required by union scan.
 	columns []*model.ColumnInfo
+	// partitionIDMap is only required by union scan with a global index.
+	partitionIDMap map[int64]struct{}
 	*dataReaderBuilder
 	// fields about accessing partition tables
 	partitionTableMode bool                  // if this IndexMerge is accessing a partition table
 	prunedPartitions   []table.PhysicalTable // pruned partition tables need to access
-	partitionKeyRanges [][][]kv.KeyRange     // [partitionIdx][partialIndex][ranges]
+	partitionKeyRanges [][][]kv.KeyRange     // [partialIndex][partitionIdx][ranges]

 	// All fields above are immutable.

@@ -127,9 +129,6 @@ type IndexMergeReaderExecutor struct {
 	memTracker *memory.Tracker
 	paging     bool

-	// checkIndexValue is used to check the consistency of the index data.
-	*checkIndexValue // nolint:unused
-
 	partialPlans        [][]base.PhysicalPlan
 	tblPlans            []base.PhysicalPlan
 	partialNetDataSizes []float64
@@ -146,6 +145,8 @@ type IndexMergeReaderExecutor struct {

 	// Whether it's intersection or union.
 	isIntersection bool
+
+	hasGlobalIndex bool
 }

 type indexMergeTableTask struct {
@@ -182,12 +183,23 @@ func (e *IndexMergeReaderExecutor) Open(_ context.Context) (err error) {
 			return err
 		}
 	} else {
-		e.partitionKeyRanges = make([][][]kv.KeyRange, len(e.prunedPartitions))
+		e.partitionKeyRanges = make([][][]kv.KeyRange, len(e.indexes))
+		tmpPartitionKeyRanges := make([][][]kv.KeyRange, len(e.prunedPartitions))
 		for i, p := range e.prunedPartitions {
-			if e.partitionKeyRanges[i], err = e.buildKeyRangesForTable(p); err != nil {
+			if tmpPartitionKeyRanges[i], err = e.buildKeyRangesForTable(p); err != nil {
 				return err
 			}
 		}
+		for i, idx := range e.indexes {
+			if idx != nil && idx.Global {
+				keyRange, _ := distsql.IndexRangesToKVRanges(e.ctx.GetDistSQLCtx(), e.table.Meta().ID, idx.ID, e.ranges[i])
+				e.partitionKeyRanges[i] = [][]kv.KeyRange{keyRange.FirstPartitionRange()}
+			} else {
+				for _, pKeyRanges := range tmpPartitionKeyRanges {
+					e.partitionKeyRanges[i] = append(e.partitionKeyRanges[i], pKeyRanges[i])
+				}
+			}
+		}
 	}
 	e.finished = make(chan struct{})
 	e.resultCh = make(chan *indexMergeTableTask, atomic.LoadInt32(&LookupTableTaskChannelSize))
@@ -328,13 +340,10 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
 	var keyRanges [][]kv.KeyRange
 	if e.partitionTableMode {
-		for _, pKeyRanges := range e.partitionKeyRanges { // get all keyRanges related to this PartialIndex
-			keyRanges = append(keyRanges, pKeyRanges[workID])
-		}
+		keyRanges = e.partitionKeyRanges[workID]
 	} else {
 		keyRanges = [][]kv.KeyRange{e.keyRanges[workID]}
 	}
-
 	failpoint.Inject("startPartialIndexWorkerErr", func() error {
 		return errors.New("inject an error before start partialIndexWorker")
 	})
@@ -376,7 +385,6 @@ func (e *IndexMergeReaderExecutor) startPartialIndexWorker(ctx context.Context,
 			return
 		}
 	}
-
 	var builder distsql.RequestBuilder
 	builder.SetDAGRequest(e.dagPBs[workID]).
 		SetStartTS(e.startTS).
@@ -1191,7 +1199,7 @@ func (w *indexMergeProcessWorker) fetchLoopUnion(ctx context.Context, fetchCh <-
 	if w.indexMerge.pushedLimit != nil {
 		pushedLimit = w.indexMerge.pushedLimit.Clone()
 	}
-	distinctHandles := make(map[int64]*kv.HandleMap)
+	hMap := kv.NewHandleMap()
 	for {
 		var ok bool
 		var task *indexMergeTableTask
@@ -1223,19 +1231,12 @@ func (w *indexMergeProcessWorker) fetchLoopUnion(ctx context.Context, fetchCh <-
 		fhs := make([]kv.Handle, 0, 8)

 		memTracker.Consume(int64(cap(task.handles) * 8))
-
-		var tblID int64
-		if w.indexMerge.partitionTableMode {
-			tblID = getPhysicalTableID(task.partitionTable)
-		} else {
-			tblID = getPhysicalTableID(w.indexMerge.table)
-		}
-		if _, ok := distinctHandles[tblID]; !ok {
-			distinctHandles[tblID] = kv.NewHandleMap()
-		}
-		hMap := distinctHandles[tblID]
-
 		for _, h := range handles {
+			if w.indexMerge.partitionTableMode {
+				if _, ok := h.(kv.PartitionHandle); !ok {
+					h = kv.NewPartitionHandle(task.partitionTable.GetPhysicalID(), h)
+				}
+			}
 			if _, ok := hMap.Get(h); !ok {
 				fhs = append(fhs, h)
 				hMap.Set(h, true)
@@ -1359,6 +1360,8 @@ type intersectionProcessWorker struct {
 	// When rowDelta == memConsumeBatchSize, Consume(memUsage)
 	rowDelta      int64
 	mapUsageDelta int64
+
+	partitionIDMap map[int64]int
 }

 func (w *intersectionProcessWorker) consumeMemDelta() {
@@ -1380,9 +1383,20 @@ func (w *intersectionProcessWorker) doIntersectionPerPartition(ctx context.Conte
 		hMap = kv.NewMemAwareHandleMap[*int]()
 		w.handleMapsPerWorker[task.parTblIdx] = hMap
 	}
-	var mapDelta int64
-	var rowDelta int64
+	var mapDelta, rowDelta int64
 	for _, h := range task.handles {
+		if w.indexMerge.hasGlobalIndex {
+			if ph, ok := h.(kv.PartitionHandle); ok {
+				if v, exists := w.partitionIDMap[ph.PartitionID]; exists {
+					if hMap, ok = w.handleMapsPerWorker[v]; !ok {
+						hMap = kv.NewMemAwareHandleMap[*int]()
+						w.handleMapsPerWorker[v] = hMap
+					}
+				}
+			} else {
+				h = kv.NewPartitionHandle(task.partitionTable.GetPhysicalID(), h)
+			}
+		}
 		// Use *int to avoid Get() again.
 		if cntPtr, ok := hMap.Get(h); ok {
 			(*cntPtr)++
@@ -1525,7 +1539,8 @@ func (w *indexMergeProcessWorker) fetchLoopIntersection(ctx context.Context, fet
 	batchSize := w.indexMerge.Ctx().GetSessionVars().IndexLookupSize

 	partCnt := 1
-	if w.indexMerge.partitionTableMode {
+	// To avoid multi-threaded access to the handle map, we use only one worker for an index merge with a global index.
+	if w.indexMerge.partitionTableMode && !w.indexMerge.hasGlobalIndex {
 		partCnt = len(w.indexMerge.prunedPartitions)
 	}
 	workerCnt := min(partCnt, maxWorkerCnt)
@@ -1536,6 +1551,13 @@ func (w *indexMergeProcessWorker) fetchLoopIntersection(ctx context.Context, fet
 		}
 	})

+	partitionIDMap := make(map[int64]int)
+	if w.indexMerge.hasGlobalIndex {
+		for i, p := range w.indexMerge.prunedPartitions {
+			partitionIDMap[p.GetPhysicalID()] = i
+		}
+	}
+
 	workers := make([]*intersectionProcessWorker, 0, workerCnt)
 	var collectWorker *intersectionCollectWorker
 	wg := util.WaitGroupWrapper{}
@@ -1566,6 +1588,7 @@ func (w *indexMergeProcessWorker) fetchLoopIntersection(ctx context.Context, fet
 			indexMerge:     w.indexMerge,
 			memTracker:     tracker,
 			batchSize:      batchSize,
+			partitionIDMap: partitionIDMap,
 		}
 		wg.RunWithRecover(func() {
 			defer trace.StartRegion(ctx, "IndexMergeIntersectionProcessWorker").End()
@@ -1692,7 +1715,7 @@ func (w *partialIndexWorker) needPartitionHandle() (bool, error) {
 	if needPartitionHandle && !hasExtraCol {
 		return needPartitionHandle, errors.Errorf("Internal error, needPartitionHandle != ret")
 	}
-	return needPartitionHandle, nil
+	return needPartitionHandle || (col.ID == model.ExtraPidColID), nil
 }

 func (w *partialIndexWorker) fetchHandles(
diff --git a/pkg/executor/mem_reader.go b/pkg/executor/mem_reader.go
index 6363f0fb63ac9..1f32e6b3654e8 100644
--- a/pkg/executor/mem_reader.go
+++ b/pkg/executor/mem_reader.go
@@ -841,13 +841,14 @@ func buildMemIndexMergeReader(ctx context.Context, us *UnionScanExec, indexMerge
 	} else {
 		outputOffset := []int{len(indexMergeReader.indexes[i].Columns)}
 		memReaders = append(memReaders, &memIndexReader{
-			ctx:           us.Ctx(),
-			index:         indexMergeReader.indexes[i],
-			table:         indexMergeReader.table.Meta(),
-			kvRanges:      nil,
-			compareExec:   compareExec{desc: indexMergeReader.descs[i]},
-			retFieldTypes: exec.RetTypes(us),
-			outputOffset:  outputOffset,
+			ctx:            us.Ctx(),
+			index:          indexMergeReader.indexes[i],
+			table:          indexMergeReader.table.Meta(),
+			kvRanges:       nil,
+			compareExec:    compareExec{desc: indexMergeReader.descs[i]},
+			retFieldTypes:  exec.RetTypes(us),
+			outputOffset:   outputOffset,
+			partitionIDMap: indexMergeReader.partitionIDMap,
 		})
 	}
 }
@@ -1015,49 +1016,78 @@ func (m *memIndexMergeReader) getMemRowsIter(ctx context.Context) (memRowsIter,
 	return &defaultRowsIter{data: data}, nil
 }

-func (m *memIndexMergeReader) getMemRows(ctx context.Context) ([][]types.Datum, error) {
-	r, ctx := tracing.StartRegionEx(ctx, "memIndexMergeReader.getMemRows")
-	defer r.End()
-	tbls := []table.Table{m.table}
-	// [partNum][indexNum][rangeNum]
-	var kvRanges [][][]kv.KeyRange
-	if m.partitionMode {
-		tbls = tbls[:0]
-		for _, p := range m.partitionTables {
-			tbls = append(tbls, p)
+func (m *memIndexMergeReader) getHandles() (handles []kv.Handle, err error) {
+	hMap := kv.NewHandleMap()
+	// Loop over each memReader and fill the handle map.
+	for i, reader := range m.memReaders {
+		// [partitionNum][rangeNum]
+		var readerKvRanges [][]kv.KeyRange
+		if m.partitionMode {
+			readerKvRanges = m.partitionKVRanges[i]
+		} else {
+			readerKvRanges = [][]kv.KeyRange{m.indexMergeReader.keyRanges[i]}
+		}
+		for j, kr := range readerKvRanges {
+			switch r := reader.(type) {
+			case *memTableReader:
+				r.kvRanges = kr
+			case *memIndexReader:
+				r.kvRanges = kr
+			default:
+				return nil, errors.New("memReader has to be memTableReader or memIndexReader")
+			}
+			handles, err := reader.getMemRowsHandle()
+			if err != nil {
+				return nil, err
+			}
+			// Count occurrences of each handle: duplicates are filtered for union, and counts are checked for intersection.
+			for _, handle := range handles {
+				if _, ok := handle.(kv.PartitionHandle); !ok && m.partitionMode {
+					pid := m.partitionTables[j].GetPhysicalID()
+					handle = kv.NewPartitionHandle(pid, handle)
+				}
+				if v, ok := hMap.Get(handle); !ok {
+					cnt := 1
+					hMap.Set(handle, &cnt)
+				} else {
+					*(v.(*int))++
+				}
+			}
+		}
 	}
-		kvRanges = m.partitionKVRanges
-	} else {
-		kvRanges = append(kvRanges, m.indexMergeReader.keyRanges)
-	}
-	if len(kvRanges) != len(tbls) {
-		return nil, errors.Errorf("length of tbls(size: %d) should be equals to length of kvRanges(size: %d)", len(tbls), len(kvRanges))
 	}
-	tblKVRanges := make([]kv.KeyRange, 0, 16)
-	numHandles := 0
-	var handles []kv.Handle
-	var err error
-	for i, tbl := range tbls {
+	// Process the handle map and return the handles that meet the requirement (union or intersection).
+	hMap.Range(func(h kv.Handle, val any) bool {
 		if m.isIntersection {
-			handles, err = m.intersectionHandles(kvRanges[i])
+			if *(val.(*int)) == len(m.memReaders) {
+				handles = append(handles, h)
+			}
 		} else {
-			handles, err = m.unionHandles(kvRanges[i])
-		}
-		if err != nil {
-			return nil, err
+			handles = append(handles, h)
 		}
-		if len(handles) == 0 {
-			continue
-		}
-		numHandles += len(handles)
-		ranges, _ := distsql.TableHandlesToKVRanges(getPhysicalTableID(tbl), handles)
-		tblKVRanges = append(tblKVRanges, ranges...)
+		return true
+	})
+
+	return handles, nil
+}
+
+func (m *memIndexMergeReader) getMemRows(ctx context.Context) ([][]types.Datum, error) {
+	r, ctx := tracing.StartRegionEx(ctx, "memIndexMergeReader.getMemRows")
+	defer r.End()
+
+	handles, err := m.getHandles()
+	if err != nil || len(handles) == 0 {
+		return nil, err
 	}
-	if numHandles == 0 {
-		return nil, nil
+	var tblKVRanges []kv.KeyRange
+	if m.partitionMode {
+		// The table ID is unused for partition handles, so pass 0 here.
+		tblKVRanges, _ = distsql.TableHandlesToKVRanges(0, handles)
+	} else {
+		tblKVRanges, _ = distsql.TableHandlesToKVRanges(getPhysicalTableID(m.table), handles)
 	}
+
 	colIDs, pkColIDs, rd := getColIDAndPkColIDs(m.ctx, m.table, m.columns)
 	memTblReader := &memTableReader{
@@ -1066,7 +1096,7 @@ func (m *memIndexMergeReader) getMemRows(ctx context.Context) ([][]types.Datum,
 		columns:       m.columns,
 		kvRanges:      tblKVRanges,
 		conditions:    m.conditions,
-		addedRows:     make([][]types.Datum, 0, numHandles),
+		addedRows:     make([][]types.Datum, 0, len(handles)),
 		retFieldTypes: m.retFieldTypes,
 		colIDs:        colIDs,
 		pkColIDs:      pkColIDs,
@@ -1096,75 +1126,6 @@ func (m *memIndexMergeReader) getMemRows(ctx context.Context) ([][]types.Datum,
 	return rows, err
 }

-// Union all handles of all partial paths.
-func (m *memIndexMergeReader) unionHandles(kvRanges [][]kv.KeyRange) (finalHandles []kv.Handle, err error) {
-	if len(m.memReaders) != len(kvRanges) {
-		return nil, errors.Errorf("len(kvRanges) should be equal to len(memReaders)")
-	}
-
-	hMap := kv.NewHandleMap()
-	var handles []kv.Handle
-	for i, reader := range m.memReaders {
-		switch r := reader.(type) {
-		case *memTableReader:
-			r.kvRanges = kvRanges[i]
-		case *memIndexReader:
-			r.kvRanges = kvRanges[i]
-		default:
-			return nil, errors.New("memReader have to be memTableReader or memIndexReader")
-		}
-		if handles, err = reader.getMemRowsHandle(); err != nil {
-			return nil, err
-		}
-		// Filter same row.
-		for _, h := range handles {
-			if _, ok := hMap.Get(h); !ok {
-				finalHandles = append(finalHandles, h)
-				hMap.Set(h, true)
-			}
-		}
-	}
-	return finalHandles, nil
-}
-
-// Intersect handles of each partial paths.
-func (m *memIndexMergeReader) intersectionHandles(kvRanges [][]kv.KeyRange) (finalHandles []kv.Handle, err error) {
-	if len(m.memReaders) != len(kvRanges) {
-		return nil, errors.Errorf("len(kvRanges) should be equal to len(memReaders)")
-	}
-
-	hMap := kv.NewHandleMap()
-	var handles []kv.Handle
-	for i, reader := range m.memReaders {
-		switch r := reader.(type) {
-		case *memTableReader:
-			r.kvRanges = kvRanges[i]
-		case *memIndexReader:
-			r.kvRanges = kvRanges[i]
-		default:
-			return nil, errors.New("memReader have to be memTableReader or memIndexReader")
-		}
-		if handles, err = reader.getMemRowsHandle(); err != nil {
-			return nil, err
-		}
-		for _, h := range handles {
-			if cntPtr, ok := hMap.Get(h); !ok {
-				cnt := 1
-				hMap.Set(h, &cnt)
-			} else {
-				*(cntPtr.(*int))++
-			}
-		}
-	}
-	hMap.Range(func(h kv.Handle, val any) bool {
-		if *(val.(*int)) == len(m.memReaders) {
-			finalHandles = append(finalHandles, h)
-		}
-		return true
-	})
-	return finalHandles, nil
-}
-
 func (*memIndexMergeReader) getMemRowsHandle() ([]kv.Handle, error) {
 	return nil, errors.New("getMemRowsHandle has not been implemented for memIndexMergeReader")
 }
diff --git a/pkg/kv/key.go b/pkg/kv/key.go
index 9b3479462a57c..d1ec7087d08af 100644
--- a/pkg/kv/key.go
+++ b/pkg/kv/key.go
@@ -553,9 +553,9 @@ func (m *HandleMap) Range(fn func(h Handle, val any) bool) {
 			return
 		}
 	}
-	for _, v := range m.partitionInts {
+	for pid, v := range m.partitionInts {
 		for h, val := range v {
-			if !fn(IntHandle(h), val) {
+			if !fn(NewPartitionHandle(pid, IntHandle(h)), val) {
 				return
 			}
 		}
diff --git a/pkg/planner/core/find_best_task.go b/pkg/planner/core/find_best_task.go
index 2531d1550a4c9..654ccdf94958b 100644
--- a/pkg/planner/core/find_best_task.go
+++ b/pkg/planner/core/find_best_task.go
@@ -1614,7 +1614,10 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c
 		if partPath.IsTablePath() {
 			scan = ds.convertToPartialTableScan(prop, partPath, candidate.isMatchProp, byItems)
 		} else {
-			scan = ds.convertToPartialIndexScan(prop, partPath, candidate.isMatchProp, byItems)
+			scan, err = ds.convertToPartialIndexScan(&cop.physPlanPartInfo, prop, partPath, candidate.isMatchProp, byItems)
+			if err != nil {
+				return invalidTask, err
+			}
 		}
 		scans = append(scans, scan)
 	}
@@ -1659,7 +1662,7 @@ func (ds *DataSource) convertToIndexMergeScan(prop *property.PhysicalProperty, c
 	return task, nil
 }

-func (ds *DataSource) convertToPartialIndexScan(prop *property.PhysicalProperty, path *util.AccessPath, matchProp bool, byItems []*util.ByItems) (indexPlan base.PhysicalPlan) {
+func (ds *DataSource) convertToPartialIndexScan(physPlanPartInfo *PhysPlanPartInfo, prop *property.PhysicalProperty, path *util.AccessPath, matchProp bool, byItems []*util.ByItems) (base.PhysicalPlan, error) {
 	is := ds.getOriginalPhysicalIndexScan(prop, path, matchProp, false)
 	// TODO: Consider using isIndexCoveringColumns() to avoid another TableRead
 	indexConds := path.IndexFilters
@@ -1670,6 +1673,14 @@ func (ds *DataSource) convertToPartialIndexScan(prop *property.PhysicalProperty,
 		// Add sort items for index scan for merge-sort operation between partitions.
 		is.ByItems = byItems
 	}
+
+	// Add a `Selection` for `IndexScan` with global index.
+	// It should be pushed down to TiKV; the DataSource schema doesn't contain the partition ID column.
+	indexConds, err := is.addSelectionConditionForGlobalIndex(ds, physPlanPartInfo, indexConds)
+	if err != nil {
+		return nil, err
+	}
+
 	if len(indexConds) > 0 {
 		var selectivity float64
 		if path.CountAfterAccess > 0 {
@@ -1683,10 +1694,9 @@
 		}
 		indexPlan := PhysicalSelection{Conditions: indexConds}.Init(is.SCtx(), stats, ds.QueryBlockOffset())
 		indexPlan.SetChildren(is)
-		return indexPlan
+		return indexPlan, nil
 	}
-	indexPlan = is
-	return indexPlan
+	return is, nil
 }

 func checkColinSchema(cols []*expression.Column, schema *expression.Schema) bool {
@@ -2265,7 +2275,7 @@ func (is *PhysicalIndexScan) addPushedDownSelection(copTask *CopTask, p *DataSou
 	copTask.rootTaskConds = append(copTask.rootTaskConds, newRootConds...)

 	// Add a `Selection` for `IndexScan` with global index.
-	// It should pushdown to TiKV, DataSource schema doesn't contain this column.
+	// It should be pushed down to TiKV; the DataSource schema doesn't contain the partition ID column.
 	indexConds, err := is.addSelectionConditionForGlobalIndex(p, &copTask.physPlanPartInfo, indexConds)
 	if err != nil {
 		return err
diff --git a/pkg/planner/core/indexmerge_path.go b/pkg/planner/core/indexmerge_path.go
index 2483e57f2c1ca..77c00ee489067 100644
--- a/pkg/planner/core/indexmerge_path.go
+++ b/pkg/planner/core/indexmerge_path.go
@@ -168,7 +168,7 @@ func (ds *DataSource) generateNormalIndexPartialPaths4DNF(
 		}
 		return false
 	})
-	partialPath := ds.buildIndexMergePartialPath(itemPaths)
+	partialPath := buildIndexMergePartialPath(itemPaths)
 	if partialPath == nil {
 		// for this dnf item, we couldn't generate an index merge partial path.
 		// (1 member of (a)) or (3 member of (b)) or d=1; if one dnf item like d=1 here could walk index path,
@@ -271,7 +271,7 @@ func (ds *DataSource) generateIndexMergeOrPaths(filters []expression.Expression)
 			}
 		}
 		// 2.1: trade off on countAfterAccess.
-		minCountAfterAccessPath := ds.buildIndexMergePartialPath(oneAlternativeSet)
+		minCountAfterAccessPath := buildIndexMergePartialPath(oneAlternativeSet)
 		indexCondsForP := minCountAfterAccessPath.AccessConds[:]
 		indexCondsForP = append(indexCondsForP, minCountAfterAccessPath.IndexFilters...)
 		if len(indexCondsForP) > 0 {
@@ -289,7 +289,7 @@ func (ds *DataSource) generateIndexMergeOrPaths(filters []expression.Expression)
 			sel = SelectionFactor
 		}

-		possiblePath := ds.buildIndexMergeOrPath(filters, partialAlternativePaths, k, shouldKeepCurrentFilter)
+		possiblePath := buildIndexMergeOrPath(filters, partialAlternativePaths, k, shouldKeepCurrentFilter)
 		if possiblePath == nil {
 			return nil
 		}
@@ -425,7 +425,7 @@ func (ds *DataSource) accessPathsForConds(

 // buildIndexMergePartialPath chooses the best index path from all possible paths.
 // Now we choose the index with minimal estimate row count.
-func (*DataSource) buildIndexMergePartialPath(indexAccessPaths []*util.AccessPath) *util.AccessPath {
+func buildIndexMergePartialPath(indexAccessPaths []*util.AccessPath) *util.AccessPath {
 	if len(indexAccessPaths) == 1 {
 		return indexAccessPaths[0]
 	}
@@ -446,7 +446,7 @@
 }

 // buildIndexMergeOrPath generates one possible IndexMergePath.
-func (ds *DataSource) buildIndexMergeOrPath( +func buildIndexMergeOrPath( filters []expression.Expression, partialAlternativePaths [][]*util.AccessPath, current int, @@ -455,25 +455,6 @@ func (ds *DataSource) buildIndexMergeOrPath( indexMergePath := &util.AccessPath{PartialAlternativeIndexPaths: partialAlternativePaths} indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[:current]...) indexMergePath.TableFilters = append(indexMergePath.TableFilters, filters[current+1:]...) - // If global index exists, index merge is not allowed. - // Global index is not compatible with IndexMergeReaderExecutor. - for i := range partialAlternativePaths { - // if one path's all alternatives are global index, warning it. - allGlobal := true - for _, oneAlternative := range partialAlternativePaths[i] { - // once we have a table alternative path - if oneAlternative.IsTablePath() { - allGlobal = false - } - if oneAlternative.Index != nil && !oneAlternative.Index.Global { - allGlobal = false - } - } - if allGlobal { - ds.SCtx().GetSessionVars().StmtCtx.AppendWarning(errors.NewNoStackError("global index is not compatible with index merge, so ignore it")) - return nil - } - } // since shouldKeepCurrentFilter may be changed in alternative paths converging, kept the filer expression anyway here. indexMergePath.KeepIndexMergeORSourceFilter = shouldKeepCurrentFilter // this filter will be merged into indexPath's table filters when converging. diff --git a/tests/integrationtest/r/executor/partition/global_index.result b/tests/integrationtest/r/executor/partition/global_index.result index 09a67368c4864..653aaf1d6db66 100644 --- a/tests/integrationtest/r/executor/partition/global_index.result +++ b/tests/integrationtest/r/executor/partition/global_index.result @@ -121,9 +121,10 @@ analyze table t; # when index_merge has global index as its partial path, ignore it. 
explain select /*+ use_index_merge(t, uidx_ac, idx_bc) */ * from t where a=1 or b=2; id estRows task access object operator info -TableReader_7 1.88 root partition:all data:Selection_6 -└─Selection_6 1.88 cop[tikv] or(eq(executor__partition__global_index.t.a, 1), eq(executor__partition__global_index.t.b, 2)) - └─TableFullScan_5 8.00 cop[tikv] table:t keep order:false +IndexMerge_12 1.88 root partition:all type: union +├─IndexRangeScan_8(Build) 1.00 cop[tikv] table:t, index:uidx_ac(a) range:[1,1], keep order:false +├─IndexRangeScan_10(Build) 1.00 cop[tikv] table:t, index:idx_bd(b, c) range:[2,2], keep order:false +└─TableRowIDScan_11(Probe) 1.88 cop[tikv] table:t keep order:false select /*+ use_index_merge(t, uidx_ac, idx_bc) */ * from t where a=1 or b=2; a b c d 1 1 1 1 diff --git a/tests/integrationtest/r/globalindex/mem_index_merge.result b/tests/integrationtest/r/globalindex/mem_index_merge.result new file mode 100644 index 0000000000000..9e5519410c6f6 --- /dev/null +++ b/tests/integrationtest/r/globalindex/mem_index_merge.result @@ -0,0 +1,237 @@ +set tidb_enable_global_index = true; +## Test IntHandle +CREATE TABLE `tpk2` ( +`a` int(11) DEFAULT NULL, +`b` int(11) DEFAULT NULL, +`c` int(11) NOT NULL, +`d` int(11) NOT NULL AUTO_INCREMENT, +KEY `idx_bc` (`b`,`c`), +UNIQUE KEY `uidx_a` (`a`), +UNIQUE KEY `uidx_ac` (`a`, `c`), +KEY `idx_c` (`c`) +) PARTITION BY HASH (`c`) PARTITIONS 5; +insert into tpk2 values (1, 2, 1, 1), (3, 6, 3, 3); +begin; +insert into tpk2 values (2, 4, 2, 2); +## for indexMerge union +explain select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=1 or b=4; +id estRows task access object operator info +Projection_5 11.00 root globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─UnionScan_6 11.00 root or(eq(globalindex__mem_index_merge.tpk2.a, 1), eq(globalindex__mem_index_merge.tpk2.b, 4)) + └─IndexMerge_11 11.00 root partition:all type: union + ├─IndexRangeScan_7(Build) 1.00 cop[tikv] table:tpk2, index:uidx_a(a) range:[1,1], keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:tpk2, index:idx_bc(b, c) range:[4,4], keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 11.00 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=1 or b=4; +a b c d +1 2 1 1 +2 4 2 2 +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=2 or b=4; +a b c d +2 4 2 2 +## for indexMerge intersection +explain select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 1 and c > 1; +id estRows task access object operator info +Projection_5 1111.11 root globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─UnionScan_6 1111.11 root gt(globalindex__mem_index_merge.tpk2.a, 1), gt(globalindex__mem_index_merge.tpk2.c, 1) + └─IndexMerge_11 1111.11 root partition:all type: intersection + ├─IndexRangeScan_7(Build) 3333.33 cop[tikv] table:tpk2, index:uidx_a(a) range:(1,+inf], keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3333.33 cop[tikv] table:tpk2, index:idx_c(c) range:(1,+inf], keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 1111.11 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 1 and c > 1; +a b c d +2 4 2 2 +3 6 3 3 +select /*+ use_index_merge(tpk2, 
uidx_a, idx_c) */ * from tpk2 where a > 0 and c > 0; +a b c d +1 2 1 1 +2 4 2 2 +3 6 3 3 +## for indexMerge union with specified PARTITION +explain select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 partition(p1) where a=1 or b=4; +id estRows task access object operator info +Projection_5 11.00 root NULL globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─UnionScan_6 11.00 root NULL or(eq(globalindex__mem_index_merge.tpk2.a, 1), eq(globalindex__mem_index_merge.tpk2.b, 4)) + └─IndexMerge_12 11.00 root partition:p1 type: union + ├─Selection_9(Build) 1.00 cop[tikv] NULL in(_tidb_pid, pid1) + │ └─IndexRangeScan_7 1.00 cop[tikv] table:tpk2, index:uidx_a(a) range:[1,1], keep order:false, stats:pseudo + ├─IndexRangeScan_10(Build) 10.00 cop[tikv] table:tpk2, index:idx_bc(b, c) range:[4,4], keep order:false, stats:pseudo + └─TableRowIDScan_11(Probe) 11.00 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 partition(p1) where a=1 or b=4; +a b c d +1 2 1 1 +## for indexMerge intersection with specified PARTITION +explain select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 1 and c > 1; +id estRows task access object operator info +Projection_5 1111.11 root NULL globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─UnionScan_6 1111.11 root NULL gt(globalindex__mem_index_merge.tpk2.a, 1), gt(globalindex__mem_index_merge.tpk2.c, 1) + └─IndexMerge_12 1111.11 root partition:p1 type: intersection + ├─Selection_9(Build) 3333.33 cop[tikv] NULL in(_tidb_pid, pid1) + │ └─IndexRangeScan_7 3333.33 cop[tikv] table:tpk2, index:uidx_a(a) range:(1,+inf], keep order:false, stats:pseudo + ├─IndexRangeScan_10(Build) 3333.33 cop[tikv] table:tpk2, index:idx_c(c) range:(1,+inf], keep order:false, stats:pseudo + └─TableRowIDScan_11(Probe) 1111.11 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 1 and c > 1; +a b c d +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 0 and c > 0; +a b c d +1 2 1 1 +rollback; +## Test CommonHandle +drop table tpk2; +CREATE TABLE `tpk2` ( +`a` int(11) DEFAULT NULL, +`b` int(11) DEFAULT NULL, +`c` int(11) NOT NULL, +`d` int(11) NOT NULL, +KEY `idx_bc` (`b`,`c`), +UNIQUE KEY `uidx_a` (`a`), +UNIQUE KEY `uidx_ac` (`a`, `c`), +KEY `idx_c` (`c`), +PRIMARY KEY(`d`, `c`) +) PARTITION BY HASH (`d`) PARTITIONS 5; +insert into tpk2 values (1, 2, 1, 1), (3, 6, 3, 3); +begin; +insert into tpk2 values (2, 4, 2, 2); +## for indexMerge union +explain select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=1 or b=4; +id estRows task access object operator info +Projection_5 11.00 root globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─UnionScan_6 11.00 root or(eq(globalindex__mem_index_merge.tpk2.a, 1), eq(globalindex__mem_index_merge.tpk2.b, 4)) + └─IndexMerge_11 11.00 root partition:all type: union + ├─IndexRangeScan_7(Build) 1.00 cop[tikv] table:tpk2, index:uidx_a(a) range:[1,1], keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 10.00 cop[tikv] table:tpk2, index:idx_bc(b, c) range:[4,4], keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 11.00 
cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=1 or b=4; +a b c d +1 2 1 1 +2 4 2 2 +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=2 or b=4; +a b c d +2 4 2 2 +## for indexMerge intersection +explain select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 1 and c > 1; +id estRows task access object operator info +Projection_5 1111.11 root globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─UnionScan_6 1111.11 root gt(globalindex__mem_index_merge.tpk2.a, 1), gt(globalindex__mem_index_merge.tpk2.c, 1) + └─IndexMerge_11 1111.11 root partition:all type: intersection + ├─IndexRangeScan_7(Build) 3333.33 cop[tikv] table:tpk2, index:uidx_a(a) range:(1,+inf], keep order:false, stats:pseudo + ├─IndexRangeScan_9(Build) 3333.33 cop[tikv] table:tpk2, index:idx_c(c) range:(1,+inf], keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 1111.11 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 1 and c > 1; +a b c d +2 4 2 2 +3 6 3 3 +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 0 and c > 0; +a b c d +1 2 1 1 +2 4 2 2 +3 6 3 3 +## for indexMerge union with specified PARTITION +explain select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 partition(p1) where a=1 or b=4; +id estRows task access object operator info +Projection_5 11.00 root NULL globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─UnionScan_6 11.00 root NULL or(eq(globalindex__mem_index_merge.tpk2.a, 1), eq(globalindex__mem_index_merge.tpk2.b, 4)) + └─IndexMerge_12 11.00 root partition:p1 type: union + ├─Selection_9(Build) 1.00 cop[tikv] NULL in(_tidb_pid, pid1) + │ └─IndexRangeScan_7 1.00 cop[tikv] table:tpk2, index:uidx_a(a) range:[1,1], keep order:false, stats:pseudo + ├─IndexRangeScan_10(Build) 10.00 cop[tikv] table:tpk2, index:idx_bc(b, c) range:[4,4], keep order:false, stats:pseudo + └─TableRowIDScan_11(Probe) 11.00 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 partition(p1) where a=1 or b=4; +a b c d +1 2 1 1 +## for indexMerge intersection with specified PARTITION +explain select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 1 and c > 1; +id estRows task access object operator info +Projection_5 1111.11 root NULL globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─UnionScan_6 1111.11 root NULL gt(globalindex__mem_index_merge.tpk2.a, 1), gt(globalindex__mem_index_merge.tpk2.c, 1) + └─IndexMerge_12 1111.11 root partition:p1 type: intersection + ├─Selection_9(Build) 3333.33 cop[tikv] NULL in(_tidb_pid, pid1) + │ └─IndexRangeScan_7 3333.33 cop[tikv] table:tpk2, index:uidx_a(a) range:(1,+inf], keep order:false, stats:pseudo + ├─IndexRangeScan_10(Build) 3333.33 cop[tikv] table:tpk2, index:idx_c(c) range:(1,+inf], keep order:false, stats:pseudo + └─TableRowIDScan_11(Probe) 1111.11 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 1 and c > 1; +a b c d +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 
partition(p1) where a > 0 and c > 0; +a b c d +1 2 1 1 +## for indexMerge union in txn with order by limit +explain select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c limit 1; +id estRows task access object operator info +Projection_8 1.00 root globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─Limit_15 1.00 root offset:0, count:1 + └─UnionScan_22 1.00 root or(eq(globalindex__mem_index_merge.tpk2.a, 1), eq(globalindex__mem_index_merge.tpk2.b, 4)) + └─IndexMerge_27 1.00 root partition:all type: union + ├─IndexRangeScan_23(Build) 0.91 cop[tikv] table:tpk2, index:uidx_ac(a, c) range:[1,1], keep order:true, stats:pseudo + ├─IndexRangeScan_25(Build) 0.91 cop[tikv] table:tpk2, index:idx_bc(b, c) range:[4,4], keep order:true, stats:pseudo + └─TableRowIDScan_26(Probe) 1.00 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c limit 1; +a b c d +1 2 1 1 +explain select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c desc limit 1; +id estRows task access object operator info +Projection_8 1.00 root globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─Limit_15 1.00 root offset:0, count:1 + └─UnionScan_22 1.00 root or(eq(globalindex__mem_index_merge.tpk2.a, 1), eq(globalindex__mem_index_merge.tpk2.b, 4)) + └─IndexMerge_27 1.00 root partition:all type: union + ├─IndexRangeScan_23(Build) 0.91 cop[tikv] table:tpk2, index:uidx_ac(a, c) range:[1,1], keep order:true, desc, stats:pseudo + ├─IndexRangeScan_25(Build) 0.91 cop[tikv] table:tpk2, index:idx_bc(b, c) range:[4,4], keep order:true, desc, stats:pseudo + └─TableRowIDScan_26(Probe) 1.00 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c desc limit 1; +a b c d +2 4 2 2 +commit; +## for indexMerge union with order by limit +explain select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c limit 1; +id estRows task access object operator info +Projection_26 1.00 root globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─IndexMerge_25 1.00 root partition:all type: union, limit embedded(offset:0, count:1) + ├─Limit_23(Build) 0.91 cop[tikv] offset:0, count:1 + │ └─IndexRangeScan_19 0.91 cop[tikv] table:tpk2, index:uidx_ac(a, c) range:[1,1], keep order:true, stats:pseudo + ├─Limit_24(Build) 0.91 cop[tikv] offset:0, count:1 + │ └─IndexRangeScan_21 0.91 cop[tikv] table:tpk2, index:idx_bc(b, c) range:[4,4], keep order:true, stats:pseudo + └─TableRowIDScan_22(Probe) 1.00 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c limit 1; +a b c d +1 2 1 1 +explain select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c desc limit 1; +id estRows task access object operator info +Projection_26 1.00 root globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─IndexMerge_25 1.00 root partition:all type: union, limit embedded(offset:0, count:1) 
+ ├─Limit_23(Build) 0.91 cop[tikv] offset:0, count:1 + │ └─IndexRangeScan_19 0.91 cop[tikv] table:tpk2, index:uidx_ac(a, c) range:[1,1], keep order:true, desc, stats:pseudo + ├─Limit_24(Build) 0.91 cop[tikv] offset:0, count:1 + │ └─IndexRangeScan_21 0.91 cop[tikv] table:tpk2, index:idx_bc(b, c) range:[4,4], keep order:true, desc, stats:pseudo + └─TableRowIDScan_22(Probe) 1.00 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c desc limit 1; +a b c d +2 4 2 2 +## Test IndexWorker + TableWorker +drop table tpk2; +CREATE TABLE `tpk2` ( +`a` int(11) DEFAULT NULL, +`b` int(11), +`c` int(11) NOT NULL, +`d` int(11) NOT NULL AUTO_INCREMENT, +PRIMARY KEY (`b`), +UNIQUE KEY `uidx_a` (`a`) +) PARTITION BY HASH (`b`) PARTITIONS 5; +insert into tpk2 values (1, 2, 1, 1), (3, 6, 3, 3); +begin; +insert into tpk2 values (2, 4, 2, 2); +## for indexMerge union +explain select /*+ use_index_merge(tpk2, uidx_a, primary) */ * from tpk2 where a=1 or b=4; +id estRows task access object operator info +Projection_5 2.00 root globalindex__mem_index_merge.tpk2.a, globalindex__mem_index_merge.tpk2.b, globalindex__mem_index_merge.tpk2.c, globalindex__mem_index_merge.tpk2.d +└─UnionScan_6 2.00 root or(eq(globalindex__mem_index_merge.tpk2.a, 1), eq(globalindex__mem_index_merge.tpk2.b, 4)) + └─IndexMerge_11 2.00 root partition:all type: union + ├─IndexRangeScan_7(Build) 1.00 cop[tikv] table:tpk2, index:uidx_a(a) range:[1,1], keep order:false, stats:pseudo + ├─TableRangeScan_9(Build) 1.00 cop[tikv] table:tpk2 range:[4,4], keep order:false, stats:pseudo + └─TableRowIDScan_10(Probe) 2.00 cop[tikv] table:tpk2 keep order:false, stats:pseudo +select /*+ use_index_merge(tpk2, uidx_a, primary) */ * from tpk2 where a=1 or b=4; +a b c d +1 2 1 1 +2 4 2 2 +select /*+ use_index_merge(tpk2, uidx_a, primary) */ * from tpk2 where a=2 or b=4; +a b c d +2 4 2 2 +rollback; diff --git a/tests/integrationtest/t/globalindex/mem_index_merge.test b/tests/integrationtest/t/globalindex/mem_index_merge.test new file mode 100644 index 0000000000000..fc852f4f8bf86 --- /dev/null +++ b/tests/integrationtest/t/globalindex/mem_index_merge.test @@ -0,0 +1,134 @@ +set tidb_enable_global_index = true; +--echo ## Test IntHandle +CREATE TABLE `tpk2` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + `c` int(11) NOT NULL, + `d` int(11) NOT NULL AUTO_INCREMENT, + KEY `idx_bc` (`b`,`c`), + UNIQUE KEY `uidx_a` (`a`), + UNIQUE KEY `uidx_ac` (`a`, `c`), + KEY `idx_c` (`c`) +) PARTITION BY HASH (`c`) PARTITIONS 5; + +insert into tpk2 values (1, 2, 1, 1), (3, 6, 3, 3); + +begin; +insert into tpk2 values (2, 4, 2, 2); + +--echo ## for indexMerge union +explain select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=1 or b=4; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=1 or b=4; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=2 or b=4; + +--echo ## for indexMerge intersection +explain select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 1 and c > 1; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 1 and c > 1; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 0 and c > 0; + +--echo ## for indexMerge union with specified PARTITION +--replace_regex /in\(_tidb_pid, [0-9]+\)/in(_tidb_pid, pid1)/ +explain select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from 
tpk2 partition(p1) where a=1 or b=4; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 partition(p1) where a=1 or b=4; + +--echo ## for indexMerge intersection with specified PARTITION +--replace_regex /in\(_tidb_pid, [0-9]+\)/in(_tidb_pid, pid1)/ +explain select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 1 and c > 1; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 1 and c > 1; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 0 and c > 0; + +rollback; + +--echo ## Test CommonHandle +drop table tpk2; +CREATE TABLE `tpk2` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL, + `c` int(11) NOT NULL, + `d` int(11) NOT NULL, + KEY `idx_bc` (`b`,`c`), + UNIQUE KEY `uidx_a` (`a`), + UNIQUE KEY `uidx_ac` (`a`, `c`), + KEY `idx_c` (`c`), + PRIMARY KEY(`d`, `c`) +) PARTITION BY HASH (`d`) PARTITIONS 5; + +insert into tpk2 values (1, 2, 1, 1), (3, 6, 3, 3); + +begin; +insert into tpk2 values (2, 4, 2, 2); + +--echo ## for indexMerge union +explain select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=1 or b=4; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=1 or b=4; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 where a=2 or b=4; + +--echo ## for indexMerge intersection +explain select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 1 and c > 1; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 1 and c > 1; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 where a > 0 and c > 0; + +--echo ## for indexMerge union with specified PARTITION +--replace_regex /in\(_tidb_pid, [0-9]+\)/in(_tidb_pid, pid1)/ +explain select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 partition(p1) where a=1 or b=4; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_bc) */ * from tpk2 partition(p1) where a=1 or b=4; + +--echo ## for indexMerge intersection with specified PARTITION +--replace_regex /in\(_tidb_pid, [0-9]+\)/in(_tidb_pid, pid1)/ +explain select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 1 and c > 1; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 1 and c > 1; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, idx_c) */ * from tpk2 partition(p1) where a > 0 and c > 0; + +--echo ## for indexMerge union in txn with order by limit +explain select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c limit 1; +select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c limit 1; +explain select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c desc limit 1; +select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c desc limit 1; + +commit; + +--echo ## for indexMerge union with order by limit +explain select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c limit 1; +select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c limit 1; +explain select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ * from tpk2 where a = 1 or b = 4 order by c desc limit 1; +select /*+ use_index_merge(tpk2, uidx_ac, idx_bc) */ 
* from tpk2 where a = 1 or b = 4 order by c desc limit 1; + +--echo ## Test IndexWorker + TableWorker +drop table tpk2; +CREATE TABLE `tpk2` ( + `a` int(11) DEFAULT NULL, + `b` int(11), + `c` int(11) NOT NULL, + `d` int(11) NOT NULL AUTO_INCREMENT, + PRIMARY KEY (`b`), + UNIQUE KEY `uidx_a` (`a`) +) PARTITION BY HASH (`b`) PARTITIONS 5; + +insert into tpk2 values (1, 2, 1, 1), (3, 6, 3, 3); + +begin; +insert into tpk2 values (2, 4, 2, 2); + +--echo ## for indexMerge union +explain select /*+ use_index_merge(tpk2, uidx_a, primary) */ * from tpk2 where a=1 or b=4; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, primary) */ * from tpk2 where a=1 or b=4; +--sorted_result +select /*+ use_index_merge(tpk2, uidx_a, primary) */ * from tpk2 where a=2 or b=4; + +rollback; + diff --git a/tests/realtikvtest/flashbacktest/BUILD.bazel b/tests/realtikvtest/flashbacktest/BUILD.bazel index 041715f070d3a..0a5af8f14b605 100644 --- a/tests/realtikvtest/flashbacktest/BUILD.bazel +++ b/tests/realtikvtest/flashbacktest/BUILD.bazel @@ -2,7 +2,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_test") go_test( name = "flashbacktest_test", - timeout = "short", + timeout = "long", srcs = [ "flashback_test.go", "main_test.go",
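
Both the executor and the mem-reader changes in this patch converge on one counting scheme: normalize every handle to a kv.PartitionHandle (handles from a global index already carry a partition ID; handles from a local index are wrapped with the physical ID of the partition they were read from), count occurrences in a single kv.HandleMap, then keep every distinct handle for a union, or only the handles seen by all partial readers for an intersection. The standalone sketch below illustrates that scheme; mergeHandles and the sample values are hypothetical and are not part of this patch, while the kv calls it makes (NewHandleMap, NewPartitionHandle, IntHandle, Get/Set/Range) are the ones used in the diff above. It assumes each partial reader reports a given handle at most once.

package main

import (
	"fmt"

	"github.com/pingcap/tidb/pkg/kv"
)

// mergeHandles unions or intersects the handle sets produced by the partial
// readers. perReader[i] holds the handles from reader i; pid is the physical
// partition ID attached to handles coming from local indexes.
func mergeHandles(perReader [][]kv.Handle, pid int64, intersection bool) []kv.Handle {
	hMap := kv.NewHandleMap()
	for _, handles := range perReader {
		for _, h := range handles {
			// A global-index handle is already a PartitionHandle; wrap the rest
			// so that equal row IDs from different partitions stay distinct.
			if _, ok := h.(kv.PartitionHandle); !ok {
				h = kv.NewPartitionHandle(pid, h)
			}
			if v, ok := hMap.Get(h); ok {
				*(v.(*int))++ // seen again, by this or another reader
			} else {
				cnt := 1
				hMap.Set(h, &cnt)
			}
		}
	}
	var out []kv.Handle
	hMap.Range(func(h kv.Handle, val any) bool {
		// Union keeps every distinct handle; intersection keeps only handles
		// reported by all partial readers.
		if !intersection || *(val.(*int)) == len(perReader) {
			out = append(out, h)
		}
		return true
	})
	return out
}

func main() {
	globalIdx := []kv.Handle{ // from a global index: partition IDs already attached
		kv.NewPartitionHandle(100, kv.IntHandle(1)),
		kv.NewPartitionHandle(101, kv.IntHandle(2)),
	}
	localIdx := []kv.Handle{kv.IntHandle(1), kv.IntHandle(3)} // read from partition 100
	readers := [][]kv.Handle{globalIdx, localIdx}
	fmt.Println(len(mergeHandles(readers, 100, false))) // union: 3 distinct handles
	fmt.Println(len(mergeHandles(readers, 100, true)))  // intersection: 1 shared handle
}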