*: record previous statement when commit is slow #11908

Merged (5 commits, Sep 3, 2019)

Changes from 2 commits
67 changes: 32 additions & 35 deletions executor/adapter.go
@@ -177,6 +177,7 @@ func (a *recordSet) NewChunk() *chunk.Chunk {
func (a *recordSet) Close() error {
err := a.executor.Close()
a.stmt.LogSlowQuery(a.txnStartTS, a.lastErr == nil)
a.stmt.Ctx.GetSessionVars().PrevStmt = a.stmt.OriginText()
a.stmt.logAudit()
return err
}
@@ -682,6 +683,16 @@ func (a *ExecStmt) logAudit() {
}
}

// FormatSQL is used to format the original SQL, e.g. truncating long SQL, appending prepared arguments.
func FormatSQL(sql string, sessVars *variable.SessionVars) string {
cfg := config.GetGlobalConfig()
length := len(sql)
if maxQueryLen := atomic.LoadUint64(&cfg.Log.QueryLogMaxLen); uint64(length) > maxQueryLen {
sql = fmt.Sprintf("%.*q(len:%d)", maxQueryLen, sql, length)
}
return QueryReplacer.Replace(sql) + sessVars.GetExecuteArgumentsInfo()
}
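For illustration only (not part of this diff), a tiny standalone program showing what the %.*q(len:%d) formatting above produces once a query exceeds the limit; the 10-rune limit is an arbitrary assumption chosen to make the truncation visible:

package main

import "fmt"

func main() {
	sql := "select * from t where id = 12345"
	maxQueryLen := 10 // assumed tiny limit; the real limit comes from cfg.Log.QueryLogMaxLen
	if len(sql) > maxQueryLen {
		// The precision argument of %q limits how many input runes are kept, so the
		// query is cut to 10 runes, quoted, and annotated with its full length.
		sql = fmt.Sprintf("%.*q(len:%d)", maxQueryLen, sql, len(sql))
	}
	fmt.Println(sql) // "select * f"(len:32)
}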

// LogSlowQuery is used to print the slow query in the log files.
func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
sessVars := a.Ctx.GetSessionVars()
@@ -695,11 +706,7 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
if costTime < threshold && level > zapcore.DebugLevel {
return
}
- sql := a.Text
- if maxQueryLen := atomic.LoadUint64(&cfg.Log.QueryLogMaxLen); uint64(len(sql)) > maxQueryLen {
- sql = fmt.Sprintf("%.*q(len:%d)", maxQueryLen, sql, len(a.Text))
- }
- sql = QueryReplacer.Replace(sql) + sessVars.GetExecuteArgumentsInfo()
+ sql := FormatSQL(a.Text, sessVars)

var tableIDs, indexNames string
if len(sessVars.StmtCtx.TableIDs) > 0 {
@@ -712,38 +719,28 @@ func (a *ExecStmt) LogSlowQuery(txnTS uint64, succ bool) {
copTaskInfo := sessVars.StmtCtx.CopTasksDetails()
statsInfos := plannercore.GetStatsInfo(a.Plan)
memMax := sessVars.StmtCtx.MemTracker.MaxConsumed()
+ _, digest := sessVars.StmtCtx.SQLDigest()
+ slowItems := &variable.SlowQueryLogItems{
+ TxnTS: txnTS,
+ SQL: sql,
+ Digest: digest,
+ TimeTotal: costTime,
+ TimeParse: a.Ctx.GetSessionVars().DurationParse,
+ TimeCompile: a.Ctx.GetSessionVars().DurationCompile,
+ IndexNames: indexNames,
+ StatsInfos: statsInfos,
+ CopTasks: copTaskInfo,
+ ExecDetail: execDetail,
+ MemMax: memMax,
+ Succ: succ,
+ }

Review comment (Member): It's better to put this inside the if...else... branch? If the log level is higher than debug, we can save an object allocation.

Reply (Member, author): I think it will return at https://github.com/pingcap/tidb/pull/11908/files#diff-350127760839dbfd52d23927f7ff2d95R706 if the log level is higher than debug.
+ if _, ok := a.StmtNode.(*ast.CommitStmt); ok {
+ slowItems.PrevStmt = FormatSQL(sessVars.PrevStmt, sessVars)
+ }

Review comment (Member): PrevStmt only records the last executed statement; it's not enough to find which SQL statement in the transaction is slow.

Reply (Member, author): It is all we could do :)
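For illustration (a hypothetical transaction, not taken from this PR): after running

begin; update t set a = 1; update t set b = 2; commit;

a slow commit is logged with only

# Prev_stmt: update t set b = 2;

because PrevStmt is overwritten after every statement, so earlier statements in the same transaction do not appear in the slow log entry.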
if costTime < threshold {
- _, digest := sessVars.StmtCtx.SQLDigest()
- logutil.SlowQueryLogger.Debug(sessVars.SlowLogFormat(&variable.SlowQueryLogItems{
- TxnTS: txnTS,
- SQL: sql,
- Digest: digest,
- TimeTotal: costTime,
- TimeParse: a.Ctx.GetSessionVars().DurationParse,
- TimeCompile: a.Ctx.GetSessionVars().DurationCompile,
- IndexNames: indexNames,
- StatsInfos: statsInfos,
- CopTasks: copTaskInfo,
- ExecDetail: execDetail,
- MemMax: memMax,
- Succ: succ,
- }))
+ logutil.SlowQueryLogger.Debug(sessVars.SlowLogFormat(slowItems))
} else {
- _, digest := sessVars.StmtCtx.SQLDigest()
- logutil.SlowQueryLogger.Warn(sessVars.SlowLogFormat(&variable.SlowQueryLogItems{
- TxnTS: txnTS,
- SQL: sql,
- Digest: digest,
- TimeTotal: costTime,
- TimeParse: a.Ctx.GetSessionVars().DurationParse,
- TimeCompile: a.Ctx.GetSessionVars().DurationCompile,
- IndexNames: indexNames,
- StatsInfos: statsInfos,
- CopTasks: copTaskInfo,
- ExecDetail: execDetail,
- MemMax: memMax,
- Succ: succ,
- }))
+ logutil.SlowQueryLogger.Warn(sessVars.SlowLogFormat(slowItems))
metrics.TotalQueryProcHistogram.Observe(costTime.Seconds())
metrics.TotalCopProcHistogram.Observe(execDetail.ProcessTime.Seconds())
metrics.TotalCopWaitHistogram.Observe(execDetail.WaitTime.Seconds())
26 changes: 17 additions & 9 deletions infoschema/slow_log.go
@@ -60,6 +60,7 @@ var slowQueryCols = []columnInfo{
{variable.SlowLogCopWaitAddr, mysql.TypeVarchar, 64, 0, nil, nil},
{variable.SlowLogMemMax, mysql.TypeLonglong, 20, 0, nil, nil},
{variable.SlowLogSucc, mysql.TypeTiny, 1, 0, nil, nil},
{variable.SlowLogPrevStmt, mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil},
{variable.SlowLogQuerySQLStr, mysql.TypeLongBlob, types.UnspecifiedLength, 0, nil, nil},
}

@@ -87,6 +88,7 @@ func parseSlowLogFile(tz *time.Location, filePath string) ([][]types.Datum, error) {
func ParseSlowLog(tz *time.Location, reader *bufio.Reader) ([][]types.Datum, error) {
var rows [][]types.Datum
startFlag := false
prevStmtPrefix := variable.SlowLogPrevStmt + variable.SlowLogSpaceMarkStr
Review comment (Contributor): Make this a global const. (crazycs520 marked this conversation as resolved.)
var st *slowQueryTuple
for {
lineByte, err := getOneLine(reader)
@@ -112,15 +114,19 @@ func ParseSlowLog(tz *time.Location, reader *bufio.Reader) ([][]types.Datum, error) {
// Parse slow log field.
if strings.HasPrefix(line, variable.SlowLogRowPrefixStr) {
line = line[len(variable.SlowLogRowPrefixStr):]
- fieldValues := strings.Split(line, " ")
- for i := 0; i < len(fieldValues)-1; i += 2 {
- field := fieldValues[i]
- if strings.HasSuffix(field, ":") {
- field = field[:len(field)-1]
- }
- err = st.setFieldValue(tz, field, fieldValues[i+1])
- if err != nil {
- return rows, err
- }
- }
+ if strings.HasPrefix(line, prevStmtPrefix) {
+ st.prevStmt = line[len(prevStmtPrefix):]
+ } else {
+ fieldValues := strings.Split(line, " ")
+ for i := 0; i < len(fieldValues)-1; i += 2 {
+ field := fieldValues[i]
+ if strings.HasSuffix(field, ":") {
+ field = field[:len(field)-1]
+ }
+ err = st.setFieldValue(tz, field, fieldValues[i+1])
+ if err != nil {
+ return rows, err
+ }
+ }
+ }
} else if strings.HasSuffix(line, variable.SlowLogSQLSuffixStr) {
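The Prev_stmt value is a complete SQL statement that itself contains spaces, so it cannot go through the generic space-separated key/value loop above; that is why the line is checked for the Prev_stmt prefix first. A minimal standalone sketch of that special-casing (the prefix string is inlined here for illustration; the real code builds prevStmtPrefix from variable.SlowLogPrevStmt and variable.SlowLogSpaceMarkStr, and the lines below are shown with the leading "# " row prefix already stripped):

package main

import (
	"fmt"
	"strings"
)

func main() {
	const prevStmtPrefix = "Prev_stmt: "
	lines := []string{
		"Mem_max: 70724",
		"Prev_stmt: update t set i = 1;",
	}
	for _, line := range lines {
		if strings.HasPrefix(line, prevStmtPrefix) {
			// Keep the whole remainder: splitting on spaces would mangle the SQL text.
			fmt.Println("prev stmt =", line[len(prevStmtPrefix):])
			continue
		}
		fields := strings.Split(line, " ")
		fmt.Println(strings.TrimSuffix(fields[0], ":"), "=", fields[1])
	}
}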
@@ -195,6 +201,7 @@ type slowQueryTuple struct {
maxWaitTime float64
maxWaitAddress string
memMax int64
prevStmt string
sql string
isInternal bool
succ bool
@@ -313,6 +320,7 @@ func (st *slowQueryTuple) convertToDatumRow() []types.Datum {
} else {
record = append(record, types.NewIntDatum(0))
}
record = append(record, types.NewStringDatum(st.prevStmt))
record = append(record, types.NewStringDatum(st.sql))
return record
}
3 changes: 2 additions & 1 deletion infoschema/slow_log_test.go
@@ -38,6 +38,7 @@ func (s *testSuite) TestParseSlowLogFile(c *C) {
# Cop_wait_avg: 0.05 Cop_wait_p90: 0.6 Cop_wait_max: 0.8 Cop_wait_addr: 0.0.0.0:20160
# Mem_max: 70724
# Succ: false
# Prev_stmt: update t set i = 1;
select * from t;`)
reader := bufio.NewReader(slowLog)
loc, err := time.LoadLocation("Asia/Shanghai")
@@ -54,7 +55,7 @@ select * from t;`)
}
recordString += str
}
- expectRecordString := "2019-04-28 15:24:04.309074,405888132465033227,,,0,0.216905,0.021,0,0,1,637,0,,,1,42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772,t1:1,t2:2,0.1,0.2,0.03,127.0.0.1:20160,0.05,0.6,0.8,0.0.0.0:20160,70724,0,select * from t;"
+ expectRecordString := "2019-04-28 15:24:04.309074,405888132465033227,,,0,0.216905,0.021,0,0,1,637,0,,,1,42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772,t1:1,t2:2,0.1,0.2,0.03,127.0.0.1:20160,0.05,0.6,0.8,0.0.0.0:20160,70724,0,update t set i = 1;,select * from t;"
c.Assert(expectRecordString, Equals, recordString)

// fix sql contain '# ' bug
5 changes: 3 additions & 2 deletions infoschema/tables_test.go
@@ -466,6 +466,7 @@ func (s *testTableSuite) TestSlowQuery(c *C) {
# Cop_wait_avg: 0.05 Cop_wait_p90: 0.6 Cop_wait_max: 0.8 Cop_wait_addr: 0.0.0.0:20160
# Mem_max: 70724
# Succ: true
# Prev_stmt: update t set i = 2;
select * from t_slim;`))
c.Assert(f.Sync(), IsNil)
c.Assert(err, IsNil)
@@ -474,10 +475,10 @@
tk.MustExec("set time_zone = '+08:00';")
re := tk.MustQuery("select * from information_schema.slow_query")
re.Check(testutil.RowsWithSep("|",
"2019-02-12 19:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|select * from t_slim;"))
"2019-02-12 19:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|update t set i = 2;|select * from t_slim;"))
tk.MustExec("set time_zone = '+00:00';")
re = tk.MustQuery("select * from information_schema.slow_query")
- re.Check(testutil.RowsWithSep("|", "2019-02-12 11:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|select * from t_slim;"))
+ re.Check(testutil.RowsWithSep("|", "2019-02-12 11:33:56.571953|406315658548871171|root|127.0.0.1|6|4.895492|0.161|0.101|0.092|1|100001|100000|test||0|42a1c8aae6f133e934d4bf0147491709a8812ea05ff8819ec522780fe657b772|t1:1,t2:2|0.1|0.2|0.03|127.0.0.1:20160|0.05|0.6|0.8|0.0.0.0:20160|70724|1|update t set i = 2;|select * from t_slim;"))

// Test for long query.
_, err = f.Write([]byte(`
1 change: 1 addition & 0 deletions session/tidb.go
@@ -218,6 +218,7 @@ func runStmt(ctx context.Context, sctx sessionctx.Context, s sqlexec.Statement)
// then it could include the transaction commit time.
if rs == nil {
s.(*executor.ExecStmt).LogSlowQuery(origTxnCtx.StartTS, err == nil)
sessVars.PrevStmt = s.OriginText()
}
}()
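To make the intent of this one-line change concrete, here is a minimal self-contained sketch (hypothetical types, not TiDB's API) of the bookkeeping: each finished statement records itself as PrevStmt, so a later slow COMMIT can report what ran just before it.

package main

import "fmt"

type sessionVars struct{ prevStmt string }

func runStmt(vars *sessionVars, sql string, slowCommit bool) {
	if slowCommit {
		// Roughly the extra information the slow log gains for a slow COMMIT.
		fmt.Printf("# Prev_stmt: %s\n%s\n", vars.prevStmt, sql)
	}
	// Mirrors sessVars.PrevStmt = s.OriginText() above.
	vars.prevStmt = sql
}

func main() {
	vars := &sessionVars{}
	runStmt(vars, "update t set i = 1;", false)
	runStmt(vars, "commit;", true)
}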

13 changes: 12 additions & 1 deletion sessionctx/variable/session.go
@@ -406,11 +406,14 @@ type SessionVars struct {
// StartTime is the start time of the last query.
StartTime time.Time

- // DurationParse is the duration of pasing SQL string to AST of the last query.
+ // DurationParse is the duration of parsing SQL string to AST of the last query.
DurationParse time.Duration

// DurationCompile is the duration of compiling AST to execution plan of the last query.
DurationCompile time.Duration

// PrevStmt is used to store the previous executed statement in the current session.
PrevStmt string
}

// ConnectionInfo present connection used by audit.
@@ -1040,6 +1043,8 @@
SlowLogMemMax = "Mem_max"
// SlowLogSucc is used to indicate whether this sql execute successfully.
SlowLogSucc = "Succ"
// SlowLogPrevStmt is used to show the previous executed statement.
SlowLogPrevStmt = "Prev_stmt"
)

// SlowQueryLogItems is a collection of items that should be included in the
@@ -1057,6 +1062,7 @@ type SlowQueryLogItems struct {
ExecDetail execdetails.ExecDetails
MemMax int64
Succ bool
PrevStmt string
}

// SlowLogFormat uses for formatting slow log.
@@ -1077,6 +1083,7 @@ type SlowQueryLogItems struct {
// # Cop_wait: Avg_time: 10ms P90_time: 20ms Max_time: 30ms Max_Addr: 10.6.131.79
// # Memory_max: 4096
// # Succ: true
// # Prev_stmt: begin;
// select * from t_slim;
func (s *SessionVars) SlowLogFormat(logItems *SlowQueryLogItems) string {
var buf bytes.Buffer
@@ -1144,6 +1151,10 @@ func (s *SessionVars) SlowLogFormat(logItems *SlowQueryLogItems) string {

writeSlowLogItem(&buf, SlowLogSucc, strconv.FormatBool(logItems.Succ))

if logItems.PrevStmt != "" {
writeSlowLogItem(&buf, SlowLogPrevStmt, logItems.PrevStmt)
}

buf.WriteString(logItems.SQL)
if len(logItems.SQL) == 0 || logItems.SQL[len(logItems.SQL)-1] != ';' {
buf.WriteString(";")