diff --git a/DEPS.bzl b/DEPS.bzl
index 35654c6047926..2e42eabd5a914 100644
--- a/DEPS.bzl
+++ b/DEPS.bzl
@@ -5871,26 +5871,26 @@ def go_deps():
         name = "com_github_pingcap_log",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/pingcap/log",
-        sha256 = "9b0ae182fcc611cc535eb18a6f332846f0744b905e48888c06c1f6aeda8036d5",
-        strip_prefix = "github.com/pingcap/log@v1.1.1-0.20230317032135-a0d097d16e22",
+        sha256 = "c393f943bb688e240dcf9a50ac1d915aa27371803524887ab61a235834bb7438",
+        strip_prefix = "github.com/pingcap/log@v1.1.1-0.20240314023424-862ccc32f18d",
         urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/log/com_github_pingcap_log-v1.1.1-0.20230317032135-a0d097d16e22.zip",
-            "http://ats.apps.svc/gomod/github.com/pingcap/log/com_github_pingcap_log-v1.1.1-0.20230317032135-a0d097d16e22.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/pingcap/log/com_github_pingcap_log-v1.1.1-0.20230317032135-a0d097d16e22.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/log/com_github_pingcap_log-v1.1.1-0.20230317032135-a0d097d16e22.zip",
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/log/com_github_pingcap_log-v1.1.1-0.20240314023424-862ccc32f18d.zip",
+            "http://ats.apps.svc/gomod/github.com/pingcap/log/com_github_pingcap_log-v1.1.1-0.20240314023424-862ccc32f18d.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/pingcap/log/com_github_pingcap_log-v1.1.1-0.20240314023424-862ccc32f18d.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/log/com_github_pingcap_log-v1.1.1-0.20240314023424-862ccc32f18d.zip",
        ],
    )
    go_repository(
        name = "com_github_pingcap_sysutil",
        build_file_proto_mode = "disable_global",
        importpath = "github.com/pingcap/sysutil",
-        sha256 = "0540f3dfd25706302c0f484baae0b6bb6a6382893bbf8b14720debcb9756f1d0",
-        strip_prefix = "github.com/pingcap/sysutil@v1.0.1-0.20230407040306-fb007c5aff21",
+        sha256 = "c2ffa4a6d163ca73e73831de5abaa25d47abba59c41b8a549d70935442921a56",
+        strip_prefix = "github.com/pingcap/sysutil@v1.0.1-0.20240311050922-ae81ee01f3a5",
         urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/sysutil/com_github_pingcap_sysutil-v1.0.1-0.20230407040306-fb007c5aff21.zip",
-            "http://ats.apps.svc/gomod/github.com/pingcap/sysutil/com_github_pingcap_sysutil-v1.0.1-0.20230407040306-fb007c5aff21.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/pingcap/sysutil/com_github_pingcap_sysutil-v1.0.1-0.20230407040306-fb007c5aff21.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/sysutil/com_github_pingcap_sysutil-v1.0.1-0.20230407040306-fb007c5aff21.zip",
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/pingcap/sysutil/com_github_pingcap_sysutil-v1.0.1-0.20240311050922-ae81ee01f3a5.zip",
+            "http://ats.apps.svc/gomod/github.com/pingcap/sysutil/com_github_pingcap_sysutil-v1.0.1-0.20240311050922-ae81ee01f3a5.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/pingcap/sysutil/com_github_pingcap_sysutil-v1.0.1-0.20240311050922-ae81ee01f3a5.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/pingcap/sysutil/com_github_pingcap_sysutil-v1.0.1-0.20240311050922-ae81ee01f3a5.zip",
        ],
    )
    go_repository(
diff --git a/cmd/benchdb/main.go b/cmd/benchdb/main.go
index 3c3bff47d4658..10438ac5726a8 100644
--- a/cmd/benchdb/main.go
+++ b/cmd/benchdb/main.go
@@ -56,7 +56,7 @@ var (
 func main() {
     flag.Parse()
     flag.PrintDefaults()
-    err := logutil.InitLogger(logutil.NewLogConfig(*logLevel, logutil.DefaultLogFormat, "", logutil.EmptyFileLogConfig, false))
+    err := logutil.InitLogger(logutil.NewLogConfig(*logLevel, logutil.DefaultLogFormat, "", "", logutil.EmptyFileLogConfig, false))
     terror.MustNil(err)
     err = store.Register("tikv", driver.TiKVDriver{})
     terror.MustNil(err)
diff --git a/cmd/tidb-server/main.go b/cmd/tidb-server/main.go
index b9b73976ecf35..782340f7a0167 100644
--- a/cmd/tidb-server/main.go
+++ b/cmd/tidb-server/main.go
@@ -108,6 +108,7 @@ const (
     nmLogLevel     = "L"
     nmLogFile      = "log-file"
     nmLogSlowQuery = "log-slow-query"
+    nmLogGeneral   = "log-general"
     nmReportStatus = "report-status"
     nmStatusHost   = "status-host"
     nmStatusPort   = "status"
@@ -163,6 +164,7 @@ var (
     logLevel     *string
     logFile      *string
     logSlowQuery *string
+    logGeneral   *string

     // Status
     reportStatus *bool
@@ -216,6 +218,7 @@ func initFlagSet() *flag.FlagSet {
     logLevel = fset.String(nmLogLevel, "info", "log level: info, debug, warn, error, fatal")
     logFile = fset.String(nmLogFile, "", "log file path")
     logSlowQuery = fset.String(nmLogSlowQuery, "", "slow query file path")
+    logGeneral = fset.String(nmLogGeneral, "", "general log file path")

     // Status
     reportStatus = flagBoolean(fset, nmReportStatus, true, "If enable status report HTTP service.")
@@ -580,6 +583,9 @@ func overrideConfig(cfg *config.Config, fset *flag.FlagSet) {
     if actualFlags[nmLogSlowQuery] {
         cfg.Log.SlowQueryFile = *logSlowQuery
     }
+    if actualFlags[nmLogGeneral] {
+        cfg.Log.GeneralLogFile = *logGeneral
+    }

     // Status
     if actualFlags[nmReportStatus] {
diff --git a/go.mod b/go.mod
index fedd84c17547c..2c78eabb5d25a 100644
--- a/go.mod
+++ b/go.mod
@@ -85,8 +85,8 @@ require (
     github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c
     github.com/pingcap/fn v1.0.0
     github.com/pingcap/kvproto v0.0.0-20240227073058-929ab83f9754
-    github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22
-    github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21
+    github.com/pingcap/log v1.1.1-0.20240314023424-862ccc32f18d
+    github.com/pingcap/sysutil v1.0.1-0.20240311050922-ae81ee01f3a5
     github.com/pingcap/tidb/pkg/parser v0.0.0-20211011031125-9b13dc409c5e
     github.com/pingcap/tipb v0.0.0-20240116032918-9bb28c43bbfc
     github.com/prometheus/client_golang v1.19.0
diff --git a/go.sum b/go.sum
index 78a903dc13edf..049e0d7b407cc 100644
--- a/go.sum
+++ b/go.sum
@@ -719,10 +719,10 @@ github.com/pingcap/kvproto v0.0.0-20240227073058-929ab83f9754 h1:nU9wDeMsID8EWaw
 github.com/pingcap/kvproto v0.0.0-20240227073058-929ab83f9754/go.mod h1:rXxWk2UnwfUhLXha1jxRWPADw9eMZGWEWCg92Tgmb/8=
 github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM=
 github.com/pingcap/log v1.1.0/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
-github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22 h1:2SOzvGvE8beiC1Y4g9Onkvu6UmuBBOeWRGQEjJaT/JY=
-github.com/pingcap/log v1.1.1-0.20230317032135-a0d097d16e22/go.mod h1:DWQW5jICDR7UJh4HtxXSM20Churx4CQL0fwL/SoOSA4=
-github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 h1:QV6jqlfOkh8hqvEAgwBZa+4bSgO0EeKC7s5c6Luam2I=
-github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21/go.mod h1:QYnjfA95ZaMefyl1NO8oPtKeb8pYUdnDVhQgf+qdpjM=
+github.com/pingcap/log v1.1.1-0.20240314023424-862ccc32f18d h1:y3EueKVfVykdpTyfUnQGqft0ud+xVFuCdp1XkVL0X1E=
+github.com/pingcap/log v1.1.1-0.20240314023424-862ccc32f18d/go.mod h1:ORfBOFp1eteu2odzsyaxI+b8TzJwgjwyQcGhI+9SfEA=
+github.com/pingcap/sysutil v1.0.1-0.20240311050922-ae81ee01f3a5 h1:T4pXRhBflzDeAhmOQHNPRRogMYxP13V7BkYw3ZsoSfE=
+github.com/pingcap/sysutil v1.0.1-0.20240311050922-ae81ee01f3a5/go.mod h1:rlimy0GcTvjiJqvD5mXTRr8O2eNZPBrcUgiWVYp9530=
 github.com/pingcap/tipb v0.0.0-20240116032918-9bb28c43bbfc h1:sEp4lbExDfnMX8HXQyhZrhqo2/SgeFY5KOdo5akc8FM=
 github.com/pingcap/tipb v0.0.0-20240116032918-9bb28c43bbfc/go.mod h1:A7mrd7WHBl1o63LE2bIBGEJMTNWXqhgmYiOvMLxozfs=
 github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
@@ -1474,6 +1474,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 716204baf623d..ac471b0be9e7c 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -490,6 +490,8 @@ type Log struct {
     // ExpensiveThreshold is deprecated.
     ExpensiveThreshold uint `toml:"expensive-threshold" json:"expensive-threshold"`

+    GeneralLogFile string `toml:"general-log-file" json:"general-log-file"`
+
     // The following items are deprecated. We need to keep them here temporarily
     // to support the upgrade process. They can be removed in future.
@@ -1441,7 +1443,7 @@ var TableLockDelayClean = func() uint64 {

 // ToLogConfig converts *Log to *logutil.LogConfig.
 func (l *Log) ToLogConfig() *logutil.LogConfig {
-    return logutil.NewLogConfig(l.Level, l.Format, l.SlowQueryFile, l.File, l.getDisableTimestamp(),
+    return logutil.NewLogConfig(l.Level, l.Format, l.SlowQueryFile, l.GeneralLogFile, l.File, l.getDisableTimestamp(),
         func(config *zaplog.Config) { config.DisableErrorVerbose = l.getDisableErrorStack() },
         func(config *zaplog.Config) { config.Timeout = l.Timeout },
     )
diff --git a/pkg/config/config.toml.example b/pkg/config/config.toml.example
index 47096de93e4af..c1438a9e8535a 100644
--- a/pkg/config/config.toml.example
+++ b/pkg/config/config.toml.example
@@ -154,6 +154,10 @@ format = "text"
 # Stores slow query log into separated files.
 slow-query-file = "tidb-slow.log"

+# Stores the general log into separate files.
+# If it is empty, the general log will be written to the server log.
+general-log-file = ""
+
 # Make tidb panic if the write log operation hang for 15s
 # timeout = 15

@@ -171,6 +175,10 @@ max-days = 0
 # Maximum number of old log files to retain. No clean up by default.
 max-backups = 0

+# Compression function for rotated files.
+# It can be empty or "gzip"; leave it empty to disable compression.
+compression = ""
+
 [security]
 # Path of file that contains list of trusted SSL CAs for connection with mysql client.
 ssl-ca = ""
diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go
index caaa755d77294..e56b7ddd19b60 100644
--- a/pkg/config/config_test.go
+++ b/pkg/config/config_test.go
@@ -126,7 +126,7 @@ func TestLogConfig(t *testing.T) {
         require.Equal(t, expectedDisableErrorStack, conf.Log.DisableErrorStack)
         require.Equal(t, expectedEnableTimestamp, conf.Log.EnableTimestamp)
         require.Equal(t, expectedDisableTimestamp, conf.Log.DisableTimestamp)
-        require.Equal(t, logutil.NewLogConfig("info", "text", "tidb-slow.log", conf.Log.File, resultedDisableTimestamp, func(config *zaplog.Config) { config.DisableErrorVerbose = resultedDisableErrorVerbose }), conf.Log.ToLogConfig())
+        require.Equal(t, logutil.NewLogConfig("info", "text", "tidb-slow.log", "", conf.Log.File, resultedDisableTimestamp, func(config *zaplog.Config) { config.DisableErrorVerbose = resultedDisableErrorVerbose }), conf.Log.ToLogConfig())
         err := f.Truncate(0)
         require.NoError(t, err)
         _, err = f.Seek(0, 0)
@@ -910,7 +910,7 @@ spilled-file-encryption-method = "aes128-ctr"
     require.Equal(t, GetGlobalConfig(), conf)

     // Test for log config.
-    require.Equal(t, logutil.NewLogConfig("info", "text", "tidb-slow.log", conf.Log.File, false, func(config *zaplog.Config) { config.DisableErrorVerbose = conf.Log.getDisableErrorStack() }), conf.Log.ToLogConfig())
+    require.Equal(t, logutil.NewLogConfig("info", "text", "tidb-slow.log", "", conf.Log.File, false, func(config *zaplog.Config) { config.DisableErrorVerbose = conf.Log.getDisableErrorStack() }), conf.Log.ToLogConfig())

     // Test for tracing config.
     tracingConf := &tracing.Configuration{
diff --git a/pkg/executor/cluster_table_test.go b/pkg/executor/cluster_table_test.go
index 9d5deb209fac0..4f8466b5867a0 100644
--- a/pkg/executor/cluster_table_test.go
+++ b/pkg/executor/cluster_table_test.go
@@ -15,10 +15,12 @@
 package executor_test

 import (
+    "compress/gzip"
     "context"
     "fmt"
     "net"
     "os"
+    "strings"
     "testing"
     "time"

@@ -208,71 +210,78 @@ select 10;`
     fileName2 := "tidb-slow-20236-2020-02-16T19-04-05.01.log"
     fileName3 := "tidb-slow-20236-2020-02-17T18-00-05.01.log"
     fileName4 := "tidb-slow-20236.log"
-    fileNames := []string{fileName0, fileName1, fileName2, fileName3, fileName4}
     defer config.RestoreFunc()()
     config.UpdateGlobal(func(conf *config.Config) {
         conf.Log.SlowQueryFile = fileName4
     })
-    prepareLogs(t, logData, fileNames)
-    defer func() {
-        removeFiles(t, fileNames)
-    }()
-    tk := testkit.NewTestKit(t, store)
-    loc, err := time.LoadLocation("Asia/Shanghai")
-    require.NoError(t, err)
-    tk.Session().GetSessionVars().TimeZone = loc
-    tk.MustExec("use information_schema")
-    cases := []struct {
-        prepareSQL string
-        sql        string
-        result     []string
-    }{
-        {
-            prepareSQL: "set @@time_zone = '+08:00'",
-            sql:        "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000'",
-            result:     []string{"2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"},
-        },
-        {
-            prepareSQL: "set @@time_zone = '+08:00'",
-            sql:        "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000' order by time desc",
-            result:     []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"},
-        },
-        {
-            prepareSQL: "set @@time_zone = '+08:00'",
-            sql:        "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time",
-            result:     []string{"2020-02-15 18:00:01.000000", "2020-02-15 19:00:05.000000", "2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"},
-        },
-        {
-            prepareSQL: "set @@time_zone = '+08:00'",
-            sql:        "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:01:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time desc",
-            result:     []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000", "2020-02-15 19:00:05.000000", "2020-02-15 18:00:01.000000"},
-        },
-        {
-            prepareSQL: "set @@time_zone = '+08:00'",
-            sql:        "select count(*) from cluster_slow_query where time > '2020-02-15 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc",
-            result:     []string{"9"},
-        },
-        {
-            prepareSQL: "set @@time_zone = '+08:00'",
-            sql:        "select count(*) from cluster_slow_query where (time > '2020-02-16 18:00:00' and time < '2020-05-14 20:00:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-17 20:00:00')",
-            result:     []string{"6"},
-        },
-        {
-            prepareSQL: "set @@time_zone = '+08:00'",
-            sql:        "select count(*) from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-02-17 20:00:00.000000' order by time desc",
-            result:     []string{"5"},
-        },
-        {
-            prepareSQL: "set @@time_zone = '+08:00'",
-            sql:        "select time from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc limit 3",
-            result:     []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"},
-        },
-    }
-    for _, cas := range cases {
-        if len(cas.prepareSQL) > 0 {
-            tk.MustExec(cas.prepareSQL)
+    for k := 0; k < 2; k++ {
+        // k = 0 for normal files
+        // k = 1 for compressed files
+        var fileNames []string
+        if k == 0 {
+            fileNames = []string{fileName0, fileName1, fileName2, fileName3, fileName4}
+        } else {
+            fileNames = []string{fileName0 + ".gz", fileName1 + ".gz", fileName2 + ".gz", fileName3 + ".gz", fileName4}
+        }
+        prepareLogs(t, logData, fileNames)
+        tk := testkit.NewTestKit(t, store)
+        loc, err := time.LoadLocation("Asia/Shanghai")
+        require.NoError(t, err)
+        tk.Session().GetSessionVars().TimeZone = loc
+        tk.MustExec("use information_schema")
+        cases := []struct {
+            prepareSQL string
+            sql        string
+            result     []string
+        }{
+            {
+                prepareSQL: "set @@time_zone = '+08:00'",
+                sql:        "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000'",
+                result:     []string{"2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"},
+            },
+            {
+                prepareSQL: "set @@time_zone = '+08:00'",
+                sql:        "select time from cluster_slow_query where time > '2020-02-17 12:00:05.000000' and time < '2020-05-14 20:00:00.000000' order by time desc",
+                result:     []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"},
+            },
+            {
+                prepareSQL: "set @@time_zone = '+08:00'",
+                sql:        "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:00:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time",
+                result:     []string{"2020-02-15 18:00:01.000000", "2020-02-15 19:00:05.000000", "2020-02-17 18:00:05.000000", "2020-02-17 19:00:00.000000", "2020-05-14 19:03:54.314615"},
+            },
+            {
+                prepareSQL: "set @@time_zone = '+08:00'",
+                sql:        "select time from cluster_slow_query where (time > '2020-02-15 18:00:00' and time < '2020-02-15 20:00:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-14 20:00:00') order by time desc",
+                result:     []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000", "2020-02-15 19:00:05.000000", "2020-02-15 18:00:01.000000"},
+            },
+            {
+                prepareSQL: "set @@time_zone = '+08:00'",
+                sql:        "select count(*) from cluster_slow_query where time > '2020-02-15 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc",
+                result:     []string{"9"},
+            },
+            {
+                prepareSQL: "set @@time_zone = '+08:00'",
+                sql:        "select count(*) from cluster_slow_query where (time > '2020-02-16 18:00:00' and time < '2020-05-14 20:00:00') or (time > '2020-02-17 18:00:00' and time < '2020-05-17 20:00:00')",
+                result:     []string{"6"},
+            },
+            {
+                prepareSQL: "set @@time_zone = '+08:00'",
+                sql:        "select count(*) from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-02-17 20:00:00.000000' order by time desc",
+                result:     []string{"5"},
+            },
+            {
+                prepareSQL: "set @@time_zone = '+08:00'",
+                sql:        "select time from cluster_slow_query where time > '2020-02-16 18:00:00.000000' and time < '2020-05-14 20:00:00.000000' order by time desc limit 3",
+                result:     []string{"2020-05-14 19:03:54.314615", "2020-02-17 19:00:00.000000", "2020-02-17 18:00:05.000000"},
+            },
+        }
+        for _, cas := range cases {
+            if len(cas.prepareSQL) > 0 {
+                tk.MustExec(cas.prepareSQL)
+            }
+            tk.MustQuery(cas.sql).Check(testkit.RowsWithSep("|", cas.result...))
         }
-        tk.MustQuery(cas.sql).Check(testkit.RowsWithSep("|", cas.result...))
+        removeFiles(t, fileNames)
     }
 }

@@ -308,12 +317,26 @@ func TestSQLDigestTextRetriever(t *testing.T) {
 }

 func prepareLogs(t *testing.T, logData []string, fileNames []string) {
+    writeFile := func(file string, data string) {
+        if strings.HasSuffix(file, ".gz") {
+            f, err := os.Create(file)
+            require.NoError(t, err)
+            gz := gzip.NewWriter(f)
+            _, err = gz.Write([]byte(data))
+            require.NoError(t, err)
+            require.NoError(t, gz.Close())
+            require.NoError(t, f.Close())
+        } else {
+            f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+            require.NoError(t, err)
+            _, err = f.Write([]byte(data))
+            require.NoError(t, err)
+            require.NoError(t, f.Close())
+        }
+    }
+
     for i, log := range logData {
-        f, err := os.OpenFile(fileNames[i], os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
-        require.NoError(t, err)
-        _, err = f.Write([]byte(log))
-        require.NoError(t, err)
-        require.NoError(t, f.Close())
+        writeFile(fileNames[i], log)
     }
 }
diff --git a/pkg/executor/slow_query.go b/pkg/executor/slow_query.go
index 77e231e6381f2..fb1e3373342e1 100644
--- a/pkg/executor/slow_query.go
+++ b/pkg/executor/slow_query.go
@@ -16,6 +16,7 @@ package executor

 import (
     "bufio"
+    "compress/gzip"
     "context"
     "fmt"
     "io"
@@ -138,18 +139,11 @@ func (e *slowQueryRetriever) initialize(ctx context.Context, sctx sessionctx.Con
     e.initialized = true
     e.files, err = e.getAllFiles(ctx, sctx, sctx.GetSessionVars().SlowQueryFile)
     if e.extractor.Desc {
-        e.reverseLogFiles()
+        slices.Reverse(e.files)
     }
     return err
 }

-func (e *slowQueryRetriever) reverseLogFiles() {
-    for i := 0; i < len(e.files)/2; i++ {
-        j := len(e.files) - i - 1
-        e.files[i], e.files[j] = e.files[j], e.files[i]
-    }
-}
-
 func (e *slowQueryRetriever) close() error {
     for _, f := range e.files {
         err := f.file.Close()
@@ -169,10 +163,11 @@ type parsedSlowLog struct {
     err error
 }

-func (e *slowQueryRetriever) getNextFile() *os.File {
+func (e *slowQueryRetriever) getNextFile() *logFile {
     if e.fileIdx >= len(e.files) {
         return nil
     }
+    ret := &e.files[e.fileIdx]
     file := e.files[e.fileIdx].file
     e.fileIdx++
     if e.stats != nil {
@@ -183,33 +178,60 @@ func (e *slowQueryRetriever) getNextFile() *os.File {
             e.stats.readFileNum++
         }
     }
-    return file
+    return ret
 }

-func (e *slowQueryRetriever) getPreviousFile() *os.File {
+func (e *slowQueryRetriever) getPreviousReader() (*bufio.Reader, error) {
     fileIdx := e.fileIdx
     // fileIdx refer to the next file which should be read
     // so we need to set fileIdx to fileIdx - 2 to get the previous file.
     fileIdx = fileIdx - 2
     if fileIdx < 0 {
-        return nil
+        return nil, nil
     }
-    file := e.files[fileIdx].file
-    _, err := file.Seek(0, io.SeekStart)
+    file := e.files[fileIdx]
+    _, err := file.file.Seek(0, io.SeekStart)
     if err != nil {
-        return nil
+        return nil, err
     }
-    return file
+    var reader *bufio.Reader
+    if !file.compressed {
+        reader = bufio.NewReader(file.file)
+    } else {
+        gr, err := gzip.NewReader(file.file)
+        if err != nil {
+            return nil, err
+        }
+        reader = bufio.NewReader(gr)
+    }
+    return reader, nil
 }

-func (e *slowQueryRetriever) parseDataForSlowLog(ctx context.Context, sctx sessionctx.Context) {
-    defer e.wg.Done()
+func (e *slowQueryRetriever) getNextReader() (*bufio.Reader, error) {
     file := e.getNextFile()
     if file == nil {
+        return nil, nil
+    }
+    var reader *bufio.Reader
+    if !file.compressed {
+        reader = bufio.NewReader(file.file)
+    } else {
+        gr, err := gzip.NewReader(file.file)
+        if err != nil {
+            return nil, err
+        }
+        reader = bufio.NewReader(gr)
+    }
+    return reader, nil
+}
+
+func (e *slowQueryRetriever) parseDataForSlowLog(ctx context.Context, sctx sessionctx.Context) {
+    defer e.wg.Done()
+    reader, _ := e.getNextReader()
+    if reader == nil {
         close(e.taskList)
         return
     }
-    reader := bufio.NewReader(file)
     e.parseSlowLog(ctx, sctx, reader, ParseSlowLogBatchSize)
 }

@@ -303,12 +325,12 @@
         if err != nil {
             if err == io.EOF {
                 e.fileLine = 0
-                file := e.getNextFile()
-                if file == nil {
-                    return [][]string{log}, nil
+                newReader, err := e.getNextReader()
+                if newReader == nil || err != nil {
+                    return [][]string{log}, err
                 }
                 offset.length = len(log)
-                reader.Reset(file)
+                reader.Reset(newReader)
                 continue
             }
             return [][]string{log}, err
@@ -330,9 +352,9 @@ func (e *slowQueryRetriever) getBatchLogForReversedScan(ctx context.Context, rea
     // reader maybe change when read previous file.
     inputReader := reader
     defer func() {
-        file := e.getNextFile()
-        if file != nil {
-            inputReader.Reset(file)
+        newReader, _ := e.getNextReader()
+        if newReader != nil {
+            inputReader.Reset(newReader)
         }
     }()
     var line string
@@ -355,11 +377,10 @@ func (e *slowQueryRetriever) getBatchLogForReversedScan(ctx context.Context, rea
                 return decomposedSlowLogTasks, nil
             }
             e.fileLine = 0
-            file := e.getPreviousFile()
-            if file == nil {
+            reader, err = e.getPreviousReader()
+            if reader == nil || err != nil {
                 return decomposeToSlowLogTasks(logs, num), nil
             }
-            reader = bufio.NewReader(file)
             scanPreviousFile = true
             continue
         }
@@ -825,7 +846,8 @@ func ParseTime(s string) (time.Time, error) {

 type logFile struct {
     file       *os.File  // The opened file handle
-    start, end time.Time // The start/end time of the log file
+    start      time.Time // The start time of the log file
+    compressed bool      // The file is compressed or not
 }

 // getAllFiles is used to get all slow-log needed to parse, it is exported for test.
@@ -873,6 +895,7 @@
         if !strings.HasPrefix(path, prefix) {
             return nil
         }
+        compressed := strings.HasSuffix(path, ".gz")
         if isCtxDone(ctx) {
             return ctx.Err()
         }
@@ -888,7 +911,7 @@
             }
         }()
         // Get the file start time.
-        fileStartTime, err := e.getFileStartTime(ctx, file)
+        fileStartTime, err := e.getFileStartTime(ctx, file, compressed)
         if err != nil {
             return handleErr(err)
         }
@@ -904,30 +927,34 @@
             return nil
         }

-        // Get the file end time.
-        fileEndTime, err := e.getFileEndTime(ctx, file)
-        if err != nil {
-            return handleErr(err)
-        }
-        end := types.NewTime(types.FromGoTime(fileEndTime), mysql.TypeDatetime, types.MaxFsp)
-        inTimeRanges := false
-        for _, tr := range e.checker.timeRanges {
-            if !(start.Compare(tr.endTime) > 0 || end.Compare(tr.startTime) < 0) {
-                inTimeRanges = true
-                break
+        // Getting the end time from a compressed file would require uncompressing
+        // the whole file, which is very slow and consumes a lot of memory.
+        if !compressed {
+            // Get the file end time.
+            fileEndTime, err := e.getFileEndTime(ctx, file)
+            if err != nil {
+                return handleErr(err)
+            }
+            end := types.NewTime(types.FromGoTime(fileEndTime), mysql.TypeDatetime, types.MaxFsp)
+            inTimeRanges := false
+            for _, tr := range e.checker.timeRanges {
+                if !(start.Compare(tr.endTime) > 0 || end.Compare(tr.startTime) < 0) {
+                    inTimeRanges = true
+                    break
+                }
+            }
+            if !inTimeRanges {
+                return nil
             }
-        }
-        if !inTimeRanges {
-            return nil
         }
         _, err = file.Seek(0, io.SeekStart)
         if err != nil {
             return handleErr(err)
         }
         logFiles = append(logFiles, logFile{
-            file:  file,
-            start: fileStartTime,
-            end:   fileEndTime,
+            file:       file,
+            start:      fileStartTime,
+            compressed: compressed,
         })
         skip = true
         return nil
@@ -942,16 +969,46 @@
     slices.SortFunc(logFiles, func(i, j logFile) int {
         return i.start.Compare(j.start)
     })
-    return logFiles, err
+    // Assuming the log files' time ranges don't overlap, drop compressed files that cannot fall into any requested time range.
+    var ret []logFile
+    for i, file := range logFiles {
+        if i == len(logFiles)-1 || !file.compressed {
+            ret = append(ret, file)
+            continue
+        }
+        start := types.NewTime(types.FromGoTime(logFiles[i].start), mysql.TypeDatetime, types.MaxFsp)
+        // use next file.start as endTime
+        end := types.NewTime(types.FromGoTime(logFiles[i+1].start), mysql.TypeDatetime, types.MaxFsp)
+        inTimeRanges := false
+        for _, tr := range e.checker.timeRanges {
+            if !(start.Compare(tr.endTime) > 0 || end.Compare(tr.startTime) < 0) {
+                inTimeRanges = true
+                break
+            }
+        }
+        if inTimeRanges {
+            ret = append(ret, file)
+        }
+    }
+    return ret, err
 }

-func (*slowQueryRetriever) getFileStartTime(ctx context.Context, file *os.File) (time.Time, error) {
+func (*slowQueryRetriever) getFileStartTime(ctx context.Context, file *os.File, compressed bool) (time.Time, error) {
     var t time.Time
     _, err := file.Seek(0, io.SeekStart)
     if err != nil {
         return t, err
     }
-    reader := bufio.NewReader(file)
+    var reader *bufio.Reader
+    if !compressed {
+        reader = bufio.NewReader(file)
+    } else {
+        gr, err := gzip.NewReader(file)
+        if err != nil {
+            return t, err
+        }
+        reader = bufio.NewReader(gr)
+    }
     maxNum := 128
     for {
         lineByte, err := getOneLine(reader)
diff --git a/pkg/executor/slow_query_test.go b/pkg/executor/slow_query_test.go
index 03d46d6c19393..eeed59ce882db 100644
--- a/pkg/executor/slow_query_test.go
+++ b/pkg/executor/slow_query_test.go
@@ -17,6 +17,7 @@ package executor
 import (
     "bufio"
     "bytes"
+    "compress/gzip"
     "context"
     "fmt"
     "os"
@@ -367,133 +368,153 @@ select 7;`
     config.UpdateGlobal(func(conf *config.Config) {
         conf.Log.SlowQueryFile = fileName3
     })
-    fileNames := []string{fileName0, fileName1, fileName2, fileName3}
-    prepareLogs(t, logData, fileNames)
-    defer func() {
-        removeFiles(fileNames)
-    }()
-
-    cases := []struct {
-        startTime string
-        endTime   string
-        files     []string
-        querys    []string
-    }{
-        {
-            startTime: "2020-02-15T18:00:00.000000+08:00",
-            endTime:   "2020-02-17T20:00:00.000000+08:00",
-            files:     []string{fileName1, fileName2, fileName3},
-            querys: []string{
-                "select 1;",
-                "select 2;",
-                "select 3;",
-                "select 4;",
-                "select 5;",
-                "select 6;",
+    for k := 0; k < 2; k++ {
+        // k = 0 for normal files
+        // k = 1 for compressed files
+        var fileNames []string
+        if k == 0 {
+            fileNames = []string{fileName0, fileName1, fileName2, fileName3}
+        } else {
+            fileNames = []string{fileName0 + ".gz", fileName1 + ".gz", fileName2 + ".gz", fileName3}
+        }
+        prepareLogs(t, logData, fileNames)
+
+        cases := []struct {
+            startTime string
+            endTime   string
+            files     []string
+            querys    []string
+        }{
+            {
+                startTime: "2020-02-15T18:00:00.000000+08:00",
+                endTime:   "2020-02-17T20:00:00.000000+08:00",
+                files:     []string{fileName1, fileName2, fileName3},
+                querys: []string{
+                    "select 1;",
+                    "select 2;",
+                    "select 3;",
+                    "select 4;",
+                    "select 5;",
+                    "select 6;",
+                },
             },
-        },
-        {
-            startTime: "2020-02-15T18:00:02.000000+08:00",
-            endTime:   "2020-02-16T20:00:00.000000+08:00",
-            files:     []string{fileName1, fileName2, fileName3},
-            querys: []string{
-                "select 2;",
-                "select 3;",
-                "select 4;",
-                "select 5;",
+            {
+                startTime: "2020-02-15T18:00:02.000000+08:00",
+                endTime:   "2020-02-16T20:00:00.000000+08:00",
+                files:     []string{fileName1, fileName2, fileName3},
+                querys: []string{
+                    "select 2;",
+                    "select 3;",
+                    "select 4;",
+                    "select 5;",
+                },
             },
-        },
-        {
-            startTime: "2020-02-16T18:00:03.000000+08:00",
-            endTime:   "2020-02-16T18:59:00.000000+08:00",
-            files:     []string{fileName2},
-            querys: []string{
-                "select 4;",
+            {
+                startTime: "2020-02-16T18:00:03.000000+08:00",
+                endTime:   "2020-02-16T18:59:00.000000+08:00",
+                files:     []string{fileName2},
+                querys: []string{
+                    "select 4;",
+                },
             },
-        },
-        {
-            startTime: "2020-02-16T18:00:03.000000+08:00",
-            endTime:   "2020-02-16T20:00:00.000000+08:00",
-            files:     []string{fileName2, fileName3},
-            querys: []string{
-                "select 4;",
-                "select 5;",
+            {
+                startTime: "2020-02-16T18:00:03.000000+08:00",
+                endTime:   "2020-02-16T20:00:00.000000+08:00",
+                files:     []string{fileName2, fileName3},
+                querys: []string{
+                    "select 4;",
+                    "select 5;",
+                },
             },
-        },
-        {
-            startTime: "2020-02-16T19:00:00.000000+08:00",
-            endTime:   "2020-02-17T17:00:00.000000+08:00",
-            files:     []string{fileName3},
-            querys: []string{
-                "select 5;",
+            {
+                startTime: "2020-02-16T19:00:00.000000+08:00",
+                endTime:   "2020-02-17T17:00:00.000000+08:00",
+                files:     []string{fileName3},
+                querys: []string{
+                    "select 5;",
+                },
             },
-        },
-        {
-            startTime: "2010-01-01T00:00:00.000000+08:00",
-            endTime:   "2010-01-01T01:00:00.000000+08:00",
-            files:     []string{},
-        },
-        {
-            startTime: "2020-03-01T00:00:00.000000+08:00",
-            endTime:   "2010-03-01T01:00:00.000000+08:00",
-            files:     []string{},
-        },
-        {
-            startTime: "",
-            endTime:   "",
-            files:     []string{fileName3},
-            querys: []string{
-                "select 5;",
-                "select 6;",
-                "select 7;",
+            {
+                startTime: "2010-01-01T00:00:00.000000+08:00",
+                endTime:   "2010-01-01T01:00:00.000000+08:00",
+                files:     []string{},
             },
-        },
-        {
-            startTime: "2020-04-15T18:00:05.299063744+08:00",
-            endTime:   "2020-04-15T18:00:05.299063744+08:00",
-            files:     []string{fileName3},
-            querys: []string{
-                "select 7;",
+            {
+                startTime: "2020-03-01T00:00:00.000000+08:00",
+                endTime:   "2010-03-01T01:00:00.000000+08:00",
+                files:     []string{},
+            },
+            {
+                startTime: "",
+                endTime:   "",
+                files:     []string{fileName3},
+                querys: []string{
+                    "select 5;",
+                    "select 6;",
+                    "select 7;",
+                },
+            },
+            {
+                startTime: "2020-04-15T18:00:05.299063744+08:00",
+                endTime:   "2020-04-15T18:00:05.299063744+08:00",
+                files:     []string{fileName3},
+                querys: []string{
+                    "select 7;",
+                },
             },
-        },
-    }
-
-    loc, err := time.LoadLocation("Asia/Shanghai")
-    require.NoError(t, err)
-    sctx := mock.NewContext()
-    sctx.ResetSessionAndStmtTimeZone(loc)
-    sctx.GetSessionVars().SlowQueryFile = fileName3
-    for i, cas := range cases {
-        extractor := &plannercore.SlowQueryExtractor{Enable: len(cas.startTime) > 0 && len(cas.endTime) > 0}
-        if extractor.Enable {
-            startTime, err := ParseTime(cas.startTime)
-            require.NoError(t, err)
-            endTime, err := ParseTime(cas.endTime)
-            require.NoError(t, err)
-            extractor.TimeRanges = []*plannercore.TimeRange{{StartTime: startTime, EndTime: endTime}}
         }
-        retriever, err := newSlowQueryRetriever()
-        require.NoError(t, err)
-        retriever.extractor = extractor
-        err = retriever.initialize(context.Background(), sctx)
+
+        loc, err := time.LoadLocation("Asia/Shanghai")
         require.NoError(t, err)
-        comment := fmt.Sprintf("case id: %v", i)
-        require.Equal(t, len(retriever.files), len(cas.files), comment)
-        if len(retriever.files) > 0 {
-            reader := bufio.NewReader(retriever.files[0].file)
-            rows, err := parseLog(retriever, sctx, reader)
+        sctx := mock.NewContext()
+        sctx.ResetSessionAndStmtTimeZone(loc)
+        sctx.GetSessionVars().SlowQueryFile = fileName3
+        for i, cas := range cases {
+            extractor := &plannercore.SlowQueryExtractor{Enable: len(cas.startTime) > 0 && len(cas.endTime) > 0}
+            if extractor.Enable {
+                startTime, err := ParseTime(cas.startTime)
+                require.NoError(t, err)
+                endTime, err := ParseTime(cas.endTime)
+                require.NoError(t, err)
+                extractor.TimeRanges = []*plannercore.TimeRange{{StartTime: startTime, EndTime: endTime}}
+            }
+            retriever, err := newSlowQueryRetriever()
             require.NoError(t, err)
-            require.Equal(t, len(rows), len(cas.querys), comment)
-            for i, row := range rows {
-                require.Equal(t, row[len(row)-1].GetString(), cas.querys[i], comment)
+            retriever.extractor = extractor
+            err = retriever.initialize(context.Background(), sctx)
+            require.NoError(t, err)
+            comment := fmt.Sprintf("compressed: %v, case id: %v", k, i)
+            if len(retriever.files) > 0 {
+                var reader *bufio.Reader
+                reader, err := retriever.getNextReader()
+                require.NoError(t, err, comment)
+                rows, err := parseLog(retriever, sctx, reader)
+                require.NoError(t, err, comment)
+                require.Equal(t, len(rows), len(cas.querys), comment)
+                for i, row := range rows {
+                    require.Equal(t, row[len(row)-1].GetString(), cas.querys[i], comment)
+                }
             }
-        }
-        for i, file := range retriever.files {
-            require.Equal(t, file.file.Name(), cas.files[i])
-            require.NoError(t, file.file.Close())
+            if k == 0 {
+                require.Equal(t, len(retriever.files), len(cas.files), comment)
+                for i, file := range retriever.files {
+                    require.Equal(t, file.file.Name(), cas.files[i], comment)
+                }
+            } else {
+                // For compressed files, the retriever may contain one extra file.
+                require.True(t, (len(retriever.files) == len(cas.files)) || (len(retriever.files) == len(cas.files)+1), comment)
+                var fileNames []string
+                for _, file := range retriever.files {
+                    fileNames = append(fileNames, strings.TrimSuffix(file.file.Name(), ".gz"))
+                }
+                for _, file := range cas.files {
+                    require.Contains(t, fileNames, file, comment)
+                }
+            }
+            require.NoError(t, retriever.close())
         }
-        require.NoError(t, retriever.close())
+        removeFiles(fileNames)
     }
 }

@@ -719,11 +740,21 @@ func checkGoroutineExists(keyword string) bool {

 func prepareLogs(t *testing.T, logData []string, fileNames []string) {
     writeFile := func(file string, data string) {
-        f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
-        require.NoError(t, err)
-        _, err = f.Write([]byte(data))
-        require.NoError(t, err)
-        require.NoError(t, f.Close())
+        if strings.HasSuffix(file, ".gz") {
+            f, err := os.Create(file)
+            require.NoError(t, err)
+            gz := gzip.NewWriter(f)
+            _, err = gz.Write([]byte(data))
+            require.NoError(t, err)
+            require.NoError(t, gz.Close())
+            require.NoError(t, f.Close())
+        } else {
+            f, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+            require.NoError(t, err)
+            _, err = f.Write([]byte(data))
+            require.NoError(t, err)
+            require.NoError(t, f.Close())
+        }
     }
     for i, log := range logData {
diff --git a/pkg/session/session.go b/pkg/session/session.go
index a4c4ed88988ca..98b43edca6836 100644
--- a/pkg/session/session.go
+++ b/pkg/session/session.go
@@ -3938,7 +3938,7 @@ func logGeneralQuery(execStmt *executor.ExecStmt, s *session, isPrepared bool) {
         if vars.EnableRedactLog != errors.RedactLogEnable {
             query += redact.String(vars.EnableRedactLog, vars.PlanCacheParams.String())
         }
-        logutil.BgLogger().Info("GENERAL_LOG",
+        logutil.GeneralLogger.Info("GENERAL_LOG",
             zap.Uint64("conn", vars.ConnectionID),
             zap.String("session_alias", vars.SessionAlias),
             zap.String("user", vars.User.LoginString()),
diff --git a/pkg/util/logutil/BUILD.bazel b/pkg/util/logutil/BUILD.bazel
index b6893b1563238..4ee4b14c6da44 100644
--- a/pkg/util/logutil/BUILD.bazel
+++ b/pkg/util/logutil/BUILD.bazel
@@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
 go_library(
     name = "logutil",
     srcs = [
+        "general_logger.go",
         "hex.go",
         "log.go",
         "slow_query_logger.go",
diff --git a/pkg/util/logutil/general_logger.go b/pkg/util/logutil/general_logger.go
new file mode 100644
index 0000000000000..0e295c9c54d1c
--- /dev/null
+++ b/pkg/util/logutil/general_logger.go
@@ -0,0 +1,44 @@
+// Copyright 2024 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+    "github.com/pingcap/errors"
+    "github.com/pingcap/log"
+    "go.uber.org/zap"
+)
+
+func newGeneralLogger(cfg *LogConfig) (*zap.Logger, *log.ZapProperties, error) {
+    // create the general logger
+    logger, prop, err := log.InitLogger(newGeneralLogConfig(cfg))
+    if err != nil {
+        return nil, nil, errors.Trace(err)
+    }
+    return logger, prop, nil
+}
+
+func newGeneralLogConfig(cfg *LogConfig) *log.Config {
+    // Copy the global log config to the general log config; if the filename of the
+    // general log config is empty, the general log behaves the same as the global log.
+    genConfig := cfg.Config
+    // The level of the global log config doesn't affect the general logger,
+    // which always logs at the default (info) level.
+    genConfig.Level = LogConfig{}.Level
+    if len(cfg.GeneralLogFile) != 0 {
+        genConfig.File = cfg.File
+        genConfig.File.Filename = cfg.GeneralLogFile
+    }
+    return &genConfig
+}
diff --git a/pkg/util/logutil/log.go b/pkg/util/logutil/log.go
index 997071483476d..1f77023f8a76a 100644
--- a/pkg/util/logutil/log.go
+++ b/pkg/util/logutil/log.go
@@ -81,10 +81,13 @@ type LogConfig struct {
     // SlowQueryFile filename, default to File log config on empty.
     SlowQueryFile string
+
+    // GeneralLogFile filename, default to File log config on empty.
+    GeneralLogFile string
 }

 // NewLogConfig creates a LogConfig.
-func NewLogConfig(level, format, slowQueryFile string, fileCfg FileLogConfig, disableTimestamp bool, opts ...func(*log.Config)) *LogConfig {
+func NewLogConfig(level, format, slowQueryFile string, generalLogFile string, fileCfg FileLogConfig, disableTimestamp bool, opts ...func(*log.Config)) *LogConfig {
     c := &LogConfig{
         Config: log.Config{
             Level:            level,
@@ -92,7 +95,8 @@ func NewLogConfig(level, format, slowQueryFile string, fileCfg FileLogConfig, di
             DisableTimestamp: disableTimestamp,
             File:             fileCfg.FileLogConfig,
         },
-        SlowQueryFile: slowQueryFile,
+        SlowQueryFile:  slowQueryFile,
+        GeneralLogFile: generalLogFile,
     }
     for _, opt := range opts {
         opt(&c.Config)
@@ -113,6 +117,9 @@ const (
 // SlowQueryLogger is used to log slow query, InitLogger will modify it according to config file.
 var SlowQueryLogger = log.L()

+// GeneralLogger is used to log the general log, InitLogger will modify it according to config file.
+var GeneralLogger = log.L()
+
 // InitLogger initializes a logger with cfg.
 func InitLogger(cfg *LogConfig, opts ...zap.Option) error {
     opts = append(opts, zap.AddStacktrace(zapcore.FatalLevel))
@@ -128,6 +135,12 @@ func InitLogger(cfg *LogConfig, opts ...zap.Option) error {
         return errors.Trace(err)
     }

+    // init dedicated logger for general log
+    GeneralLogger, _, err = newGeneralLogger(cfg)
+    if err != nil {
+        return errors.Trace(err)
+    }
+
     initGRPCLogger(gl)
     tikv.SetLogContextKey(CtxLogKey)
     return nil
@@ -174,6 +187,11 @@ func ReplaceLogger(cfg *LogConfig, opts ...zap.Option) error {
         return errors.Trace(err)
     }

+    GeneralLogger, _, err = newGeneralLogger(cfg)
+    if err != nil {
+        return errors.Trace(err)
+    }
+
     log.S().Infof("replaced global logger with config: %s", string(cfgJSON))

     return nil
diff --git a/pkg/util/logutil/log_test.go b/pkg/util/logutil/log_test.go
index d0c08d5554e0a..0092aa5763a6a 100644
--- a/pkg/util/logutil/log_test.go
+++ b/pkg/util/logutil/log_test.go
@@ -57,7 +57,7 @@ func TestZapLoggerWithKeys(t *testing.T) {
     }

     fileCfg := FileLogConfig{log.FileLogConfig{Filename: fmt.Sprintf("zap_log_%s", uuid.NewString()), MaxSize: 4096}}
-    conf := NewLogConfig("info", DefaultLogFormat, "", fileCfg, false)
+    conf := NewLogConfig("info", DefaultLogFormat, "", "", fileCfg, false)
     err := InitLogger(conf)
     require.NoError(t, err)
     connID := uint64(123)
@@ -66,7 +66,7 @@ func TestZapLoggerWithKeys(t *testing.T) {
     err = os.Remove(fileCfg.Filename)
     require.NoError(t, err)

-    conf = NewLogConfig("info", DefaultLogFormat, "", fileCfg, false)
+    conf = NewLogConfig("info", DefaultLogFormat, "", "", fileCfg, false)
     err = InitLogger(conf)
     require.NoError(t, err)
     ctx = WithConnID(context.Background(), connID)
@@ -124,7 +124,7 @@ func TestZapLoggerWithCore(t *testing.T) {
     }

     fileCfg := FileLogConfig{log.FileLogConfig{Filename: "zap_log", MaxSize: 4096}}
-    conf := NewLogConfig("info", DefaultLogFormat, "", fileCfg, false)
+    conf := NewLogConfig("info", DefaultLogFormat, "", "", fileCfg, false)

     opt := zap.WrapCore(func(core zapcore.Core) zapcore.Core {
         return core.With([]zap.Field{zap.String("coreKey", "coreValue")})
@@ -165,7 +165,7 @@ func testZapLogger(ctx context.Context, t *testing.T, fileName, pattern string)
 }

 func TestSetLevel(t *testing.T) {
-    conf := NewLogConfig("info", DefaultLogFormat, "", EmptyFileLogConfig, false)
+    conf := NewLogConfig("info", DefaultLogFormat, "", "", EmptyFileLogConfig, false)
     err := InitLogger(conf)
     require.NoError(t, err)
     require.Equal(t, zap.InfoLevel, log.GetLevel())
@@ -181,37 +181,80 @@ func TestSetLevel(t *testing.T) {
     require.Equal(t, zap.DebugLevel, log.GetLevel())
 }

-func TestSlowQueryLoggerCreation(t *testing.T) {
-    level := "Error"
-    conf := NewLogConfig(level, DefaultLogFormat, "", EmptyFileLogConfig, false)
-    _, prop, err := newSlowQueryLogger(conf)
-    // assert after init slow query logger, the original conf is not changed
-    require.Equal(t, conf.Level, level)
-    require.NoError(t, err)
-    // slow query logger doesn't use the level of the global log config, and the
-    // level should be less than WarnLevel which is used by it to log slow query.
-    require.NotEqual(t, conf.Level, prop.Level.String())
-    require.True(t, prop.Level.Level() <= zapcore.WarnLevel)
+func TestSlowQueryLoggerAndGeneralLoggerCreation(t *testing.T) {
+    var prop *log.ZapProperties
+    var err error
+    for i := 0; i < 2; i++ {
+        level := "Error"
+        conf := NewLogConfig(level, DefaultLogFormat, "", "", EmptyFileLogConfig, false)
+        if i == 0 {
+            _, prop, err = newSlowQueryLogger(conf)
+        } else {
+            _, prop, err = newGeneralLogger(conf)
+        }
+        // assert that after the logger is initialized, the original conf is not changed
+        require.Equal(t, conf.Level, level)
+        require.NoError(t, err)
+        // the logger doesn't use the level of the global log config, and its
+        // level should equal InfoLevel.
+        require.NotEqual(t, conf.Level, prop.Level.String())
+        require.True(t, prop.Level.Level() == zapcore.InfoLevel)
+
+        level = "warn"
+        name := "test.log"
+        fileConf := FileLogConfig{
+            log.FileLogConfig{
+                Filename:   name,
+                MaxSize:    10,
+                MaxDays:    10,
+                MaxBackups: 10,
+            },
+        }
+        conf = NewLogConfig(level, DefaultLogFormat, name, "", fileConf, false)
+        if i == 0 {
+            slowQueryConf := newSlowQueryLogConfig(conf)
+            // slowQueryConf.MaxDays/MaxSize/MaxBackups should be the same as the global config.
+            require.Equal(t, fileConf.FileLogConfig, slowQueryConf.File)
+        } else {
+            generalConf := newGeneralLogConfig(conf)
+            // generalConf.MaxDays/MaxSize/MaxBackups should be the same as the global config.
+            require.Equal(t, fileConf.FileLogConfig, generalConf.File)
+        }
+    }
+}

-    level = "warn"
-    slowQueryFn := "slow-query.log"
+func TestCompressedLog(t *testing.T) {
+    level := "warn"
     fileConf := FileLogConfig{
         log.FileLogConfig{
-            Filename:   slowQueryFn,
-            MaxSize:    10,
-            MaxDays:    10,
-            MaxBackups: 10,
+            Filename:    "test.log",
+            MaxSize:     10,
+            MaxDays:     10,
+            MaxBackups:  10,
+            Compression: "xxx",
         },
     }
-    conf = NewLogConfig(level, DefaultLogFormat, slowQueryFn, fileConf, false)
-    slowQueryConf := newSlowQueryLogConfig(conf)
-    // slowQueryConf.MaxDays/MaxSize/MaxBackups should be same with global config.
-    require.Equal(t, fileConf.FileLogConfig, slowQueryConf.File)
+    conf := NewLogConfig(level, DefaultLogFormat, "test.log", "", fileConf, false)
+    err := InitLogger(conf)
+    require.Error(t, err)
+
+    fileConf = FileLogConfig{
+        log.FileLogConfig{
+            Filename:    "test.log",
+            MaxSize:     10,
+            MaxDays:     10,
+            MaxBackups:  10,
+            Compression: "gzip",
+        },
+    }
+    conf = NewLogConfig(level, DefaultLogFormat, "test.log", "", fileConf, false)
+    err = InitLogger(conf)
+    require.NoError(t, err)
 }

 func TestGlobalLoggerReplace(t *testing.T) {
     fileCfg := FileLogConfig{log.FileLogConfig{Filename: "zap_log", MaxDays: 0, MaxSize: 4096}}
-    conf := NewLogConfig("info", DefaultLogFormat, "", fileCfg, false)
+    conf := NewLogConfig("info", DefaultLogFormat, "", "", fileCfg, false)
     err := InitLogger(conf)
     require.NoError(t, err)
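
For context on the new API shape: after this patch, callers of logutil pass the general log path as the fourth argument of NewLogConfig, with an empty string preserving the old behavior of writing GENERAL_LOG entries to the server log. A minimal, illustrative sketch based on the calls visible in this diff (the file names below are placeholders, not part of the patch):

package main

import "github.com/pingcap/tidb/pkg/util/logutil"

func main() {
    // Arguments: level, format, slow-query file, general-log file,
    // file config, disable-timestamp. Passing "" for the general-log
    // file keeps GENERAL_LOG entries in the main server log.
    cfg := logutil.NewLogConfig("info", logutil.DefaultLogFormat,
        "tidb-slow.log",    // slow query log path
        "tidb-general.log", // general log path (illustrative)
        logutil.EmptyFileLogConfig, false)
    if err := logutil.InitLogger(cfg); err != nil {
        panic(err)
    }
    // InitLogger now also prepares logutil.GeneralLogger, which session.go
    // uses for GENERAL_LOG entries instead of logutil.BgLogger().
    logutil.GeneralLogger.Info("GENERAL_LOG")
}

On the server, the same path can be set with the new --log-general flag or the general-log-file entry in config.toml, and setting the new compression option in the log file section to "gzip" produces the rotated .gz files that the updated slow-query retriever can read.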