From a1bd3673e44619a7561387561d4fb85e6da6b949 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Thu, 19 Oct 2017 12:26:27 +0200 Subject: [PATCH 001/121] initial import of postgres output plugin --- plugins/outputs/all/all.go | 1 + plugins/outputs/postgresql/README.md | 11 ++++ plugins/outputs/postgresql/postgresql.go | 65 ++++++++++++++++++++++++ 3 files changed, 77 insertions(+) create mode 100644 plugins/outputs/postgresql/README.md create mode 100644 plugins/outputs/postgresql/postgresql.go diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 61270d5ad412e..407dd0935a656 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -34,6 +34,7 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/newrelic" _ "github.com/influxdata/telegraf/plugins/outputs/nsq" _ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" + _ "github.com/influxdata/telegraf/plugins/outputs/postgresql" _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" _ "github.com/influxdata/telegraf/plugins/outputs/riemann" _ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy" diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md new file mode 100644 index 0000000000000..df7e546f69d33 --- /dev/null +++ b/plugins/outputs/postgresql/README.md @@ -0,0 +1,11 @@ +# PostgreSQL Output Plugin + +This output plugin writes all metrics to PostgreSQL. + +### Configuration: + +```toml +# Send metrics to postgres +[[outputs.postgresql]] + # no configuration +``` diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go new file mode 100644 index 0000000000000..48acbc7540008 --- /dev/null +++ b/plugins/outputs/postgresql/postgresql.go @@ -0,0 +1,65 @@ +package postgresql + +import ( + "database/sql" + "fmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" + "strings" +) + +type Postgresql struct { + db *sql.DB +} + +func (p *Postgresql) Connect() error { + fmt.Println("Connect") + + db, err := sql.Open("pgx", "host=localhost database=postgres") + + if err != nil { + fmt.Println("DB Connect failed") + return nil + } + fmt.Println("DB Connect") + p.db = db + + return nil +} + +func (p *Postgresql) Close() error { + fmt.Println("Close") + return nil +} + +func (p *Postgresql) SampleConfig() string { return "" } +func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } + +func (p *Postgresql) Write(metrics []telegraf.Metric) error { + + for _, m := range metrics { + var keys, values []string + for k, v := range m.Tags() { + keys = append(keys, k) + values = append(values, fmt.Sprintf("'%s'", v)) + } + for k, v := range m.Fields() { + keys = append(keys, k) + switch value := v.(type) { + case int: + values = append(values, fmt.Sprintf("%d", value)) + case float64: + values = append(values, fmt.Sprintf("%f", value)) + case string: + values = append(values, fmt.Sprintf("'%s'", value)) + } + } + fmt.Printf("INSERT INTO %v.%v (%v) VALUES (%v);\n", m.Tags()["host"], m.Name(), strings.Join(keys, ","), strings.Join(values, ",")) + } + + return nil +} + +func init() { + outputs.Add("postgresql", func() telegraf.Output { return &Postgresql{} }) +} From 0afa3507c15d54a3462fd529968a2fd58ca863d6 Mon Sep 17 00:00:00 2001 From: Sven Date: Fri, 20 Oct 2017 23:32:03 +0200 Subject: [PATCH 002/121] make address configurable --- plugins/outputs/postgresql/postgresql.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) 
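An annotated aside before the next diff: the five methods PATCH 001 adds (Connect, Close, SampleConfig, Description, Write) are exactly the method set Telegraf requires of an output plugin. A minimal sketch of that contract, assuming the telegraf.Output interface of the 1.x era (consult the targeted release for the authoritative definition):

```go
package telegraf

// Output is the contract every output plugin satisfies (a sketch, assumed
// from the Telegraf 1.x sources).
type Output interface {
	Connect() error               // establish the connection to the backend
	Close() error                 // tear the connection down
	Description() string          // one-line description of the plugin
	SampleConfig() string         // TOML fragment for generated config files
	Write(metrics []Metric) error // persist one batch of buffered metrics
}
```

The init() function in PATCH 001 registers the constructor via outputs.Add("postgresql", ...), which is what makes the plugin addressable as [[outputs.postgresql]] in a Telegraf config.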
diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 48acbc7540008..16bd6912fe8f4 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -9,13 +9,14 @@ import ( ) type Postgresql struct { - db *sql.DB + db *sql.DB + Address string } func (p *Postgresql) Connect() error { fmt.Println("Connect") - db, err := sql.Open("pgx", "host=localhost database=postgres") + db, err := sql.Open("pgx", p.Address) if err != nil { fmt.Println("DB Connect failed") From bcca5c84fe6151f79f38ddeeaac5f40cb7061202 Mon Sep 17 00:00:00 2001 From: Sven Date: Sun, 22 Oct 2017 21:42:40 +0200 Subject: [PATCH 003/121] add helper functions for create table and insert --- plugins/outputs/postgresql/postgresql.go | 119 ++++++++++++++++++++--- 1 file changed, 104 insertions(+), 15 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 16bd6912fe8f4..f149d55ffc1b7 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -9,43 +9,131 @@ import ( ) type Postgresql struct { - db *sql.DB - Address string + db *sql.DB + Address string + CreateTables bool + TagsAsForeignkeys bool + Tables map[string]bool + SchemaTag string } func (p *Postgresql) Connect() error { - fmt.Println("Connect") - db, err := sql.Open("pgx", p.Address) - if err != nil { - fmt.Println("DB Connect failed") - return nil + return err } - fmt.Println("DB Connect") p.db = db + p.Tables = make(map[string]bool) return nil } func (p *Postgresql) Close() error { - fmt.Println("Close") - return nil + return p.db.Close() } func (p *Postgresql) SampleConfig() string { return "" } func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } +func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { + var columns []string + var pk []string + + pk = append(pk, "time") + columns = append(columns, "time timestamptz") + + for column, _ := range metric.Tags() { + pk = append(pk, column) + columns = append(columns, fmt.Sprintf("%s text", column)) + } + + var datatype string + for column, v := range metric.Fields() { + switch v.(type) { + case int64: + datatype = "int" + case float64: + datatype = "real" + } + columns = append(columns, fmt.Sprintf("%s %s", column, datatype)) + } + + sql := fmt.Sprintf("CREATE TABLE %s(%s,PRIMARY KEY(%s))", metric.Name(), strings.Join(columns, ","), strings.Join(pk, ",")) + fmt.Println(sql) + return sql +} + +func (p *Postgresql) generateInsert(metric telegraf.Metric) (string, []interface{}) { + var columns []string + var placeholder []string + var values []interface{} + + columns = append(columns, "time") + values = append(values, metric.Time().Format("2006-01-02 15:04:05 -0700")) + placeholder = append(placeholder, "?") + + for column, value := range metric.Tags() { + columns = append(columns, column) + values = append(values, value) + placeholder = append(placeholder, "?") + } + + for column, value := range metric.Fields() { + columns = append(columns, column) + values = append(values, value) + placeholder = append(placeholder, "?") + } + + sql := fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", metric.Name(), strings.Join(columns, ","), strings.Join(placeholder, ",")) + fmt.Println(sql) + fmt.Println(values) + return sql, values +} + +func (p *Postgresql) writeMetric(metric telegraf.Metric) error { + tableName := metric.Name() + + if p.Tables[tableName] == false { + createStmt := 
p.generateCreateTable(metric) + _, err := p.db.Exec(createStmt) + if err != nil { + fmt.Println("Error creating table", err) + return err + } + p.Tables[tableName] = true + } + + sql, values := p.generateInsert(metric) + _, err := p.db.Exec(sql, values...) + if err != nil { + fmt.Println("Error during insert", err) + return err + } + + return nil +} + func (p *Postgresql) Write(metrics []telegraf.Metric) error { + for _, metric := range metrics { + p.writeMetric(metric) + } + return nil + var tableName string - for _, m := range metrics { + for _, metric := range metrics { + var columns []string var keys, values []string - for k, v := range m.Tags() { - keys = append(keys, k) + + tableName = metric.Name() + + for name, v := range metric.Tags() { + keys = append(keys, name) values = append(values, fmt.Sprintf("'%s'", v)) } - for k, v := range m.Fields() { + + for k, v := range metric.Fields() { keys = append(keys, k) + columns = append(columns, k) switch value := v.(type) { case int: values = append(values, fmt.Sprintf("%d", value)) @@ -55,7 +143,8 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { values = append(values, fmt.Sprintf("'%s'", value)) } } - fmt.Printf("INSERT INTO %v.%v (%v) VALUES (%v);\n", m.Tags()["host"], m.Name(), strings.Join(keys, ","), strings.Join(values, ",")) + fmt.Printf("INSERT INTO %v (%v) VALUES (%v);\n", tableName, strings.Join(keys, ","), strings.Join(values, ",")) + } return nil From a13ac441f5cebc0fbc030f4d145b1526047e7f14 Mon Sep 17 00:00:00 2001 From: Sven Date: Sun, 22 Oct 2017 21:43:20 +0200 Subject: [PATCH 004/121] add tests --- plugins/outputs/postgresql/postgresql_test.go | 59 +++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 plugins/outputs/postgresql/postgresql_test.go diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go new file mode 100644 index 0000000000000..90780aa45aee2 --- /dev/null +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -0,0 +1,59 @@ +package postgresql + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + + "github.com/stretchr/testify/assert" +) + +func TestPostgresqlCreateStatement(t *testing.T) { + p := Postgresql{} + timestamp := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) + + var m telegraf.Metric + m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) + assert.Equal(t, "CREATE TABLE m(time timestamptz,f real,PRIMARY KEY(time))", p.generateCreateTable(m)) + + m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) + assert.Equal(t, "CREATE TABLE m(time timestamptz,i int,PRIMARY KEY(time))", p.generateCreateTable(m)) + + m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14), "i": int(3)}, timestamp) + assert.Equal(t, "CREATE TABLE m(time timestamptz,f real,i int,PRIMARY KEY(time))", p.generateCreateTable(m)) + + m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) + assert.Equal(t, "CREATE TABLE m(time timestamptz,k text,i int,PRIMARY KEY(time,k))", p.generateCreateTable(m)) + + m, _ = metric.New("m", map[string]string{"k1": "v1", "k2": "v2"}, map[string]interface{}{"i": int(3)}, timestamp) + assert.Equal(t, "CREATE TABLE m(time timestamptz,k1 text,k2 text,i int,PRIMARY KEY(time,k1,k2))", p.generateCreateTable(m)) +} + +func TestPostgresqlInsertStatement(t *testing.T) { + p := Postgresql{} + timestamp := time.Date(2010, time.November, 10, 
23, 0, 0, 0, time.UTC) + + var m telegraf.Metric + m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) + sql, values := p.generateInsert(m) + assert.Equal(t, "INSERT INTO m(time,f) VALUES(?,?)", sql) + assert.Equal(t, []interface{}{timestamp, float64(3.14)}, values) + + m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) + sql, values = p.generateInsert(m) + assert.Equal(t, "INSERT INTO m(time,i) VALUES(?,?)", sql) + + m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14), "i": int(3)}, timestamp) + sql, values = p.generateInsert(m) + assert.Equal(t, "INSERT INTO m(time,f,i) VALUES(?,?,?)", sql) + + m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) + sql, values = p.generateInsert(m) + assert.Equal(t, "INSERT INTO m(time,k,i) VALUES(?,?,?)", sql) + + m, _ = metric.New("m", map[string]string{"k1": "v1", "k2": "v2"}, map[string]interface{}{"i": int(3)}, timestamp) + sql, values = p.generateInsert(m) + assert.Equal(t, "INSERT INTO m(time,k1,k2,i) VALUES(?,?,?,?)", sql) +} From 1a6b1c4acefb5da7ebab14c61748b8e3567055f3 Mon Sep 17 00:00:00 2001 From: Sven Date: Tue, 24 Oct 2017 21:39:31 +0200 Subject: [PATCH 005/121] fix sql placeholder --- plugins/outputs/postgresql/postgresql.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index f149d55ffc1b7..13f2d77944c6f 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -51,9 +51,9 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { for column, v := range metric.Fields() { switch v.(type) { case int64: - datatype = "int" + datatype = "int8" case float64: - datatype = "real" + datatype = "float8" } columns = append(columns, fmt.Sprintf("%s %s", column, datatype)) } @@ -65,28 +65,27 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { func (p *Postgresql) generateInsert(metric telegraf.Metric) (string, []interface{}) { var columns []string - var placeholder []string var values []interface{} columns = append(columns, "time") values = append(values, metric.Time().Format("2006-01-02 15:04:05 -0700")) - placeholder = append(placeholder, "?") for column, value := range metric.Tags() { columns = append(columns, column) values = append(values, value) - placeholder = append(placeholder, "?") } for column, value := range metric.Fields() { columns = append(columns, column) values = append(values, value) - placeholder = append(placeholder, "?") + } + + var placeholder []string + for i := 1; i <= len(values); i++ { + placeholder = append(placeholder, fmt.Sprintf("$%d", i)) } sql := fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", metric.Name(), strings.Join(columns, ","), strings.Join(placeholder, ",")) - fmt.Println(sql) - fmt.Println(values) return sql, values } From 5ea88687a913823a65385763e0d4d0377065dc40 Mon Sep 17 00:00:00 2001 From: Sven Date: Tue, 24 Oct 2017 22:20:48 +0200 Subject: [PATCH 006/121] let pgx handle time conversion, remove old code --- plugins/outputs/postgresql/postgresql.go | 35 +++--------------------- 1 file changed, 4 insertions(+), 31 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 13f2d77944c6f..6c84309a3ee07 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -68,7 +68,7 @@ func (p 
*Postgresql) generateInsert(metric telegraf.Metric) (string, []interface var values []interface{} columns = append(columns, "time") - values = append(values, metric.Time().Format("2006-01-02 15:04:05 -0700")) + values = append(values, metric.Time()) for column, value := range metric.Tags() { columns = append(columns, column) @@ -114,38 +114,11 @@ func (p *Postgresql) writeMetric(metric telegraf.Metric) error { func (p *Postgresql) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { - p.writeMetric(metric) - } - return nil - var tableName string - - for _, metric := range metrics { - var columns []string - var keys, values []string - - tableName = metric.Name() - - for name, v := range metric.Tags() { - keys = append(keys, name) - values = append(values, fmt.Sprintf("'%s'", v)) - } - - for k, v := range metric.Fields() { - keys = append(keys, k) - columns = append(columns, k) - switch value := v.(type) { - case int: - values = append(values, fmt.Sprintf("%d", value)) - case float64: - values = append(values, fmt.Sprintf("%f", value)) - case string: - values = append(values, fmt.Sprintf("'%s'", value)) - } + err := p.writeMetric(metric) + if err != nil { + return err } - fmt.Printf("INSERT INTO %v (%v) VALUES (%v);\n", tableName, strings.Join(keys, ","), strings.Join(values, ",")) - } - return nil } From 8aa03cfe4663ba35fa49cebf8e368d1fa863c589 Mon Sep 17 00:00:00 2001 From: Sven Date: Tue, 24 Oct 2017 22:21:41 +0200 Subject: [PATCH 007/121] adjust test cases to datatype changes --- plugins/outputs/postgresql/postgresql_test.go | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 90780aa45aee2..502763fef3de3 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -16,19 +16,19 @@ func TestPostgresqlCreateStatement(t *testing.T) { var m telegraf.Metric m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, "CREATE TABLE m(time timestamptz,f real,PRIMARY KEY(time))", p.generateCreateTable(m)) + assert.Equal(t, "CREATE TABLE m(time timestamptz,f float8,PRIMARY KEY(time))", p.generateCreateTable(m)) m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, "CREATE TABLE m(time timestamptz,i int,PRIMARY KEY(time))", p.generateCreateTable(m)) + assert.Equal(t, "CREATE TABLE m(time timestamptz,i int8,PRIMARY KEY(time))", p.generateCreateTable(m)) m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14), "i": int(3)}, timestamp) - assert.Equal(t, "CREATE TABLE m(time timestamptz,f real,i int,PRIMARY KEY(time))", p.generateCreateTable(m)) + assert.Equal(t, "CREATE TABLE m(time timestamptz,f float8,i int8,PRIMARY KEY(time))", p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, "CREATE TABLE m(time timestamptz,k text,i int,PRIMARY KEY(time,k))", p.generateCreateTable(m)) + assert.Equal(t, "CREATE TABLE m(time timestamptz,k text,i int8,PRIMARY KEY(time,k))", p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k1": "v1", "k2": "v2"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, "CREATE TABLE m(time timestamptz,k1 text,k2 text,i int,PRIMARY KEY(time,k1,k2))", p.generateCreateTable(m)) + assert.Equal(t, "CREATE TABLE m(time timestamptz,k1 text,k2 text,i int8,PRIMARY 
KEY(time,k1,k2))", p.generateCreateTable(m)) } func TestPostgresqlInsertStatement(t *testing.T) { @@ -38,22 +38,22 @@ func TestPostgresqlInsertStatement(t *testing.T) { var m telegraf.Metric m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) sql, values := p.generateInsert(m) - assert.Equal(t, "INSERT INTO m(time,f) VALUES(?,?)", sql) - assert.Equal(t, []interface{}{timestamp, float64(3.14)}, values) + assert.Equal(t, "INSERT INTO m(time,f) VALUES($1,$2)", sql) + assert.EqualValues(t, []interface{}{timestamp, float64(3.14)}, values) m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) sql, values = p.generateInsert(m) - assert.Equal(t, "INSERT INTO m(time,i) VALUES(?,?)", sql) + assert.Equal(t, "INSERT INTO m(time,i) VALUES($1,$2)", sql) m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14), "i": int(3)}, timestamp) sql, values = p.generateInsert(m) - assert.Equal(t, "INSERT INTO m(time,f,i) VALUES(?,?,?)", sql) + assert.Equal(t, "INSERT INTO m(time,f,i) VALUES($1,$2,$3)", sql) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) sql, values = p.generateInsert(m) - assert.Equal(t, "INSERT INTO m(time,k,i) VALUES(?,?,?)", sql) + assert.Equal(t, "INSERT INTO m(time,k,i) VALUES($1,$2,$3)", sql) m, _ = metric.New("m", map[string]string{"k1": "v1", "k2": "v2"}, map[string]interface{}{"i": int(3)}, timestamp) sql, values = p.generateInsert(m) - assert.Equal(t, "INSERT INTO m(time,k1,k2,i) VALUES(?,?,?,?)", sql) + assert.Equal(t, "INSERT INTO m(time,k1,k2,i) VALUES($1,$2,$3,$4)", sql) } From 749e7575a59ad5041dd4f30ae639d3897f4b7cec Mon Sep 17 00:00:00 2001 From: Sven Date: Fri, 27 Oct 2017 17:08:30 +0200 Subject: [PATCH 008/121] remove debug prints --- plugins/outputs/postgresql/postgresql.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 6c84309a3ee07..e8a239b836f55 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -59,7 +59,6 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { } sql := fmt.Sprintf("CREATE TABLE %s(%s,PRIMARY KEY(%s))", metric.Name(), strings.Join(columns, ","), strings.Join(pk, ",")) - fmt.Println(sql) return sql } @@ -96,7 +95,6 @@ func (p *Postgresql) writeMetric(metric telegraf.Metric) error { createStmt := p.generateCreateTable(metric) _, err := p.db.Exec(createStmt) if err != nil { - fmt.Println("Error creating table", err) return err } p.Tables[tableName] = true From e1572580deb893fdcedc9d6cff058f24a694369b Mon Sep 17 00:00:00 2001 From: Sven Date: Fri, 27 Oct 2017 23:37:28 +0200 Subject: [PATCH 009/121] check if table exists before creating --- plugins/outputs/postgresql/postgresql.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index e8a239b836f55..e49a064ddd890 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -88,10 +88,24 @@ func (p *Postgresql) generateInsert(metric telegraf.Metric) (string, []interface return sql, values } +func (p *Postgresql) tableExists(tableName string) bool { + stmt := "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname NOT IN ('information_schema','pg_catalog');" + result, err := p.db.Exec(stmt, tableName) + if err != nil { + return false + } + if count, _ := 
result.RowsAffected(); count == 1 { + p.Tables[tableName] = true + return true + } + return false + +} + func (p *Postgresql) writeMetric(metric telegraf.Metric) error { tableName := metric.Name() - if p.Tables[tableName] == false { + if p.Tables[tableName] == false && p.tableExists(tableName) == false { createStmt := p.generateCreateTable(metric) _, err := p.db.Exec(createStmt) if err != nil { From 05df41b03268bdd2c822af5cfe4491c14c41492b Mon Sep 17 00:00:00 2001 From: Sven Date: Sat, 28 Oct 2017 17:13:58 +0200 Subject: [PATCH 010/121] allow skipping tags --- plugins/outputs/postgresql/postgresql.go | 30 ++++++++++++++++++------ 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index e49a064ddd890..add76b0713c4a 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -3,18 +3,18 @@ package postgresql import ( "database/sql" "fmt" + "log" + "strings" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - "strings" ) type Postgresql struct { - db *sql.DB - Address string - CreateTables bool - TagsAsForeignkeys bool - Tables map[string]bool - SchemaTag string + db *sql.DB + Address string + IgnoredTags []string + Tables map[string]bool } func (p *Postgresql) Connect() error { @@ -32,6 +32,15 @@ func (p *Postgresql) Close() error { return p.db.Close() } +func contains(haystack []string, needle string) bool { + for _, key := range haystack { + if key == needle { + return true + } + } + return false +} + func (p *Postgresql) SampleConfig() string { return "" } func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } @@ -43,6 +52,9 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { columns = append(columns, "time timestamptz") for column, _ := range metric.Tags() { + if contains(p.IgnoredTags, column) { + continue + } pk = append(pk, column) columns = append(columns, fmt.Sprintf("%s text", column)) } @@ -70,6 +82,9 @@ func (p *Postgresql) generateInsert(metric telegraf.Metric) (string, []interface values = append(values, metric.Time()) for column, value := range metric.Tags() { + if contains(p.IgnoredTags, column) { + continue + } columns = append(columns, column) values = append(values, value) } @@ -92,6 +107,7 @@ func (p *Postgresql) tableExists(tableName string) bool { stmt := "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname NOT IN ('information_schema','pg_catalog');" result, err := p.db.Exec(stmt, tableName) if err != nil { + log.Printf("E! 
Error checking for existence of metric table %s: %v", tableName, err) return false } if count, _ := result.RowsAffected(); count == 1 { From 4c35bffba3867196342704d9e6f1b9506e95dff7 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Sun, 5 Nov 2017 18:26:26 +0100 Subject: [PATCH 011/121] refactoring --- plugins/outputs/postgresql/postgresql.go | 111 +++++++++++++---------- 1 file changed, 64 insertions(+), 47 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index add76b0713c4a..4f0bc2e3b642a 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -11,10 +11,11 @@ import ( ) type Postgresql struct { - db *sql.DB - Address string - IgnoredTags []string - Tables map[string]bool + db *sql.DB + Address string + IgnoredTags []string + TagsAsForeignkeys bool + Tables map[string]bool } func (p *Postgresql) Connect() error { @@ -55,8 +56,13 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { if contains(p.IgnoredTags, column) { continue } - pk = append(pk, column) - columns = append(columns, fmt.Sprintf("%s text", column)) + if p.TagsAsForeignkeys { + pk = append(pk, column+"_id") + columns = append(columns, fmt.Sprintf("%s_id int8", column)) + } else { + pk = append(pk, column) + columns = append(columns, fmt.Sprintf("%s text", column)) + } } var datatype string @@ -74,32 +80,14 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { return sql } -func (p *Postgresql) generateInsert(metric telegraf.Metric) (string, []interface{}) { - var columns []string - var values []interface{} - - columns = append(columns, "time") - values = append(values, metric.Time()) - - for column, value := range metric.Tags() { - if contains(p.IgnoredTags, column) { - continue - } - columns = append(columns, column) - values = append(values, value) - } - - for column, value := range metric.Fields() { - columns = append(columns, column) - values = append(values, value) - } +func (p *Postgresql) generateInsert(tablename string, columns []string, values []interface{}) (string, []interface{}) { var placeholder []string for i := 1; i <= len(values); i++ { placeholder = append(placeholder, fmt.Sprintf("$%d", i)) } - sql := fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", metric.Name(), strings.Join(columns, ","), strings.Join(placeholder, ",")) + sql := fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", tablename, strings.Join(columns, ","), strings.Join(placeholder, ",")) return sql, values } @@ -115,35 +103,64 @@ func (p *Postgresql) tableExists(tableName string) bool { return true } return false - } -func (p *Postgresql) writeMetric(metric telegraf.Metric) error { - tableName := metric.Name() +func (p *Postgresql) Write(metrics []telegraf.Metric) error { + for _, metric := range metrics { + tablename := metric.Name() + + // create table if needed + if p.Tables[tablename] == false && p.tableExists(tablename) == false { + createStmt := p.generateCreateTable(metric) + _, err := p.db.Exec(createStmt) + if err != nil { + return err + } + p.Tables[tablename] = true + } - if p.Tables[tableName] == false && p.tableExists(tableName) == false { - createStmt := p.generateCreateTable(metric) - _, err := p.db.Exec(createStmt) - if err != nil { - return err + var columns []string + var values []interface{} + + columns = append(columns, "time") + values = append(values, metric.Time()) + + for column, value := range metric.Tags() { + if contains(p.IgnoredTags, column) { + continue + } + + if 
p.TagsAsForeignkeys { + // var value_id int + // query := fmt.Sprintf("SELECT %s_id FROM %s_%s WHERE %s=$1", column, metric.Name(), column, column) + // err := p.db.QueryRow(query, value).Scan(&value_id) + // + // if err != nil { + // query := fmt.Sprintf("INSERT INTO %s_%s(%s) VALUES($1) RETURNING %s_id", metric.Name(), column, column, column) + // err := p.db.QueryRow(query, value).Scan(&value_id) + // } + // columns = append(columns, column+"_id") + // values = append(values, value_id) + } else { + columns = append(columns, column) + values = append(values, value) + } } - p.Tables[tableName] = true - } - sql, values := p.generateInsert(metric) - _, err := p.db.Exec(sql, values...) - if err != nil { - fmt.Println("Error during insert", err) - return err - } + for column, value := range metric.Fields() { + columns = append(columns, column) + values = append(values, value) + } - return nil -} + var placeholder []string + for i := 1; i <= len(values); i++ { + placeholder = append(placeholder, fmt.Sprintf("$%d", i)) + } -func (p *Postgresql) Write(metrics []telegraf.Metric) error { - for _, metric := range metrics { - err := p.writeMetric(metric) + sql, values := p.generateInsert(tablename, columns, values) + _, err := p.db.Exec(sql, values...) if err != nil { + fmt.Println("Error during insert", err) return err } } From 07dcf4ef9e27b1cb7b0290f8d74c338db2f1dd27 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Sun, 5 Nov 2017 20:05:15 +0100 Subject: [PATCH 012/121] implement TagsAsForeignkeys --- plugins/outputs/postgresql/postgresql.go | 30 ++++++++++++++---------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 4f0bc2e3b642a..8005ca76b0fc3 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -48,6 +48,7 @@ func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { var columns []string var pk []string + var sql []string pk = append(pk, "time") columns = append(columns, "time timestamptz") @@ -59,6 +60,7 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { if p.TagsAsForeignkeys { pk = append(pk, column+"_id") columns = append(columns, fmt.Sprintf("%s_id int8", column)) + sql = append(sql, fmt.Sprintf("CREATE TABLE %s_%s(%s_id serial primary key,%s text unique)", metric.Name(), column, column, column)) } else { pk = append(pk, column) columns = append(columns, fmt.Sprintf("%s text", column)) @@ -76,8 +78,8 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { columns = append(columns, fmt.Sprintf("%s %s", column, datatype)) } - sql := fmt.Sprintf("CREATE TABLE %s(%s,PRIMARY KEY(%s))", metric.Name(), strings.Join(columns, ","), strings.Join(pk, ",")) - return sql + sql = append(sql, fmt.Sprintf("CREATE TABLE %s(%s,PRIMARY KEY(%s))", metric.Name(), strings.Join(columns, ","), strings.Join(pk, ","))) + return strings.Join(sql, ";") } func (p *Postgresql) generateInsert(tablename string, columns []string, values []interface{}) (string, []interface{}) { @@ -131,16 +133,20 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { } if p.TagsAsForeignkeys { - // var value_id int - // query := fmt.Sprintf("SELECT %s_id FROM %s_%s WHERE %s=$1", column, metric.Name(), column, column) - // err := p.db.QueryRow(query, value).Scan(&value_id) - // - // if err != nil { - // query := fmt.Sprintf("INSERT INTO 
%s_%s(%s) VALUES($1) RETURNING %s_id", metric.Name(), column, column, column) - // err := p.db.QueryRow(query, value).Scan(&value_id) - // } - // columns = append(columns, column+"_id") - // values = append(values, value_id) + var value_id int + + query := fmt.Sprintf("SELECT %s_id FROM %s_%s WHERE %s=$1", column, tablename, column, column) + err := p.db.QueryRow(query, value).Scan(&value_id) + if err != nil { + query := fmt.Sprintf("INSERT INTO %s_%s(%s) VALUES($1) RETURNING %s_id", tablename, column, column, column) + err := p.db.QueryRow(query, value).Scan(&value_id) + if err != nil { + return err + } + } + + columns = append(columns, column+"_id") + values = append(values, value_id) } else { columns = append(columns, column) values = append(values, value) From 8348b7e517a735f8978d2f0a446de9c1431c3b25 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Sun, 5 Nov 2017 20:29:49 +0100 Subject: [PATCH 013/121] fix tests --- plugins/outputs/postgresql/postgresql_test.go | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 502763fef3de3..905323080b4a6 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -35,25 +35,18 @@ func TestPostgresqlInsertStatement(t *testing.T) { p := Postgresql{} timestamp := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) - var m telegraf.Metric - m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - sql, values := p.generateInsert(m) + sql, _ := p.generateInsert("m", []string{"time", "f"}, []interface{}{timestamp, 3.1}) assert.Equal(t, "INSERT INTO m(time,f) VALUES($1,$2)", sql) - assert.EqualValues(t, []interface{}{timestamp, float64(3.14)}, values) - m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - sql, values = p.generateInsert(m) + sql, _ = p.generateInsert("m", []string{"time", "i"}, []interface{}{timestamp, 3}) assert.Equal(t, "INSERT INTO m(time,i) VALUES($1,$2)", sql) - m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14), "i": int(3)}, timestamp) - sql, values = p.generateInsert(m) + sql, _ = p.generateInsert("m", []string{"time", "f", "i"}, []interface{}{timestamp, 3.1, 3}) assert.Equal(t, "INSERT INTO m(time,f,i) VALUES($1,$2,$3)", sql) - m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - sql, values = p.generateInsert(m) + sql, _ = p.generateInsert("m", []string{"time", "k", "i"}, []interface{}{timestamp, "v", 3}) assert.Equal(t, "INSERT INTO m(time,k,i) VALUES($1,$2,$3)", sql) - m, _ = metric.New("m", map[string]string{"k1": "v1", "k2": "v2"}, map[string]interface{}{"i": int(3)}, timestamp) - sql, values = p.generateInsert(m) + sql, _ = p.generateInsert("m", []string{"time", "k1", "k2", "i"}, []interface{}{timestamp, "v1", "v2", 3}) assert.Equal(t, "INSERT INTO m(time,k1,k2,i) VALUES($1,$2,$3,$4)", sql) } From feee01c6419cbf9822861092a5db55abf5ad48ed Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Sun, 5 Nov 2017 20:58:23 +0100 Subject: [PATCH 014/121] add SampleConfig --- plugins/outputs/postgresql/postgresql.go | 26 +++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 8005ca76b0fc3..ea35205a50a9f 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -42,7 
+42,31 @@ func contains(haystack []string, needle string) bool {
 	return false
 }
 
-func (p *Postgresql) SampleConfig() string { return "" }
+var sampleConfig = `
+  ## specify address via a url matching:
+  ## postgres://[pqgotest[:password]]@localhost[/dbname]\
+  ## ?sslmode=[disable|verify-ca|verify-full]
+  ## or a simple string:
+  ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production
+  ##
+  ## All connection parameters are optional.
+  ##
+  ## Without the dbname parameter, the driver will default to a database
+  ## with the same name as the user. This dbname is just for instantiating
+  ## a connection with the server; it is also the database the metrics
+  ## will be written to.
+  ##
+  address = "host=localhost user=postgres sslmode=verify-full"
+
+  ## A list of tags to exclude from storing. If not specified, all tags are stored.
+  # ignored_tags = ["foo", "bar"]
+
+  ## Store tags as foreign keys in the metrics table. Default is false.
+  # tags_as_foreignkeys = false
+
+`
+
+func (p *Postgresql) SampleConfig() string { return sampleConfig }
 func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" }
 
 func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string {

From 7b0315b2664eb109626f7c0ec8605ada9e9ebc5a Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Sun, 5 Nov 2017 21:00:38 +0100
Subject: [PATCH 015/121] register driver

---
 plugins/outputs/postgresql/postgresql.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index ea35205a50a9f..5f3641b6a5ae9 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -6,6 +6,9 @@ import (
 	"log"
 	"strings"
 
+	// register in driver.
+	_ "github.com/jackc/pgx/stdlib"
+
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/outputs"
 )

From cd4363fa93beecb441c84c5f1994b4e40ccb758a Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Sun, 5 Nov 2017 21:04:43 +0100
Subject: [PATCH 016/121] update README

---
 plugins/outputs/postgresql/README.md | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md
index df7e546f69d33..61d5ace8d307d 100644
--- a/plugins/outputs/postgresql/README.md
+++ b/plugins/outputs/postgresql/README.md
@@ -7,5 +7,11 @@ This output plugin writes all metrics to PostgreSQL.
 ```toml
 # Send metrics to postgres
 [[outputs.postgresql]]
-  # no configuration
+  address = "host=localhost user=postgres sslmode=verify-full"
+
+  ## A list of tags to exclude from storing. If not specified, all tags are stored.
+  # ignored_tags = ["foo", "bar"]
+
+  ## Store tags as foreign keys in the metrics table. Default is false.
+ # tags_as_foreignkeys = false ``` From 8d5ad624a2bc6e157184a6641272a5e05ed9ec20 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Mon, 6 Nov 2017 20:31:12 +0100 Subject: [PATCH 017/121] prepare for create table template --- plugins/outputs/postgresql/postgresql.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 5f3641b6a5ae9..a18fc0499402e 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -105,7 +105,7 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { columns = append(columns, fmt.Sprintf("%s %s", column, datatype)) } - sql = append(sql, fmt.Sprintf("CREATE TABLE %s(%s,PRIMARY KEY(%s))", metric.Name(), strings.Join(columns, ","), strings.Join(pk, ","))) + sql = append(sql, fmt.Sprintf("CREATE TABLE %[1]s(%[2]s,PRIMARY KEY(%[3]s))", metric.Name(), strings.Join(columns, ","), strings.Join(pk, ","))) return strings.Join(sql, ";") } From a1e6b9a4827e76a00296fc14b63cb3088e022bde Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Tue, 7 Nov 2017 09:24:27 +0100 Subject: [PATCH 018/121] quote identifier --- plugins/outputs/postgresql/postgresql.go | 49 ++++++++++--------- plugins/outputs/postgresql/postgresql_test.go | 20 ++++---- 2 files changed, 36 insertions(+), 33 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index a18fc0499402e..c81685ff2b932 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -6,8 +6,7 @@ import ( "log" "strings" - // register in driver. - _ "github.com/jackc/pgx/stdlib" + "github.com/jackc/pgx" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" @@ -45,6 +44,10 @@ func contains(haystack []string, needle string) bool { return false } +func quoteIdent(name string) string { + return pgx.Identifier{name}.Sanitize() +} + var sampleConfig = ` ## specify address via a url matching: ## postgres://[pqgotest[:password]]@localhost[/dbname]\ @@ -77,20 +80,20 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { var pk []string var sql []string - pk = append(pk, "time") - columns = append(columns, "time timestamptz") + pk = append(pk, quoteIdent("time")) + columns = append(columns, quoteIdent("time")+" timestamptz") for column, _ := range metric.Tags() { if contains(p.IgnoredTags, column) { continue } if p.TagsAsForeignkeys { - pk = append(pk, column+"_id") - columns = append(columns, fmt.Sprintf("%s_id int8", column)) - sql = append(sql, fmt.Sprintf("CREATE TABLE %s_%s(%s_id serial primary key,%s text unique)", metric.Name(), column, column, column)) + pk = append(pk, quoteIdent(column+"_id")) + columns = append(columns, fmt.Sprintf("%s int8", quoteIdent(column+"_id"))) + sql = append(sql, fmt.Sprintf("CREATE TABLE %s(%s serial primary key,%s text unique)", quoteIdent(metric.Name()+"_"+column), quoteIdent(column+"_id"), quoteIdent(column))) } else { - pk = append(pk, column) - columns = append(columns, fmt.Sprintf("%s text", column)) + pk = append(pk, quoteIdent(column)) + columns = append(columns, fmt.Sprintf("%s text", quoteIdent(column))) } } @@ -102,10 +105,10 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { case float64: datatype = "float8" } - columns = append(columns, fmt.Sprintf("%s %s", column, datatype)) + columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(column), datatype)) } - 
sql = append(sql, fmt.Sprintf("CREATE TABLE %[1]s(%[2]s,PRIMARY KEY(%[3]s))", metric.Name(), strings.Join(columns, ","), strings.Join(pk, ","))) + sql = append(sql, fmt.Sprintf("CREATE TABLE %[1]s(%[2]s,PRIMARY KEY(%[3]s))", quoteIdent(metric.Name()), strings.Join(columns, ","), strings.Join(pk, ","))) return strings.Join(sql, ";") } @@ -116,7 +119,12 @@ func (p *Postgresql) generateInsert(tablename string, columns []string, values [ placeholder = append(placeholder, fmt.Sprintf("$%d", i)) } - sql := fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", tablename, strings.Join(columns, ","), strings.Join(placeholder, ",")) + var quoted []string + for _, column := range columns { + quoted = append(quoted, quoteIdent(column)) + } + + sql := fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", quoteIdent(tablename), strings.Join(quoted, ","), strings.Join(placeholder, ",")) return sql, values } @@ -151,7 +159,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { var columns []string var values []interface{} - columns = append(columns, "time") + columns = append(columns, quoteIdent("time")) values = append(values, metric.Time()) for column, value := range metric.Tags() { @@ -162,34 +170,29 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { if p.TagsAsForeignkeys { var value_id int - query := fmt.Sprintf("SELECT %s_id FROM %s_%s WHERE %s=$1", column, tablename, column, column) + query := fmt.Sprintf("SELECT %s FROM %s WHERE %s=$1", quoteIdent(column+"_id"), quoteIdent(tablename+"_"+column), quoteIdent(column)) err := p.db.QueryRow(query, value).Scan(&value_id) if err != nil { - query := fmt.Sprintf("INSERT INTO %s_%s(%s) VALUES($1) RETURNING %s_id", tablename, column, column, column) + query := fmt.Sprintf("INSERT INTO %s(%s) VALUES($1) RETURNING %s", quoteIdent(tablename+"_"+column), quoteIdent(column), quoteIdent(column+"_id")) err := p.db.QueryRow(query, value).Scan(&value_id) if err != nil { return err } } - columns = append(columns, column+"_id") + columns = append(columns, quoteIdent(column+"_id")) values = append(values, value_id) } else { - columns = append(columns, column) + columns = append(columns, quoteIdent(column)) values = append(values, value) } } for column, value := range metric.Fields() { - columns = append(columns, column) + columns = append(columns, quoteIdent(column)) values = append(values, value) } - var placeholder []string - for i := 1; i <= len(values); i++ { - placeholder = append(placeholder, fmt.Sprintf("$%d", i)) - } - sql, values := p.generateInsert(tablename, columns, values) _, err := p.db.Exec(sql, values...) 
if err != nil { diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 905323080b4a6..4888cf56698c4 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -16,19 +16,19 @@ func TestPostgresqlCreateStatement(t *testing.T) { var m telegraf.Metric m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, "CREATE TABLE m(time timestamptz,f float8,PRIMARY KEY(time))", p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamptz,"f" float8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, "CREATE TABLE m(time timestamptz,i int8,PRIMARY KEY(time))", p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamptz,"i" int8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14), "i": int(3)}, timestamp) - assert.Equal(t, "CREATE TABLE m(time timestamptz,f float8,i int8,PRIMARY KEY(time))", p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamptz,"f" float8,"i" int8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, "CREATE TABLE m(time timestamptz,k text,i int8,PRIMARY KEY(time,k))", p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamptz,"k" text,"i" int8,PRIMARY KEY("time","k"))`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k1": "v1", "k2": "v2"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, "CREATE TABLE m(time timestamptz,k1 text,k2 text,i int8,PRIMARY KEY(time,k1,k2))", p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamptz,"k1" text,"k2" text,"i" int8,PRIMARY KEY("time","k1","k2"))`, p.generateCreateTable(m)) } func TestPostgresqlInsertStatement(t *testing.T) { @@ -36,17 +36,17 @@ func TestPostgresqlInsertStatement(t *testing.T) { timestamp := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) sql, _ := p.generateInsert("m", []string{"time", "f"}, []interface{}{timestamp, 3.1}) - assert.Equal(t, "INSERT INTO m(time,f) VALUES($1,$2)", sql) + assert.Equal(t, `INSERT INTO "m"("time","f") VALUES($1,$2)`, sql) sql, _ = p.generateInsert("m", []string{"time", "i"}, []interface{}{timestamp, 3}) - assert.Equal(t, "INSERT INTO m(time,i) VALUES($1,$2)", sql) + assert.Equal(t, `INSERT INTO "m"("time","i") VALUES($1,$2)`, sql) sql, _ = p.generateInsert("m", []string{"time", "f", "i"}, []interface{}{timestamp, 3.1, 3}) - assert.Equal(t, "INSERT INTO m(time,f,i) VALUES($1,$2,$3)", sql) + assert.Equal(t, `INSERT INTO "m"("time","f","i") VALUES($1,$2,$3)`, sql) sql, _ = p.generateInsert("m", []string{"time", "k", "i"}, []interface{}{timestamp, "v", 3}) - assert.Equal(t, "INSERT INTO m(time,k,i) VALUES($1,$2,$3)", sql) + assert.Equal(t, `INSERT INTO "m"("time","k","i") VALUES($1,$2,$3)`, sql) sql, _ = p.generateInsert("m", []string{"time", "k1", "k2", "i"}, []interface{}{timestamp, "v1", "v2", 3}) - assert.Equal(t, "INSERT INTO m(time,k1,k2,i) VALUES($1,$2,$3,$4)", sql) + assert.Equal(t, `INSERT INTO "m"("time","k1","k2","i") VALUES($1,$2,$3,$4)`, sql) } From b87f066356a7a90ffe0334fd6e7e11036a58fbd4 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Tue, 7 Nov 2017 12:40:40 +0100 Subject: [PATCH 019/121] refactor generateInsert 
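generateInsert now receives the table name and a column list instead of a
whole metric, and derives one numbered placeholder per column, so Write can
assemble columns and values in a single pass and reuse the slices for both
tags and fields. A sketch of the resulting shape (expected SQL taken from
the updated tests below; note that tag/field column order still follows Go
map iteration and is therefore not deterministic):

```go
p := Postgresql{}
sql := p.generateInsert("m", []string{"time", "f", "i"})
// sql == `INSERT INTO "m"("time","f","i") VALUES($1,$2,$3)`
```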
--- plugins/outputs/postgresql/postgresql.go | 33 +++++++++---------- plugins/outputs/postgresql/postgresql_test.go | 11 +++---- 2 files changed, 21 insertions(+), 23 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index c81685ff2b932..443116e676310 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -88,9 +88,12 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { continue } if p.TagsAsForeignkeys { - pk = append(pk, quoteIdent(column+"_id")) - columns = append(columns, fmt.Sprintf("%s int8", quoteIdent(column+"_id"))) - sql = append(sql, fmt.Sprintf("CREATE TABLE %s(%s serial primary key,%s text unique)", quoteIdent(metric.Name()+"_"+column), quoteIdent(column+"_id"), quoteIdent(column))) + key := quoteIdent(column + "_id") + table := quoteIdent(metric.Name() + "_" + column) + + pk = append(pk, key) + columns = append(columns, fmt.Sprintf("%s int8", key)) + sql = append(sql, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s(%s serial primary key,%s text unique)", table, key, quoteIdent(column))) } else { pk = append(pk, quoteIdent(column)) columns = append(columns, fmt.Sprintf("%s text", quoteIdent(column))) @@ -112,20 +115,16 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { return strings.Join(sql, ";") } -func (p *Postgresql) generateInsert(tablename string, columns []string, values []interface{}) (string, []interface{}) { - - var placeholder []string - for i := 1; i <= len(values); i++ { - placeholder = append(placeholder, fmt.Sprintf("$%d", i)) - } +func (p *Postgresql) generateInsert(tablename string, columns []string) string { - var quoted []string - for _, column := range columns { + var placeholder, quoted []string + for i, column := range columns { + placeholder = append(placeholder, fmt.Sprintf("$%d", i+1)) quoted = append(quoted, quoteIdent(column)) } sql := fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", quoteIdent(tablename), strings.Join(quoted, ","), strings.Join(placeholder, ",")) - return sql, values + return sql } func (p *Postgresql) tableExists(tableName string) bool { @@ -159,7 +158,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { var columns []string var values []interface{} - columns = append(columns, quoteIdent("time")) + columns = append(columns, "time") values = append(values, metric.Time()) for column, value := range metric.Tags() { @@ -180,20 +179,20 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { } } - columns = append(columns, quoteIdent(column+"_id")) + columns = append(columns, column+"_id") values = append(values, value_id) } else { - columns = append(columns, quoteIdent(column)) + columns = append(columns, column) values = append(values, value) } } for column, value := range metric.Fields() { - columns = append(columns, quoteIdent(column)) + columns = append(columns, column) values = append(values, value) } - sql, values := p.generateInsert(tablename, columns, values) + sql := p.generateInsert(tablename, columns) _, err := p.db.Exec(sql, values...) 
if err != nil { fmt.Println("Error during insert", err) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 4888cf56698c4..1e6cda6136c81 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -33,20 +33,19 @@ func TestPostgresqlCreateStatement(t *testing.T) { func TestPostgresqlInsertStatement(t *testing.T) { p := Postgresql{} - timestamp := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) - sql, _ := p.generateInsert("m", []string{"time", "f"}, []interface{}{timestamp, 3.1}) + sql := p.generateInsert("m", []string{"time", "f"}) assert.Equal(t, `INSERT INTO "m"("time","f") VALUES($1,$2)`, sql) - sql, _ = p.generateInsert("m", []string{"time", "i"}, []interface{}{timestamp, 3}) + sql = p.generateInsert("m", []string{"time", "i"}) assert.Equal(t, `INSERT INTO "m"("time","i") VALUES($1,$2)`, sql) - sql, _ = p.generateInsert("m", []string{"time", "f", "i"}, []interface{}{timestamp, 3.1, 3}) + sql = p.generateInsert("m", []string{"time", "f", "i"}) assert.Equal(t, `INSERT INTO "m"("time","f","i") VALUES($1,$2,$3)`, sql) - sql, _ = p.generateInsert("m", []string{"time", "k", "i"}, []interface{}{timestamp, "v", 3}) + sql = p.generateInsert("m", []string{"time", "k", "i"}) assert.Equal(t, `INSERT INTO "m"("time","k","i") VALUES($1,$2,$3)`, sql) - sql, _ = p.generateInsert("m", []string{"time", "k1", "k2", "i"}, []interface{}{timestamp, "v1", "v2", 3}) + sql = p.generateInsert("m", []string{"time", "k1", "k2", "i"}) assert.Equal(t, `INSERT INTO "m"("time","k1","k2","i") VALUES($1,$2,$3,$4)`, sql) } From 22d8a1e0c0824fa00a71d8efc7d7c0a6c9e9c17d Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Wed, 8 Nov 2017 00:01:40 +0100 Subject: [PATCH 020/121] use timestamp for time column to allow pg10 partitioning --- plugins/outputs/postgresql/postgresql.go | 2 +- plugins/outputs/postgresql/postgresql_test.go | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 443116e676310..fd37c14ae8e0e 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -81,7 +81,7 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { var sql []string pk = append(pk, quoteIdent("time")) - columns = append(columns, quoteIdent("time")+" timestamptz") + columns = append(columns, quoteIdent("time")+" timestamp") for column, _ := range metric.Tags() { if contains(p.IgnoredTags, column) { diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 1e6cda6136c81..6208088897f66 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -16,19 +16,19 @@ func TestPostgresqlCreateStatement(t *testing.T) { var m telegraf.Metric m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamptz,"f" float8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"f" float8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamptz,"i" int8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"i" int8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) m, _ 
= metric.New("m", nil, map[string]interface{}{"f": float64(3.14), "i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamptz,"f" float8,"i" int8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"f" float8,"i" int8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamptz,"k" text,"i" int8,PRIMARY KEY("time","k"))`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"k" text,"i" int8,PRIMARY KEY("time","k"))`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k1": "v1", "k2": "v2"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamptz,"k1" text,"k2" text,"i" int8,PRIMARY KEY("time","k1","k2"))`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"k1" text,"k2" text,"i" int8,PRIMARY KEY("time","k1","k2"))`, p.generateCreateTable(m)) } func TestPostgresqlInsertStatement(t *testing.T) { From 212125f606eeb6c391e44dbbd5f2df70f75effc3 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Wed, 8 Nov 2017 16:46:03 +0100 Subject: [PATCH 021/121] remove nondeterministic tests --- plugins/outputs/postgresql/postgresql_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 6208088897f66..a065a222e8051 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -21,14 +21,9 @@ func TestPostgresqlCreateStatement(t *testing.T) { m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"i" int8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) - m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14), "i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"f" float8,"i" int8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) - m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"k" text,"i" int8,PRIMARY KEY("time","k"))`, p.generateCreateTable(m)) - m, _ = metric.New("m", map[string]string{"k1": "v1", "k2": "v2"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"k1" text,"k2" text,"i" int8,PRIMARY KEY("time","k1","k2"))`, p.generateCreateTable(m)) } func TestPostgresqlInsertStatement(t *testing.T) { From c3874cf59d408f7100a710f30fab423bb9977e19 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Wed, 8 Nov 2017 16:46:21 +0100 Subject: [PATCH 022/121] use template for create table query generation --- plugins/outputs/postgresql/postgresql.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index fd37c14ae8e0e..34cedd862619a 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -111,7 +111,13 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(column), datatype)) } - sql = append(sql, fmt.Sprintf("CREATE TABLE %[1]s(%[2]s,PRIMARY KEY(%[3]s))", quoteIdent(metric.Name()), strings.Join(columns, ","), strings.Join(pk, ","))) + template := 
"CREATE TABLE {TABLE}({COLUMNS},PRIMARY KEY({PK_COLUMNS}))" + + query := strings.Replace(template, "{TABLE}", quoteIdent(metric.Name()), -1) + query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1) + query = strings.Replace(query, "{PK_COLUMNS}", strings.Join(pk, ","), -1) + + sql = append(sql, query) return strings.Join(sql, ";") } From d5f27b49bebe2bf6bc8883ba2ae1b239eb72e5d2 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Wed, 8 Nov 2017 16:55:56 +0100 Subject: [PATCH 023/121] make TableTemplate configurable --- plugins/outputs/postgresql/postgresql.go | 15 +++++++++++---- plugins/outputs/postgresql/postgresql_test.go | 4 ++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 34cedd862619a..e16431905cb44 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -17,6 +17,7 @@ type Postgresql struct { Address string IgnoredTags []string TagsAsForeignkeys bool + TableTemplate string Tables map[string]bool } @@ -111,9 +112,7 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(column), datatype)) } - template := "CREATE TABLE {TABLE}({COLUMNS},PRIMARY KEY({PK_COLUMNS}))" - - query := strings.Replace(template, "{TABLE}", quoteIdent(metric.Name()), -1) + query := strings.Replace(p.TableTemplate, "{TABLE}", quoteIdent(metric.Name()), -1) query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1) query = strings.Replace(query, "{PK_COLUMNS}", strings.Join(pk, ","), -1) @@ -209,5 +208,13 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { } func init() { - outputs.Add("postgresql", func() telegraf.Output { return &Postgresql{} }) + outputs.Add("postgresql", func() telegraf.Output { return newPostgresql() }) +} + +func newPostgresql() *Postgresql { + p := Postgresql{} + if p.TableTemplate == "" { + p.TableTemplate = "CREATE TABLE {TABLE}({COLUMNS},PRIMARY KEY({PK_COLUMNS}))" + } + return &p } diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index a065a222e8051..a93bc11c6bb33 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -11,7 +11,7 @@ import ( ) func TestPostgresqlCreateStatement(t *testing.T) { - p := Postgresql{} + p := newPostgresql() timestamp := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) var m telegraf.Metric @@ -27,7 +27,7 @@ func TestPostgresqlCreateStatement(t *testing.T) { } func TestPostgresqlInsertStatement(t *testing.T) { - p := Postgresql{} + p := newPostgresql() sql := p.generateInsert("m", []string{"time", "f"}) assert.Equal(t, `INSERT INTO "m"("time","f") VALUES($1,$2)`, sql) From 8ee4803d9c8a6b1bf5cea42ace43fcabed438302 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Sat, 18 Nov 2017 17:41:21 +0100 Subject: [PATCH 024/121] add quoteLiteral helper function --- plugins/outputs/postgresql/postgresql.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index e16431905cb44..332a6e33fc653 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -49,6 +49,10 @@ func quoteIdent(name string) string { return pgx.Identifier{name}.Sanitize() } +func quoteLiteral(name string) string { + return "'" + strings.Replace(name, "'", "''", -1) + "'" +} 
From 8ee4803d9c8a6b1bf5cea42ace43fcabed438302 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Sat, 18 Nov 2017 17:41:21 +0100
Subject: [PATCH 024/121] add quoteLiteral helper function

---
 plugins/outputs/postgresql/postgresql.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index e16431905cb44..332a6e33fc653 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -49,6 +49,10 @@ func quoteIdent(name string) string {
 	return pgx.Identifier{name}.Sanitize()
 }
 
+func quoteLiteral(name string) string {
+	return "'" + strings.Replace(name, "'", "''", -1) + "'"
+}
+
 var sampleConfig = `
   ## specify address via a url matching:
   ## postgres://[pqgotest[:password]]@localhost[/dbname]\

From 7e3330c54e193ffc8b05e3bb16cfd64ac3c67b05 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Sat, 18 Nov 2017 17:43:41 +0100
Subject: [PATCH 025/121] add tests for quoting

---
 plugins/outputs/postgresql/postgresql_test.go | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go
index a93bc11c6bb33..e4fc2e1c19076 100644
--- a/plugins/outputs/postgresql/postgresql_test.go
+++ b/plugins/outputs/postgresql/postgresql_test.go
@@ -10,6 +10,16 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
+func TestPostgresqlQuote(t *testing.T) {
+	assert.Equal(t, `"foo"`, quoteIdent("foo"))
+	assert.Equal(t, `"fo'o"`, quoteIdent("fo'o"))
+	assert.Equal(t, `"fo""o"`, quoteIdent("fo\"o"))
+
+	assert.Equal(t, "'foo'", quoteLiteral("foo"))
+	assert.Equal(t, "'fo''o'", quoteLiteral("fo'o"))
+	assert.Equal(t, "'fo\"o'", quoteLiteral("fo\"o"))
+}
+
 func TestPostgresqlCreateStatement(t *testing.T) {
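The two quoting rules exercised by TestPostgresqlQuote differ only in which quote character gets doubled. A rough sketch of what they produce (quoteIdent delegates to pgx.Identifier.Sanitize, which behaves like the identifier variant below for these inputs):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Identifier quoting: wrap in double quotes, double embedded double quotes.
	ident := `"` + strings.Replace(`fo"o`, `"`, `""`, -1) + `"`
	// Literal quoting: wrap in single quotes, double embedded single quotes.
	literal := "'" + strings.Replace("fo'o", "'", "''", -1) + "'"
	fmt.Println(ident, literal) // "fo""o" 'fo''o'
}
```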
From ee6fd6b2951f7f72c9d5b3efa348b8397ade1c75 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Sat, 18 Nov 2017 17:50:06 +0100
Subject: [PATCH 026/121] add TABLELITERAL to template variables

---
 plugins/outputs/postgresql/postgresql.go | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 332a6e33fc653..1883b979dac67 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -75,6 +75,8 @@ var sampleConfig = `
 
   ## Store tags as foreign keys in the metrics table. Default is false.
   # tags_as_foreignkeys = false
+
+  ## Template to use for generating tables
+  # table_template = "CREATE TABLE {TABLE}({COLUMNS},PRIMARY KEY({PK_COLUMNS}))"
 `
 
 func (p *Postgresql) SampleConfig() string { return sampleConfig }
@@ -117,8 +119,9 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string {
 	}
 
 	query := strings.Replace(p.TableTemplate, "{TABLE}", quoteIdent(metric.Name()), -1)
+	query = strings.Replace(query, "{TABLELITERAL}", quoteLiteral(metric.Name()), -1)
 	query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1)
-	query = strings.Replace(query, "{PK_COLUMNS}", strings.Join(pk, ","), -1)
+	query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1)
 
 	sql = append(sql, query)
 	return strings.Join(sql, ";")
@@ -218,7 +221,7 @@ func init() {
 func newPostgresql() *Postgresql {
 	p := Postgresql{}
 	if p.TableTemplate == "" {
-		p.TableTemplate = "CREATE TABLE {TABLE}({COLUMNS},PRIMARY KEY({PK_COLUMNS}))"
+		p.TableTemplate = "CREATE TABLE {TABLE}({COLUMNS},PRIMARY KEY({KEY_COLUMNS}))"
 	}
 	return &p
 }

From 236ddb9335123522c8ca3d3a96be780e767b02e5 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Sat, 18 Nov 2017 19:11:56 +0100
Subject: [PATCH 027/121] fix template in doc

---
 plugins/outputs/postgresql/postgresql.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 1883b979dac67..bc7e77f9bad62 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -76,7 +76,7 @@ var sampleConfig = `
   # tags_as_foreignkeys = false
 
   ## Template to use for generating tables
-  # table_template = "CREATE TABLE {TABLE}({COLUMNS},PRIMARY KEY({PK_COLUMNS}))"
+  # table_template = "CREATE TABLE {TABLE}({COLUMNS},PRIMARY KEY({KEY_COLUMNS}))"
 `
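With {TABLELITERAL} available, a template can hand the table name to a SQL function as a string literal, which is what the TimescaleDB hypertable templates in later patches rely on. A sketch of how such a template would expand (metric and column names are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	template := "CREATE TABLE {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time');"
	query := strings.Replace(template, "{TABLE}", `"cpu"`, -1)
	query = strings.Replace(query, "{TABLELITERAL}", `'cpu'`, -1)
	query = strings.Replace(query, "{COLUMNS}", `"time" timestamp,"usage" float8`, -1)
	fmt.Println(query)
	// CREATE TABLE "cpu"("time" timestamp,"usage" float8); SELECT create_hypertable('cpu','time');
}
```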
return &p } diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index e4fc2e1c19076..d195d0d8a0565 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -26,13 +26,13 @@ func TestPostgresqlCreateStatement(t *testing.T) { var m telegraf.Metric m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"f" float8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"f" float8)`, p.generateCreateTable(m)) m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"i" int8,PRIMARY KEY("time"))`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"i" int8)`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"k" text,"i" int8,PRIMARY KEY("time","k"))`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"k" text,"i" int8)`, p.generateCreateTable(m)) } From 9dc0fed20c83ff7774e0c50b5e827dd56492429b Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Sun, 26 Nov 2017 15:06:43 +0100 Subject: [PATCH 029/121] allow using jsonb for fields and tags and make it default --- plugins/outputs/postgresql/postgresql.go | 136 ++++++++++++------ plugins/outputs/postgresql/postgresql_test.go | 18 ++- 2 files changed, 107 insertions(+), 47 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 76fd5c631aa4b..5905d6a1acb07 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -2,6 +2,7 @@ package postgresql import ( "database/sql" + "encoding/json" "fmt" "log" "strings" @@ -17,6 +18,8 @@ type Postgresql struct { Address string IgnoredTags []string TagsAsForeignkeys bool + TagsAsJsonb bool + FieldsAsJsonb bool TableTemplate string Tables map[string]bool } @@ -88,34 +91,44 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { var sql []string pk = append(pk, quoteIdent("time")) - columns = append(columns, quoteIdent("time")+" timestamp") + columns = append(columns, "time timestamp") - for column, _ := range metric.Tags() { - if contains(p.IgnoredTags, column) { - continue + if p.TagsAsJsonb { + if len(metric.Tags()) > 0 { + columns = append(columns, "tags jsonb") } - if p.TagsAsForeignkeys { - key := quoteIdent(column + "_id") - table := quoteIdent(metric.Name() + "_" + column) + } else { + for column, _ := range metric.Tags() { + if contains(p.IgnoredTags, column) { + continue + } + if p.TagsAsForeignkeys { + key := quoteIdent(column + "_id") + table := quoteIdent(metric.Name() + "_" + column) - pk = append(pk, key) - columns = append(columns, fmt.Sprintf("%s int8", key)) - sql = append(sql, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s(%s serial primary key,%s text unique)", table, key, quoteIdent(column))) - } else { - pk = append(pk, quoteIdent(column)) - columns = append(columns, fmt.Sprintf("%s text", quoteIdent(column))) + pk = append(pk, key) + columns = append(columns, fmt.Sprintf("%s int8", key)) + sql = append(sql, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s(%s serial primary key,%s text unique)", table, key, quoteIdent(column))) + } else { + pk = append(pk, quoteIdent(column)) + columns = 
append(columns, fmt.Sprintf("%s text", quoteIdent(column))) + } } } - var datatype string - for column, v := range metric.Fields() { - switch v.(type) { - case int64: - datatype = "int8" - case float64: - datatype = "float8" + if p.FieldsAsJsonb { + columns = append(columns, "fields jsonb") + } else { + var datatype string + for column, v := range metric.Fields() { + switch v.(type) { + case int64: + datatype = "int8" + case float64: + datatype = "float8" + } + columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(column), datatype)) } - columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(column), datatype)) } query := strings.Replace(p.TableTemplate, "{TABLE}", quoteIdent(metric.Name()), -1) @@ -169,42 +182,77 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { var columns []string var values []interface{} + var js map[string]interface{} columns = append(columns, "time") values = append(values, metric.Time()) - for column, value := range metric.Tags() { - if contains(p.IgnoredTags, column) { - continue + if p.TagsAsJsonb { + js = make(map[string]interface{}) + for column, value := range metric.Tags() { + if contains(p.IgnoredTags, column) { + continue + } + js[column] = value } - if p.TagsAsForeignkeys { - var value_id int - - query := fmt.Sprintf("SELECT %s FROM %s WHERE %s=$1", quoteIdent(column+"_id"), quoteIdent(tablename+"_"+column), quoteIdent(column)) - err := p.db.QueryRow(query, value).Scan(&value_id) + if len(js) > 0 { + d, err := json.Marshal(js) if err != nil { - println(err) - query := fmt.Sprintf("INSERT INTO %s(%s) VALUES($1) RETURNING %s", quoteIdent(tablename+"_"+column), quoteIdent(column), quoteIdent(column+"_id")) + return err + } + + columns = append(columns, "tags") + values = append(values, d) + } + } else { + for column, value := range metric.Tags() { + if contains(p.IgnoredTags, column) { + continue + } + if p.TagsAsForeignkeys { + var value_id int + + query := fmt.Sprintf("SELECT %s FROM %s WHERE %s=$1", quoteIdent(column+"_id"), quoteIdent(tablename+"_"+column), quoteIdent(column)) err := p.db.QueryRow(query, value).Scan(&value_id) if err != nil { - return err + log.Printf("W! Foreign key reference not found %s: %v", tablename, err) + query := fmt.Sprintf("INSERT INTO %s(%s) VALUES($1) RETURNING %s", quoteIdent(tablename+"_"+column), quoteIdent(column), quoteIdent(column+"_id")) + err := p.db.QueryRow(query, value).Scan(&value_id) + if err != nil { + return err + } } + + columns = append(columns, column+"_id") + values = append(values, value_id) + } else { + columns = append(columns, column) + values = append(values, value) } + } + } - columns = append(columns, column+"_id") - values = append(values, value_id) - } else { + if p.FieldsAsJsonb { + js = make(map[string]interface{}) + for column, value := range metric.Fields() { + js[column] = value + } + + d, err := json.Marshal(js) + if err != nil { + return err + } + + columns = append(columns, "fields") + values = append(values, d) + } else { + for column, value := range metric.Fields() { columns = append(columns, column) values = append(values, value) } } - for column, value := range metric.Fields() { - columns = append(columns, column) - values = append(values, value) - } - sql := p.generateInsert(tablename, columns) _, err := p.db.Exec(sql, values...) 
if err != nil { @@ -220,9 +268,9 @@ func init() { } func newPostgresql() *Postgresql { - p := Postgresql{} - if p.TableTemplate == "" { - p.TableTemplate = "CREATE TABLE {TABLE}({COLUMNS})" + return &Postgresql{ + TableTemplate: "CREATE TABLE {TABLE}({COLUMNS})", + TagsAsJsonb: true, + FieldsAsJsonb: true, } - return &p } diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index d195d0d8a0565..4180f9900ca7b 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -26,19 +26,31 @@ func TestPostgresqlCreateStatement(t *testing.T) { var m telegraf.Metric m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"f" float8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"(time timestamp,fields jsonb)`, p.generateCreateTable(m)) + + m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) + assert.Equal(t, `CREATE TABLE "m"(time timestamp,tags jsonb,fields jsonb)`, p.generateCreateTable(m)) + + p.TagsAsJsonb = false + p.FieldsAsJsonb = false + + m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) + assert.Equal(t, `CREATE TABLE "m"(time timestamp,"f" float8)`, p.generateCreateTable(m)) m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"i" int8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"(time timestamp,"i" int8)`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"("time" timestamp,"k" text,"i" int8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"(time timestamp,"k" text,"i" int8)`, p.generateCreateTable(m)) } func TestPostgresqlInsertStatement(t *testing.T) { p := newPostgresql() + p.TagsAsJsonb = false + p.FieldsAsJsonb = false + sql := p.generateInsert("m", []string{"time", "f"}) assert.Equal(t, `INSERT INTO "m"("time","f") VALUES($1,$2)`, sql) From 8fb25ae7e01ef21553dba8c1ba1a39be53886a16 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Tue, 28 Nov 2017 11:54:52 +0100 Subject: [PATCH 030/121] document jsonb settings remove IgnoredTags since tagexclude achieves the same --- plugins/outputs/postgresql/postgresql.go | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 5905d6a1acb07..8ef18279faacb 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -16,7 +16,6 @@ import ( type Postgresql struct { db *sql.DB Address string - IgnoredTags []string TagsAsForeignkeys bool TagsAsJsonb bool FieldsAsJsonb bool @@ -72,14 +71,18 @@ var sampleConfig = ` ## address = "host=localhost user=postgres sslmode=verify-full" - ## A list of tags to exclude from storing. If not specified, all tags are stored. - # ignored_tags = ["foo", "bar"] - ## Store tags as foreign keys in the metrics table. Default is false. 
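The jsonb mode added in patch 029 collapses all tags into a single json document per row (and likewise all fields), so new tag or field keys never force a schema change. A small sketch of what ends up bound to the jsonb parameters (the tag and field values are invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	tags := map[string]interface{}{"host": "server01", "region": "eu"}
	fields := map[string]interface{}{"usage_idle": 98.2}

	t, err := json.Marshal(tags)
	if err != nil {
		panic(err)
	}
	f, _ := json.Marshal(fields) // same value types, cannot fail here
	// The plugin then issues: INSERT INTO "m"(time,tags,fields) VALUES($1,$2,$3)
	fmt.Printf("$2 = %s, $3 = %s\n", t, f)
}
```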
From 8fb25ae7e01ef21553dba8c1ba1a39be53886a16 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Tue, 28 Nov 2017 11:54:52 +0100
Subject: [PATCH 030/121] document jsonb settings

remove IgnoredTags since tagexclude achieves the same
---
 plugins/outputs/postgresql/postgresql.go | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 5905d6a1acb07..8ef18279faacb 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -16,7 +16,6 @@ import (
 type Postgresql struct {
 	db                *sql.DB
 	Address           string
-	IgnoredTags       []string
 	TagsAsForeignkeys bool
 	TagsAsJsonb       bool
 	FieldsAsJsonb     bool
@@ -72,14 +71,18 @@ var sampleConfig = `
   ## address = "host=localhost user=postgres sslmode=verify-full"
 
-  ## A list of tags to exclude from storing. If not specified, all tags are stored.
-  # ignored_tags = ["foo", "bar"]
-
   ## Store tags as foreign keys in the metrics table. Default is false.
   # tags_as_foreignkeys = false
 
   ## Template to use for generating tables
   # table_template = "CREATE TABLE {TABLE}({COLUMNS})"
+
+  ## Use jsonb datatype for tags
+  # tags_as_jsonb = true
+
+  ## Use jsonb datatype for fields
+  # fields_as_jsonb = true
+
 `
 
 func (p *Postgresql) SampleConfig() string { return sampleConfig }
@@ -99,9 +102,6 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string {
 		}
 	} else {
 		for column, _ := range metric.Tags() {
-			if contains(p.IgnoredTags, column) {
-				continue
-			}
 			if p.TagsAsForeignkeys {
 				key := quoteIdent(column + "_id")
 				table := quoteIdent(metric.Name() + "_" + column)
@@ -190,9 +190,6 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 	if p.TagsAsJsonb {
 		js = make(map[string]interface{})
 		for column, value := range metric.Tags() {
-			if contains(p.IgnoredTags, column) {
-				continue
-			}
 			js[column] = value
 		}
 
@@ -207,9 +204,6 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 		}
 	} else {
 		for column, value := range metric.Tags() {
-			if contains(p.IgnoredTags, column) {
-				continue
-			}
 			if p.TagsAsForeignkeys {
 				var value_id int
 

From a0868ddbaab9a7f8839ff922a0f28e5739e58e16 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Wed, 17 Jan 2018 17:12:33 +0100
Subject: [PATCH 031/121] document template better

---
 plugins/outputs/postgresql/postgresql.go | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 8ef18279faacb..d83e5891c0a8e 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -75,7 +75,16 @@ var sampleConfig = `
   # tags_as_foreignkeys = false
 
   ## Template to use for generating tables
+  ## Available Variables:
+  ##   {TABLE} - tablename as identifier
+  ##   {TABLELITERAL} - tablename as string literal
+  ##   {COLUMNS} - column definitions
+  ##   {KEY_COLUMNS} - comma-separated list of key columns (time + tags)
+
+  ## Default template
   # table_template = "CREATE TABLE {TABLE}({COLUMNS})"
+  ## Example for timescale
+  # table_template = "CREATE TABLE {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval);"
 `
fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s(%s serial primary key,%s text unique)", table, key, quoteIdent(column))) + // handle tags if necessary + if len(metric.Tags()) > 0 { + if p.TagsAsForeignkeys { + // tags in separate table + var tag_columns []string + var tag_columndefs []string + columns = append(columns, "tag_id int") + + if p.TagsAsJsonb { + tag_columns = append(tag_columns, "tags") + tag_columndefs = append(tag_columndefs, "tags jsonb") } else { - pk = append(pk, quoteIdent(column)) - columns = append(columns, fmt.Sprintf("%s text", quoteIdent(column))) + for column, _ := range metric.Tags() { + tag_columns = append(tag_columns, quoteIdent(column)) + tag_columndefs = append(tag_columndefs, fmt.Sprintf("%s text", quoteIdent(column))) + } + } + table := quoteIdent(metric.Name() + "_tag") + sql = append(sql, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s(tag_id serial primary key,%s,UNIQUE(%s))", table, strings.Join(tag_columndefs, ","), strings.Join(tag_columns, ","))) + } else { + // tags in measurement table + if p.TagsAsJsonb { + columns = append(columns, "tags jsonb") + } else { + for column, _ := range metric.Tags() { + pk = append(pk, quoteIdent(column)) + columns = append(columns, fmt.Sprintf("%s text", quoteIdent(column))) + } } } } @@ -196,42 +210,75 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { columns = append(columns, "time") values = append(values, metric.Time()) - if p.TagsAsJsonb { - js = make(map[string]interface{}) - for column, value := range metric.Tags() { - js[column] = value - } + if len(metric.Tags()) > 0 { + if p.TagsAsForeignkeys { + // tags in separate table + var tag_id int + var where_columns []string + var where_values []interface{} + + if p.TagsAsJsonb { + js = make(map[string]interface{}) + for column, value := range metric.Tags() { + js[column] = value + } - if len(js) > 0 { - d, err := json.Marshal(js) - if err != nil { - return err + if len(js) > 0 { + d, err := json.Marshal(js) + if err != nil { + return err + } + + where_columns = append(where_columns, "tags") + where_values = append(where_values, d) + } + } else { + for column, value := range metric.Tags() { + where_columns = append(where_columns, column) + where_values = append(where_values, value) + } } - columns = append(columns, "tags") - values = append(values, d) - } - } else { - for column, value := range metric.Tags() { - if p.TagsAsForeignkeys { - var value_id int + var where_parts []string + for i, column := range where_columns { + where_parts = append(where_parts, fmt.Sprintf("%s = $%d", quoteIdent(column), i+1)) + } + query := fmt.Sprintf("SELECT tag_id FROM %s WHERE %s", quoteIdent(tablename+"_tag"), strings.Join(where_parts, " AND ")) - query := fmt.Sprintf("SELECT %s FROM %s WHERE %s=$1", quoteIdent(column+"_id"), quoteIdent(tablename+"_"+column), quoteIdent(column)) - err := p.db.QueryRow(query, value).Scan(&value_id) + err := p.db.QueryRow(query, where_values...).Scan(&tag_id) + if err != nil { + log.Printf("I! Foreign key reference not found %s: %v", tablename, err) + query := p.generateInsert(tablename+"_tag", where_columns) + " RETURNING tag_id" + err := p.db.QueryRow(query, where_values...).Scan(&tag_id) if err != nil { - log.Printf("W! 
Foreign key reference not found %s: %v", tablename, err) - query := fmt.Sprintf("INSERT INTO %s(%s) VALUES($1) RETURNING %s", quoteIdent(tablename+"_"+column), quoteIdent(column), quoteIdent(column+"_id")) - err := p.db.QueryRow(query, value).Scan(&value_id) + return err + } + } + + columns = append(columns, "tag_id") + values = append(values, tag_id) + } else { + // tags in measurement table + if p.TagsAsJsonb { + js = make(map[string]interface{}) + for column, value := range metric.Tags() { + js[column] = value + } + + if len(js) > 0 { + d, err := json.Marshal(js) if err != nil { return err } - } - columns = append(columns, column+"_id") - values = append(values, value_id) + columns = append(columns, "tags") + values = append(values, d) + } } else { - columns = append(columns, column) - values = append(values, value) + for column, value := range metric.Tags() { + columns = append(columns, column) + values = append(values, value) + } } } } From 0fbbe990f246570df6c4fdd4147dee89fd9d00bb Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Tue, 23 Jan 2018 15:31:34 +0100 Subject: [PATCH 033/121] make tag table suffix configurable --- plugins/outputs/postgresql/postgresql.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 02ec38d5cba94..60036e9a209f4 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -13,8 +13,6 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" ) -var tag_table_suffix = "_tag" - type Postgresql struct { db *sql.DB Address string @@ -22,6 +20,7 @@ type Postgresql struct { TagsAsJsonb bool FieldsAsJsonb bool TableTemplate string + TagTableSuffix string Tables map[string]bool } @@ -124,7 +123,7 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { tag_columndefs = append(tag_columndefs, fmt.Sprintf("%s text", quoteIdent(column))) } } - table := quoteIdent(metric.Name() + "_tag") + table := quoteIdent(metric.Name() + p.TagTableSuffix) sql = append(sql, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s(tag_id serial primary key,%s,UNIQUE(%s))", table, strings.Join(tag_columndefs, ","), strings.Join(tag_columns, ","))) } else { // tags in measurement table @@ -243,12 +242,12 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { for i, column := range where_columns { where_parts = append(where_parts, fmt.Sprintf("%s = $%d", quoteIdent(column), i+1)) } - query := fmt.Sprintf("SELECT tag_id FROM %s WHERE %s", quoteIdent(tablename+"_tag"), strings.Join(where_parts, " AND ")) + query := fmt.Sprintf("SELECT tag_id FROM %s WHERE %s", quoteIdent(tablename+p.TagTableSuffix), strings.Join(where_parts, " AND ")) err := p.db.QueryRow(query, where_values...).Scan(&tag_id) if err != nil { log.Printf("I! 
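For orientation, the DDL that the tags_as_foreignkeys path now generates for a metric named cpu with tags host and region looks roughly like the following sketch, assembled from generateCreateTable (tag column order depends on Go map iteration; with tags_as_jsonb enabled the tag table holds a single jsonb column instead):

```go
package main

import "fmt"

func main() {
	// Rough shape of the two statements emitted when tags_as_foreignkeys is set:
	tagTable := `CREATE TABLE IF NOT EXISTS "cpu_tag"(tag_id serial primary key,"host" text,"region" text,UNIQUE("host","region"))`
	metricTable := `CREATE TABLE "cpu"(time timestamp,tag_id int,"usage" float8)`
	fmt.Println(tagTable)
	fmt.Println(metricTable)
}
```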
From 079bf7012ba5edb782a7251023fee01ea1cd2c62 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Mon, 5 Feb 2018 14:35:58 +0100
Subject: [PATCH 034/121] comment out noisy log messages when fk reference is
 not found

handle text datatype and log unknown datatypes on table creation
---
 plugins/outputs/postgresql/postgresql.go | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 60036e9a209f4..2dc23f6c1dd4f 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -148,6 +148,11 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string {
 			datatype = "int8"
 		case float64:
 			datatype = "float8"
+		case string:
+			datatype = "text"
+		default:
+			datatype = "text"
+			log.Printf("E! Unknown column datatype %s: %v", column, v)
 		}
 		columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(column), datatype))
 	}
@@ -246,7 +251,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 
 		err := p.db.QueryRow(query, where_values...).Scan(&tag_id)
 		if err != nil {
-			log.Printf("I! Foreign key reference not found %s: %v", tablename, err)
+			// log.Printf("I! Foreign key reference not found %s: %v", tablename, err)
 			query := p.generateInsert(tablename+p.TagTableSuffix, where_columns) + " RETURNING tag_id"
 			err := p.db.QueryRow(query, where_values...).Scan(&tag_id)
 			if err != nil {

From b337ffdbaa854a09207d6e36cb6b4584d8cce329 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Mon, 5 Feb 2018 22:28:08 +0100
Subject: [PATCH 035/121] handle missing columns

---
 plugins/outputs/postgresql/postgresql.go | 66 +++++++++++++++++++-----
 1 file changed, 54 insertions(+), 12 deletions(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 2dc23f6c1dd4f..ff7d0d763e0e0 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -56,6 +56,23 @@ func quoteLiteral(name string) string {
 	return "'" + strings.Replace(name, "'", "''", -1) + "'"
 }
 
+func deriveDatatype(value interface{}) string {
+	var datatype string
+
+	switch value.(type) {
+	case int64:
+		datatype = "int8"
+	case float64:
+		datatype = "float8"
+	case string:
+		datatype = "text"
+	default:
+		datatype = "text"
+		log.Printf("E! Unknown datatype %v", value)
+	}
+	return datatype
+}
+
 var sampleConfig = `
   ## specify address via a url matching:
   ## postgres://[pqgotest[:password]]@localhost[/dbname]\
@@ -143,17 +160,7 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string {
 	} else {
 		var datatype string
 		for column, v := range metric.Fields() {
-			switch v.(type) {
-			case int64:
-				datatype = "int8"
-			case float64:
-				datatype = "float8"
-			case string:
-				datatype = "text"
-			default:
-				datatype = "text"
-				log.Printf("E! Unknown column datatype %s: %v", column, v)
-			}
+			datatype = deriveDatatype(v)
 			columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(column), datatype))
 		}
 	}
@@ -310,7 +317,42 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 		sql := p.generateInsert(tablename, columns)
 		_, err := p.db.Exec(sql, values...)
 		if err != nil {
-			fmt.Println("Error during insert", err)
+			// check if insert error was caused by column mismatch
+			if p.FieldsAsJsonb == false {
+				log.Printf("E! Error during insert: %v", err)
+				var quoted_columns []string
+				for _, column := range columns {
+					quoted_columns = append(quoted_columns, quoteLiteral(column))
+				}
+				query := "SELECT c FROM unnest(array[%s]) AS c WHERE NOT EXISTS(SELECT 1 FROM information_schema.columns WHERE column_name=c AND table_schema=$1 AND table_name=$2)"
+				query = fmt.Sprintf(query, strings.Join(quoted_columns, ","))
+				result, err := p.db.Query(query, "public", tablename)
+				defer result.Close()
+				if err != nil {
+					return err
+				}
+				// some columns are missing
+
+				var column, datatype string
+				for result.Next() {
+					err := result.Scan(&column)
+					if err != nil {
+						log.Println(err)
+					}
+					for i, name := range columns {
+						if name == column {
+							datatype = deriveDatatype(values[i])
+						}
+					}
+					query := "ALTER TABLE %s.%s ADD COLUMN %s %s;"
+					_, err = p.db.Exec(fmt.Sprintf(query, quoteIdent("public"), quoteIdent(tablename), quoteIdent(column), datatype))
+					if err != nil {
+						return err
+						log.Println(err)
+					}
+				}
+			}
+			return err
 		}
 	}

From 4d58e9351011d8a932765f7d2ac26ff1387bbef7 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Sun, 11 Feb 2018 22:50:56 +0100
Subject: [PATCH 036/121] remove dead code

---
 plugins/outputs/postgresql/postgresql.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index ff7d0d763e0e0..5aa239604aacd 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -348,7 +348,6 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 				_, err = p.db.Exec(fmt.Sprintf(query, quoteIdent("public"), quoteIdent(tablename), quoteIdent(column), datatype))
 				if err != nil {
 					return err
-					log.Println(err)
 				}
 			}
 		}

From 352a10580ff96cebb5d58cd51b5e1ae31caabe28 Mon Sep 17 00:00:00 2001
From: Oskari Saarenmaa
Date: Mon, 16 Apr 2018 10:27:31 +0300
Subject: [PATCH 037/121] postgresql output: boolean columns

---
 plugins/outputs/postgresql/postgresql.go | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 5aa239604aacd..751ce9152191d 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -60,6 +60,8 @@ func deriveDatatype(value interface{}) string {
 	var datatype string
 
 	switch value.(type) {
+	case bool:
+		datatype = "boolean"
 	case int64:
 		datatype = "int8"
 	case float64:
From 9178024a8d71bba4a622ade978b831ff60acd42f Mon Sep 17 00:00:00 2001
From: Oskari Saarenmaa
Date: Tue, 17 Apr 2018 14:16:34 -0300
Subject: [PATCH 038/121] postgresql output: batch inserts to the same
 table/column set

Batches are created for every (table, columns) set as we may have metrics
with different set of tags & fields in the same batch.

This speeds up inserts quite a bit by reducing the number of separate
insert statements.

Grouping inserts requires columns to appear in the same order, so we now
sort them for both tags & fields.

This code could use a bit of tidying.
---
 plugins/outputs/postgresql/postgresql.go | 50 +++++++++++++++++++++---
 1 file changed, 44 insertions(+), 6 deletions(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 751ce9152191d..73a2c607bdcf0 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"log"
+	"sort"
 	"strings"
 
 	"github.com/jackc/pgx"
@@ -95,7 +96,7 @@ var sampleConfig = `
   ## Template to use for generating tables
-  ## Available Variables: 
+  ## Available Variables:
   ##   {TABLE} - tablename as identifier
   ##   {TABLELITERAL} - tablename as string literal
   ##   {COLUMNS} - column definitions
   ##   {KEY_COLUMNS} - comma-separated list of key columns (time + tags)
@@ -203,6 +204,11 @@ func (p *Postgresql) tableExists(tableName string) bool {
 }
 
 func (p *Postgresql) Write(metrics []telegraf.Metric) error {
+	batches := make(map[string][]interface{})
+	params := make(map[string][]string)
+	colmap := make(map[string][]string)
+	tabmap := make(map[string]string)
+
 	for _, metric := range metrics {
 		tablename := metric.Name()
 
@@ -288,9 +294,15 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 				values = append(values, d)
 			}
 		} else {
-			for column, value := range metric.Tags() {
+			var keys []string
+			fields := metric.Tags()
+			for column := range fields {
+				keys = append(keys, column)
+			}
+			sort.Strings(keys)
+			for _, column := range keys {
 				columns = append(columns, column)
-				values = append(values, value)
+				values = append(values, fields[column])
 			}
 		}
@@ -310,18 +322,44 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 			columns = append(columns, "fields")
 			values = append(values, d)
 		} else {
-			for column, value := range metric.Fields() {
+			var keys []string
+			fields := metric.Fields()
+			for column := range fields {
+				keys = append(keys, column)
+			}
+			sort.Strings(keys)
+			for _, column := range keys {
 				columns = append(columns, column)
-				values = append(values, value)
+				values = append(values, fields[column])
 			}
 		}
 
-		sql := p.generateInsert(tablename, columns)
+		var table_and_cols string;
+		var placeholder, quoted_columns []string;
+		for _, column := range columns {
+			quoted_columns = append(quoted_columns, quoteIdent(column))
+		}
+		table_and_cols = fmt.Sprintf("%s(%s)", quoteIdent(tablename), strings.Join(quoted_columns, ","))
+		batches[table_and_cols] = append(batches[table_and_cols], values...)
+		for i, _ := range columns {
+			i += len(params[table_and_cols]) * len(columns)
+			placeholder = append(placeholder, fmt.Sprintf("$%d", i + 1))
+		}
+		params[table_and_cols] = append(params[table_and_cols], strings.Join(placeholder, ","))
+		colmap[table_and_cols] = columns
+		tabmap[table_and_cols] = tablename
+	}
+
+	for table_and_cols, values := range batches {
+		// log.Printf("Writing %d metrics into %s", len(params[table_and_cols]), table_and_cols)
+		sql := fmt.Sprintf("INSERT INTO %s VALUES (%s)", table_and_cols, strings.Join(params[table_and_cols], "),("))
 		_, err := p.db.Exec(sql, values...)
 		if err != nil {
 			// check if insert error was caused by column mismatch
 			if p.FieldsAsJsonb == false {
 				log.Printf("E! Error during insert: %v", err)
+				tablename := tabmap[table_and_cols]
+				columns := colmap[table_and_cols]
 				var quoted_columns []string
 				for _, column := range columns {
 					quoted_columns = append(quoted_columns, quoteLiteral(column))
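The grouping in patch 038 keys each batch on the combination of table name and quoted column list, then flushes one multi-row INSERT per key. A self-contained sketch of how such a statement and its flat argument list fit together (the helper name and sample values are invented):

```go
package main

import (
	"fmt"
	"strings"
)

// buildBatchInsert numbers placeholders across all rows, so the flat args
// slice lines up with INSERT INTO t(cols) VALUES ($1,$2),($3,$4),...
func buildBatchInsert(tableAndCols string, rows [][]interface{}) (string, []interface{}) {
	var valueLists []string
	var args []interface{}
	for _, row := range rows {
		var ph []string
		for _, v := range row {
			args = append(args, v)
			ph = append(ph, fmt.Sprintf("$%d", len(args)))
		}
		valueLists = append(valueLists, strings.Join(ph, ","))
	}
	return fmt.Sprintf("INSERT INTO %s VALUES (%s)", tableAndCols, strings.Join(valueLists, "),(")), args
}

func main() {
	sql, args := buildBatchInsert(`"cpu"(time,"usage")`, [][]interface{}{
		{"2018-04-17 14:00:00", 1.5},
		{"2018-04-17 14:00:10", 1.7},
	})
	fmt.Println(sql)  // INSERT INTO "cpu"(time,"usage") VALUES ($1,$2),($3,$4)
	fmt.Println(args) // [2018-04-17 14:00:00 1.5 2018-04-17 14:00:10 1.7]
}
```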
Error during insert: %v", err) + tablename := tabmap[table_and_cols] + columns := colmap[table_and_cols] var quoted_columns []string for _, column := range columns { quoted_columns = append(quoted_columns, quoteLiteral(column)) From 7e989e0a3dbcc4280f30cfdfa0f11458dac91a40 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Sat, 12 May 2018 18:10:30 +0200 Subject: [PATCH 039/121] use timestamptz for time column --- plugins/outputs/postgresql/postgresql.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 73a2c607bdcf0..e7e9f585b8668 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -124,7 +124,7 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { var sql []string pk = append(pk, quoteIdent("time")) - columns = append(columns, "time timestamp") + columns = append(columns, "time timestamptz") // handle tags if necessary if len(metric.Tags()) > 0 { From e50c752bf6d56a603636a97e4136d4fd0cdf44cf Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Sun, 13 May 2018 19:15:03 +0200 Subject: [PATCH 040/121] adjust test to timestamptz change --- plugins/outputs/postgresql/postgresql_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 4180f9900ca7b..962f7808fc69e 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -26,22 +26,22 @@ func TestPostgresqlCreateStatement(t *testing.T) { var m telegraf.Metric m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"(time timestamp,fields jsonb)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"(time timestamptz,fields jsonb)`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"(time timestamp,tags jsonb,fields jsonb)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"(time timestamptz,tags jsonb,fields jsonb)`, p.generateCreateTable(m)) p.TagsAsJsonb = false p.FieldsAsJsonb = false m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"(time timestamp,"f" float8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"(time timestamptz,"f" float8)`, p.generateCreateTable(m)) m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"(time timestamp,"i" int8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"(time timestamptz,"i" int8)`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"(time timestamp,"k" text,"i" int8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE "m"(time timestamptz,"k" text,"i" int8)`, p.generateCreateTable(m)) } From a7cf1c77db87944017f306bfbf1556001842b0fd Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Mon, 11 Jun 2018 21:35:23 +0200 Subject: [PATCH 041/121] fix code formatting (gofmt) --- plugins/outputs/postgresql/postgresql.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index e7e9f585b8668..7b195c0bcbbc1 
100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -334,8 +334,8 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { } } - var table_and_cols string; - var placeholder, quoted_columns []string; + var table_and_cols string + var placeholder, quoted_columns []string for _, column := range columns { quoted_columns = append(quoted_columns, quoteIdent(column)) } @@ -343,7 +343,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { batches[table_and_cols] = append(batches[table_and_cols], values...) for i, _ := range columns { i += len(params[table_and_cols]) * len(columns) - placeholder = append(placeholder, fmt.Sprintf("$%d", i + 1)) + placeholder = append(placeholder, fmt.Sprintf("$%d", i+1)) } params[table_and_cols] = append(params[table_and_cols], strings.Join(placeholder, ",")) colmap[table_and_cols] = columns From c33e057a0deb2a215e1841d26537a5322c5c355d Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Thu, 5 Jul 2018 08:20:37 +0200 Subject: [PATCH 042/121] include type in error message about unknown type --- plugins/outputs/postgresql/postgresql.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 7b195c0bcbbc1..a3091cb0f962d 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -71,7 +71,7 @@ func deriveDatatype(value interface{}) string { datatype = "text" default: datatype = "text" - log.Printf("E! Unknown datatype %v", value) + log.Printf("E! Unknown datatype %T(%v)", value) } return datatype } From b20dd456104c5d47b91e3e3078691fea81a5ec48 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Thu, 5 Jul 2018 08:59:46 +0200 Subject: [PATCH 043/121] handle uint64 as datatype --- plugins/outputs/postgresql/postgresql.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index a3091cb0f962d..5ce011268e52e 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -63,6 +63,8 @@ func deriveDatatype(value interface{}) string { switch value.(type) { case bool: datatype = "boolean" + case uint64: + datatype = "int8" case int64: datatype = "int8" case float64: From 41a515533b25d5a3a95c505f018c309336e373dd Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Thu, 5 Jul 2018 16:45:26 +0200 Subject: [PATCH 044/121] fix Printf call --- plugins/outputs/postgresql/postgresql.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 5ce011268e52e..8c75102b74f9f 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -73,7 +73,7 @@ func deriveDatatype(value interface{}) string { datatype = "text" default: datatype = "text" - log.Printf("E! Unknown datatype %T(%v)", value) + log.Printf("E! 
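The bug fixed by patch 044 is worth spelling out: patch 042's format string contains two verbs but was given only one argument, which go vet flags and which prints %!v(MISSING) at runtime. A tiny demonstration of both the broken and the corrected call:

```go
package main

import "fmt"

func main() {
	var value interface{} = uint64(7)
	fmt.Printf("Unknown datatype %T(%v)\n", value)        // Unknown datatype uint64(%!v(MISSING))
	fmt.Printf("Unknown datatype %T(%v)\n", value, value) // Unknown datatype uint64(7)
}
```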
Unknown datatype %T(%v)", value, value) } return datatype } From 9178024a8d71bba4a622ade978b831ff60acd42f Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Tue, 17 Jul 2018 11:17:30 +0200 Subject: [PATCH 045/121] show all config parameters in readme --- plugins/outputs/postgresql/README.md | 19 +++++++++++++++++++ plugins/outputs/postgresql/postgresql.go | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md index 61d5ace8d307d..90324827d7e73 100644 --- a/plugins/outputs/postgresql/README.md +++ b/plugins/outputs/postgresql/README.md @@ -14,4 +14,23 @@ This output plugin writes all metrics to PostgreSQL. ## Store tags as foreign keys in the metrics table. Default is false. # tags_as_foreignkeys = false + + ## Template to use for generating tables + ## Available Variables: + ## {TABLE} - tablename as identifier + ## {TABLELITERAL} - tablename as string literal + ## {COLUMNS} - column definitions + ## {KEY_COLUMNS} - comma-separated list of key columns (time + tags) + + ## Default template + # table_template = "CREATE TABLE {TABLE}({COLUMNS})" + ## Example for timescaledb + # table_template = "CREATE TABLE {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval);" + + ## Use jsonb datatype for tags. Default is true. + # tags_as_jsonb = true + + ## Use jsonb datatype for fields. Default is true. + # fields_as_jsonb = true + ``` diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 8c75102b74f9f..7a755bb5b675b 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -106,7 +106,7 @@ var sampleConfig = ` ## Default template # table_template = "CREATE TABLE {TABLE}({COLUMNS})" - ## Example for timescale + ## Example for timescaledb # table_template = "CREATE TABLE {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval);" ## Use jsonb datatype for tags From b43071ef525ea75042e12c032f0293d5e3db2125 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Tue, 16 Oct 2018 12:04:13 +0200 Subject: [PATCH 046/121] use CREATE TABLE IF NOT EXISTS --- plugins/outputs/postgresql/README.md | 4 ++-- plugins/outputs/postgresql/postgresql.go | 8 ++++---- plugins/outputs/postgresql/postgresql_test.go | 10 +++++----- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md index 90324827d7e73..5a341a837230c 100644 --- a/plugins/outputs/postgresql/README.md +++ b/plugins/outputs/postgresql/README.md @@ -23,9 +23,9 @@ This output plugin writes all metrics to PostgreSQL. ## {KEY_COLUMNS} - comma-separated list of key columns (time + tags) ## Default template - # table_template = "CREATE TABLE {TABLE}({COLUMNS})" + # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})" ## Example for timescaledb - # table_template = "CREATE TABLE {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval);" + # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval, if_not_exists := true);" ## Use jsonb datatype for tags. Default is true. 
# tags_as_jsonb = true diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 7a755bb5b675b..f033360d8463e 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -105,9 +105,9 @@ var sampleConfig = ` ## {KEY_COLUMNS} - comma-separated list of key columns (time + tags) ## Default template - # table_template = "CREATE TABLE {TABLE}({COLUMNS})" + # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})" ## Example for timescaledb - # table_template = "CREATE TABLE {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval);" + # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval,if_not_exists := true);" ## Use jsonb datatype for tags # tags_as_jsonb = true @@ -386,7 +386,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { datatype = deriveDatatype(values[i]) } } - query := "ALTER TABLE %s.%s ADD COLUMN %s %s;" + query := "ALTER TABLE %s.%s ADD COLUMN IF NOT EXISTS %s %s;" _, err = p.db.Exec(fmt.Sprintf(query, quoteIdent("public"), quoteIdent(tablename), quoteIdent(column), datatype)) if err != nil { return err @@ -406,7 +406,7 @@ func init() { func newPostgresql() *Postgresql { return &Postgresql{ - TableTemplate: "CREATE TABLE {TABLE}({COLUMNS})", + TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", TagsAsJsonb: true, TagTableSuffix: "_tag", FieldsAsJsonb: true, diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 962f7808fc69e..381153ddd5b08 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -26,22 +26,22 @@ func TestPostgresqlCreateStatement(t *testing.T) { var m telegraf.Metric m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"(time timestamptz,fields jsonb)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "m"(time timestamptz,fields jsonb)`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"(time timestamptz,tags jsonb,fields jsonb)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "m"(time timestamptz,tags jsonb,fields jsonb)`, p.generateCreateTable(m)) p.TagsAsJsonb = false p.FieldsAsJsonb = false m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"(time timestamptz,"f" float8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "m"(time timestamptz,"f" float8)`, p.generateCreateTable(m)) m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"(time timestamptz,"i" int8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "m"(time timestamptz,"i" int8)`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE "m"(time timestamptz,"k" text,"i" int8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "m"(time timestamptz,"k" text,"i" int8)`, p.generateCreateTable(m)) } From f6fa727cfe54d9cf1604763b891a4e012c761e33 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Thu, 25 Oct 2018 
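The IF NOT EXISTS default matters because the plugin's table cache is in-memory only: after an agent restart the cache is empty and the CREATE statement runs again even though the table already exists. A small self-contained sketch of that interplay (the cache and DDL below are illustrative, not an excerpt):

```go
package main

import "fmt"

func main() {
	tables := map[string]bool{} // fresh, empty cache after a restart
	ddl := `CREATE TABLE IF NOT EXISTS "cpu"(time timestamptz,fields jsonb)`
	if !tables["cpu"] {
		fmt.Println(ddl) // a no-op on the server if "cpu" already exists
		tables["cpu"] = true
	}
}
```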
From f6fa727cfe54d9cf1604763b891a4e012c761e33 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Thu, 25 Oct 2018 10:36:40 +0200
Subject: [PATCH 047/121] remove commented out code, initialize vars with
 values

---
 plugins/outputs/postgresql/README.md     |  3 ---
 plugins/outputs/postgresql/postgresql.go | 13 +++----------
 2 files changed, 3 insertions(+), 13 deletions(-)

diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md
index 5a341a837230c..14687f961dd9a 100644
--- a/plugins/outputs/postgresql/README.md
+++ b/plugins/outputs/postgresql/README.md
@@ -9,9 +9,6 @@ This output plugin writes all metrics to PostgreSQL.
 [[outputs.postgresql]]
   address = "host=localhost user=postgres sslmode=verify-full"
 
-  ## A list of tags to exclude from storing. If not specified, all tags are stored.
-  # ignored_tags = ["foo", "bar"]
-
   ## Store tags as foreign keys in the metrics table. Default is false.
   # tags_as_foreignkeys = false
 
diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index f033360d8463e..2db0b3a9f7556 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -180,15 +180,13 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string {
 }
 
 func (p *Postgresql) generateInsert(tablename string, columns []string) string {
-
 	var placeholder, quoted []string
 	for i, column := range columns {
 		placeholder = append(placeholder, fmt.Sprintf("$%d", i+1))
 		quoted = append(quoted, quoteIdent(column))
 	}
 
-	sql := fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", quoteIdent(tablename), strings.Join(quoted, ","), strings.Join(placeholder, ","))
-	return sql
+	return fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", quoteIdent(tablename), strings.Join(quoted, ","), strings.Join(placeholder, ","))
 }
 
 func (p *Postgresql) tableExists(tableName string) bool {
@@ -224,13 +222,10 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 			p.Tables[tablename] = true
 		}
 
-		var columns []string
-		var values []interface{}
+		columns := []string{"time"}
+		values := []interface{}{metric.Time()}
 		var js map[string]interface{}
 
-		columns = append(columns, "time")
-		values = append(values, metric.Time())
-
 		if len(metric.Tags()) > 0 {
 			if p.TagsAsForeignkeys {
 				// tags in separate table
@@ -268,7 +263,6 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 
 		err := p.db.QueryRow(query, where_values...).Scan(&tag_id)
 		if err != nil {
-			// log.Printf("I! Foreign key reference not found %s: %v", tablename, err)
 			query := p.generateInsert(tablename+p.TagTableSuffix, where_columns) + " RETURNING tag_id"
 			err := p.db.QueryRow(query, where_values...).Scan(&tag_id)
 			if err != nil {
@@ -353,7 +347,6 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 	}
 
 	for table_and_cols, values := range batches {
-		// log.Printf("Writing %d metrics into %s", len(params[table_and_cols]), table_and_cols)
 		sql := fmt.Sprintf("INSERT INTO %s VALUES (%s)", table_and_cols, strings.Join(params[table_and_cols], "),("))
 		_, err := p.db.Exec(sql, values...)
 		if err != nil {
if err != nil { From 99970bc97e3351103b0149da53f7b5a84b839340 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Tue, 27 Nov 2018 14:33:34 +0100 Subject: [PATCH 048/121] fix TABLELITERAL quoting --- plugins/outputs/postgresql/postgresql.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 2db0b3a9f7556..b40f8a43a7818 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -171,7 +171,7 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { } query := strings.Replace(p.TableTemplate, "{TABLE}", quoteIdent(metric.Name()), -1) - query = strings.Replace(query, "{TABLELITERAL}", quoteLiteral(metric.Name()), -1) + query = strings.Replace(query, "{TABLELITERAL}", quoteLiteral(quoteIdent(metric.Name())), -1) query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1) query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) From 150a2a736bb14f25e02e3705a5beba260d52279c Mon Sep 17 00:00:00 2001 From: Rauli Ikonen Date: Sun, 25 Nov 2018 12:27:31 +0200 Subject: [PATCH 049/121] pg output: Support defining schema for metrics tables Make sure explicit schema is always used and allow changing that from public into something else. --- plugins/outputs/postgresql/postgresql.go | 30 ++++++++++++++++-------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index b40f8a43a7818..f8e54df1fdb23 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -17,6 +17,7 @@ import ( type Postgresql struct { db *sql.DB Address string + Schema string TagsAsForeignkeys bool TagsAsJsonb bool FieldsAsJsonb bool @@ -57,6 +58,10 @@ func quoteLiteral(name string) string { return "'" + strings.Replace(name, "'", "''", -1) + "'" } +func (p *Postgresql) fullTableName(name string) string { + return quoteIdent(p.Schema) + "." 
+ quoteIdent(name) +} + func deriveDatatype(value interface{}) string { var datatype string @@ -109,6 +114,9 @@ var sampleConfig = ` ## Example for timescaledb # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval,if_not_exists := true);" + ## Schema to create the tables into + # schema = "public" + ## Use jsonb datatype for tags # tags_as_jsonb = true @@ -170,8 +178,8 @@ func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { } } - query := strings.Replace(p.TableTemplate, "{TABLE}", quoteIdent(metric.Name()), -1) - query = strings.Replace(query, "{TABLELITERAL}", quoteLiteral(quoteIdent(metric.Name())), -1) + query := strings.Replace(p.TableTemplate, "{TABLE}", p.fullTableName(metric.Name()), -1) + query = strings.Replace(query, "{TABLELITERAL}", quoteLiteral(p.fullTableName(metric.Name())), -1) query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1) query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) @@ -186,12 +194,12 @@ func (p *Postgresql) generateInsert(tablename string, columns []string) string { quoted = append(quoted, quoteIdent(column)) } - return fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", quoteIdent(tablename), strings.Join(quoted, ","), strings.Join(placeholder, ",")) + return fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", p.fullTableName(tablename), strings.Join(quoted, ","), strings.Join(placeholder, ",")) } func (p *Postgresql) tableExists(tableName string) bool { - stmt := "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname NOT IN ('information_schema','pg_catalog');" - result, err := p.db.Exec(stmt, tableName) + stmt := "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname = $2;" + result, err := p.db.Exec(stmt, tableName, p.Schema) if err != nil { log.Printf("E! Error checking for existence of metric table %s: %v", tableName, err) return false @@ -217,6 +225,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { createStmt := p.generateCreateTable(metric) _, err := p.db.Exec(createStmt) if err != nil { + log.Printf("E! Creating table failed: statement: %v, error: %v", createStmt, err) return err } p.Tables[tablename] = true @@ -259,7 +268,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { for i, column := range where_columns { where_parts = append(where_parts, fmt.Sprintf("%s = $%d", quoteIdent(column), i+1)) } - query := fmt.Sprintf("SELECT tag_id FROM %s WHERE %s", quoteIdent(tablename+p.TagTableSuffix), strings.Join(where_parts, " AND ")) + query := fmt.Sprintf("SELECT tag_id FROM %s WHERE %s", p.fullTableName(tablename+p.TagTableSuffix), strings.Join(where_parts, " AND ")) err := p.db.QueryRow(query, where_values...).Scan(&tag_id) if err != nil { @@ -335,7 +344,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { for _, column := range columns { quoted_columns = append(quoted_columns, quoteIdent(column)) } - table_and_cols = fmt.Sprintf("%s(%s)", quoteIdent(tablename), strings.Join(quoted_columns, ",")) + table_and_cols = fmt.Sprintf("%s(%s)", p.fullTableName(tablename), strings.Join(quoted_columns, ",")) batches[table_and_cols] = append(batches[table_and_cols], values...) 
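 		// shift this row's placeholder numbers past the rows already queued so
 		// the joined groups read ($1,$2),($3,$4),... in submission order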
 		for i, _ := range columns {
 			i += len(params[table_and_cols]) * len(columns)
 			placeholder = append(placeholder, fmt.Sprintf("$%d", i+1))
 		}
 		params[table_and_cols] = append(params[table_and_cols], strings.Join(placeholder, ","))
 		colmap[table_and_cols] = columns
 		tabmap[table_and_cols] = tablename
 	}
@@ -361,7 +370,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 			}
 			query := "SELECT c FROM unnest(array[%s]) AS c WHERE NOT EXISTS(SELECT 1 FROM information_schema.columns WHERE column_name=c AND table_schema=$1 AND table_name=$2)"
 			query = fmt.Sprintf(query, strings.Join(quoted_columns, ","))
-			result, err := p.db.Query(query, "public", tablename)
+			result, err := p.db.Query(query, p.Schema, tablename)
 			defer result.Close()
 			if err != nil {
 				return err
@@ -379,8 +388,8 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 					datatype = deriveDatatype(values[i])
 				}
 			}
-			query := "ALTER TABLE %s.%s ADD COLUMN IF NOT EXISTS %s %s;"
-			_, err = p.db.Exec(fmt.Sprintf(query, quoteIdent("public"), quoteIdent(tablename), quoteIdent(column), datatype))
+			query := "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;"
+			_, err = p.db.Exec(fmt.Sprintf(query, p.fullTableName(tablename), quoteIdent(column), datatype))
 			if err != nil {
 				return err
 			}
@@ -399,6 +408,7 @@ func init() {
 
 func newPostgresql() *Postgresql {
 	return &Postgresql{
+		Schema:        "public",
 		TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})",
 		TagsAsJsonb:   true,
 		TagTableSuffix: "_tag",

From 4adb09686bbc175d01347785cbff8d8d48603bcd Mon Sep 17 00:00:00 2001
From: Rauli Ikonen
Date: Thu, 29 Nov 2018 12:04:24 +0200
Subject: [PATCH 050/121] pg output: Retry writing metrics after adding missing columns

In some cases metric datapoints contain only a subset of all the fields
that may be present for that metric. When fields are stored as separate
columns instead of JSON, a new column previously made the write fail on
the first metric that was missing it; depending on how often Write was
called and how many metrics showed this behavior, it could take very
long until all metrics got through.

---
 plugins/outputs/postgresql/postgresql.go | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index f8e54df1fdb23..12d09d8a11f10 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -360,6 +360,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 		_, err := p.db.Exec(sql, values...)
 		if err != nil {
 			// check if insert error was caused by column mismatch
+			retry := false
 			if p.FieldsAsJsonb == false {
 				log.Printf("E! Error during insert: %v", err)
 				tablename := tabmap[table_and_cols]
@@ -393,10 +394,19 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 					if err != nil {
 						return err
 					}
+					retry = true
 				}
 			}
 
-		return err
+			// We added some columns and insert might work now. Try again immediately to
+			// avoid long lead time in getting metrics when there are several columns missing
+			// from the original create statement and they get added in small drops.
+			if retry {
+				_, err = p.db.Exec(sql, values...)
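+				// err now reflects the retried insert; any remaining failure is
+				// returned by the error check below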
+ } + if err != nil { + return err + } } } return nil From 3d2d42fc23b3d67fae780b3d34b438038ad7ddae Mon Sep 17 00:00:00 2001 From: Rauli Ikonen Date: Thu, 29 Nov 2018 15:49:19 +0200 Subject: [PATCH 051/121] pg output: Don't try closing nil rows Used to cause SIGSEGV when the operation failed --- plugins/outputs/postgresql/postgresql.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 12d09d8a11f10..3fe78eac193ce 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -372,12 +372,12 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { query := "SELECT c FROM unnest(array[%s]) AS c WHERE NOT EXISTS(SELECT 1 FROM information_schema.columns WHERE column_name=c AND table_schema=$1 AND table_name=$2)" query = fmt.Sprintf(query, strings.Join(quoted_columns, ",")) result, err := p.db.Query(query, p.Schema, tablename) - defer result.Close() if err != nil { return err } - // some columns are missing + defer result.Close() + // some columns are missing var column, datatype string for result.Next() { err := result.Scan(&column) From bbfd604fa789504a4b7200419f4ce6f2ff591883 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Thu, 29 Nov 2018 17:41:35 +0100 Subject: [PATCH 052/121] adjust test output --- plugins/outputs/postgresql/postgresql_test.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 381153ddd5b08..3f0863ce427c7 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -26,22 +26,22 @@ func TestPostgresqlCreateStatement(t *testing.T) { var m telegraf.Metric m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "m"(time timestamptz,fields jsonb)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,fields jsonb)`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "m"(time timestamptz,tags jsonb,fields jsonb)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,tags jsonb,fields jsonb)`, p.generateCreateTable(m)) p.TagsAsJsonb = false p.FieldsAsJsonb = false m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "m"(time timestamptz,"f" float8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"f" float8)`, p.generateCreateTable(m)) m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "m"(time timestamptz,"i" int8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"i" int8)`, p.generateCreateTable(m)) m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "m"(time timestamptz,"k" text,"i" int8)`, p.generateCreateTable(m)) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"k" text,"i" int8)`, p.generateCreateTable(m)) } @@ -52,17 +52,17 @@ func TestPostgresqlInsertStatement(t 
*testing.T) {
 	p.FieldsAsJsonb = false
 
 	sql := p.generateInsert("m", []string{"time", "f"})
-	assert.Equal(t, `INSERT INTO "m"("time","f") VALUES($1,$2)`, sql)
+	assert.Equal(t, `INSERT INTO "public"."m"("time","f") VALUES($1,$2)`, sql)
 
 	sql = p.generateInsert("m", []string{"time", "i"})
-	assert.Equal(t, `INSERT INTO "m"("time","i") VALUES($1,$2)`, sql)
+	assert.Equal(t, `INSERT INTO "public"."m"("time","i") VALUES($1,$2)`, sql)
 
 	sql = p.generateInsert("m", []string{"time", "f", "i"})
-	assert.Equal(t, `INSERT INTO "m"("time","f","i") VALUES($1,$2,$3)`, sql)
+	assert.Equal(t, `INSERT INTO "public"."m"("time","f","i") VALUES($1,$2,$3)`, sql)
 
 	sql = p.generateInsert("m", []string{"time", "k", "i"})
-	assert.Equal(t, `INSERT INTO "m"("time","k","i") VALUES($1,$2,$3)`, sql)
+	assert.Equal(t, `INSERT INTO "public"."m"("time","k","i") VALUES($1,$2,$3)`, sql)
 
 	sql = p.generateInsert("m", []string{"time", "k1", "k2", "i"})
-	assert.Equal(t, `INSERT INTO "m"("time","k1","k2","i") VALUES($1,$2,$3,$4)`, sql)
+	assert.Equal(t, `INSERT INTO "public"."m"("time","k1","k2","i") VALUES($1,$2,$3,$4)`, sql)
 }

From a76cbaacf362038b84bf64b859017d6078b2ee18 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Thu, 29 Nov 2018 17:49:41 +0100
Subject: [PATCH 053/121] add schema config setting to README

---
 plugins/outputs/postgresql/README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md
index 14687f961dd9a..3d3f623776dd8 100644
--- a/plugins/outputs/postgresql/README.md
+++ b/plugins/outputs/postgresql/README.md
@@ -24,6 +24,9 @@ This output plugin writes all metrics to PostgreSQL.
   ## Example for timescaledb
   # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval, if_not_exists := true);"
 
+  ## Schema to create the tables into
+  # schema = "public"
+
   ## Use jsonb datatype for tags. Default is true.
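  ## (when true, all tags are stored in a single jsonb column named "tags")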
# tags_as_jsonb = true From 046d52d261c8084beaf33244ad3e74c1d5ffa64d Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Thu, 29 Nov 2018 18:47:03 +0100 Subject: [PATCH 054/121] Fix adding tags when using tags as foreign key --- plugins/outputs/postgresql/postgresql.go | 170 +++++++++++++++-------- 1 file changed, 114 insertions(+), 56 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 3fe78eac193ce..4b8a588aa3da5 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -211,6 +211,112 @@ func (p *Postgresql) tableExists(tableName string) bool { return false } +func (p *Postgresql) getTagId(metric telegraf.Metric) (int, error) { + var tag_id int + var where_columns []string + var where_values []interface{} + tablename := metric.Name() + + if p.TagsAsJsonb { + if len(metric.Tags()) > 0 { + d, err := buildJsonbTags(metric.Tags()) + if err != nil { + return tag_id, err + } + + where_columns = append(where_columns, "tags") + where_values = append(where_values, d) + } + } else { + for column, value := range metric.Tags() { + where_columns = append(where_columns, column) + where_values = append(where_values, value) + } + } + + var where_parts []string + for i, column := range where_columns { + where_parts = append(where_parts, fmt.Sprintf("%s = $%d", quoteIdent(column), i+1)) + } + query := fmt.Sprintf("SELECT tag_id FROM %s WHERE %s", p.fullTableName(tablename+p.TagTableSuffix), strings.Join(where_parts, " AND ")) + + err := p.db.QueryRow(query, where_values...).Scan(&tag_id) + if err != nil { + query := p.generateInsert(tablename+p.TagTableSuffix, where_columns) + " RETURNING tag_id" + err := p.db.QueryRow(query, where_values...).Scan(&tag_id) + if err != nil { + // check if insert error was caused by column mismatch + retry := false + if p.TagsAsJsonb == false { + log.Printf("E! Error during insert: %v", err) + tablename := tablename + p.TagTableSuffix + columns := where_columns + var quoted_columns []string + for _, column := range columns { + quoted_columns = append(quoted_columns, quoteLiteral(column)) + } + query := "SELECT c FROM unnest(array[%s]) AS c WHERE NOT EXISTS(SELECT 1 FROM information_schema.columns WHERE column_name=c AND table_schema=$1 AND table_name=$2)" + query = fmt.Sprintf(query, strings.Join(quoted_columns, ",")) + result, err := p.db.Query(query, p.Schema, tablename) + if err != nil { + return tag_id, err + } + defer result.Close() + + // some columns are missing + var column, datatype string + for result.Next() { + err := result.Scan(&column) + if err != nil { + log.Println(err) + } + for i, name := range columns { + if name == column { + datatype = deriveDatatype(where_values[i]) + } + } + query := "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;" + _, err = p.db.Exec(fmt.Sprintf(query, p.fullTableName(tablename), quoteIdent(column), datatype)) + if err != nil { + return tag_id, err + } + retry = true + } + } + + // We added some columns and insert might work now. Try again immediately to + // avoid long lead time in getting metrics when there are several columns missing + // from the original create statement and they get added in small drops. 
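+			// the query retried below is the INSERT ... RETURNING tag_id built
+			// above, so a successful retry also yields the id of the new tag set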
+			if retry {
+				err := p.db.QueryRow(query, where_values...).Scan(&tag_id)
+				if err != nil {
+					return tag_id, err
+				}
+			}
+		}
+	}
+	return tag_id, nil
+}
+
+func buildJsonbTags(tags map[string]string) ([]byte, error) {
+	js := make(map[string]interface{})
+	for column, value := range tags {
+		js[column] = value
+	}
+
+	return buildJsonb(js)
+}
+
+func buildJsonb(data map[string]interface{}) ([]byte, error) {
+	if len(data) > 0 {
+		d, err := json.Marshal(data)
+		if err != nil {
+			return nil, err
+		}
+		return d, nil
+	}
+	return nil, nil
+}
+
 func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 	batches := make(map[string][]interface{})
 	params := make(map[string][]string)
@@ -233,68 +339,25 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 		columns := []string{"time"}
 		values := []interface{}{metric.Time()}
-		var js map[string]interface{}
 
 		if len(metric.Tags()) > 0 {
 			if p.TagsAsForeignkeys {
 				// tags in separate table
-				var tag_id int
-				var where_columns []string
-				var where_values []interface{}
-
-				if p.TagsAsJsonb {
-					js = make(map[string]interface{})
-					for column, value := range metric.Tags() {
-						js[column] = value
-					}
-
-					if len(js) > 0 {
-						d, err := json.Marshal(js)
-						if err != nil {
-							return err
-						}
-
-						where_columns = append(where_columns, "tags")
-						where_values = append(where_values, d)
-					}
-				} else {
-					for column, value := range metric.Tags() {
-						where_columns = append(where_columns, column)
-						where_values = append(where_values, value)
-					}
-				}
-
-				var where_parts []string
-				for i, column := range where_columns {
-					where_parts = append(where_parts, fmt.Sprintf("%s = $%d", quoteIdent(column), i+1))
-				}
-				query := fmt.Sprintf("SELECT tag_id FROM %s WHERE %s", p.fullTableName(tablename+p.TagTableSuffix), strings.Join(where_parts, " AND "))
-
-				err := p.db.QueryRow(query, where_values...).Scan(&tag_id)
+				tag_id, err := p.getTagId(metric)
 				if err != nil {
-					query := p.generateInsert(tablename+p.TagTableSuffix, where_columns) + " RETURNING tag_id"
-					err := p.db.QueryRow(query, where_values...).Scan(&tag_id)
-					if err != nil {
-						return err
-					}
+					return err
 				}
-
 				columns = append(columns, "tag_id")
 				values = append(values, tag_id)
 			} else {
 				// tags in measurement table
 				if p.TagsAsJsonb {
-					js = make(map[string]interface{})
-					for column, value := range metric.Tags() {
-						js[column] = value
+					d, err := buildJsonbTags(metric.Tags())
+					if err != nil {
+						return err
 					}
 
-					if len(js) > 0 {
-						d, err := json.Marshal(js)
-						if err != nil {
-							return err
-						}
-
+					if d != nil {
 						columns = append(columns, "tags")
 						values = append(values, d)
 					}
@@ -314,12 +377,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 		}
 
 		if p.FieldsAsJsonb {
-			js = make(map[string]interface{})
-			for column, value := range metric.Fields() {
-				js[column] = value
-			}
-
-			d, err := json.Marshal(js)
+			d, err := buildJsonb(metric.Fields())
 			if err != nil {
 				return err
 			}
@@ -398,7 +456,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error {
 			}
 		}
 
-		// We added some columns and insert might work now. Try again immediately to
+		// We added some columns and insert might work now. Try again immediately to
 		// avoid long lead time in getting metrics when there are several columns missing
 		// from the original create statement and they get added in small drops.
 			if retry {

From 4c24bc32821af681dbcb23d6d4fed981fe92bfee Mon Sep 17 00:00:00 2001
From: Blagoj Atanasovski
Date: Wed, 29 May 2019 13:32:55 +0200
Subject: [PATCH 055/121] Refactor PostgreSQL output plugin code

The PostgreSQL plugin code is split into multiple files for better
readability. Unit and integration tests are added. Code complexity is
reduced a bit by reducing branching.

---
 plugins/outputs/postgresql/README.md          |   4 +-
 .../outputs/postgresql/add_missing_columns.go |  72 +++
 .../postgresql/add_missing_columns_test.go    |  92 ++++
 plugins/outputs/postgresql/create_table.go    |  72 +++
 .../outputs/postgresql/create_table_test.go   |  48 ++
 plugins/outputs/postgresql/db_wrapper.go      |  43 ++
 plugins/outputs/postgresql/generate_insert.go |  24 +
 .../postgresql/generate_insert_test.go        |  39 ++
 plugins/outputs/postgresql/get_tag_id.go      |  87 ++++
 plugins/outputs/postgresql/postgresql.go      | 464 +++++-------------
 .../postgresql_integration_test.go            | 273 +++++++++++
 plugins/outputs/postgresql/postgresql_test.go | 210 ++++++--
 plugins/outputs/postgresql/table_keeper.go    |  47 ++
 .../outputs/postgresql/table_keeper_test.go   |  71 +++
 plugins/outputs/postgresql/utils.go           |  68 +++
 15 files changed, 1215 insertions(+), 399 deletions(-)
 create mode 100644 plugins/outputs/postgresql/add_missing_columns.go
 create mode 100644 plugins/outputs/postgresql/add_missing_columns_test.go
 create mode 100644 plugins/outputs/postgresql/create_table.go
 create mode 100644 plugins/outputs/postgresql/create_table_test.go
 create mode 100644 plugins/outputs/postgresql/db_wrapper.go
 create mode 100644 plugins/outputs/postgresql/generate_insert.go
 create mode 100644 plugins/outputs/postgresql/generate_insert_test.go
 create mode 100644 plugins/outputs/postgresql/get_tag_id.go
 create mode 100644 plugins/outputs/postgresql/postgresql_integration_test.go
 create mode 100644 plugins/outputs/postgresql/table_keeper.go
 create mode 100644 plugins/outputs/postgresql/table_keeper_test.go
 create mode 100644 plugins/outputs/postgresql/utils.go

diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md
index 3d3f623776dd8..b9d0020682023 100644
--- a/plugins/outputs/postgresql/README.md
+++ b/plugins/outputs/postgresql/README.md
@@ -28,9 +28,9 @@ This output plugin writes all metrics to PostgreSQL.
   # schema = "public"
 
   ## Use jsonb datatype for tags. Default is true.
-  # tags_as_jsonb = true
+  # tags_as_jsonb = false
 
   ## Use jsonb datatype for fields. Default is true.
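  ## (when false, each field gets its own column with a datatype derived from the field's Go type)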
- # fields_as_jsonb = true + # fields_as_jsonb = false ``` diff --git a/plugins/outputs/postgresql/add_missing_columns.go b/plugins/outputs/postgresql/add_missing_columns.go new file mode 100644 index 0000000000000..1d2be69bf3c55 --- /dev/null +++ b/plugins/outputs/postgresql/add_missing_columns.go @@ -0,0 +1,72 @@ +package postgresql + +import ( + "fmt" + "log" + "strings" +) + +func (p *Postgresql) addMissingColumns(tableName string, columns []string, values []interface{}) (bool, error) { + columnStatuses, err := p.whichColumnsAreMissing(columns, tableName) + if err != nil { + return false, err + } + + retry := false + for currentColumn, isMissing := range columnStatuses { + if !isMissing { + continue + } + + dataType := deriveDatatype(values[currentColumn]) + columnName := columns[currentColumn] + if err := p.addColumnToTable(columnName, dataType, tableName); err != nil { + return false, err + } + retry = true + } + + return retry, nil +} + +func prepareMissingColumnsQuery(columns []string) string { + var quotedColumns = make([]string, len(columns)) + for i, column := range columns { + quotedColumns[i] = quoteLiteral(column) + } + return fmt.Sprintf(missingColumnsTemplate, strings.Join(quotedColumns, ",")) +} + +// for a given array of columns x = [a, b, c ...] it returns an array of bools indicating +// if x[i] is missing +func (p *Postgresql) whichColumnsAreMissing(columns []string, tableName string) ([]bool, error) { + missingColumnsQuery := prepareMissingColumnsQuery(columns) + result, err := p.db.Query(missingColumnsQuery, p.Schema, tableName) + if err != nil { + return nil, err + } + defer result.Close() + columnStatus := make([]bool, len(columns)) + var isMissing bool + var columnName string + currentColumn := 0 + + for result.Next() { + err := result.Scan(&columnName, &isMissing) + if err != nil { + log.Println(err) + return nil, err + } + columnStatus[currentColumn] = isMissing + currentColumn++ + } + + return columnStatus, nil +} + +func (p *Postgresql) addColumnToTable(columnName, dataType, tableName string) error { + fullTableName := p.fullTableName(tableName) + addColumnQuery := fmt.Sprintf(addColumnTemplate, fullTableName, quoteIdent(columnName), dataType) + _, err := p.db.Exec(addColumnQuery) + return err +} diff --git a/plugins/outputs/postgresql/add_missing_columns_test.go b/plugins/outputs/postgresql/add_missing_columns_test.go new file mode 100644 index 0000000000000..7140847e03375 --- /dev/null +++ b/plugins/outputs/postgresql/add_missing_columns_test.go @@ -0,0 +1,92 @@ +package postgresql + +import ( + "database/sql" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func prepareMissingColumnsQuery1(columns []string) string { + var quotedColumns = make([]string, len(columns)) + for i, column := range columns { + quotedColumns[i] = quoteLiteral(column) + } + return fmt.Sprintf(missingColumnsTemplate, strings.Join(quotedColumns, ",")) +} + +func TestPrepareMissingColumnsQuery(t *testing.T) { + columns := []string{} + assert.Equal(t, `WITH available AS (SELECT column_name as c FROM information_schema.columns WHERE table_schema = $1 and table_name = $2),`+ + `required AS (SELECT c FROM unnest(array []) AS c) `+ + `SELECT required.c, available.c IS NULL FROM required LEFT JOIN available ON required.c = available.c;`, + prepareMissingColumnsQuery(columns)) + columns = []string{"a", "b", "c"} + assert.Equal(t, `WITH available AS (SELECT column_name as c FROM information_schema.columns WHERE table_schema = $1 and table_name = $2),`+ + 
`required AS (SELECT c FROM unnest(array ['a','b','c']) AS c) `+ + `SELECT required.c, available.c IS NULL FROM required LEFT JOIN available ON required.c = available.c;`, + prepareMissingColumnsQuery(columns)) +} + +func TestWhichColumnsAreMissing(t *testing.T) { + mock := &mockWr{} + p := &Postgresql{db: mock} + + columns := []string{"col1"} + mock.queryErr = fmt.Errorf("error 1") + mock.expected = prepareMissingColumnsQuery(columns) + table := "tableName" + _, err := p.whichColumnsAreMissing(columns, table) + assert.Equal(t, err.Error(), "error 1") +} + +func TestAddColumnToTable(t *testing.T) { + mock := &mockWr{} + p := &Postgresql{db: mock, Schema: "pub"} + + column := "col1" + dataType := "text" + tableName := "table" + mock.execErr = fmt.Errorf("error 1") + mock.expected = `ALTER TABLE "pub"."table" ADD COLUMN IF NOT EXISTS "col1" text;` + err := p.addColumnToTable(column, dataType, tableName) + assert.EqualError(t, err, "error 1") + + mock.execErr = nil + assert.Nil(t, p.addColumnToTable(column, dataType, tableName)) + +} + +func (p *Postgresql) addColumnToTable1(columnName, dataType, tableName string) error { + fullTableName := p.fullTableName(tableName) + addColumnQuery := fmt.Sprintf(addColumnTemplate, fullTableName, quoteIdent(columnName), dataType) + _, err := p.db.Exec(addColumnQuery) + return err +} + +type mockWr struct { + expected string + exec sql.Result + execErr error + query *sql.Rows + queryErr error +} + +func (m *mockWr) Exec(query string, args ...interface{}) (sql.Result, error) { + if m.expected != query { + return nil, fmt.Errorf("unexpected query; exp: '%s'; got: '%s'", m.expected, query) + } + return m.exec, m.execErr +} +func (m *mockWr) Query(query string, args ...interface{}) (*sql.Rows, error) { + if m.expected != query { + return nil, fmt.Errorf("unexpected query; exp: '%s'; got: '%s'", m.expected, query) + } + return m.query, m.queryErr +} +func (m *mockWr) QueryRow(query string, args ...interface{}) *sql.Row { + return nil +} +func (m *mockWr) Close() error { return nil } diff --git a/plugins/outputs/postgresql/create_table.go b/plugins/outputs/postgresql/create_table.go new file mode 100644 index 0000000000000..9b647e276b3ee --- /dev/null +++ b/plugins/outputs/postgresql/create_table.go @@ -0,0 +1,72 @@ +package postgresql + +import ( + "fmt" + "strings" + + "github.com/influxdata/telegraf" +) + +const ( + tagIDColumn = "tag_id" + createTagsTableTemplate = "CREATE TABLE IF NOT EXISTS %s(tag_id serial primary key,%s,UNIQUE(%s))" +) + +func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { + var columns []string + var pk []string + var sql []string + + pk = append(pk, quoteIdent("time")) + columns = append(columns, "time timestamptz") + + // handle tags if necessary + if len(metric.Tags()) > 0 { + if p.TagsAsForeignkeys { + // tags in separate table + var tagColumns []string + var tagColumndefs []string + columns = append(columns, "tag_id int") + + if p.TagsAsJsonb { + tagColumns = append(tagColumns, "tags") + tagColumndefs = append(tagColumndefs, "tags jsonb") + } else { + for column := range metric.Tags() { + tagColumns = append(tagColumns, quoteIdent(column)) + tagColumndefs = append(tagColumndefs, fmt.Sprintf("%s text", quoteIdent(column))) + } + } + table := p.fullTableName(metric.Name() + p.TagTableSuffix) + sql = append(sql, fmt.Sprintf(createTagsTableTemplate, table, strings.Join(tagColumndefs, ","), strings.Join(tagColumns, ","))) + } else { + // tags in measurement table + if p.TagsAsJsonb { + columns = append(columns, 
"tags jsonb") + } else { + for column := range metric.Tags() { + pk = append(pk, quoteIdent(column)) + columns = append(columns, fmt.Sprintf("%s text", quoteIdent(column))) + } + } + } + } + + if p.FieldsAsJsonb { + columns = append(columns, "fields jsonb") + } else { + var datatype string + for column, v := range metric.Fields() { + datatype = deriveDatatype(v) + columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(column), datatype)) + } + } + + query := strings.Replace(p.TableTemplate, "{TABLE}", p.fullTableName(metric.Name()), -1) + query = strings.Replace(query, "{TABLELITERAL}", quoteLiteral(p.fullTableName(metric.Name())), -1) + query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1) + query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) + + sql = append(sql, query) + return strings.Join(sql, ";") +} diff --git a/plugins/outputs/postgresql/create_table_test.go b/plugins/outputs/postgresql/create_table_test.go new file mode 100644 index 0000000000000..404e3fdbd4683 --- /dev/null +++ b/plugins/outputs/postgresql/create_table_test.go @@ -0,0 +1,48 @@ +package postgresql + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func TestGenerateCreateTable(t *testing.T) { + p := newPostgresql() + p.TagsAsJsonb = true + p.FieldsAsJsonb = true + timestamp := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) + + var m telegraf.Metric + m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,fields jsonb)`, p.generateCreateTable(m)) + + m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,tags jsonb,fields jsonb)`, p.generateCreateTable(m)) + + p.TagsAsJsonb = false + p.FieldsAsJsonb = false + + m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"f" float8)`, p.generateCreateTable(m)) + + m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"i" int8)`, p.generateCreateTable(m)) + + m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) + assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"k" text,"i" int8)`, p.generateCreateTable(m)) + + p.TagsAsForeignkeys = true + assert.Equal(t, + `CREATE TABLE IF NOT EXISTS "public"."m_tag"(tag_id serial primary key,"k" text,UNIQUE("k"));`+ + `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,tag_id int,"i" int8)`, + p.generateCreateTable(m)) + + p.TagsAsJsonb = true + assert.Equal(t, + `CREATE TABLE IF NOT EXISTS "public"."m_tag"(tag_id serial primary key,tags jsonb,UNIQUE(tags));`+ + `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,tag_id int,"i" int8)`, + p.generateCreateTable(m)) +} diff --git a/plugins/outputs/postgresql/db_wrapper.go b/plugins/outputs/postgresql/db_wrapper.go new file mode 100644 index 0000000000000..bb095429d985b --- /dev/null +++ b/plugins/outputs/postgresql/db_wrapper.go @@ -0,0 +1,43 @@ +package postgresql + +import ( + "database/sql" + // pgx driver for sql connections + _ "github.com/jackc/pgx/stdlib" +) + +type dbWrapper interface { + Exec(query string, 
args ...interface{}) (sql.Result, error) + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryRow(query string, args ...interface{}) *sql.Row + Close() error +} + +type defaultDbWrapper struct { + db *sql.DB +} + +func newDbWrapper(address string) (dbWrapper, error) { + db, err := sql.Open("pgx", address) + if err != nil { + return nil, err + } + + return &defaultDbWrapper{ + db: db, + }, nil +} + +func (d *defaultDbWrapper) Exec(query string, args ...interface{}) (sql.Result, error) { + return d.db.Exec(query, args...) +} + +func (d *defaultDbWrapper) Close() error { return d.db.Close() } + +func (d *defaultDbWrapper) Query(query string, args ...interface{}) (*sql.Rows, error) { + return d.db.Query(query, args...) +} + +func (d *defaultDbWrapper) QueryRow(query string, args ...interface{}) *sql.Row { + return d.db.QueryRow(query, args...) +} diff --git a/plugins/outputs/postgresql/generate_insert.go b/plugins/outputs/postgresql/generate_insert.go new file mode 100644 index 0000000000000..c71c884845da3 --- /dev/null +++ b/plugins/outputs/postgresql/generate_insert.go @@ -0,0 +1,24 @@ +package postgresql + +import ( + "fmt" + "strings" +) + +const ( + insertIntoSQLTemplate = "INSERT INTO %s(%s) VALUES(%s)" +) + +func (p *Postgresql) generateInsert(tablename string, columns []string) string { + valuePlaceholders := make([]string, len(columns)) + quotedColumns := make([]string, len(columns)) + for i, column := range columns { + valuePlaceholders[i] = fmt.Sprintf("$%d", i+1) + quotedColumns[i] = quoteIdent(column) + } + + fullTableName := p.fullTableName(tablename) + columnNames := strings.Join(quotedColumns, ",") + values := strings.Join(valuePlaceholders, ",") + return fmt.Sprintf(insertIntoSQLTemplate, fullTableName, columnNames, values) +} diff --git a/plugins/outputs/postgresql/generate_insert_test.go b/plugins/outputs/postgresql/generate_insert_test.go new file mode 100644 index 0000000000000..28d2e023b9790 --- /dev/null +++ b/plugins/outputs/postgresql/generate_insert_test.go @@ -0,0 +1,39 @@ +package postgresql + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPostgresqlQuote(t *testing.T) { + assert.Equal(t, `"foo"`, quoteIdent("foo")) + assert.Equal(t, `"fo'o"`, quoteIdent("fo'o")) + assert.Equal(t, `"fo""o"`, quoteIdent("fo\"o")) + + assert.Equal(t, "'foo'", quoteLiteral("foo")) + assert.Equal(t, "'fo''o'", quoteLiteral("fo'o")) + assert.Equal(t, "'fo\"o'", quoteLiteral("fo\"o")) +} + +func TestPostgresqlInsertStatement(t *testing.T) { + p := newPostgresql() + + p.TagsAsJsonb = false + p.FieldsAsJsonb = false + + sql := p.generateInsert("m", []string{"time", "f"}) + assert.Equal(t, `INSERT INTO "public"."m"("time","f") VALUES($1,$2)`, sql) + + sql = p.generateInsert("m", []string{"time", "i"}) + assert.Equal(t, `INSERT INTO "public"."m"("time","i") VALUES($1,$2)`, sql) + + sql = p.generateInsert("m", []string{"time", "f", "i"}) + assert.Equal(t, `INSERT INTO "public"."m"("time","f","i") VALUES($1,$2,$3)`, sql) + + sql = p.generateInsert("m", []string{"time", "k", "i"}) + assert.Equal(t, `INSERT INTO "public"."m"("time","k","i") VALUES($1,$2,$3)`, sql) + + sql = p.generateInsert("m", []string{"time", "k1", "k2", "i"}) + assert.Equal(t, `INSERT INTO "public"."m"("time","k1","k2","i") VALUES($1,$2,$3,$4)`, sql) +} diff --git a/plugins/outputs/postgresql/get_tag_id.go b/plugins/outputs/postgresql/get_tag_id.go new file mode 100644 index 0000000000000..c17e6a6ea978c --- /dev/null +++ b/plugins/outputs/postgresql/get_tag_id.go @@ -0,0 +1,87 
@@
+package postgresql
+
+import (
+	"fmt"
+	"log"
+	"strings"
+
+	"github.com/influxdata/telegraf"
+)
+
+const (
+	selectTagIDTemplate    = "SELECT tag_id FROM %s WHERE %s"
+	missingColumnsTemplate = "WITH available AS (SELECT column_name as c FROM information_schema.columns WHERE table_schema = $1 and table_name = $2)," +
+		"required AS (SELECT c FROM unnest(array [%s]) AS c) " +
+		"SELECT required.c, available.c IS NULL FROM required LEFT JOIN available ON required.c = available.c;"
+
+	addColumnTemplate = "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;"
+)
+
+func (p *Postgresql) getTagID(metric telegraf.Metric) (int, error) {
+	var tagID int
+	var whereColumns []string
+	var whereValues []interface{}
+	tablename := metric.Name()
+
+	if p.TagsAsJsonb && len(metric.Tags()) > 0 {
+		d, err := buildJsonbTags(metric.Tags())
+		if err != nil {
+			return tagID, err
+		}
+
+		whereColumns = append(whereColumns, "tags")
+		whereValues = append(whereValues, d)
+	} else {
+		for column, value := range metric.Tags() {
+			whereColumns = append(whereColumns, column)
+			whereValues = append(whereValues, value)
+		}
+	}
+
+	whereParts := make([]string, len(whereColumns))
+	for i, column := range whereColumns {
+		whereParts[i] = fmt.Sprintf("%s = $%d", quoteIdent(column), i+1)
+	}
+
+	tagsTableName := tablename + p.TagTableSuffix
+	tagsTableFullName := p.fullTableName(tagsTableName)
+	query := fmt.Sprintf(selectTagIDTemplate, tagsTableFullName, strings.Join(whereParts, " AND "))
+
+	err := p.db.QueryRow(query, whereValues...).Scan(&tagID)
+	if err == nil {
+		return tagID, nil
+	}
+	query = p.generateInsert(tagsTableName, whereColumns) + " RETURNING tag_id"
+	err = p.db.QueryRow(query, whereValues...).Scan(&tagID)
+	if err == nil {
+		return tagID, nil
+	}
+
+	// check if insert error was caused by column mismatch
+
+	// if tags are jsonb, there shouldn't be a column mismatch
+	if p.TagsAsJsonb {
+		return tagID, err
+	}
+
+	// check for missing columns
+	log.Printf("W! Possible column mismatch while inserting new tag-set: %v", err)
+	retry, err := p.addMissingColumns(tagsTableName, whereColumns, whereValues)
+	if err != nil {
+		// missing columns could not be added
+		log.Printf("E! Could not add missing columns: %v", err)
+		return tagID, err
+	}
+
+	// We added some columns and insert might work now. Try again immediately to
+	// avoid long lead time in getting metrics when there are several columns missing
+	// from the original create statement and they get added in small drops.
+	if retry {
+		log.Printf("I!
Retrying to insert new tag set") + err := p.db.QueryRow(query, whereValues...).Scan(&tagID) + if err != nil { + return tagID, err + } + } + return tagID, nil +} diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 4b8a588aa3da5..0e30ea6e507d3 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -1,21 +1,20 @@ package postgresql import ( - "database/sql" - "encoding/json" - "fmt" "log" "sort" - "strings" - - "github.com/jackc/pgx" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" ) +const ( + tagsJSONColumn = "tags" + fieldsJSONColumn = "fields" +) + type Postgresql struct { - db *sql.DB + db dbWrapper Address string Schema string TagsAsForeignkeys bool @@ -23,66 +22,41 @@ type Postgresql struct { FieldsAsJsonb bool TableTemplate string TagTableSuffix string - Tables map[string]bool + tables tableKeeper } +func init() { + outputs.Add("postgresql", func() telegraf.Output { return newPostgresql() }) +} + +func newPostgresql() *Postgresql { + return &Postgresql{ + Schema: "public", + TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", + TagTableSuffix: "_tag", + } +} + +// Connect establishes a connection to the target database and prepares the cache func (p *Postgresql) Connect() error { - db, err := sql.Open("pgx", p.Address) + db, err := newDbWrapper(p.Address) if err != nil { return err } p.db = db - p.Tables = make(map[string]bool) - + p.tables = newTableKeeper(db) return nil } +// Close closes the connection to the database func (p *Postgresql) Close() error { return p.db.Close() } -func contains(haystack []string, needle string) bool { - for _, key := range haystack { - if key == needle { - return true - } - } - return false -} - -func quoteIdent(name string) string { - return pgx.Identifier{name}.Sanitize() -} - -func quoteLiteral(name string) string { - return "'" + strings.Replace(name, "'", "''", -1) + "'" -} - func (p *Postgresql) fullTableName(name string) string { return quoteIdent(p.Schema) + "." + quoteIdent(name) } -func deriveDatatype(value interface{}) string { - var datatype string - - switch value.(type) { - case bool: - datatype = "boolean" - case uint64: - datatype = "int8" - case int64: - datatype = "int8" - case float64: - datatype = "float8" - case string: - datatype = "text" - default: - datatype = "text" - log.Printf("E! 
Unknown datatype %T(%v)", value, value) - } - return datatype -} - var sampleConfig = ` ## specify address via a url matching: ## postgres://[pqgotest[:password]]@localhost[/dbname]\ @@ -118,262 +92,41 @@ var sampleConfig = ` # schema = "public" ## Use jsonb datatype for tags - # tags_as_jsonb = true + # tags_as_jsonb = false ## Use jsonb datatype for fields - # fields_as_jsonb = true + # fields_as_jsonb = false ` func (p *Postgresql) SampleConfig() string { return sampleConfig } func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } -func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { - var columns []string - var pk []string - var sql []string - - pk = append(pk, quoteIdent("time")) - columns = append(columns, "time timestamptz") - - // handle tags if necessary - if len(metric.Tags()) > 0 { - if p.TagsAsForeignkeys { - // tags in separate table - var tag_columns []string - var tag_columndefs []string - columns = append(columns, "tag_id int") - - if p.TagsAsJsonb { - tag_columns = append(tag_columns, "tags") - tag_columndefs = append(tag_columndefs, "tags jsonb") - } else { - for column, _ := range metric.Tags() { - tag_columns = append(tag_columns, quoteIdent(column)) - tag_columndefs = append(tag_columndefs, fmt.Sprintf("%s text", quoteIdent(column))) - } - } - table := quoteIdent(metric.Name() + p.TagTableSuffix) - sql = append(sql, fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s(tag_id serial primary key,%s,UNIQUE(%s))", table, strings.Join(tag_columndefs, ","), strings.Join(tag_columns, ","))) - } else { - // tags in measurement table - if p.TagsAsJsonb { - columns = append(columns, "tags jsonb") - } else { - for column, _ := range metric.Tags() { - pk = append(pk, quoteIdent(column)) - columns = append(columns, fmt.Sprintf("%s text", quoteIdent(column))) - } - } - } - } - - if p.FieldsAsJsonb { - columns = append(columns, "fields jsonb") - } else { - var datatype string - for column, v := range metric.Fields() { - datatype = deriveDatatype(v) - columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(column), datatype)) - } - } - - query := strings.Replace(p.TableTemplate, "{TABLE}", p.fullTableName(metric.Name()), -1) - query = strings.Replace(query, "{TABLELITERAL}", quoteLiteral(p.fullTableName(metric.Name())), -1) - query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1) - query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) - - sql = append(sql, query) - return strings.Join(sql, ";") -} - -func (p *Postgresql) generateInsert(tablename string, columns []string) string { - var placeholder, quoted []string - for i, column := range columns { - placeholder = append(placeholder, fmt.Sprintf("$%d", i+1)) - quoted = append(quoted, quoteIdent(column)) - } - - return fmt.Sprintf("INSERT INTO %s(%s) VALUES(%s)", p.fullTableName(tablename), strings.Join(quoted, ","), strings.Join(placeholder, ",")) -} - -func (p *Postgresql) tableExists(tableName string) bool { - stmt := "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname = $2;" - result, err := p.db.Exec(stmt, tableName, p.Schema) - if err != nil { - log.Printf("E! 
Error checking for existence of metric table %s: %v", tableName, err) - return false - } - if count, _ := result.RowsAffected(); count == 1 { - p.Tables[tableName] = true - return true - } - return false -} - -func (p *Postgresql) getTagId(metric telegraf.Metric) (int, error) { - var tag_id int - var where_columns []string - var where_values []interface{} - tablename := metric.Name() - - if p.TagsAsJsonb { - if len(metric.Tags()) > 0 { - d, err := buildJsonbTags(metric.Tags()) - if err != nil { - return tag_id, err - } - - where_columns = append(where_columns, "tags") - where_values = append(where_values, d) - } - } else { - for column, value := range metric.Tags() { - where_columns = append(where_columns, column) - where_values = append(where_values, value) - } - } - - var where_parts []string - for i, column := range where_columns { - where_parts = append(where_parts, fmt.Sprintf("%s = $%d", quoteIdent(column), i+1)) - } - query := fmt.Sprintf("SELECT tag_id FROM %s WHERE %s", p.fullTableName(tablename+p.TagTableSuffix), strings.Join(where_parts, " AND ")) - - err := p.db.QueryRow(query, where_values...).Scan(&tag_id) - if err != nil { - query := p.generateInsert(tablename+p.TagTableSuffix, where_columns) + " RETURNING tag_id" - err := p.db.QueryRow(query, where_values...).Scan(&tag_id) - if err != nil { - // check if insert error was caused by column mismatch - retry := false - if p.TagsAsJsonb == false { - log.Printf("E! Error during insert: %v", err) - tablename := tablename + p.TagTableSuffix - columns := where_columns - var quoted_columns []string - for _, column := range columns { - quoted_columns = append(quoted_columns, quoteLiteral(column)) - } - query := "SELECT c FROM unnest(array[%s]) AS c WHERE NOT EXISTS(SELECT 1 FROM information_schema.columns WHERE column_name=c AND table_schema=$1 AND table_name=$2)" - query = fmt.Sprintf(query, strings.Join(quoted_columns, ",")) - result, err := p.db.Query(query, p.Schema, tablename) - if err != nil { - return tag_id, err - } - defer result.Close() - - // some columns are missing - var column, datatype string - for result.Next() { - err := result.Scan(&column) - if err != nil { - log.Println(err) - } - for i, name := range columns { - if name == column { - datatype = deriveDatatype(where_values[i]) - } - } - query := "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;" - _, err = p.db.Exec(fmt.Sprintf(query, p.fullTableName(tablename), quoteIdent(column), datatype)) - if err != nil { - return tag_id, err - } - retry = true - } - } - - // We added some columns and insert might work now. Try again immediately to - // avoid long lead time in getting metrics when there are several columns missing - // from the original create statement and they get added in small drops. 
- if retry { - err := p.db.QueryRow(query, where_values...).Scan(&tag_id) - if err != nil { - return tag_id, err - } - } - } - } - return tag_id, nil -} - -func buildJsonbTags(tags map[string]string) ([]byte, error) { - js := make(map[string]interface{}) - for column, value := range tags { - js[column] = value - } - - return buildJsonb(js) -} - -func buildJsonb(data map[string]interface{}) ([]byte, error) { - if len(data) > 0 { - d, err := json.Marshal(data) - if err != nil { - return d, err - } - } - return nil, nil -} - func (p *Postgresql) Write(metrics []telegraf.Metric) error { - batches := make(map[string][]interface{}) - params := make(map[string][]string) - colmap := make(map[string][]string) - tabmap := make(map[string]string) - + toInsert := make(map[string][]*colsAndValues) for _, metric := range metrics { tablename := metric.Name() // create table if needed - if p.Tables[tablename] == false && p.tableExists(tablename) == false { + if p.tables.exists(p.Schema, tablename) == false { createStmt := p.generateCreateTable(metric) _, err := p.db.Exec(createStmt) if err != nil { log.Printf("E! Creating table failed: statement: %v, error: %v", createStmt, err) return err } - p.Tables[tablename] = true + p.tables.add(tablename) } columns := []string{"time"} values := []interface{}{metric.Time()} - - if len(metric.Tags()) > 0 { - if p.TagsAsForeignkeys { - // tags in separate table - tag_id, err := p.getTagId(metric) - if err != nil { - return err - } - columns = append(columns, "tag_id") - values = append(values, tag_id) - } else { - // tags in measurement table - if p.TagsAsJsonb { - d, err := buildJsonbTags(metric.Tags()) - if err != nil { - return err - } - - if d != nil { - columns = append(columns, "tags") - values = append(values, d) - } - } else { - var keys []string - fields := metric.Tags() - for column := range fields { - keys = append(keys, column) - } - sort.Strings(keys) - for _, column := range keys { - columns = append(columns, column) - values = append(values, fields[column]) - } - } - } + tagColumns, tagValues, err := p.prepareTags(metric) + if err != nil { + return err + } + if tagColumns != nil { + columns = append(columns, tagColumns...) + values = append(values, tagValues...) } if p.FieldsAsJsonb { @@ -382,7 +135,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { return err } - columns = append(columns, "fields") + columns = append(columns, fieldsJSONColumn) values = append(values, d) } else { var keys []string @@ -397,89 +150,102 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { } } - var table_and_cols string - var placeholder, quoted_columns []string - for _, column := range columns { - quoted_columns = append(quoted_columns, quoteIdent(column)) + newValues := &colsAndValues{ + cols: columns, + vals: values, } - table_and_cols = fmt.Sprintf("%s(%s)", p.fullTableName(tablename), strings.Join(quoted_columns, ",")) - batches[table_and_cols] = append(batches[table_and_cols], values...) 
- for i, _ := range columns { - i += len(params[table_and_cols]) * len(columns) - placeholder = append(placeholder, fmt.Sprintf("$%d", i+1)) - } - params[table_and_cols] = append(params[table_and_cols], strings.Join(placeholder, ",")) - colmap[table_and_cols] = columns - tabmap[table_and_cols] = tablename + toInsert[tablename] = append(toInsert[tablename], newValues) + } + + return p.insertBatches(toInsert) +} + +func (p *Postgresql) prepareTags(metric telegraf.Metric) ([]string, []interface{}, error) { + if len(metric.Tags()) == 0 { + return nil, nil, nil } - for table_and_cols, values := range batches { - sql := fmt.Sprintf("INSERT INTO %s VALUES (%s)", table_and_cols, strings.Join(params[table_and_cols], "),(")) - _, err := p.db.Exec(sql, values...) + if p.TagsAsForeignkeys { + // tags in separate table + tagID, err := p.getTagID(metric) + if err != nil { + return nil, nil, err + } + return []string{tagIDColumn}, []interface{}{tagID}, nil + } + // tags in measurement table + if p.TagsAsJsonb { + d, err := buildJsonbTags(metric.Tags()) if err != nil { + return nil, nil, err + } + + if d != nil { + return []string{tagsJSONColumn}, []interface{}{d}, nil + } + return nil, nil, nil + + } + + var keys []string + tags := metric.Tags() + for column := range tags { + keys = append(keys, column) + } + sort.Strings(keys) + numColumns := len(keys) + var columns = make([]string, numColumns) + var values = make([]interface{}, numColumns) + for i, column := range keys { + columns[i] = column + values[i] = tags[column] + } + return columns, values, nil +} + +type colsAndValues struct { + cols []string + vals []interface{} +} + +// insertBatches takes batches of data to be inserted. The batches are mapped +// by the target table, and each batch contains the columns and values for those +// columns that will generate the INSERT statement. +// On column mismatch an attempt is made to create the column and try to reinsert. +func (p *Postgresql) insertBatches(batches map[string][]*colsAndValues) error { + for tableName, colsAndValues := range batches { + for _, row := range colsAndValues { + sql := p.generateInsert(tableName, row.cols) + _, err := p.db.Exec(sql, row.vals...) + if err == nil { + continue + } + // check if insert error was caused by column mismatch + if p.FieldsAsJsonb { + return err + } + + log.Printf("W! Possible column mismatch while inserting new metrics: %v", err) + retry := false - if p.FieldsAsJsonb == false { - log.Printf("E! 
Error during insert: %v", err) - tablename := tabmap[table_and_cols] - columns := colmap[table_and_cols] - var quoted_columns []string - for _, column := range columns { - quoted_columns = append(quoted_columns, quoteLiteral(column)) - } - query := "SELECT c FROM unnest(array[%s]) AS c WHERE NOT EXISTS(SELECT 1 FROM information_schema.columns WHERE column_name=c AND table_schema=$1 AND table_name=$2)" - query = fmt.Sprintf(query, strings.Join(quoted_columns, ",")) - result, err := p.db.Query(query, p.Schema, tablename) - if err != nil { - return err - } - defer result.Close() - - // some columns are missing - var column, datatype string - for result.Next() { - err := result.Scan(&column) - if err != nil { - log.Println(err) - } - for i, name := range columns { - if name == column { - datatype = deriveDatatype(values[i]) - } - } - query := "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;" - _, err = p.db.Exec(fmt.Sprintf(query, p.fullTableName(tablename), quoteIdent(column), datatype)) - if err != nil { - return err - } - retry = true - } + retry, err = p.addMissingColumns(tableName, row.cols, row.vals) + if err != nil { + log.Printf("E! Could not fix column mismatch: %v", err) + return err } // We added some columns and insert might work now. Try again immediately to // avoid long lead time in getting metrics when there are several columns missing // from the original create statement and they get added in small drops. if retry { - _, err = p.db.Exec(sql, values...) + _, err = p.db.Exec(sql, row.vals...) } if err != nil { return err } } } - return nil -} - -func init() { - outputs.Add("postgresql", func() telegraf.Output { return newPostgresql() }) -} -func newPostgresql() *Postgresql { - return &Postgresql{ - Schema: "public", - TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", - TagsAsJsonb: true, - TagTableSuffix: "_tag", - FieldsAsJsonb: true, - } + return nil } diff --git a/plugins/outputs/postgresql/postgresql_integration_test.go b/plugins/outputs/postgresql/postgresql_integration_test.go new file mode 100644 index 0000000000000..1fdbe0207ed33 --- /dev/null +++ b/plugins/outputs/postgresql/postgresql_integration_test.go @@ -0,0 +1,273 @@ +package postgresql + +import ( + "database/sql" + "fmt" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + _ "github.com/jackc/pgx/stdlib" + "github.com/stretchr/testify/assert" +) + +func prepareAndConnect(t *testing.T, foreignTags, jsonTags, jsonFields bool) (telegraf.Metric, *sql.DB, *Postgresql) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + testAddress := "postgres://postgres@localhost:5432/postgres?sslmode=disable" + + testMetric := testMetric("metric name", "tag1", int(1)) + + postgres := &Postgresql{ + Address: testAddress, + Schema: "public", + TagsAsForeignkeys: foreignTags, + TagsAsJsonb: jsonTags, + FieldsAsJsonb: jsonFields, + TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", + TagTableSuffix: "_tags", + } + + // drop metric tables if exists + + db, err := sql.Open("pgx", testAddress) + assert.NoError(t, err, "Could not connect to test db") + + _, err = db.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS "%s"`, testMetric.Name())) + assert.NoError(t, err, "Could not prepare db") + _, err = db.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS "%s%s"`, testMetric.Name(), postgres.TagTableSuffix)) + assert.NoError(t, err, "Could not prepare db") + + err = postgres.Connect() + assert.NoError(t, err, "Could not connect") + return 
testMetric, db, postgres +} + +// testMetric Returns a simple test point: +// measurement -> name +// tags -> "tag":tag +// value -> "value": value +// time -> time.Now().UTC() +func testMetric(name string, tag string, value interface{}) telegraf.Metric { + if value == nil { + panic("Cannot use a nil value") + } + tags := map[string]string{"tag": tag} + pt, _ := metric.New( + name, + tags, + map[string]interface{}{"value": value}, + time.Now().UTC(), + ) + return pt +} + +func TestWriteToPostgres(t *testing.T) { + testMetric, dbConn, postgres := prepareAndConnect(t, false, false, false) + writeAndAssertSingleMetricNoJSON(t, testMetric, dbConn, postgres) +} + +func TestWriteToPostgresJsonTags(t *testing.T) { + tagsAsForeignKey := false + tagsAsJSON := true + fieldsAsJSON := false + testMetric, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) + // insert first metric + err := postgres.Write([]telegraf.Metric{testMetric}) + assert.NoError(t, err, "Could not write") + + // should have created table, all columns in the same table + row := dbConn.QueryRow(fmt.Sprintf(`SELECT time, tags, value FROM "%s"`, testMetric.Name())) + var ts time.Time + var tags string + var value int64 + err = row.Scan(&ts, &tags, &value) + assert.NoError(t, err, "Could not check test results") + + sentTag, _ := testMetric.GetTag("tag") + sentTagJSON := fmt.Sprintf(`{"tag": "%s"}`, sentTag) + sentValue, _ := testMetric.GetField("value") + sentTs := testMetric.Time() + // postgres doesn't support nano seconds in timestamp + sentTsNanoSecondOffset := sentTs.Nanosecond() + nanoSeconds := sentTsNanoSecondOffset % 1000 + sentTs = sentTs.Add(time.Duration(-nanoSeconds) * time.Nanosecond) + if !ts.UTC().Equal(sentTs) || tags != sentTagJSON || value != sentValue.(int64) { + assert.Fail(t, fmt.Sprintf("Expected: %v, %v, %v; Received: %v, %v, %v", + sentTs, sentTagJSON, sentValue, + ts.UTC(), tags, value)) + } +} + +func TestWriteToPostgresJsonTagsAsForeignTable(t *testing.T) { + tagsAsForeignKey := true + tagsAsJSON := true + fieldsAsJSON := false + testMetric, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) + // insert first metric + err := postgres.Write([]telegraf.Metric{testMetric}) + assert.NoError(t, err, "Could not write") + + // should have created table, all columns in the same table + row := dbConn.QueryRow(fmt.Sprintf(`SELECT time, tag_id, value FROM "%s"`, testMetric.Name())) + var ts time.Time + var tagID int64 + var value int64 + err = row.Scan(&ts, &tagID, &value) + assert.NoError(t, err, "Could not check test results") + + sentValue, _ := testMetric.GetField("value") + sentTs := testMetric.Time() + // postgres doesn't support nano seconds in timestamp + sentTsNanoSecondOffset := sentTs.Nanosecond() + nanoSeconds := sentTsNanoSecondOffset % 1000 + sentTs = sentTs.Add(time.Duration(-nanoSeconds) * time.Nanosecond) + if !ts.UTC().Equal(sentTs) || tagID != 1 || value != sentValue.(int64) { + assert.Fail(t, fmt.Sprintf("Expected: %v, %v, %v; Received: %v, %v, %v", + sentTs, 1, sentValue, + ts.UTC(), tagID, value)) + } + + sentTag, _ := testMetric.GetTag("tag") + sentTagJSON := fmt.Sprintf(`{"tag": "%s"}`, sentTag) + row = dbConn.QueryRow(fmt.Sprintf(`SELECT tag_id, tags FROM "%s%s"`, testMetric.Name(), postgres.TagTableSuffix)) + tagID = 0 + var tags string + err = row.Scan(&tagID, &tags) + assert.NoError(t, err, "Could not check test results") + assert.Equal(t, int64(1), tagID) + assert.Equal(t, sentTagJSON, tags) +} + +func 
TestWriteToPostgresMultipleRowsOneTag(t *testing.T) { + tagsAsForeignKey := true + tagsAsJSON := true + fieldsAsJSON := false + testMetric, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) + // insert first metric + err := postgres.Write([]telegraf.Metric{testMetric, testMetric}) + assert.NoError(t, err, "Could not write") + + // should have two rows + row := dbConn.QueryRow(fmt.Sprintf(`SELECT count(*) FROM "%s"`, testMetric.Name())) + var count int64 + err = row.Scan(&count) + assert.NoError(t, err, "Could not check test results") + assert.Equal(t, int64(2), count) + + sentTag, _ := testMetric.GetTag("tag") + sentTagJSON := fmt.Sprintf(`{"tag": "%s"}`, sentTag) + row = dbConn.QueryRow(fmt.Sprintf(`SELECT tag_id, tags FROM "%s%s"`, testMetric.Name(), postgres.TagTableSuffix)) + var tagID int64 + var tags string + err = row.Scan(&tagID, &tags) + assert.NoError(t, err, "Could not check test results") + assert.Equal(t, int64(1), tagID) + assert.Equal(t, sentTagJSON, tags) +} + +func TestWriteToPostgresAddNewTag(t *testing.T) { + tagsAsForeignKey := true + tagsAsJSON := true + fieldsAsJSON := false + testMetricWithOneTag, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) + testMetricWithOneMoreTag := testMetric("metric name", "tag1", int(2)) + testMetricWithOneMoreTag.AddTag("second_tag", "tag2") + // insert first two metric + err := postgres.Write([]telegraf.Metric{testMetricWithOneTag, testMetricWithOneMoreTag}) + assert.NoError(t, err, "Could not write") + + // should have two rows + row := dbConn.QueryRow(fmt.Sprintf(`SELECT count(*) FROM "%s"`, testMetricWithOneTag.Name())) + var count int64 + err = row.Scan(&count) + assert.NoError(t, err, "Could not check test results") + assert.Equal(t, int64(2), count) + + // and two tagsets + sentTag, _ := testMetricWithOneTag.GetTag("tag") + sentTagJSON := fmt.Sprintf(`{"tag": "%s"}`, sentTag) + row = dbConn.QueryRow(fmt.Sprintf(`SELECT tags FROM "%s%s" WHERE tag_id=1`, testMetricWithOneTag.Name(), postgres.TagTableSuffix)) + var tags string + err = row.Scan(&tags) + assert.NoError(t, err, "Could not check test results") + assert.Equal(t, sentTagJSON, tags) + + secondSentTagsJSON := `{"tag": "tag1", "second_tag": "tag2"}` + row = dbConn.QueryRow(fmt.Sprintf(`SELECT tags FROM "%s%s" WHERE tag_id=2`, testMetricWithOneMoreTag.Name(), postgres.TagTableSuffix)) + err = row.Scan(&tags) + assert.NoError(t, err, "Could not check test results") + assert.Equal(t, secondSentTagsJSON, tags) + + // insert new point with a third tagset + testMetricWithThirdTag := testMetric("metric name", "tag1", int(2)) + testMetricWithThirdTag.AddTag("third_tag", "tag3") + err = postgres.Write([]telegraf.Metric{testMetricWithThirdTag}) + assert.NoError(t, err, "Could not write") + thirdSentTagsJSON := `{"tag": "tag1", "third_tag": "tag3"}` + row = dbConn.QueryRow(fmt.Sprintf(`SELECT tags FROM "%s%s" WHERE tag_id=3`, testMetricWithThirdTag.Name(), postgres.TagTableSuffix)) + err = row.Scan(&tags) + assert.NoError(t, err, "Could not check test results") + assert.Equal(t, thirdSentTagsJSON, tags) +} + +func TestWriteToPostgresAddNewField(t *testing.T) { + testMetric, dbConn, postgres := prepareAndConnect(t, false, false, false) + // insert first metric + writeAndAssertSingleMetricNoJSON(t, testMetric, dbConn, postgres) + + //insert second metric with one more field + testMetric.AddField("field2", 1.0) + testMetric.SetTime(time.Now()) + err := postgres.Write([]telegraf.Metric{testMetric}) + assert.NoError(t, 
err, "Could not write") + + rows, err := dbConn.Query(fmt.Sprintf(`SELECT time, tag, value, field2 FROM "%s" ORDER BY time ASC`, testMetric.Name())) + assert.NoError(t, err, "Could not check written results") + var ts time.Time + var tag string + var value sql.NullInt64 + var field2 sql.NullFloat64 + rowNum := 1 + for rows.Next() { + rows.Scan(&ts, &tag, &value, &field2) + if rowNum == 1 { + assert.False(t, field2.Valid) + } else if rowNum == 2 { + assert.Equal(t, 1.0, field2.Float64) + } else { + assert.FailNow(t, "more rows than expected") + } + rowNum++ + } + +} + +func writeAndAssertSingleMetricNoJSON(t *testing.T, testMetric telegraf.Metric, dbConn *sql.DB, postgres *Postgresql) { + err := postgres.Write([]telegraf.Metric{testMetric}) + assert.NoError(t, err, "Could not write") + + // should have created table, all columns in the same table + row := dbConn.QueryRow(fmt.Sprintf(`SELECT time, tag, value FROM "%s"`, testMetric.Name())) + var ts time.Time + var tag string + var value int64 + err = row.Scan(&ts, &tag, &value) + assert.NoError(t, err, "Could not check test results") + + sentTag, _ := testMetric.GetTag("tag") + sentValue, _ := testMetric.GetField("value") + sentTs := testMetric.Time() + // postgres doesn't support nano seconds in timestamp + sentTsNanoSecondOffset := sentTs.Nanosecond() + nanoSeconds := sentTsNanoSecondOffset % 1000 + sentTs = sentTs.Add(time.Duration(-nanoSeconds) * time.Nanosecond) + if !ts.UTC().Equal(sentTs) || tag != sentTag || value != sentValue.(int64) { + assert.Fail(t, fmt.Sprintf("Expected: %v, %v, %v; Received: %v, %v, %v", + sentTs, sentTag, sentValue, + ts.UTC(), tag, value)) + } +} diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 3f0863ce427c7..4c4bf34450016 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -1,68 +1,182 @@ package postgresql import ( + "database/sql" + "fmt" "testing" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - + _ "github.com/jackc/pgx/stdlib" "github.com/stretchr/testify/assert" ) -func TestPostgresqlQuote(t *testing.T) { - assert.Equal(t, `"foo"`, quoteIdent("foo")) - assert.Equal(t, `"fo'o"`, quoteIdent("fo'o")) - assert.Equal(t, `"fo""o"`, quoteIdent("fo\"o")) - - assert.Equal(t, "'foo'", quoteLiteral("foo")) - assert.Equal(t, "'fo''o'", quoteLiteral("fo'o")) - assert.Equal(t, "'fo\"o'", quoteLiteral("fo\"o")) -} - -func TestPostgresqlCreateStatement(t *testing.T) { - p := newPostgresql() +func TestWrite(t *testing.T) { timestamp := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) - - var m telegraf.Metric - m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,fields jsonb)`, p.generateCreateTable(m)) - - m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,tags jsonb,fields jsonb)`, p.generateCreateTable(m)) - - p.TagsAsJsonb = false - p.FieldsAsJsonb = false - - m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"f" float8)`, p.generateCreateTable(m)) - - m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time 
timestamptz,"i" int8)`, p.generateCreateTable(m)) - - m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"k" text,"i" int8)`, p.generateCreateTable(m)) - + oneMetric, _ := metric.New("m", map[string]string{"t": "tv"}, map[string]interface{}{"f": 1}, timestamp) + noTags, _ := metric.New("m", nil, map[string]interface{}{"f": 1}, timestamp) + testCases := []struct { + desc string + input []telegraf.Metric + fieldsAsJSON bool + execs []sql.Result + expectedExecQueries []string + execErrs []error + expectErr string + }{ + { + desc: "no metrics, no error", + input: []telegraf.Metric{}, + expectErr: "", + }, { + desc: "metric table not cached, error on creating it", + input: []telegraf.Metric{oneMetric}, + execs: []sql.Result{nil}, + execErrs: []error{fmt.Errorf("error on first exec")}, + expectErr: "error on first exec", + }, { + desc: "metric table not cached, gets cached, no tags, fields as json, error on insert", + input: []telegraf.Metric{noTags}, + fieldsAsJSON: true, + execs: []sql.Result{nil, nil}, + execErrs: []error{nil, fmt.Errorf("error on batch insert")}, + expectErr: "error on batch insert", + }, { + desc: "metric table not cached, gets cached, has tags, json fields, all good", + input: []telegraf.Metric{oneMetric}, + fieldsAsJSON: true, + execs: []sql.Result{nil, nil}, + execErrs: []error{nil, nil}, + expectedExecQueries: []string{ + `CREATE TABLE IF NOT EXISTS "a"."m"(time timestamptz,"t" text,fields jsonb)`, + `INSERT INTO "a"."m"("time","t","fields") VALUES($1,$2,$3)`}, + }, { + desc: "metric table not cached, gets cached, has tags, all good", + input: []telegraf.Metric{oneMetric}, + execs: []sql.Result{nil, nil}, + execErrs: []error{nil, nil}, + expectedExecQueries: []string{ + `CREATE TABLE IF NOT EXISTS "a"."m"(time timestamptz,"t" text,"f" int8)`, + `INSERT INTO "a"."m"("time","t","f") VALUES($1,$2,$3)`}, + }, + } + + for _, testCase := range testCases { + p := &Postgresql{ + tables: &mockTk{tables: make(map[string]bool)}, + TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", + Schema: "a", + FieldsAsJsonb: testCase.fieldsAsJSON, + db: &mockDb{ + exec: testCase.execs, + execErr: testCase.execErrs, + expectedQ: testCase.expectedExecQueries, + }} + err := p.Write(testCase.input) + if testCase.expectErr != "" { + assert.EqualError(t, err, testCase.expectErr, testCase.desc) + } else { + assert.Nil(t, err, testCase.desc) + } + } +} +func TestInsertBatches(t *testing.T) { + sampleData := map[string][]*colsAndValues{ + "tab": { + { + cols: []string{"a"}, + vals: []interface{}{1}, + }, + }, + } + + testCases := []struct { + input map[string][]*colsAndValues + desc string + resultsFromExec []sql.Result + errorsFromExec []error + errorOnQuery error + fieldsAsJSON bool + expectErr string + }{ + { + desc: "no batches, no errors", + input: make(map[string][]*colsAndValues), + errorsFromExec: []error{fmt.Errorf("should not have called exec")}, + }, { + desc: "error returned on first insert, fields as json", + input: sampleData, + resultsFromExec: []sql.Result{nil}, + errorsFromExec: []error{fmt.Errorf("error on first insert")}, + fieldsAsJSON: true, + expectErr: "error on first insert", + }, { + desc: "error returned on first insert, error on add column", + input: sampleData, + resultsFromExec: []sql.Result{nil}, + errorsFromExec: []error{fmt.Errorf("error on first insert")}, + errorOnQuery: fmt.Errorf("error on query"), + expectErr: "error on query", + 
}, { + desc: "no error on insert", + input: sampleData, + resultsFromExec: []sql.Result{nil}, + errorsFromExec: []error{nil}, + }, + } + + for _, testCase := range testCases { + m := &mockDb{exec: testCase.resultsFromExec, + execErr: testCase.errorsFromExec, + queryErr: testCase.errorOnQuery} + p := &Postgresql{ + db: m, + FieldsAsJsonb: testCase.fieldsAsJSON, + } + + err := p.insertBatches(testCase.input) + if testCase.expectErr != "" { + assert.EqualError(t, err, testCase.expectErr) + } else { + assert.Nil(t, err) + } + } } -func TestPostgresqlInsertStatement(t *testing.T) { - p := newPostgresql() - - p.TagsAsJsonb = false - p.FieldsAsJsonb = false +type mockDb struct { + currentExec int + exec []sql.Result + expectedQ []string + execErr []error + query *sql.Rows + queryErr error +} - sql := p.generateInsert("m", []string{"time", "f"}) - assert.Equal(t, `INSERT INTO "public"."m"("time","f") VALUES($1,$2)`, sql) +func (m *mockDb) Exec(query string, args ...interface{}) (sql.Result, error) { + tmp := m.currentExec + m.currentExec++ + if m.expectedQ != nil && m.expectedQ[tmp] != query { + return nil, fmt.Errorf("unexpected query, got: '%s' expected: %s", query, m.expectedQ[tmp]) + } - sql = p.generateInsert("m", []string{"time", "i"}) - assert.Equal(t, `INSERT INTO "public"."m"("time","i") VALUES($1,$2)`, sql) + return m.exec[tmp], m.execErr[tmp] +} +func (m *mockDb) Query(query string, args ...interface{}) (*sql.Rows, error) { + return m.query, m.queryErr +} +func (m *mockDb) QueryRow(query string, args ...interface{}) *sql.Row { return nil } +func (m *mockDb) Close() error { return nil } - sql = p.generateInsert("m", []string{"time", "f", "i"}) - assert.Equal(t, `INSERT INTO "public"."m"("time","f","i") VALUES($1,$2,$3)`, sql) +type mockTk struct { + tables map[string]bool +} - sql = p.generateInsert("m", []string{"time", "k", "i"}) - assert.Equal(t, `INSERT INTO "public"."m"("time","k","i") VALUES($1,$2,$3)`, sql) +func (m *mockTk) add(tableName string) { + m.tables[tableName] = true +} - sql = p.generateInsert("m", []string{"time", "k1", "k2", "i"}) - assert.Equal(t, `INSERT INTO "public"."m"("time","k1","k2","i") VALUES($1,$2,$3,$4)`, sql) +func (m *mockTk) exists(schema, table string) bool { + _, exists := m.tables[table] + return exists } diff --git a/plugins/outputs/postgresql/table_keeper.go b/plugins/outputs/postgresql/table_keeper.go new file mode 100644 index 0000000000000..3b0fd45ac481f --- /dev/null +++ b/plugins/outputs/postgresql/table_keeper.go @@ -0,0 +1,47 @@ +package postgresql + +import ( + "log" +) + +const ( + tableExistsTemplate = "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname = $2;" +) + +type tableKeeper interface { + exists(schema, tableName string) bool + add(tableName string) +} + +type defTableKeeper struct { + Tables map[string]bool + db dbWrapper +} + +func newTableKeeper(db dbWrapper) tableKeeper { + return &defTableKeeper{ + Tables: make(map[string]bool), + db: db, + } +} + +func (t *defTableKeeper) exists(schema, tableName string) bool { + if _, ok := t.Tables[tableName]; ok { + return true + } + + result, err := t.db.Exec(tableExistsTemplate, tableName, schema) + if err != nil { + log.Printf("E! 
Error checking for existence of metric table %s: %v", tableName, err) + return false + } + if count, _ := result.RowsAffected(); count == 1 { + t.Tables[tableName] = true + return true + } + return false +} + +func (t *defTableKeeper) add(tableName string) { + t.Tables[tableName] = true +} diff --git a/plugins/outputs/postgresql/table_keeper_test.go b/plugins/outputs/postgresql/table_keeper_test.go new file mode 100644 index 0000000000000..0d7bb77bec307 --- /dev/null +++ b/plugins/outputs/postgresql/table_keeper_test.go @@ -0,0 +1,71 @@ +package postgresql + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewTableKeeper(t *testing.T) { + mock := &mockWr{} + tk := newTableKeeper(mock).(*defTableKeeper) + assert.Equal(t, mock, tk.db) + assert.Empty(t, tk.Tables) +} + +func TestTableKeeperAdd(t *testing.T) { + tk := newTableKeeper(nil).(*defTableKeeper) + tk.add("table") + tk.add("table2") + assert.Equal(t, 2, len(tk.Tables)) + assert.True(t, tk.Tables["table"]) + assert.True(t, tk.Tables["table2"]) + assert.False(t, tk.Tables["table3"]) + tk.add("table2") + assert.Equal(t, 2, len(tk.Tables)) +} + +func TestTableKeeperExists(t *testing.T) { + mock := &mockWr{} + tk := newTableKeeper(mock).(*defTableKeeper) + table := "table name" + + // table cached + tk.Tables[table] = true + mock.execErr = fmt.Errorf("should not call execute") + assert.True(t, tk.exists("", table)) + + // error on table exists query + mock.execErr = fmt.Errorf("error on query execute") + mock.expected = tableExistsTemplate + delete(tk.Tables, table) + assert.False(t, tk.exists("", table)) + assert.Equal(t, 0, len(tk.Tables)) + + // fetch from db, doesn't exist + mock.execErr = nil + mock.exec = &mockResult{} + assert.False(t, tk.exists("", table)) + + // fetch from db, exists + mock.exec = &mockResult{rows: 1} + assert.True(t, tk.exists("", table)) + assert.Equal(t, 1, len(tk.Tables)) + assert.True(t, tk.Tables[table]) +} + +type mockResult struct { + rows int64 + rowErr error + last int64 + lastErr error +} + +func (m *mockResult) LastInsertId() (int64, error) { + return m.last, m.lastErr +} + +func (m *mockResult) RowsAffected() (int64, error) { + return m.rows, m.rowErr +} diff --git a/plugins/outputs/postgresql/utils.go b/plugins/outputs/postgresql/utils.go new file mode 100644 index 0000000000000..801a2b6aac4d5 --- /dev/null +++ b/plugins/outputs/postgresql/utils.go @@ -0,0 +1,68 @@ +package postgresql + +import ( + "encoding/json" + "log" + "strings" + + "github.com/jackc/pgx" +) + +func buildJsonbTags(tags map[string]string) ([]byte, error) { + js := make(map[string]interface{}) + for column, value := range tags { + js[column] = value + } + + return buildJsonb(js) +} + +func buildJsonb(data map[string]interface{}) ([]byte, error) { + if len(data) > 0 { + d, err := json.Marshal(data) + if err != nil { + return nil, err + } + return d, nil + } + + return nil, nil +} + +func quoteIdent(name string) string { + return pgx.Identifier{name}.Sanitize() +} + +func quoteLiteral(name string) string { + return "'" + strings.Replace(name, "'", "''", -1) + "'" +} + +func deriveDatatype(value interface{}) string { + var datatype string + + switch value.(type) { + case bool: + datatype = "boolean" + case uint64: + datatype = "int8" + case int64: + datatype = "int8" + case float64: + datatype = "float8" + case string: + datatype = "text" + default: + datatype = "text" + log.Printf("E! 
Unknown datatype %T(%v)", value, value)
+	}
+	return datatype
+}
+
+func contains(haystack []string, needle string) bool {
+	for _, key := range haystack {
+		if key == needle {
+			return true
+		}
+	}
+	return false
+}

From 22aff8c35d20f24709811189a5ead5e40c8d611a Mon Sep 17 00:00:00 2001
From: Blagoj Atanasovski
Date: Sun, 14 Jul 2019 12:57:53 +0200
Subject: [PATCH 056/121] Optimize insert performance of metrics and tag_id
 lookup

A refactor was done that optimized several things:
1. Metrics are organized by measurement, and schema validation is done only once per measurement instead of once for every metric
2. Metric values are batched and inserted with binary COPY
3. Tag IDs are now cached with an LRU strategy

---
 plugins/outputs/postgresql/README.md          |  60 ++--
 .../outputs/postgresql/add_missing_columns.go |  72 -----
 .../postgresql/add_missing_columns_test.go    |  92 ------
 .../postgresql/columns/column_mapper.go       |  68 +++++
 .../postgresql/columns/columns_initializer.go | 139 +++++++++
 .../outputs/postgresql/columns/map_fields.go  |  18 ++
 .../outputs/postgresql/columns/map_tags.go    |  18 ++
 .../postgresql/columns/standard_columns.go    |  16 ++
 plugins/outputs/postgresql/create_table.go    |  72 -----
 .../outputs/postgresql/create_table_test.go   |  48 ----
 plugins/outputs/postgresql/db/db_wrapper.go   |  65 +++++
 plugins/outputs/postgresql/db_wrapper.go      |  43 ---
 plugins/outputs/postgresql/generate_insert.go |  24 --
 .../postgresql/generate_insert_test.go        |  39 ---
 plugins/outputs/postgresql/get_tag_id.go      |  87 ------
 plugins/outputs/postgresql/postgresql.go      | 237 ++++++----------
 .../postgresql/postgresql_integration_test.go | 150 ++++++++++
 plugins/outputs/postgresql/postgresql_test.go | 263 ++++++++----------
 plugins/outputs/postgresql/table_keeper.go    |  47 ----
 .../outputs/postgresql/table_keeper_test.go   |  71 -----
 plugins/outputs/postgresql/tables/manager.go  | 208 ++++++++++++++
 .../outputs/postgresql/tables/manager_test.go | 139 +++++++++
 plugins/outputs/postgresql/tags_cache.go      | 159 +++++++++++
 plugins/outputs/postgresql/transformer.go     |  72 +++++
 plugins/outputs/postgresql/utils.go           |  68 -----
 plugins/outputs/postgresql/utils/types.go     |  30 ++
 plugins/outputs/postgresql/utils/utils.go     | 168 +++++++++++
 .../outputs/postgresql/utils/utils_test.go    | 138 +++++++++
 28 files changed, 1624 insertions(+), 987 deletions(-)
 delete mode 100644 plugins/outputs/postgresql/add_missing_columns.go
 delete mode 100644 plugins/outputs/postgresql/add_missing_columns_test.go
 create mode 100644 plugins/outputs/postgresql/columns/column_mapper.go
 create mode 100644 plugins/outputs/postgresql/columns/columns_initializer.go
 create mode 100644 plugins/outputs/postgresql/columns/map_fields.go
 create mode 100644 plugins/outputs/postgresql/columns/map_tags.go
 create mode 100644 plugins/outputs/postgresql/columns/standard_columns.go
 delete mode 100644 plugins/outputs/postgresql/create_table.go
 delete mode 100644 plugins/outputs/postgresql/create_table_test.go
 create mode 100644 plugins/outputs/postgresql/db/db_wrapper.go
 delete mode 100644 plugins/outputs/postgresql/db_wrapper.go
 delete mode 100644 plugins/outputs/postgresql/generate_insert.go
 delete mode 100644 plugins/outputs/postgresql/generate_insert_test.go
 delete mode 100644 plugins/outputs/postgresql/get_tag_id.go
 delete mode 100644 plugins/outputs/postgresql/table_keeper.go
 delete mode 100644 plugins/outputs/postgresql/table_keeper_test.go
 create mode 100644 plugins/outputs/postgresql/tables/manager.go
 create mode 100644 plugins/outputs/postgresql/tables/manager_test.go
 create mode 100644 
plugins/outputs/postgresql/tags_cache.go
 create mode 100644 plugins/outputs/postgresql/transformer.go
 delete mode 100644 plugins/outputs/postgresql/utils.go
 create mode 100644 plugins/outputs/postgresql/utils/types.go
 create mode 100644 plugins/outputs/postgresql/utils/utils.go
 create mode 100644 plugins/outputs/postgresql/utils/utils_test.go

diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md
index b9d0020682023..2a38b7a08ab5a 100644
--- a/plugins/outputs/postgresql/README.md
+++ b/plugins/outputs/postgresql/README.md
@@ -1,36 +1,44 @@
 # PostgreSQL Output Plugin
 
-This output plugin writes all metrics to PostgreSQL.
+This output plugin writes all metrics to PostgreSQL.
+The plugin manages the schema automatically: it adds missing columns and checks whether existing ones have the proper type.
 
 ### Configuration:
 
 ```toml
 # Send metrics to postgres
 [[outputs.postgresql]]
-  address = "host=localhost user=postgres sslmode=verify-full"
-
-  ## Store tags as foreign keys in the metrics table. Default is false.
-  # tags_as_foreignkeys = false
-
-  ## Template to use for generating tables
-  ## Available Variables:
-  ##   {TABLE} - tablename as identifier
-  ##   {TABLELITERAL} - tablename as string literal
-  ##   {COLUMNS} - column definitions
-  ##   {KEY_COLUMNS} - comma-separated list of key columns (time + tags)
-
-  ## Default template
-  # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})"
-  ## Example for timescaledb
-  # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval, if_not_exists := true);"
-
-  ## Schema to create the tables into
-  # schema = "public"
-
-  ## Use jsonb datatype for tags. Default is true.
-  # tags_as_jsonb = false
-
-  ## Use jsonb datatype for fields. Default is true.
-  # fields_as_jsonb = false
+  address = "host=localhost user=postgres sslmode=verify-full"
+
+  ## Update existing tables to match the incoming metrics. Default is true.
+  # do_schema_updates = true
+
+  ## Store tags as foreign keys in the metrics table. Default is false.
+  # tags_as_foreignkeys = false
+
+  ## If tags_as_foreignkeys is set to true you can choose the number of tag sets to cache
+  ## per measurement (metric name). Default is 1000; if set to 0 the cache has no limit.
+  # cached_tagsets_per_measurement = 1000
+
+  ## Template to use for generating tables
+  ## Available Variables:
+  ##   {TABLE} - tablename as identifier
+  ##   {TABLELITERAL} - tablename as string literal
+  ##   {COLUMNS} - column definitions
+  ##   {KEY_COLUMNS} - comma-separated list of key columns (time + tags)
+
+  ## Default template
+  # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})"
+  ## Example for timescaledb
+  # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval, if_not_exists := true);"
+
+  ## Schema to create the tables into
+  # schema = "public"
+
+  ## Use jsonb datatype for tags. Default is false.
+  # tags_as_jsonb = false
+
+  ## Use jsonb datatype for fields. Default is false.
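+  ## When set to true, all fields of a metric are stored together in a single
+  ## jsonb column named "fields" instead of one typed column per field.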
+ # fields_as_jsonb = false ``` diff --git a/plugins/outputs/postgresql/add_missing_columns.go b/plugins/outputs/postgresql/add_missing_columns.go deleted file mode 100644 index 1d2be69bf3c55..0000000000000 --- a/plugins/outputs/postgresql/add_missing_columns.go +++ /dev/null @@ -1,72 +0,0 @@ -package postgresql - -import ( - "fmt" - "log" - "strings" -) - -func (p *Postgresql) addMissingColumns(tableName string, columns []string, values []interface{}) (bool, error) { - columnStatuses, err := p.whichColumnsAreMissing(columns, tableName) - if err != nil { - return false, err - } - - retry := false - for currentColumn, isMissing := range columnStatuses { - if !isMissing { - continue - } - - dataType := deriveDatatype(values[currentColumn]) - columnName := columns[currentColumn] - if err := p.addColumnToTable(columnName, dataType, tableName); err != nil { - return false, err - } - retry = true - } - - return retry, nil -} - -func prepareMissingColumnsQuery(columns []string) string { - var quotedColumns = make([]string, len(columns)) - for i, column := range columns { - quotedColumns[i] = quoteLiteral(column) - } - return fmt.Sprintf(missingColumnsTemplate, strings.Join(quotedColumns, ",")) -} - -// for a given array of columns x = [a, b, c ...] it returns an array of bools indicating -// if x[i] is missing -func (p *Postgresql) whichColumnsAreMissing(columns []string, tableName string) ([]bool, error) { - missingColumnsQuery := prepareMissingColumnsQuery(columns) - result, err := p.db.Query(missingColumnsQuery, p.Schema, tableName) - if err != nil { - return nil, err - } - defer result.Close() - columnStatus := make([]bool, len(columns)) - var isMissing bool - var columnName string - currentColumn := 0 - - for result.Next() { - err := result.Scan(&columnName, &isMissing) - if err != nil { - log.Println(err) - return nil, err - } - columnStatus[currentColumn] = isMissing - currentColumn++ - } - - return columnStatus, nil -} - -func (p *Postgresql) addColumnToTable(columnName, dataType, tableName string) error { - fullTableName := p.fullTableName(tableName) - addColumnQuery := fmt.Sprintf(addColumnTemplate, fullTableName, quoteIdent(columnName), dataType) - _, err := p.db.Exec(addColumnQuery) - return err -} diff --git a/plugins/outputs/postgresql/add_missing_columns_test.go b/plugins/outputs/postgresql/add_missing_columns_test.go deleted file mode 100644 index 7140847e03375..0000000000000 --- a/plugins/outputs/postgresql/add_missing_columns_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package postgresql - -import ( - "database/sql" - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -func prepareMissingColumnsQuery1(columns []string) string { - var quotedColumns = make([]string, len(columns)) - for i, column := range columns { - quotedColumns[i] = quoteLiteral(column) - } - return fmt.Sprintf(missingColumnsTemplate, strings.Join(quotedColumns, ",")) -} - -func TestPrepareMissingColumnsQuery(t *testing.T) { - columns := []string{} - assert.Equal(t, `WITH available AS (SELECT column_name as c FROM information_schema.columns WHERE table_schema = $1 and table_name = $2),`+ - `required AS (SELECT c FROM unnest(array []) AS c) `+ - `SELECT required.c, available.c IS NULL FROM required LEFT JOIN available ON required.c = available.c;`, - prepareMissingColumnsQuery(columns)) - columns = []string{"a", "b", "c"} - assert.Equal(t, `WITH available AS (SELECT column_name as c FROM information_schema.columns WHERE table_schema = $1 and table_name = $2),`+ - `required AS (SELECT c 
FROM unnest(array ['a','b','c']) AS c) `+ - `SELECT required.c, available.c IS NULL FROM required LEFT JOIN available ON required.c = available.c;`, - prepareMissingColumnsQuery(columns)) -} - -func TestWhichColumnsAreMissing(t *testing.T) { - mock := &mockWr{} - p := &Postgresql{db: mock} - - columns := []string{"col1"} - mock.queryErr = fmt.Errorf("error 1") - mock.expected = prepareMissingColumnsQuery(columns) - table := "tableName" - _, err := p.whichColumnsAreMissing(columns, table) - assert.Equal(t, err.Error(), "error 1") -} - -func TestAddColumnToTable(t *testing.T) { - mock := &mockWr{} - p := &Postgresql{db: mock, Schema: "pub"} - - column := "col1" - dataType := "text" - tableName := "table" - mock.execErr = fmt.Errorf("error 1") - mock.expected = `ALTER TABLE "pub"."table" ADD COLUMN IF NOT EXISTS "col1" text;` - err := p.addColumnToTable(column, dataType, tableName) - assert.EqualError(t, err, "error 1") - - mock.execErr = nil - assert.Nil(t, p.addColumnToTable(column, dataType, tableName)) - -} - -func (p *Postgresql) addColumnToTable1(columnName, dataType, tableName string) error { - fullTableName := p.fullTableName(tableName) - addColumnQuery := fmt.Sprintf(addColumnTemplate, fullTableName, quoteIdent(columnName), dataType) - _, err := p.db.Exec(addColumnQuery) - return err -} - -type mockWr struct { - expected string - exec sql.Result - execErr error - query *sql.Rows - queryErr error -} - -func (m *mockWr) Exec(query string, args ...interface{}) (sql.Result, error) { - if m.expected != query { - return nil, fmt.Errorf("unexpected query; exp: '%s'; got: '%s'", m.expected, query) - } - return m.exec, m.execErr -} -func (m *mockWr) Query(query string, args ...interface{}) (*sql.Rows, error) { - if m.expected != query { - return nil, fmt.Errorf("unexpected query; exp: '%s'; got: '%s'", m.expected, query) - } - return m.query, m.queryErr -} -func (m *mockWr) QueryRow(query string, args ...interface{}) *sql.Row { - return nil -} -func (m *mockWr) Close() error { return nil } diff --git a/plugins/outputs/postgresql/columns/column_mapper.go b/plugins/outputs/postgresql/columns/column_mapper.go new file mode 100644 index 0000000000000..99692997ab904 --- /dev/null +++ b/plugins/outputs/postgresql/columns/column_mapper.go @@ -0,0 +1,68 @@ +package columns + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" +) + +// Mapper knows how to generate the column details for the main and tags table in the db +type Mapper interface { + // Iterate through an array of 'metrics' visiting only those indexed by 'indices' + // and depending on 'tagsAsFK', 'tagsAsJSON', and 'fieldsAsJSON' generate the + // desired columns (their name, type and which role they play) for both the + // main metrics table in the DB, and if tagsAsFK == true for the tags table. + Target(indices []int, metrics []telegraf.Metric) (*utils.TargetColumns, *utils.TargetColumns) +} + +type defMapper struct { + initTargetColumns targetColumnInitializer + tagsAsFK bool + tagsAsJSON bool + fieldsAsJSON bool +} + +// NewMapper returns a new implementation of the columns.Mapper interface. 
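+//
+// A minimal usage sketch (illustrative only, not part of this patch), the
+// way the plugin wires it up in postgresql.go:
+//
+//	mapper := columns.NewMapper(false, false, false)
+//	cols, tagCols := mapper.Target([]int{0, 1}, metrics)
+//	// cols now holds the union of the time, tag and field columns of
+//	// metrics[0] and metrics[1]; tagCols is nil because tagsAsFK=false.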
+func NewMapper(tagsAsFK, tagsAsJSON, fieldsAsJSON bool) Mapper { + initializer := getInitialColumnsGenerator(tagsAsFK, tagsAsJSON, fieldsAsJSON) + return &defMapper{ + tagsAsFK: tagsAsFK, + tagsAsJSON: tagsAsJSON, + fieldsAsJSON: fieldsAsJSON, + initTargetColumns: initializer, + } +} + +// Iterate through an array of 'metrics' visiting only those indexed by 'indices' +// and depending on 'tagsAsFK', 'tagsAsJSON', and 'fieldsAsJSON' generate the +// desired columns (their name, type and which role they play) for both the +// main metrics table in the DB, and if tagsAsFK == true for the tags table. +func (d *defMapper) Target(indices []int, metrics []telegraf.Metric) (*utils.TargetColumns, *utils.TargetColumns) { + columns, tagColumns := d.initTargetColumns() + if d.tagsAsJSON && d.fieldsAsJSON { + // if json is used for both, that's all the columns you need + return columns, tagColumns + } + + alreadyMapped := map[string]bool{} + // Iterate the metrics indexed by 'indices' and populate all the resulting required columns + // e.g. metric1(tags:[t1], fields:[f1,f2]), metric2(tags:[t2],fields:[f2, f3]) + // => columns = [time, t1, f1, f2, t2, f3], tagColumns = nil + // if tagsAsFK == true + // columns = [time, tagID, f1, f2, f3], tagColumns = [tagID, t1, t2] + // if tagsAsFK == true && fieldsAsJSON = true + // cols = [time, tagID, fields], tagCols = [tagID, t1, t2] + for _, index := range indices { + metric := metrics[index] + if !d.tagsAsJSON { + whichColumns := columns + if d.tagsAsFK { + whichColumns = tagColumns + } + mapTags(metric.TagList(), alreadyMapped, whichColumns) + } + + mapFields(metric.FieldList(), alreadyMapped, columns) + } + + return columns, tagColumns +} diff --git a/plugins/outputs/postgresql/columns/columns_initializer.go b/plugins/outputs/postgresql/columns/columns_initializer.go new file mode 100644 index 0000000000000..5391dabe93f27 --- /dev/null +++ b/plugins/outputs/postgresql/columns/columns_initializer.go @@ -0,0 +1,139 @@ +package columns + +import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" + +// a function type that generates column details for the main, and tags table in the db +type targetColumnInitializer func() (*utils.TargetColumns, *utils.TargetColumns) + +// constants used for populating the 'targetColumnInit' map (for better readability) +const ( + cTagsAsFK = true + cTagsAsJSON = true + cFieldsAsJSON = true +) + +// Since some of the target columns for the tables in the database don't +// depend on the metrics received, but on the plugin config, we can have +// constant initializer functions. It is always known that the 'time' +// column goes first in the main table, then if the tags are kept in a +// separate table you need to add the 'tag_id' column... +// This map contains an initializer for all the combinations +// of (tagsAsFK, tagsAsJSON, fieldsAsJSON). 
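+//
+// For example (illustrative): getInitialColumnsGenerator(true, false, false)
+// resolves to tagsAsFKInit, so the main table starts out as (time, tag_id)
+// and the tag table as (tag_id serial); the concrete tag and field columns
+// are appended later while mapping the actual metrics.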
+func getInitialColumnsGenerator(tagsAsFK, tagsAsJSON, fieldsAsJSON bool) targetColumnInitializer { + return standardColumns[tagsAsFK][tagsAsJSON][fieldsAsJSON] +} + +var standardColumns = map[bool]map[bool]map[bool]targetColumnInitializer{ + cTagsAsFK: { + cTagsAsJSON: { + cFieldsAsJSON: tagsAsFKAndJSONAndFieldsAsJSONInit, + !cFieldsAsJSON: tagsAsFKAndJSONInit, + }, + !cTagsAsJSON: { + cFieldsAsJSON: tagsAsFKFieldsAsJSONInit, + !cFieldsAsJSON: tagsAsFKInit, + }, + }, + !cTagsAsFK: { + cTagsAsJSON: { + cFieldsAsJSON: tagsAndFieldsAsJSONInit, + !cFieldsAsJSON: tagsAsJSONInit, + }, + !cTagsAsJSON: { + cFieldsAsJSON: fieldsAsJSONInit, + !cFieldsAsJSON: vanillaColumns, + }, + }, +} + +func tagsAsFKAndJSONAndFieldsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { + return &utils.TargetColumns{ + Names: []string{TimeColumnName, TagIDColumnName, FieldsJSONColumn}, + DataTypes: []utils.PgDataType{TimeColumnDataType, TagIDColumnDataType, JSONColumnDataType}, + Target: map[string]int{TimeColumnName: 0, TagIDColumnName: 1, FieldsJSONColumn: 2}, + Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType, utils.FieldColType}, + }, &utils.TargetColumns{ + Names: []string{TagIDColumnName, TagsJSONColumn}, + DataTypes: []utils.PgDataType{TagIDColumnDataTypeAsPK, JSONColumnDataType}, + Target: map[string]int{TagIDColumnName: 0, TagsJSONColumn: 1}, + Roles: []utils.ColumnRole{utils.TagsIDColType, utils.TagColType}, + } +} + +func tagsAsFKAndJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { + return &utils.TargetColumns{ + Names: []string{TimeColumnName, TagIDColumnName}, + DataTypes: []utils.PgDataType{TimeColumnDataType, TagIDColumnDataType}, + Target: map[string]int{TimeColumnName: 0, TagIDColumnName: 1}, + Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType}, + }, &utils.TargetColumns{ + Names: []string{TagIDColumnName, TagsJSONColumn}, + DataTypes: []utils.PgDataType{TagIDColumnDataTypeAsPK, JSONColumnDataType}, + Target: map[string]int{TagIDColumnName: 0, TagsJSONColumn: 1}, + Roles: []utils.ColumnRole{utils.TagsIDColType, utils.FieldColType}, + } +} + +func tagsAsFKFieldsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { + return &utils.TargetColumns{ + Names: []string{TimeColumnName, TagIDColumnName, FieldsJSONColumn}, + DataTypes: []utils.PgDataType{TimeColumnDataType, TagIDColumnDataType, JSONColumnDataType}, + Target: map[string]int{TimeColumnName: 0, TagIDColumnName: 1, FieldsJSONColumn: 2}, + Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType, utils.FieldColType}, + }, &utils.TargetColumns{ + Names: []string{TagIDColumnName}, + DataTypes: []utils.PgDataType{TagIDColumnDataTypeAsPK}, + Target: map[string]int{TagIDColumnName: 0}, + Roles: []utils.ColumnRole{utils.TagsIDColType}, + } +} + +func tagsAsFKInit() (*utils.TargetColumns, *utils.TargetColumns) { + return &utils.TargetColumns{ + Names: []string{TimeColumnName, TagIDColumnName}, + DataTypes: []utils.PgDataType{TimeColumnDataType, TagIDColumnDataType}, + Target: map[string]int{TimeColumnName: 0, TagIDColumnName: 1}, + Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType}, + }, &utils.TargetColumns{ + Names: []string{TagIDColumnName}, + DataTypes: []utils.PgDataType{TagIDColumnDataTypeAsPK}, + Target: map[string]int{TagIDColumnName: 0}, + Roles: []utils.ColumnRole{utils.TagsIDColType}, + } +} + +func tagsAndFieldsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { + return &utils.TargetColumns{ + Names: []string{TimeColumnName, TagsJSONColumn, FieldsJSONColumn}, 
+ DataTypes: []utils.PgDataType{TimeColumnDataType, JSONColumnDataType, JSONColumnDataType}, + Target: map[string]int{TimeColumnName: 0, TagsJSONColumn: 1, FieldsJSONColumn: 2}, + Roles: []utils.ColumnRole{utils.TimeColType, utils.TagColType, utils.FieldColType}, + }, nil +} + +func tagsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { + return &utils.TargetColumns{ + Names: []string{TimeColumnName, TagsJSONColumn}, + DataTypes: []utils.PgDataType{TimeColumnDataType, JSONColumnDataType}, + Target: map[string]int{TimeColumnName: 0, TagsJSONColumn: 1}, + Roles: []utils.ColumnRole{utils.TimeColType, utils.TagColType}, + }, nil +} + +func fieldsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { + return &utils.TargetColumns{ + Names: []string{TimeColumnName, FieldsJSONColumn}, + DataTypes: []utils.PgDataType{TimeColumnDataType, JSONColumnDataType}, + Target: map[string]int{TimeColumnName: 0, FieldsJSONColumn: 1}, + Roles: []utils.ColumnRole{utils.TimeColType, utils.FieldColType}, + }, nil +} + +func vanillaColumns() (*utils.TargetColumns, *utils.TargetColumns) { + return &utils.TargetColumns{ + Names: []string{TimeColumnName}, + DataTypes: []utils.PgDataType{TimeColumnDataType}, + Target: map[string]int{TimeColumnName: 0}, + Roles: []utils.ColumnRole{utils.TimeColType}, + }, nil +} diff --git a/plugins/outputs/postgresql/columns/map_fields.go b/plugins/outputs/postgresql/columns/map_fields.go new file mode 100644 index 0000000000000..e905d5ddf66a0 --- /dev/null +++ b/plugins/outputs/postgresql/columns/map_fields.go @@ -0,0 +1,18 @@ +package columns + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" +) + +func mapFields(fieldList []*telegraf.Field, alreadyMapped map[string]bool, columns *utils.TargetColumns) { + for _, field := range fieldList { + if _, ok := alreadyMapped[field.Key]; !ok { + alreadyMapped[field.Key] = true + columns.Target[field.Key] = len(columns.Names) + columns.Names = append(columns.Names, field.Key) + columns.DataTypes = append(columns.DataTypes, utils.DerivePgDatatype(field.Value)) + columns.Roles = append(columns.Roles, utils.FieldColType) + } + } +} diff --git a/plugins/outputs/postgresql/columns/map_tags.go b/plugins/outputs/postgresql/columns/map_tags.go new file mode 100644 index 0000000000000..7bb575c0da768 --- /dev/null +++ b/plugins/outputs/postgresql/columns/map_tags.go @@ -0,0 +1,18 @@ +package columns + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" +) + +func mapTags(tagList []*telegraf.Tag, alreadyMapped map[string]bool, columns *utils.TargetColumns) { + for _, tag := range tagList { + if _, ok := alreadyMapped[tag.Key]; !ok { + alreadyMapped[tag.Key] = true + columns.Target[tag.Key] = len(columns.Names) + columns.Names = append(columns.Names, tag.Key) + columns.DataTypes = append(columns.DataTypes, utils.PgText) + columns.Roles = append(columns.Roles, utils.TagColType) + } + } +} diff --git a/plugins/outputs/postgresql/columns/standard_columns.go b/plugins/outputs/postgresql/columns/standard_columns.go new file mode 100644 index 0000000000000..75abe2ec6e869 --- /dev/null +++ b/plugins/outputs/postgresql/columns/standard_columns.go @@ -0,0 +1,16 @@ +package columns + +import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" + +// Column names and data types for standard fields (time, tag_id, tags, and fields) +const ( + TimeColumnName = "time" + TimeColumnDataType = 
utils.PgTimestamptz + TimeColumnDefinition = TimeColumnName + " " + utils.PgTimestamptz + TagIDColumnName = "tag_id" + TagIDColumnDataType = utils.PgInt4 + TagIDColumnDataTypeAsPK = utils.PgSerial + TagsJSONColumn = "tags" + FieldsJSONColumn = "fields" + JSONColumnDataType = utils.PgJSONb +) diff --git a/plugins/outputs/postgresql/create_table.go b/plugins/outputs/postgresql/create_table.go deleted file mode 100644 index 9b647e276b3ee..0000000000000 --- a/plugins/outputs/postgresql/create_table.go +++ /dev/null @@ -1,72 +0,0 @@ -package postgresql - -import ( - "fmt" - "strings" - - "github.com/influxdata/telegraf" -) - -const ( - tagIDColumn = "tag_id" - createTagsTableTemplate = "CREATE TABLE IF NOT EXISTS %s(tag_id serial primary key,%s,UNIQUE(%s))" -) - -func (p *Postgresql) generateCreateTable(metric telegraf.Metric) string { - var columns []string - var pk []string - var sql []string - - pk = append(pk, quoteIdent("time")) - columns = append(columns, "time timestamptz") - - // handle tags if necessary - if len(metric.Tags()) > 0 { - if p.TagsAsForeignkeys { - // tags in separate table - var tagColumns []string - var tagColumndefs []string - columns = append(columns, "tag_id int") - - if p.TagsAsJsonb { - tagColumns = append(tagColumns, "tags") - tagColumndefs = append(tagColumndefs, "tags jsonb") - } else { - for column := range metric.Tags() { - tagColumns = append(tagColumns, quoteIdent(column)) - tagColumndefs = append(tagColumndefs, fmt.Sprintf("%s text", quoteIdent(column))) - } - } - table := p.fullTableName(metric.Name() + p.TagTableSuffix) - sql = append(sql, fmt.Sprintf(createTagsTableTemplate, table, strings.Join(tagColumndefs, ","), strings.Join(tagColumns, ","))) - } else { - // tags in measurement table - if p.TagsAsJsonb { - columns = append(columns, "tags jsonb") - } else { - for column := range metric.Tags() { - pk = append(pk, quoteIdent(column)) - columns = append(columns, fmt.Sprintf("%s text", quoteIdent(column))) - } - } - } - } - - if p.FieldsAsJsonb { - columns = append(columns, "fields jsonb") - } else { - var datatype string - for column, v := range metric.Fields() { - datatype = deriveDatatype(v) - columns = append(columns, fmt.Sprintf("%s %s", quoteIdent(column), datatype)) - } - } - - query := strings.Replace(p.TableTemplate, "{TABLE}", p.fullTableName(metric.Name()), -1) - query = strings.Replace(query, "{TABLELITERAL}", quoteLiteral(p.fullTableName(metric.Name())), -1) - query = strings.Replace(query, "{COLUMNS}", strings.Join(columns, ","), -1) - query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) - - sql = append(sql, query) - return strings.Join(sql, ";") -} diff --git a/plugins/outputs/postgresql/create_table_test.go b/plugins/outputs/postgresql/create_table_test.go deleted file mode 100644 index 404e3fdbd4683..0000000000000 --- a/plugins/outputs/postgresql/create_table_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package postgresql - -import ( - "testing" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" -) - -func TestGenerateCreateTable(t *testing.T) { - p := newPostgresql() - p.TagsAsJsonb = true - p.FieldsAsJsonb = true - timestamp := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) - - var m telegraf.Metric - m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,fields jsonb)`, p.generateCreateTable(m)) - - m, _ = 
metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,tags jsonb,fields jsonb)`, p.generateCreateTable(m)) - - p.TagsAsJsonb = false - p.FieldsAsJsonb = false - - m, _ = metric.New("m", nil, map[string]interface{}{"f": float64(3.14)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"f" float8)`, p.generateCreateTable(m)) - - m, _ = metric.New("m", nil, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"i" int8)`, p.generateCreateTable(m)) - - m, _ = metric.New("m", map[string]string{"k": "v"}, map[string]interface{}{"i": int(3)}, timestamp) - assert.Equal(t, `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,"k" text,"i" int8)`, p.generateCreateTable(m)) - - p.TagsAsForeignkeys = true - assert.Equal(t, - `CREATE TABLE IF NOT EXISTS "public"."m_tag"(tag_id serial primary key,"k" text,UNIQUE("k"));`+ - `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,tag_id int,"i" int8)`, - p.generateCreateTable(m)) - - p.TagsAsJsonb = true - assert.Equal(t, - `CREATE TABLE IF NOT EXISTS "public"."m_tag"(tag_id serial primary key,tags jsonb,UNIQUE(tags));`+ - `CREATE TABLE IF NOT EXISTS "public"."m"(time timestamptz,tag_id int,"i" int8)`, - p.generateCreateTable(m)) -} diff --git a/plugins/outputs/postgresql/db/db_wrapper.go b/plugins/outputs/postgresql/db/db_wrapper.go new file mode 100644 index 0000000000000..c6cf999b1bc0c --- /dev/null +++ b/plugins/outputs/postgresql/db/db_wrapper.go @@ -0,0 +1,65 @@ +package db + +import ( + "log" + + "github.com/jackc/pgx" + // pgx driver for sql connections + _ "github.com/jackc/pgx/stdlib" +) + +// Wrapper defines an interface that encapsulates communication with a DB. +type Wrapper interface { + Exec(query string, args ...interface{}) (pgx.CommandTag, error) + DoCopy(fullTableName *pgx.Identifier, colNames []string, batch [][]interface{}) error + Query(query string, args ...interface{}) (*pgx.Rows, error) + QueryRow(query string, args ...interface{}) *pgx.Row + Close() error +} + +type defaultDbWrapper struct { + db *pgx.Conn +} + +// NewWrapper returns an implementation of the db.Wrapper interface +// that issues queries to a PG database. +func NewWrapper(address string) (Wrapper, error) { + connConfig, err := pgx.ParseConnectionString(address) + if err != nil { + log.Printf("E! Couldn't parse connection address: %s\n%v", address, err) + return nil, err + } + db, err := pgx.Connect(connConfig) + if err != nil { + log.Printf("E! Couldn't connect to server\n%v", err) + return nil, err + } + + return &defaultDbWrapper{ + db: db, + }, nil +} + +func (d *defaultDbWrapper) Exec(query string, args ...interface{}) (pgx.CommandTag, error) { + return d.db.Exec(query, args...) +} + +func (d *defaultDbWrapper) DoCopy(fullTableName *pgx.Identifier, colNames []string, batch [][]interface{}) error { + source := pgx.CopyFromRows(batch) + _, err := d.db.CopyFrom(*fullTableName, colNames, source) + if err != nil { + log.Printf("E! Could not insert batch of rows in output db\n%v", err) + } + + return err +} + +func (d *defaultDbWrapper) Close() error { return d.db.Close() } + +func (d *defaultDbWrapper) Query(query string, args ...interface{}) (*pgx.Rows, error) { + return d.db.Query(query, args...) +} + +func (d *defaultDbWrapper) QueryRow(query string, args ...interface{}) *pgx.Row { + return d.db.QueryRow(query, args...) 
+} diff --git a/plugins/outputs/postgresql/db_wrapper.go b/plugins/outputs/postgresql/db_wrapper.go deleted file mode 100644 index bb095429d985b..0000000000000 --- a/plugins/outputs/postgresql/db_wrapper.go +++ /dev/null @@ -1,43 +0,0 @@ -package postgresql - -import ( - "database/sql" - // pgx driver for sql connections - _ "github.com/jackc/pgx/stdlib" -) - -type dbWrapper interface { - Exec(query string, args ...interface{}) (sql.Result, error) - Query(query string, args ...interface{}) (*sql.Rows, error) - QueryRow(query string, args ...interface{}) *sql.Row - Close() error -} - -type defaultDbWrapper struct { - db *sql.DB -} - -func newDbWrapper(address string) (dbWrapper, error) { - db, err := sql.Open("pgx", address) - if err != nil { - return nil, err - } - - return &defaultDbWrapper{ - db: db, - }, nil -} - -func (d *defaultDbWrapper) Exec(query string, args ...interface{}) (sql.Result, error) { - return d.db.Exec(query, args...) -} - -func (d *defaultDbWrapper) Close() error { return d.db.Close() } - -func (d *defaultDbWrapper) Query(query string, args ...interface{}) (*sql.Rows, error) { - return d.db.Query(query, args...) -} - -func (d *defaultDbWrapper) QueryRow(query string, args ...interface{}) *sql.Row { - return d.db.QueryRow(query, args...) -} diff --git a/plugins/outputs/postgresql/generate_insert.go b/plugins/outputs/postgresql/generate_insert.go deleted file mode 100644 index c71c884845da3..0000000000000 --- a/plugins/outputs/postgresql/generate_insert.go +++ /dev/null @@ -1,24 +0,0 @@ -package postgresql - -import ( - "fmt" - "strings" -) - -const ( - insertIntoSQLTemplate = "INSERT INTO %s(%s) VALUES(%s)" -) - -func (p *Postgresql) generateInsert(tablename string, columns []string) string { - valuePlaceholders := make([]string, len(columns)) - quotedColumns := make([]string, len(columns)) - for i, column := range columns { - valuePlaceholders[i] = fmt.Sprintf("$%d", i+1) - quotedColumns[i] = quoteIdent(column) - } - - fullTableName := p.fullTableName(tablename) - columnNames := strings.Join(quotedColumns, ",") - values := strings.Join(valuePlaceholders, ",") - return fmt.Sprintf(insertIntoSQLTemplate, fullTableName, columnNames, values) -} diff --git a/plugins/outputs/postgresql/generate_insert_test.go b/plugins/outputs/postgresql/generate_insert_test.go deleted file mode 100644 index 28d2e023b9790..0000000000000 --- a/plugins/outputs/postgresql/generate_insert_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package postgresql - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPostgresqlQuote(t *testing.T) { - assert.Equal(t, `"foo"`, quoteIdent("foo")) - assert.Equal(t, `"fo'o"`, quoteIdent("fo'o")) - assert.Equal(t, `"fo""o"`, quoteIdent("fo\"o")) - - assert.Equal(t, "'foo'", quoteLiteral("foo")) - assert.Equal(t, "'fo''o'", quoteLiteral("fo'o")) - assert.Equal(t, "'fo\"o'", quoteLiteral("fo\"o")) -} - -func TestPostgresqlInsertStatement(t *testing.T) { - p := newPostgresql() - - p.TagsAsJsonb = false - p.FieldsAsJsonb = false - - sql := p.generateInsert("m", []string{"time", "f"}) - assert.Equal(t, `INSERT INTO "public"."m"("time","f") VALUES($1,$2)`, sql) - - sql = p.generateInsert("m", []string{"time", "i"}) - assert.Equal(t, `INSERT INTO "public"."m"("time","i") VALUES($1,$2)`, sql) - - sql = p.generateInsert("m", []string{"time", "f", "i"}) - assert.Equal(t, `INSERT INTO "public"."m"("time","f","i") VALUES($1,$2,$3)`, sql) - - sql = p.generateInsert("m", []string{"time", "k", "i"}) - assert.Equal(t, `INSERT INTO 
"public"."m"("time","k","i") VALUES($1,$2,$3)`, sql) - - sql = p.generateInsert("m", []string{"time", "k1", "k2", "i"}) - assert.Equal(t, `INSERT INTO "public"."m"("time","k1","k2","i") VALUES($1,$2,$3,$4)`, sql) -} diff --git a/plugins/outputs/postgresql/get_tag_id.go b/plugins/outputs/postgresql/get_tag_id.go deleted file mode 100644 index c17e6a6ea978c..0000000000000 --- a/plugins/outputs/postgresql/get_tag_id.go +++ /dev/null @@ -1,87 +0,0 @@ -package postgresql - -import ( - "fmt" - "log" - "strings" - - "github.com/influxdata/telegraf" -) - -const ( - selectTagIDTemplate = "SELECT tag_id FROM %s WHERE %s" - missingColumnsTemplate = "WITH available AS (SELECT column_name as c FROM information_schema.columns WHERE table_schema = $1 and table_name = $2)," + - "required AS (SELECT c FROM unnest(array [%s]) AS c) " + - "SELECT required.c, available.c IS NULL FROM required LEFT JOIN available ON required.c = available.c;" - - addColumnTemplate = "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;" -) - -func (p *Postgresql) getTagID(metric telegraf.Metric) (int, error) { - var tagID int - var whereColumns []string - var whereValues []interface{} - tablename := metric.Name() - - if p.TagsAsJsonb && len(metric.Tags()) > 0 { - d, err := buildJsonbTags(metric.Tags()) - if err != nil { - return tagID, err - } - - whereColumns = append(whereColumns, "tags") - whereValues = append(whereValues, d) - } else { - for column, value := range metric.Tags() { - whereColumns = append(whereColumns, column) - whereValues = append(whereValues, value) - } - } - - whereParts := make([]string, len(whereColumns)) - for i, column := range whereColumns { - whereParts[i] = fmt.Sprintf("%s = $%d", quoteIdent(column), i+1) - } - - tagsTableName := tablename + p.TagTableSuffix - tagsTableFullName := p.fullTableName(tagsTableName) - query := fmt.Sprintf(selectTagIDTemplate, tagsTableFullName, strings.Join(whereParts, " AND ")) - - err := p.db.QueryRow(query, whereValues...).Scan(&tagID) - if err == nil { - return tagID, nil - } - query = p.generateInsert(tagsTableName, whereColumns) + " RETURNING tag_id" - err = p.db.QueryRow(query, whereValues...).Scan(&tagID) - if err == nil { - return tagID, nil - } - - // check if insert error was caused by column mismatch - - // if tags are jsonb, there shouldn't be a column mismatch - if p.TagsAsJsonb { - return tagID, err - } - - // check for missing columns - log.Printf("W! Possible column mismatch while inserting new tag-set: %v", err) - retry, err := p.addMissingColumns(tagsTableName, whereColumns, whereValues) - if err != nil { - // missing coulmns not properly added - log.Printf("E! Could not add missing columns: %v", err) - return tagID, err - } - - // We added some columns and insert might work now. Try again immediately to - // avoid long lead time in getting metrics when there are several columns missing - // from the original create statement and they get added in small drops. - if retry { - log.Printf("I! 
Retrying to insert new tag set")
-		err := p.db.QueryRow(query, whereValues...).Scan(&tagID)
-		if err != nil {
-			return tagID, err
-		}
-	}
-	return tagID, nil
-}
diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 0e30ea6e507d3..c59f5cbb87cea 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -2,27 +2,30 @@ package postgresql
 
 import (
 	"log"
-	"sort"
 
 	"github.com/influxdata/telegraf"
 	"github.com/influxdata/telegraf/plugins/outputs"
-)
-
-const (
-	tagsJSONColumn   = "tags"
-	fieldsJSONColumn = "fields"
+	"github.com/influxdata/telegraf/plugins/outputs/postgresql/columns"
+	"github.com/influxdata/telegraf/plugins/outputs/postgresql/db"
+	"github.com/influxdata/telegraf/plugins/outputs/postgresql/tables"
+	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
 )
 
 type Postgresql struct {
-	db                dbWrapper
-	Address           string
-	Schema            string
-	TagsAsForeignkeys bool
-	TagsAsJsonb       bool
-	FieldsAsJsonb     bool
-	TableTemplate     string
-	TagTableSuffix    string
-	tables            tableKeeper
+	db                          db.Wrapper
+	Address                     string
+	Schema                      string
+	DoSchemaUpdates             bool
+	TagsAsForeignkeys           bool
+	CachedTagsetsPerMeasurement int
+	TagsAsJsonb                 bool
+	FieldsAsJsonb               bool
+	TableTemplate               string
+	TagTableSuffix              string
+	tables                      tables.Manager
+	tagCache                    tagsCache
+	rows                        transformer
+	columns                     columns.Mapper
 }
 
 func init() {
@@ -31,32 +34,39 @@ func init() {
 
 func newPostgresql() *Postgresql {
 	return &Postgresql{
-		Schema:         "public",
-		TableTemplate:  "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})",
-		TagTableSuffix: "_tag",
+		Schema:                      "public",
+		TableTemplate:               "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})",
+		TagTableSuffix:              "_tag",
+		CachedTagsetsPerMeasurement: 1000,
+		DoSchemaUpdates:             true,
 	}
 }
 
 // Connect establishes a connection to the target database and prepares the cache
 func (p *Postgresql) Connect() error {
-	db, err := newDbWrapper(p.Address)
+	db, err := db.NewWrapper(p.Address)
 	if err != nil {
 		return err
 	}
 	p.db = db
-	p.tables = newTableKeeper(db)
+	p.tables = tables.NewManager(db, p.Schema, p.TableTemplate)
+
+	if p.TagsAsForeignkeys {
+		p.tagCache = newTagsCache(p.CachedTagsetsPerMeasurement, p.TagsAsJsonb, p.TagTableSuffix, p.Schema, p.db)
+	}
+	p.rows = newRowTransformer(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb, p.tagCache)
+	p.columns = columns.NewMapper(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb)
 
 	return nil
 }
 
 // Close closes the connection to the database
 func (p *Postgresql) Close() error {
+	p.tagCache = nil
+	p.tables = nil
 	return p.db.Close()
 }
 
-func (p *Postgresql) fullTableName(name string) string {
-	return quoteIdent(p.Schema) + "." + quoteIdent(name)
-}
-
 var sampleConfig = `
   ## specify address via a url matching:
   ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
@@ -73,8 +83,16 @@ var sampleConfig = `
   ##
   address = "host=localhost user=postgres sslmode=verify-full"
 
+  ## Update existing tables to match the incoming metrics automatically. Default is true.
+  # do_schema_updates = true
+
   ## Store tags as foreign keys in the metrics table. Default is false.
   # tags_as_foreignkeys = false
+
+  ## If tags_as_foreignkeys is set to true you can choose the number of tag sets to cache
+  ## per measurement (metric name). Default is 1000; if set to 0 the cache has no limit.
+ ## Has no effect if tags_as_foreignkeys = false + # cached_tagsets_per_measurement = 1000 ## Template to use for generating tables ## Available Variables: @@ -103,149 +121,64 @@ func (p *Postgresql) SampleConfig() string { return sampleConfig } func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } func (p *Postgresql) Write(metrics []telegraf.Metric) error { - toInsert := make(map[string][]*colsAndValues) - for _, metric := range metrics { - tablename := metric.Name() - - // create table if needed - if p.tables.exists(p.Schema, tablename) == false { - createStmt := p.generateCreateTable(metric) - _, err := p.db.Exec(createStmt) - if err != nil { - log.Printf("E! Creating table failed: statement: %v, error: %v", createStmt, err) - return err - } - p.tables.add(tablename) - } - - columns := []string{"time"} - values := []interface{}{metric.Time()} - tagColumns, tagValues, err := p.prepareTags(metric) + metricsByMeasurement := utils.GroupMetricsByMeasurement(metrics) + for measureName, indices := range metricsByMeasurement { + err := p.writeMetricsFromMeasure(measureName, indices, metrics) if err != nil { return err } - if tagColumns != nil { - columns = append(columns, tagColumns...) - values = append(values, tagValues...) - } - - if p.FieldsAsJsonb { - d, err := buildJsonb(metric.Fields()) - if err != nil { - return err - } - - columns = append(columns, fieldsJSONColumn) - values = append(values, d) - } else { - var keys []string - fields := metric.Fields() - for column := range fields { - keys = append(keys, column) - } - sort.Strings(keys) - for _, column := range keys { - columns = append(columns, column) - values = append(values, fields[column]) - } - } - - newValues := &colsAndValues{ - cols: columns, - vals: values, - } - toInsert[tablename] = append(toInsert[tablename], newValues) } - - return p.insertBatches(toInsert) + return nil } -func (p *Postgresql) prepareTags(metric telegraf.Metric) ([]string, []interface{}, error) { - if len(metric.Tags()) == 0 { - return nil, nil, nil - } +// Writes only the metrics from a specified measure. 'metricIndices' is an array +// of the metrics that belong to the selected 'measureName' for faster lookup. +// If schema updates are enabled the target db tables are updated to be able +// to hold the new values. 
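+// For illustration (hypothetical values): if Write received the metrics [cpu, mem, cpu],
+// GroupMetricsByMeasurement groups them as {"cpu": [0, 2], "mem": [1]}, so this is
+// called once per measurement, e.g. writeMetricsFromMeasure("cpu", []int{0, 2}, metrics).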
+func (p *Postgresql) writeMetricsFromMeasure(measureName string, metricIndices []int, metrics []telegraf.Metric) error { + targetColumns, targetTagColumns := p.columns.Target(metricIndices, metrics) - if p.TagsAsForeignkeys { - // tags in separate table - tagID, err := p.getTagID(metric) - if err != nil { - return nil, nil, err - } - return []string{tagIDColumn}, []interface{}{tagID}, nil - } - // tags in measurement table - if p.TagsAsJsonb { - d, err := buildJsonbTags(metric.Tags()) - if err != nil { - return nil, nil, err + if p.DoSchemaUpdates { + if err := p.prepareTable(measureName, targetColumns); err != nil { + return err } - - if d != nil { - return []string{tagsJSONColumn}, []interface{}{d}, nil + if p.TagsAsForeignkeys { + tagTableName := p.tagCache.tagsTableName(measureName) + if err := p.prepareTable(tagTableName, targetTagColumns); err != nil { + return err + } } - return nil, nil, nil - } - - var keys []string - tags := metric.Tags() - for column := range tags { - keys = append(keys, column) - } - sort.Strings(keys) - numColumns := len(keys) - var columns = make([]string, numColumns) - var values = make([]interface{}, numColumns) - for i, column := range keys { - columns[i] = column - values[i] = tags[column] + numColumns := len(targetColumns.Names) + values := make([][]interface{}, len(metricIndices)) + var rowTransformErr error + for rowNum, metricIndex := range metricIndices { + values[rowNum], rowTransformErr = p.rows.createRowFromMetric(numColumns, metrics[metricIndex], targetColumns, targetTagColumns) + if rowTransformErr != nil { + log.Printf("E! Could not transform metric to proper row\n%v", rowTransformErr) + return rowTransformErr + } } - return columns, values, nil -} -type colsAndValues struct { - cols []string - vals []interface{} + fullTableName := utils.FullTableName(p.Schema, measureName) + return p.db.DoCopy(fullTableName, targetColumns.Names, values) } -// insertBatches takes batches of data to be inserted. The batches are mapped -// by the target table, and each batch contains the columns and values for those -// columns that will generate the INSERT statement. -// On column mismatch an attempt is made to create the column and try to reinsert. -func (p *Postgresql) insertBatches(batches map[string][]*colsAndValues) error { - for tableName, colsAndValues := range batches { - for _, row := range colsAndValues { - sql := p.generateInsert(tableName, row.cols) - _, err := p.db.Exec(sql, row.vals...) - if err == nil { - continue - } - - // check if insert error was caused by column mismatch - if p.FieldsAsJsonb { - return err - } - - log.Printf("W! Possible column mismatch while inserting new metrics: %v", err) +// Checks if a table exists in the db, and then validates if all the required columns +// are present or some are missing (if metrics changed their field or tag sets). +func (p *Postgresql) prepareTable(tableName string, details *utils.TargetColumns) error { + tableExists := p.tables.Exists(tableName) - retry := false - retry, err = p.addMissingColumns(tableName, row.cols, row.vals) - if err != nil { - log.Printf("E! Could not fix column mismatch: %v", err) - return err - } - - // We added some columns and insert might work now. Try again immediately to - // avoid long lead time in getting metrics when there are several columns missing - // from the original create statement and they get added in small drops. - if retry { - _, err = p.db.Exec(sql, row.vals...) 
- } - if err != nil { - return err - } - } + if !tableExists { + return p.tables.CreateTable(tableName, details) } - return nil + missingColumns, err := p.tables.FindColumnMismatch(tableName, details) + if err != nil { + return err + } + if len(missingColumns) == 0 { + return nil + } + return p.tables.AddColumnsToTable(tableName, missingColumns, details) } diff --git a/plugins/outputs/postgresql/postgresql_integration_test.go b/plugins/outputs/postgresql/postgresql_integration_test.go index 1fdbe0207ed33..457665621c033 100644 --- a/plugins/outputs/postgresql/postgresql_integration_test.go +++ b/plugins/outputs/postgresql/postgresql_integration_test.go @@ -3,6 +3,8 @@ package postgresql import ( "database/sql" "fmt" + "math/rand" + "strconv" "testing" "time" @@ -27,6 +29,7 @@ func prepareAndConnect(t *testing.T, foreignTags, jsonTags, jsonFields bool) (te TagsAsForeignkeys: foreignTags, TagsAsJsonb: jsonTags, FieldsAsJsonb: jsonFields, + DoSchemaUpdates: true, TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", TagTableSuffix: "_tags", } @@ -75,6 +78,8 @@ func TestWriteToPostgresJsonTags(t *testing.T) { tagsAsJSON := true fieldsAsJSON := false testMetric, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) + defer dbConn.Close() + // insert first metric err := postgres.Write([]telegraf.Metric{testMetric}) assert.NoError(t, err, "Could not write") @@ -107,6 +112,8 @@ func TestWriteToPostgresJsonTagsAsForeignTable(t *testing.T) { tagsAsJSON := true fieldsAsJSON := false testMetric, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) + defer dbConn.Close() + // insert first metric err := postgres.Write([]telegraf.Metric{testMetric}) assert.NoError(t, err, "Could not write") @@ -147,6 +154,8 @@ func TestWriteToPostgresMultipleRowsOneTag(t *testing.T) { tagsAsJSON := true fieldsAsJSON := false testMetric, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) + defer dbConn.Close() + // insert first metric err := postgres.Write([]telegraf.Metric{testMetric, testMetric}) assert.NoError(t, err, "Could not write") @@ -174,6 +183,8 @@ func TestWriteToPostgresAddNewTag(t *testing.T) { tagsAsJSON := true fieldsAsJSON := false testMetricWithOneTag, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) + defer dbConn.Close() + testMetricWithOneMoreTag := testMetric("metric name", "tag1", int(2)) testMetricWithOneMoreTag.AddTag("second_tag", "tag2") // insert first two metric @@ -216,6 +227,8 @@ func TestWriteToPostgresAddNewTag(t *testing.T) { func TestWriteToPostgresAddNewField(t *testing.T) { testMetric, dbConn, postgres := prepareAndConnect(t, false, false, false) + defer dbConn.Close() + // insert first metric writeAndAssertSingleMetricNoJSON(t, testMetric, dbConn, postgres) @@ -271,3 +284,140 @@ func writeAndAssertSingleMetricNoJSON(t *testing.T, testMetric telegraf.Metric, ts.UTC(), tag, value)) } } + +func TestWriteToPostgresMultipleMetrics(t *testing.T) { + tagsAsForeignKey := true + tagsAsJSON := true + fieldsAsJSON := false + testMetric, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) + defer dbConn.Close() + dbConn.Exec(`DROP TABLE IF EXISTS "` + testMetric.Name() + `2"`) + dbConn.Exec(`DROP TABLE IF EXISTS "` + testMetric.Name() + `2_tag"`) + testMetricInSecondMeasurement, _ := metric.New(testMetric.Name()+"2", testMetric.Tags(), testMetric.Fields(), testMetric.Time().Add(time.Second)) + // insert first metric + err := 
postgres.Write([]telegraf.Metric{testMetric, testMetric, testMetricInSecondMeasurement}) + assert.NoError(t, err, "Could not write") + + // should have created table, all columns in the same table + rows, _ := dbConn.Query(fmt.Sprintf(`SELECT time, tag_id, value FROM "%s"`, testMetric.Name())) + // check results for testMetric if in db + for i := 0; i < 2; i++ { + var ts time.Time + var tagID int64 + var value int64 + rows.Next() + err = rows.Scan(&ts, &tagID, &value) + assert.NoError(t, err, "Could not check test results") + + sentValue, _ := testMetric.GetField("value") + sentTs := testMetric.Time() + // postgres doesn't support nano seconds in timestamp + sentTsNanoSecondOffset := sentTs.Nanosecond() + nanoSeconds := sentTsNanoSecondOffset % 1000 + sentTs = sentTs.Add(time.Duration(-nanoSeconds) * time.Nanosecond) + if !ts.UTC().Equal(sentTs.UTC()) { + assert.Fail(t, fmt.Sprintf("Expected: %v; Received: %v", sentTs, ts.UTC())) + } + + assert.Equal(t, int64(1), tagID) + assert.Equal(t, sentValue.(int64), value) + + sentTag, _ := testMetric.GetTag("tag") + sentTagJSON := fmt.Sprintf(`{"tag": "%s"}`, sentTag) + row := dbConn.QueryRow(fmt.Sprintf(`SELECT tag_id, tags FROM "%s%s"`, testMetric.Name(), postgres.TagTableSuffix)) + tagID = 0 + var tags string + err = row.Scan(&tagID, &tags) + assert.NoError(t, err, "Could not check test results") + assert.Equal(t, int64(1), tagID) + assert.Equal(t, sentTagJSON, tags) + } + // check results for second metric + row := dbConn.QueryRow(fmt.Sprintf(`SELECT time, tag_id, value FROM "%s"`, testMetricInSecondMeasurement.Name())) + var ts time.Time + var tagID int64 + var value int64 + err = row.Scan(&ts, &tagID, &value) + assert.NoError(t, err, "Could not check test results") + + sentValue, _ := testMetricInSecondMeasurement.GetField("value") + sentTs := testMetricInSecondMeasurement.Time() + // postgres doesn't support nano seconds in timestamp + sentTsNanoSecondOffset := sentTs.Nanosecond() + nanoSeconds := sentTsNanoSecondOffset % 1000 + sentTs = sentTs.Add(time.Duration(-nanoSeconds) * time.Nanosecond) + if !ts.UTC().Equal(sentTs.UTC()) { + assert.Fail(t, fmt.Sprintf("Expected: %v; Received: %v", sentTs, ts.UTC())) + } + + assert.Equal(t, int64(1), tagID) + assert.Equal(t, sentValue.(int64), value) +} + +func TestPerformanceIsAcceptable(t *testing.T) { + _, db, postgres := prepareAndConnect(t, false, false, false) + defer db.Close() + numMetricsPerMeasure := 10000 + numTags := 5 + numDiffValuesForEachTag := 5 + numFields := 10 + numMeasures := 2 + metrics := make([]telegraf.Metric, numMeasures*numMetricsPerMeasure) + for measureInd := 0; measureInd < numMeasures; measureInd++ { + for numMetric := 0; numMetric < numMetricsPerMeasure; numMetric++ { + tags := map[string]string{} + for tag := 0; tag < numTags; tag++ { + randNum := rand.Intn(numDiffValuesForEachTag) + tags[fmt.Sprintf("tag_%d", tag)] = strconv.Itoa(randNum) + } + fields := map[string]interface{}{} + for field := 0; field < numFields; field++ { + fields[fmt.Sprintf("field_%d", field)] = rand.Float64() + } + metricName := "m_" + strconv.Itoa(measureInd) + m, _ := metric.New(metricName, tags, fields, time.Now()) + metrics[measureInd*numMetricsPerMeasure+numMetric] = m + } + } + + start := time.Now() + err := postgres.Write(metrics) + assert.NoError(t, err) + end := time.Since(start) + t.Log("Wrote " + strconv.Itoa(numMeasures*numMetricsPerMeasure) + " metrics in " + end.String()) +} + +func TestPostgresBatching(t *testing.T) { + _, db, postgres := prepareAndConnect(t, false, false, false) + 
defer db.Close() + numMetricsPerMeasure := 5 + numMeasures := 2 + metrics := make([]telegraf.Metric, numMeasures*numMetricsPerMeasure) + for measureInd := 0; measureInd < numMeasures; measureInd++ { + metricName := "m_" + strconv.Itoa(measureInd) + db.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS ` + metricName)) + for numMetric := 0; numMetric < numMetricsPerMeasure; numMetric++ { + tags := map[string]string{} + fields := map[string]interface{}{"f": 1} + m, _ := metric.New(metricName, tags, fields, time.Now()) + metrics[measureInd*numMetricsPerMeasure+numMetric] = m + } + } + + err := postgres.Write(metrics) + assert.NoError(t, err) + err = postgres.Write(metrics) + assert.NoError(t, err) + // check num rows inserted by transaction id should be 'numMetricsPerMeasure' for + // both transactions, for all measures + for measureInd := 0; measureInd < numMeasures; measureInd++ { + metricName := "m_" + strconv.Itoa(measureInd) + rows, err := db.Query(`select count(*) from ` + metricName + ` group by xmin`) + assert.NoError(t, err) + var count int64 + rows.Next() + rows.Scan(&count) + assert.Equal(t, int64(numMetricsPerMeasure), count) + rows.Close() + } +} diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 4c4bf34450016..3c988bba800b2 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -1,182 +1,153 @@ package postgresql import ( - "database/sql" "fmt" "testing" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/columns" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" + "github.com/jackc/pgx" _ "github.com/jackc/pgx/stdlib" "github.com/stretchr/testify/assert" ) -func TestWrite(t *testing.T) { +func TestWriteAllInOnePlace(t *testing.T) { timestamp := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) oneMetric, _ := metric.New("m", map[string]string{"t": "tv"}, map[string]interface{}{"f": 1}, timestamp) - noTags, _ := metric.New("m", nil, map[string]interface{}{"f": 1}, timestamp) - testCases := []struct { - desc string - input []telegraf.Metric - fieldsAsJSON bool - execs []sql.Result - expectedExecQueries []string - execErrs []error - expectErr string - }{ - { - desc: "no metrics, no error", - input: []telegraf.Metric{}, - expectErr: "", - }, { - desc: "metric table not cached, error on creating it", - input: []telegraf.Metric{oneMetric}, - execs: []sql.Result{nil}, - execErrs: []error{fmt.Errorf("error on first exec")}, - expectErr: "error on first exec", - }, { - desc: "metric table not cached, gets cached, no tags, fields as json, error on insert", - input: []telegraf.Metric{noTags}, - fieldsAsJSON: true, - execs: []sql.Result{nil, nil}, - execErrs: []error{nil, fmt.Errorf("error on batch insert")}, - expectErr: "error on batch insert", - }, { - desc: "metric table not cached, gets cached, has tags, json fields, all good", - input: []telegraf.Metric{oneMetric}, - fieldsAsJSON: true, - execs: []sql.Result{nil, nil}, - execErrs: []error{nil, nil}, - expectedExecQueries: []string{ - `CREATE TABLE IF NOT EXISTS "a"."m"(time timestamptz,"t" text,fields jsonb)`, - `INSERT INTO "a"."m"("time","t","fields") VALUES($1,$2,$3)`}, - }, { - desc: "metric table not cached, gets cached, has tags, all good", - input: []telegraf.Metric{oneMetric}, - execs: []sql.Result{nil, nil}, - execErrs: []error{nil, nil}, - expectedExecQueries: []string{ - `CREATE 
TABLE IF NOT EXISTS "a"."m"(time timestamptz,"t" text,"f" int8)`, - `INSERT INTO "a"."m"("time","t","f") VALUES($1,$2,$3)`}, - }, - } + twoMetric, _ := metric.New("m", map[string]string{"t2": "tv2"}, map[string]interface{}{"f2": 2}, timestamp) + threeMetric, _ := metric.New("m", map[string]string{"t": "tv", "t2": "tv2"}, map[string]interface{}{"f": 3, "f2": 4}, timestamp) + fourMetric, _ := metric.New("m2", map[string]string{"t": "tv", "t2": "tv2"}, map[string]interface{}{"f": 5, "f2": 6}, timestamp) - for _, testCase := range testCases { - p := &Postgresql{ - tables: &mockTk{tables: make(map[string]bool)}, - TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", - Schema: "a", - FieldsAsJsonb: testCase.fieldsAsJSON, - db: &mockDb{ - exec: testCase.execs, - execErr: testCase.execErrs, - expectedQ: testCase.expectedExecQueries, - }} - err := p.Write(testCase.input) - if testCase.expectErr != "" { - assert.EqualError(t, err, testCase.expectErr, testCase.desc) - } else { - assert.Nil(t, err, testCase.desc) - } + p := &Postgresql{ + Schema: "public", + TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", + TagTableSuffix: "_tag", + DoSchemaUpdates: true, + Address: "host=localhost user=postgres password=postgres sslmode=disable dbname=postgres", } -} -func TestInsertBatches(t *testing.T) { - sampleData := map[string][]*colsAndValues{ - "tab": { - { - cols: []string{"a"}, - vals: []interface{}{1}, - }, - }, + p.Connect() + err := p.Write([]telegraf.Metric{oneMetric, twoMetric, fourMetric, threeMetric}) + if err != nil { + fmt.Println(err.Error()) + t.Fail() } - - testCases := []struct { - input map[string][]*colsAndValues - desc string - resultsFromExec []sql.Result - errorsFromExec []error - errorOnQuery error - fieldsAsJSON bool - expectErr string - }{ - { - desc: "no batches, no errors", - input: make(map[string][]*colsAndValues), - errorsFromExec: []error{fmt.Errorf("should not have called exec")}, - }, { - desc: "error returned on first insert, fields as json", - input: sampleData, - resultsFromExec: []sql.Result{nil}, - errorsFromExec: []error{fmt.Errorf("error on first insert")}, - fieldsAsJSON: true, - expectErr: "error on first insert", - }, { - desc: "error returned on first insert, error on add column", - input: sampleData, - resultsFromExec: []sql.Result{nil}, - errorsFromExec: []error{fmt.Errorf("error on first insert")}, - errorOnQuery: fmt.Errorf("error on query"), - expectErr: "error on query", - }, { - desc: "no error on insert", - input: sampleData, - resultsFromExec: []sql.Result{nil}, - errorsFromExec: []error{nil}, - }, + fiveMetric, _ := metric.New("m", map[string]string{"t": "tv", "t3": "tv3"}, map[string]interface{}{"f": 7, "f3": 8}, timestamp) + err = p.Write([]telegraf.Metric{fiveMetric}) + if err != nil { + fmt.Println(err.Error()) + t.Fail() } +} + +func TestPostgresqlMetricsFromMeasure(t *testing.T) { + postgreSQL, metrics, metricIndices := prepareAllColumnsInOnePlaceNoJSON() + err := postgreSQL.writeMetricsFromMeasure(metrics[0].Name(), metricIndices["m"], metrics) + assert.NoError(t, err) + postgreSQL, metrics, metricIndices = prepareAllColumnsInOnePlaceTagsAndFieldsJSON() + err = postgreSQL.writeMetricsFromMeasure(metrics[0].Name(), metricIndices["m"], metrics) + assert.NoError(t, err) +} + +func prepareAllColumnsInOnePlaceNoJSON() (*Postgresql, []telegraf.Metric, map[string][]int) { + oneMetric, _ := metric.New("m", map[string]string{"t": "tv"}, map[string]interface{}{"f": 1}, time.Now()) + twoMetric, _ := metric.New("m", 
map[string]string{"t2": "tv2"}, map[string]interface{}{"f2": 2}, time.Now()) + threeMetric, _ := metric.New("m", map[string]string{"t": "tv", "t2": "tv2"}, map[string]interface{}{"f": 3, "f2": 4}, time.Now()) - for _, testCase := range testCases { - m := &mockDb{exec: testCase.resultsFromExec, - execErr: testCase.errorsFromExec, - queryErr: testCase.errorOnQuery} - p := &Postgresql{ - db: m, - FieldsAsJsonb: testCase.fieldsAsJSON, + return &Postgresql{ + TagTableSuffix: "_tag", + DoSchemaUpdates: true, + tables: &mockTables{t: map[string]bool{"m": true}, missingCols: []int{}}, + rows: &mockTransformer{rows: [][]interface{}{nil, nil, nil}}, + columns: columns.NewMapper(false, false, false), + db: &mockDb{}, + }, []telegraf.Metric{ + oneMetric, twoMetric, threeMetric, + }, map[string][]int{ + "m": []int{0, 1, 2}, } +} - err := p.insertBatches(testCase.input) - if testCase.expectErr != "" { - assert.EqualError(t, err, testCase.expectErr) - } else { - assert.Nil(t, err) +func prepareAllColumnsInOnePlaceTagsAndFieldsJSON() (*Postgresql, []telegraf.Metric, map[string][]int) { + oneMetric, _ := metric.New("m", map[string]string{"t": "tv"}, map[string]interface{}{"f": 1}, time.Now()) + twoMetric, _ := metric.New("m", map[string]string{"t2": "tv2"}, map[string]interface{}{"f2": 2}, time.Now()) + threeMetric, _ := metric.New("m", map[string]string{"t": "tv", "t2": "tv2"}, map[string]interface{}{"f": 3, "f2": 4}, time.Now()) + + return &Postgresql{ + TagTableSuffix: "_tag", + DoSchemaUpdates: true, + TagsAsForeignkeys: false, + TagsAsJsonb: true, + FieldsAsJsonb: true, + tables: &mockTables{t: map[string]bool{"m": true}, missingCols: []int{}}, + columns: columns.NewMapper(false, true, true), + rows: &mockTransformer{rows: [][]interface{}{nil, nil, nil}}, + db: &mockDb{}, + }, []telegraf.Metric{ + oneMetric, twoMetric, threeMetric, + }, map[string][]int{ + "m": []int{0, 1, 2}, } - } } -type mockDb struct { - currentExec int - exec []sql.Result - expectedQ []string - execErr []error - query *sql.Rows - queryErr error +type mockTables struct { + t map[string]bool + createErr error + missingCols []int + mismatchErr error + addColsErr error } -func (m *mockDb) Exec(query string, args ...interface{}) (sql.Result, error) { - tmp := m.currentExec - m.currentExec++ - if m.expectedQ != nil && m.expectedQ[tmp] != query { - return nil, fmt.Errorf("unexpected query, got: '%s' expected: %s", query, m.expectedQ[tmp]) +func (m *mockTables) Exists(tableName string) bool { + return m.t[tableName] +} +func (m *mockTables) CreateTable(tableName string, colDetails *utils.TargetColumns) error { + if m.createErr != nil { + return m.createErr } - - return m.exec[tmp], m.execErr[tmp] + m.t[tableName] = true + return nil +} +func (m *mockTables) FindColumnMismatch(tableName string, colDetails *utils.TargetColumns) ([]int, error) { + return m.missingCols, m.mismatchErr } -func (m *mockDb) Query(query string, args ...interface{}) (*sql.Rows, error) { - return m.query, m.queryErr +func (m *mockTables) AddColumnsToTable(tableName string, columnIndices []int, colDetails *utils.TargetColumns) error { + return m.addColsErr } -func (m *mockDb) QueryRow(query string, args ...interface{}) *sql.Row { return nil } -func (m *mockDb) Close() error { return nil } -type mockTk struct { - tables map[string]bool +type mockTransformer struct { + rows [][]interface{} + current int + rowErr error } -func (m *mockTk) add(tableName string) { - m.tables[tableName] = true +func (mt *mockTransformer) createRowFromMetric(numColumns int, metric 
telegraf.Metric, targetColumns, targetTagColumns *utils.TargetColumns) ([]interface{}, error) { + if mt.rowErr != nil { + return nil, mt.rowErr + } + row := mt.rows[mt.current] + mt.current++ + return row, nil } -func (m *mockTk) exists(schema, table string) bool { - _, exists := m.tables[table] - return exists +type mockDb struct { + doCopyErr error +} + +func (m *mockDb) Exec(query string, args ...interface{}) (pgx.CommandTag, error) { + return "", nil +} +func (m *mockDb) DoCopy(fullTableName *pgx.Identifier, colNames []string, batch [][]interface{}) error { + return m.doCopyErr +} +func (m *mockDb) Query(query string, args ...interface{}) (*pgx.Rows, error) { + return nil, nil +} +func (m *mockDb) QueryRow(query string, args ...interface{}) *pgx.Row { + return nil +} +func (m *mockDb) Close() error { + return nil } diff --git a/plugins/outputs/postgresql/table_keeper.go b/plugins/outputs/postgresql/table_keeper.go deleted file mode 100644 index 3b0fd45ac481f..0000000000000 --- a/plugins/outputs/postgresql/table_keeper.go +++ /dev/null @@ -1,47 +0,0 @@ -package postgresql - -import ( - "log" -) - -const ( - tableExistsTemplate = "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname = $2;" -) - -type tableKeeper interface { - exists(schema, tableName string) bool - add(tableName string) -} - -type defTableKeeper struct { - Tables map[string]bool - db dbWrapper -} - -func newTableKeeper(db dbWrapper) tableKeeper { - return &defTableKeeper{ - Tables: make(map[string]bool), - db: db, - } -} - -func (t *defTableKeeper) exists(schema, tableName string) bool { - if _, ok := t.Tables[tableName]; ok { - return true - } - - result, err := t.db.Exec(tableExistsTemplate, tableName, schema) - if err != nil { - log.Printf("E! Error checking for existence of metric table %s: %v", tableName, err) - return false - } - if count, _ := result.RowsAffected(); count == 1 { - t.Tables[tableName] = true - return true - } - return false -} - -func (t *defTableKeeper) add(tableName string) { - t.Tables[tableName] = true -} diff --git a/plugins/outputs/postgresql/table_keeper_test.go b/plugins/outputs/postgresql/table_keeper_test.go deleted file mode 100644 index 0d7bb77bec307..0000000000000 --- a/plugins/outputs/postgresql/table_keeper_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package postgresql - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewTableKeeper(t *testing.T) { - mock := &mockWr{} - tk := newTableKeeper(mock).(*defTableKeeper) - assert.Equal(t, mock, tk.db) - assert.Empty(t, tk.Tables) -} - -func TestTableKeeperAdd(t *testing.T) { - tk := newTableKeeper(nil).(*defTableKeeper) - tk.add("table") - tk.add("table2") - assert.Equal(t, 2, len(tk.Tables)) - assert.True(t, tk.Tables["table"]) - assert.True(t, tk.Tables["table2"]) - assert.False(t, tk.Tables["table3"]) - tk.add("table2") - assert.Equal(t, 2, len(tk.Tables)) -} - -func TestTableKeeperExists(t *testing.T) { - mock := &mockWr{} - tk := newTableKeeper(mock).(*defTableKeeper) - table := "table name" - - // table cached - tk.Tables[table] = true - mock.execErr = fmt.Errorf("should not call execute") - assert.True(t, tk.exists("", table)) - - // error on table exists query - mock.execErr = fmt.Errorf("error on query execute") - mock.expected = tableExistsTemplate - delete(tk.Tables, table) - assert.False(t, tk.exists("", table)) - assert.Equal(t, 0, len(tk.Tables)) - - // fetch from db, doesn't exist - mock.execErr = nil - mock.exec = &mockResult{} - assert.False(t, tk.exists("", table)) - - // 
fetch from db, exists - mock.exec = &mockResult{rows: 1} - assert.True(t, tk.exists("", table)) - assert.Equal(t, 1, len(tk.Tables)) - assert.True(t, tk.Tables[table]) -} - -type mockResult struct { - rows int64 - rowErr error - last int64 - lastErr error -} - -func (m *mockResult) LastInsertId() (int64, error) { - return m.last, m.lastErr -} - -func (m *mockResult) RowsAffected() (int64, error) { - return m.rows, m.rowErr -} diff --git a/plugins/outputs/postgresql/tables/manager.go b/plugins/outputs/postgresql/tables/manager.go new file mode 100644 index 0000000000000..f99ca0c5c72d0 --- /dev/null +++ b/plugins/outputs/postgresql/tables/manager.go @@ -0,0 +1,208 @@ +package tables + +import ( + "database/sql" + "fmt" + "log" + "strings" + + "github.com/influxdata/telegraf/plugins/outputs/postgresql/db" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" +) + +const ( + addColumnTemplate = "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;" + tableExistsTemplate = "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname = $2;" + findColumnPresenceTemplate = "WITH available AS (SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = $1 and table_name = $2)," + + "required AS (SELECT c FROM unnest(array [%s]) AS c) " + + "SELECT required.c as column_name, available.column_name IS NOT NULL as exists, available.data_type FROM required LEFT JOIN available ON required.c = available.column_name;" +) + +type columnInDbDef struct { + dataType utils.PgDataType + exists bool +} + +// Manager defines an abstraction that can check the state of tables in a PG +// database, create, and update them. +type Manager interface { + // Exists checks if a table with the given name already is present in the DB. + Exists(tableName string) bool + // Creates a table in the database with the column names and types specified in 'colDetails' + CreateTable(tableName string, colDetails *utils.TargetColumns) error + // This function queries a table in the DB if the required columns in 'colDetails' are present and what is their + // data type. For existing columns it checks if the data type in the DB can safely hold the data from the metrics. + // It returns: + // - the indices of the missing columns (from colDetails) + // - or an error if + // = it couldn't discover the columns of the table in the db + // = the existing column types are incompatible with the required column types + FindColumnMismatch(tableName string, colDetails *utils.TargetColumns) ([]int, error) + // From the column details (colDetails) of a given measurement, 'columnIndices' specifies which are missing in the DB. + // this function will add the new columns with the required data type. + AddColumnsToTable(tableName string, columnIndices []int, colDetails *utils.TargetColumns) error +} + +type defTableManager struct { + Tables map[string]bool + db db.Wrapper + schema string + tableTemplate string +} + +// NewManager returns an instance of the tables.Manager interface +// that can handle checking and updating the state of tables in the PG database. +func NewManager(db db.Wrapper, schema, tableTemplate string) Manager { + return &defTableManager{ + Tables: make(map[string]bool), + db: db, + tableTemplate: tableTemplate, + schema: schema, + } +} + +// Exists checks if a table with the given name already is present in the DB. 
+func (t *defTableManager) Exists(tableName string) bool { + if _, ok := t.Tables[tableName]; ok { + return true + } + + commandTag, err := t.db.Exec(tableExistsTemplate, tableName, t.schema) + if err != nil { + log.Printf("E! Error checking for existence of metric table: %s\nSQL: %s\n%v", tableName, tableExistsTemplate, err) + return false + } + + if commandTag.RowsAffected() == 1 { + t.Tables[tableName] = true + return true + } + + return false +} + +// Creates a table in the database with the column names and types specified in 'colDetails' +func (t *defTableManager) CreateTable(tableName string, colDetails *utils.TargetColumns) error { + sql := t.generateCreateTableSQL(tableName, colDetails) + if _, err := t.db.Exec(sql); err != nil { + log.Printf("E! Couldn't create table: %s\nSQL: %s\n%v", tableName, sql, err) + return err + } + + t.Tables[tableName] = true + return nil +} + +// This function queries a table in the DB if the required columns in 'colDetails' are present and what is their +// data type. For existing columns it checks if the data type in the DB can safely hold the data from the metrics. +// It returns: +// - the indices of the missing columns (from colDetails) +// - or an error if +// = it couldn't discover the columns of the table in the db +// = the existing column types are incompatible with the required column types +func (t *defTableManager) FindColumnMismatch(tableName string, colDetails *utils.TargetColumns) ([]int, error) { + columnPresence, err := t.findColumnPresence(tableName, colDetails.Names) + if err != nil { + return nil, err + } + + missingCols := []int{} + for colIndex := range colDetails.Names { + colStateInDb := columnPresence[colIndex] + if !colStateInDb.exists { + missingCols = append(missingCols, colIndex) + continue + } + typeInDb := colStateInDb.dataType + typeInMetric := colDetails.DataTypes[colIndex] + if !utils.PgTypeCanContain(typeInDb, typeInMetric) { + return nil, fmt.Errorf("E! A column exists in '%s' of type '%s' required type '%s'", tableName, typeInDb, typeInMetric) + } + } + + return missingCols, nil +} + +// From the column details (colDetails) of a given measurement, 'columnIndices' specifies which are missing in the DB. +// this function will add the new columns with the required data type. +func (t *defTableManager) AddColumnsToTable(tableName string, columnIndices []int, colDetails *utils.TargetColumns) error { + fullTableName := utils.FullTableName(t.schema, tableName).Sanitize() + for _, colIndex := range columnIndices { + name := colDetails.Names[colIndex] + dataType := colDetails.DataTypes[colIndex] + addColumnQuery := fmt.Sprintf(addColumnTemplate, fullTableName, utils.QuoteIdent(name), dataType) + _, err := t.db.Exec(addColumnQuery) + if err != nil { + log.Printf("E! Couldn't add missing columns to the table: %s\nError executing: %s\n%v", tableName, addColumnQuery, err) + return err + } + } + + return nil +} + +// Populate the 'tableTemplate' (supplied as config option to the plugin) with the details of +// the required columns for the measurement to create a 'CREATE TABLE' SQL statement. +// The order, column names and data types are given in 'colDetails'. 
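+// For illustration (hypothetical metric): with the default template
+// "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})" and a measurement "cpu"
+// carrying the tag "host" and the float field "value", this would produce:
+//   CREATE TABLE IF NOT EXISTS "public"."cpu"("time" timestamptz,"host" text,"value" float8)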
+func (t *defTableManager) generateCreateTableSQL(tableName string, colDetails *utils.TargetColumns) string {
+	colDefs := make([]string, len(colDetails.Names))
+	pk := []string{}
+	for colIndex, colName := range colDetails.Names {
+		colDefs[colIndex] = utils.QuoteIdent(colName) + " " + string(colDetails.DataTypes[colIndex])
+		if colDetails.Roles[colIndex] != utils.FieldColType {
+			pk = append(pk, colName)
+		}
+	}
+
+	fullTableName := utils.FullTableName(t.schema, tableName).Sanitize()
+	query := strings.Replace(t.tableTemplate, "{TABLE}", fullTableName, -1)
+	query = strings.Replace(query, "{TABLELITERAL}", utils.QuoteLiteral(fullTableName), -1)
+	query = strings.Replace(query, "{COLUMNS}", strings.Join(colDefs, ","), -1)
+	query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1)
+
+	return query
+}
+
+// For a given table and an array of column names it checks the database whether
+// those columns exist and what their data type is.
+func (t *defTableManager) findColumnPresence(tableName string, columns []string) ([]*columnInDbDef, error) {
+	columnPresenceQuery := prepareColumnPresenceQuery(columns)
+	result, err := t.db.Query(columnPresenceQuery, t.schema, tableName)
+	if err != nil {
+		log.Printf("E! Couldn't discover columns of table: %s\nQuery failed: %s\n%v", tableName, columnPresenceQuery, err)
+		return nil, err
+	}
+	defer result.Close()
+	columnStatus := make([]*columnInDbDef, len(columns))
+	var exists bool
+	var columnName string
+	var pgLongType sql.NullString
+	currentColumn := 0
+
+	for result.Next() {
+		err := result.Scan(&columnName, &exists, &pgLongType)
+		if err != nil {
+			log.Printf("E! Couldn't discover columns of table: %s\n%v", tableName, err)
+			return nil, err
+		}
+		pgShortType := utils.PgDataType("")
+		if pgLongType.Valid {
+			pgShortType = utils.LongToShortPgType(pgLongType.String)
+		}
+		columnStatus[currentColumn] = &columnInDbDef{
+			exists:   exists,
+			dataType: pgShortType,
+		}
+		currentColumn++
+	}
+
+	return columnStatus, nil
+}
+
+func prepareColumnPresenceQuery(columns []string) string {
+	quotedColumns := make([]string, len(columns))
+	for i, column := range columns {
+		quotedColumns[i] = utils.QuoteLiteral(column)
+	}
+	return fmt.Sprintf(findColumnPresenceTemplate, strings.Join(quotedColumns, ","))
+}
diff --git a/plugins/outputs/postgresql/tables/manager_test.go b/plugins/outputs/postgresql/tables/manager_test.go
new file mode 100644
index 0000000000000..54a4fbbb39e3b
--- /dev/null
+++ b/plugins/outputs/postgresql/tables/manager_test.go
@@ -0,0 +1,139 @@
+package tables
+
+import (
+	"errors"
+	"testing"
+
+	"github.com/influxdata/telegraf/plugins/outputs/postgresql/db"
+	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
+	"github.com/jackc/pgx"
+	"github.com/stretchr/testify/assert"
+)
+
+type mockDb struct {
+	exec    pgx.CommandTag
+	execErr error
+}
+
+func (m *mockDb) Exec(query string, args ...interface{}) (pgx.CommandTag, error) {
+	return m.exec, m.execErr
+}
+func (m *mockDb) DoCopy(fullTableName *pgx.Identifier, colNames []string, batch [][]interface{}) error {
+	return nil
+}
+func (m *mockDb) Query(query string, args ...interface{}) (*pgx.Rows, error) {
+	return nil, nil
+}
+func (m *mockDb) QueryRow(query string, args ...interface{}) *pgx.Row {
+	return nil
+}
+func (m *mockDb) Close() error {
+	return nil
+}
+
+func TestNewManager(t *testing.T) {
+	db := &mockDb{}
+	res := NewManager(db, "schema", "table template").(*defTableManager)
+	assert.Equal(t, "table template", res.tableTemplate)
+	
assert.Equal(t, "schema", res.schema) + assert.Equal(t, db, res.db) +} + +func TestExists(t *testing.T) { + testCases := []struct { + desc string + in string + out bool + db *mockDb + cache map[string]bool + }{ + { + desc: "table already cached", + in: "table", + db: &mockDb{execErr: errors.New("should not have called exec")}, + cache: map[string]bool{"table": true}, + out: true, + }, { + desc: "table not cached, error on check db", + cache: map[string]bool{}, + in: "table", + db: &mockDb{execErr: errors.New("error on exec")}, + }, { + desc: "table not cached, exists in db", + cache: map[string]bool{}, + in: "table", + db: &mockDb{exec: "0 1"}, + out: true, + }, { + desc: "table not cached, doesn't exist", + cache: map[string]bool{}, + in: "table", + db: &mockDb{exec: "0 0"}, + out: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + manager := &defTableManager{ + Tables: tc.cache, + db: tc.db, + } + + got := manager.Exists(tc.in) + assert.Equal(t, tc.out, got) + }) + } +} + +func TestCreateTable(t *testing.T) { + testCases := []struct { + desc string + inT string + inCD *utils.TargetColumns + db db.Wrapper + template string + out error + }{ + { + desc: "error on exec, no table cached", + inT: "t", + inCD: &utils.TargetColumns{ + Names: []string{"time", "t", "f"}, + Target: map[string]int{"time": 0, "t": 1, "f": 2}, + DataTypes: []utils.PgDataType{"timestamptz", "text", "float8"}, + Roles: []utils.ColumnRole{utils.TimeColType, utils.TagColType, utils.FieldColType}, + }, + db: &mockDb{execErr: errors.New("error on exec")}, + template: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}) ", + out: errors.New("error on exec"), + }, { + desc: "all good, table is cached", + inT: "t", + inCD: &utils.TargetColumns{ + Names: []string{"time", "t", "f"}, + Target: map[string]int{"time": 0, "t": 1, "f": 2}, + DataTypes: []utils.PgDataType{"timestamptz", "text", "float8"}, + Roles: []utils.ColumnRole{utils.TimeColType, utils.TagColType, utils.FieldColType}, + }, + db: &mockDb{}, + template: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}) ", + out: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + manager := &defTableManager{ + Tables: map[string]bool{}, + db: tc.db, + tableTemplate: tc.template, + } + got := manager.CreateTable(tc.inT, tc.inCD) + assert.Equal(t, tc.out, got) + if tc.out == nil { + assert.True(t, manager.Tables[tc.inT]) + } + }) + } +} diff --git a/plugins/outputs/postgresql/tags_cache.go b/plugins/outputs/postgresql/tags_cache.go new file mode 100644 index 0000000000000..6761a0d3ec43f --- /dev/null +++ b/plugins/outputs/postgresql/tags_cache.go @@ -0,0 +1,159 @@ +package postgresql + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/golang/groupcache/lru" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/columns" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/db" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" +) + +const ( + selectTagIDTemplate = "SELECT tag_id FROM %s WHERE %s" +) + +// TagsCache retrieves the appropriate tagID based on the tag values +// from the database (used only when TagsAsForeignKey property selected). 
+// Also caches the LRU tagIDs +type tagsCache interface { + getTagID(target *utils.TargetColumns, metric telegraf.Metric) (int, error) + tagsTableName(measureName string) string +} + +type defTagsCache struct { + cache map[string]*lru.Cache + tagsAsJSONb bool + tagTableSuffix string + schema string + db db.Wrapper + itemsToCache int +} + +// newTagsCache returns a new implementation of the tags cache interface with LRU memoization +func newTagsCache(numItemsInCachePerMetric int, tagsAsJSONb bool, tagTableSuffix, schema string, db db.Wrapper) tagsCache { + return &defTagsCache{ + cache: map[string]*lru.Cache{}, + tagsAsJSONb: tagsAsJSONb, + tagTableSuffix: tagTableSuffix, + schema: schema, + db: db, + itemsToCache: numItemsInCachePerMetric, + } +} + +// Checks the cache for the tag set of the metric, if present returns immediately. +// Otherwise asks the database if that tag set has already been recorded. +// If not recorded, inserts a new row to the tags table for the specific measurement. +// Re-caches the tagID after checking the DB. +func (c *defTagsCache) getTagID(target *utils.TargetColumns, metric telegraf.Metric) (int, error) { + measureName := metric.Name() + tags := metric.Tags() + cacheKey := constructCacheKey(tags) + tagID, isCached := c.checkTagCache(measureName, cacheKey) + if isCached { + return tagID, nil + } + + var whereParts []string + var whereValues []interface{} + if c.tagsAsJSONb { + whereParts = []string{utils.QuoteIdent(columns.TagsJSONColumn) + "= $1"} + numTags := len(tags) + if numTags > 0 { + d, err := utils.BuildJsonb(tags) + if err != nil { + return tagID, err + } + whereValues = []interface{}{d} + } else { + whereValues = []interface{}{nil} + } + } else { + whereParts = make([]string, len(target.Names)-1) + whereValues = make([]interface{}, len(target.Names)-1) + whereIndex := 1 + for columnIndex, tagName := range target.Names[1:] { + if val, ok := tags[tagName]; ok { + whereParts[columnIndex] = utils.QuoteIdent(tagName) + " = $" + strconv.Itoa(whereIndex) + whereValues[whereIndex-1] = val + } else { + whereParts[whereIndex-1] = tagName + " IS NULL" + } + whereIndex++ + } + } + + tagsTableName := c.tagsTableName(measureName) + tagsTableFullName := utils.FullTableName(c.schema, tagsTableName).Sanitize() + // SELECT tag_id FROM measure_tag WHERE t1 = v1 AND ... tN = vN + query := fmt.Sprintf(selectTagIDTemplate, tagsTableFullName, strings.Join(whereParts, " AND ")) + err := c.db.QueryRow(query, whereValues...).Scan(&tagID) + // tag set found in DB, cache it and return + if err == nil { + c.addToCache(measureName, cacheKey, tagID) + return tagID, nil + } + + // tag set is new, insert it, and cache the tagID + query = utils.GenerateInsert(tagsTableFullName, target.Names[1:]) + " RETURNING " + columns.TagIDColumnName + err = c.db.QueryRow(query, whereValues...).Scan(&tagID) + if err == nil { + c.addToCache(measureName, cacheKey, tagID) + } + return tagID, err +} + +func (c *defTagsCache) tagsTableName(measureName string) string { + return measureName + c.tagTableSuffix +} + +// check the cache for the given 'measure' if it contains the +// tagID value for a given tag-set key. If the cache for that measure +// doesn't exist, creates it. 
+func (c *defTagsCache) checkTagCache(measure, key string) (int, bool) { + if cacheForMeasure, ok := c.cache[measure]; ok { + tagID, exists := cacheForMeasure.Get(key) + if exists { + return tagID.(int), exists + } + return 0, exists + } + + c.cache[measure] = lru.New(c.itemsToCache) + return 0, false +} + +func (c *defTagsCache) addToCache(measure, key string, tagID int) { + c.cache[measure].Add(key, tagID) +} + +// cache key is constructed from the tag set as +// {tag_a:1, tag_c:2, tag_b:3}=>'tag_a 1;tag_b 3;tag_c 2;' +func constructCacheKey(tags map[string]string) string { + numTags := len(tags) + if numTags == 0 { + return "" + } + keys := make([]string, numTags) + i := 0 + for key := range tags { + keys[i] = key + i++ + } + + sort.Strings(keys) + var whereParts strings.Builder + for _, key := range keys { + val := tags[key] + whereParts.WriteString(key) + whereParts.WriteString(" ") + whereParts.WriteString(val) + whereParts.WriteString(";") + } + return whereParts.String() +} diff --git a/plugins/outputs/postgresql/transformer.go b/plugins/outputs/postgresql/transformer.go new file mode 100644 index 0000000000000..1a843a264d40d --- /dev/null +++ b/plugins/outputs/postgresql/transformer.go @@ -0,0 +1,72 @@ +package postgresql + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/columns" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" +) + +type transformer interface { + createRowFromMetric(numColumns int, metric telegraf.Metric, targetColumns, targetTagColumns *utils.TargetColumns) ([]interface{}, error) +} + +type defTransformer struct { + tagsAsFK bool + tagsAsJSONb bool + fieldsAsJSONb bool + tagsCache tagsCache +} + +func newRowTransformer(tagsAsFK, tagsAsJSONb, fieldsAsJSONb bool, tagsCache tagsCache) transformer { + return &defTransformer{ + tagsAsFK: tagsAsFK, + tagsAsJSONb: tagsAsJSONb, + fieldsAsJSONb: fieldsAsJSONb, + tagsCache: tagsCache, + } +} + +func (dt *defTransformer) createRowFromMetric(numColumns int, metric telegraf.Metric, targetColumns, targetTagColumns *utils.TargetColumns) ([]interface{}, error) { + row := make([]interface{}, numColumns) + // handle time + row[0] = metric.Time() + // handle tags and tag id + if dt.tagsAsFK { + tagID, err := dt.tagsCache.getTagID(targetTagColumns, metric) + if err != nil { + return nil, err + } + row[1] = tagID + } else { + if dt.tagsAsJSONb { + jsonVal, err := utils.BuildJsonb(metric.Tags()) + if err != nil { + return nil, err + } + targetIndex := targetColumns.Target[columns.TagsJSONColumn] + row[targetIndex] = jsonVal + } else { + for _, tag := range metric.TagList() { + targetIndex := targetColumns.Target[tag.Key] + row[targetIndex] = tag.Value + } + } + } + + // handle fields + if dt.fieldsAsJSONb { + jsonVal, err := utils.BuildJsonb(metric.Fields()) + if err != nil { + return nil, err + } + targetIndex := targetColumns.Target[columns.FieldsJSONColumn] + row[targetIndex] = jsonVal + } else { + for _, field := range metric.FieldList() { + targetIndex := targetColumns.Target[field.Key] + row[targetIndex] = field.Value + } + } + + return row, nil +} diff --git a/plugins/outputs/postgresql/utils.go b/plugins/outputs/postgresql/utils.go deleted file mode 100644 index 801a2b6aac4d5..0000000000000 --- a/plugins/outputs/postgresql/utils.go +++ /dev/null @@ -1,68 +0,0 @@ -package postgresql - -import ( - "encoding/json" - "log" - "strings" - - "github.com/jackc/pgx" -) - -func buildJsonbTags(tags map[string]string) ([]byte, error) { - js := 
make(map[string]interface{}) - for column, value := range tags { - js[column] = value - } - - return buildJsonb(js) -} - -func buildJsonb(data map[string]interface{}) ([]byte, error) { - if len(data) > 0 { - d, err := json.Marshal(data) - if err != nil { - return nil, err - } - return d, nil - } - - return nil, nil -} - -func quoteIdent(name string) string { - return pgx.Identifier{name}.Sanitize() -} - -func quoteLiteral(name string) string { - return "'" + strings.Replace(name, "'", "''", -1) + "'" -} - -func deriveDatatype(value interface{}) string { - var datatype string - - switch value.(type) { - case bool: - datatype = "boolean" - case uint64: - datatype = "int8" - case int64: - datatype = "int8" - case float64: - datatype = "float8" - case string: - datatype = "text" - default: - datatype = "text" - log.Printf("E! Unknown datatype %T(%v)", value, value) - } - return datatype -} - -func contains(haystack []string, needle string) bool { - for _, key := range haystack { - if key == needle { - return true - } - } - return false -} diff --git a/plugins/outputs/postgresql/utils/types.go b/plugins/outputs/postgresql/utils/types.go new file mode 100644 index 0000000000000..a44017399d601 --- /dev/null +++ b/plugins/outputs/postgresql/utils/types.go @@ -0,0 +1,30 @@ +package utils + +// ColumnRole specifies the role of a column in a metric. +// It helps map the columns to the DB. +type ColumnRole int + +const ( + TimeColType ColumnRole = iota + 1 + TagsIDColType + TagColType + FieldColType +) + +// PgDataType defines a string that represents a PostgreSQL data type. +type PgDataType string + +// TargetColumns contains all the information needed to map a collection of +// metrics who belong to the same Measurement. +type TargetColumns struct { + // the names the columns will have in the database + Names []string + // column name -> order number. where to place each column in rows + // batched to the db + Target map[string]int + // the data type of each column should have in the db. used when checking + // if the schema matches or it needs updates + DataTypes []PgDataType + // the role each column has, helps properly map the metric to the db + Roles []ColumnRole +} diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go new file mode 100644 index 0000000000000..1fbd90cc05526 --- /dev/null +++ b/plugins/outputs/postgresql/utils/utils.go @@ -0,0 +1,168 @@ +package utils + +import ( + "encoding/json" + "fmt" + "log" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/jackc/pgx" +) + +const ( + insertIntoSQLTemplate = "INSERT INTO %s(%s) VALUES(%s)" +) + +// GroupMetricsByMeasurement groups the list of metrics by the measurement name. +// But the values are the index of the measure from the input list of measures. +// [m, m, m2, m2, m] => {m:[0,1,4], m2:[2,3]} +func GroupMetricsByMeasurement(m []telegraf.Metric) map[string][]int { + toReturn := make(map[string][]int) + for i, metric := range m { + var metricLocations []int + var ok bool + name := metric.Name() + if metricLocations, ok = toReturn[name]; !ok { + metricLocations = []int{} + toReturn[name] = metricLocations + } + toReturn[name] = append(metricLocations, i) + } + return toReturn +} + +// BuildJsonb returns a byte array of the json representation +// of the passed object. 
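+// For example, BuildJsonb(map[string]string{"host": "one"}) yields []byte(`{"host":"one"}`).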
+func BuildJsonb(data interface{}) ([]byte, error) {
+	d, err := json.Marshal(data)
+	if err != nil {
+		return nil, err
+	}
+	return d, nil
+}
+
+// QuoteIdent returns a sanitized string safe to use in SQL as an identifier
+func QuoteIdent(name string) string {
+	return pgx.Identifier{name}.Sanitize()
+}
+
+// QuoteLiteral returns a sanitized string safe to use in SQL as a string literal
+func QuoteLiteral(name string) string {
+	return "'" + strings.Replace(name, "'", "''", -1) + "'"
+}
+
+// FullTableName returns a sanitized table name with its schema (if supplied)
+func FullTableName(schema, name string) *pgx.Identifier {
+	if schema != "" {
+		return &pgx.Identifier{schema, name}
+	}
+
+	return &pgx.Identifier{name}
+}
+
+const (
+	PgBool                     = "boolean"
+	PgInt8                     = "int8"
+	PgInt4                     = "int4"
+	PgInteger                  = "integer"
+	PgBigInt                   = "bigint"
+	PgFloat8                   = "float8"
+	PgDoublePrecision          = "double precision"
+	PgText                     = "text"
+	PgTimestamptz              = "timestamptz"
+	PgTimestampWithTimeZone    = "timestamp with time zone"
+	PgTimestamp                = "timestamp"
+	PgTimestampWithoutTimeZone = "timestamp without time zone"
+	PgSerial                   = "serial"
+	PgJSONb                    = "jsonb"
+)
+
+// DerivePgDatatype returns the appropriate PostgreSQL data type
+// that could hold the value.
+func DerivePgDatatype(value interface{}) PgDataType {
+	switch value.(type) {
+	case bool:
+		return PgBool
+	case uint64:
+		return PgInt8
+	case int64:
+		return PgInt8
+	case int:
+		return PgInt8
+	case uint:
+		return PgInt8
+	case uint32:
+		return PgInt4
+	case int32:
+		return PgInt4
+	case float64:
+		return PgFloat8
+	case float32:
+		return PgFloat8
+	case string:
+		return PgText
+	case time.Time:
+		return PgTimestamptz
+	default:
+		log.Printf("E! Unknown datatype %T(%v)", value, value)
+		return PgText
+	}
+}
+
+// LongToShortPgType returns a PostgreSQL datatype in its short
+// notation form.
+func LongToShortPgType(longPgType string) PgDataType {
+	switch longPgType {
+	case PgInteger:
+		return PgInt4
+	case PgBigInt:
+		return PgInt8
+	case PgDoublePrecision:
+		return PgFloat8
+	case PgTimestampWithTimeZone:
+		return PgTimestamptz
+	case PgTimestampWithoutTimeZone:
+		return PgTimestamp
+	default:
+		return PgDataType(longPgType)
+	}
+}
+
+// PgTypeCanContain tells you if one PostgreSQL data type can contain
+// the values of another without data loss. 
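+// For example, an int8 column can hold int4 values and a timestamptz column can
+// hold timestamp values, but an int4 column cannot hold int8 values.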
+func PgTypeCanContain(canThis PgDataType, containThis PgDataType) bool { + if canThis == containThis { + return true + } + if canThis == PgInt8 { + return containThis == PgInt4 + } + if canThis == PgInt4 { + return containThis == PgSerial + } + if canThis == PgFloat8 { + return containThis == PgInt4 + } + if canThis == PgTimestamptz { + return containThis == PgTimestamp + } + + return false +} + +// GenerateInsert returns a SQL statement to insert values in a table +// with $X placeholders for the values +func GenerateInsert(fullSanitizedTableName string, columns []string) string { + valuePlaceholders := make([]string, len(columns)) + quotedColumns := make([]string, len(columns)) + for i, column := range columns { + valuePlaceholders[i] = fmt.Sprintf("$%d", i+1) + quotedColumns[i] = QuoteIdent(column) + } + + columnNames := strings.Join(quotedColumns, ",") + values := strings.Join(valuePlaceholders, ",") + return fmt.Sprintf(insertIntoSQLTemplate, fullSanitizedTableName, columnNames, values) +} diff --git a/plugins/outputs/postgresql/utils/utils_test.go b/plugins/outputs/postgresql/utils/utils_test.go new file mode 100644 index 0000000000000..040a7202d5c67 --- /dev/null +++ b/plugins/outputs/postgresql/utils/utils_test.go @@ -0,0 +1,138 @@ +package utils + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func TestPostgresqlQuote(t *testing.T) { + assert.Equal(t, `"foo"`, QuoteIdent("foo")) + assert.Equal(t, `"fo'o"`, QuoteIdent("fo'o")) + assert.Equal(t, `"fo""o"`, QuoteIdent("fo\"o")) + + assert.Equal(t, "'foo'", QuoteLiteral("foo")) + assert.Equal(t, "'fo''o'", QuoteLiteral("fo'o")) + assert.Equal(t, "'fo\"o'", QuoteLiteral("fo\"o")) +} + +func TestBuildJsonb(t *testing.T) { + testCases := []struct { + desc string + in interface{} + out string + }{ + { + desc: "simple map", + in: map[string]int{"a": 1}, + out: `{"a":1}`, + }, { + desc: "single number", + in: 1, + out: `1`, + }, { + desc: "interface map", + in: map[int]interface{}{1: "a"}, + out: `{"1":"a"}`, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + res, err := BuildJsonb(tc.in) + assert.Nil(t, err) + assert.Equal(t, tc.out, string(res)) + + }) + } +} + +func TestFullTableName(t *testing.T) { + assert.Equal(t, `"tableName"`, FullTableName("", "tableName").Sanitize()) + assert.Equal(t, `"table name"`, FullTableName("", "table name").Sanitize()) + assert.Equal(t, `"table.name"`, FullTableName("", "table.name").Sanitize()) + assert.Equal(t, `"table"."name"`, FullTableName("table", "name").Sanitize()) + assert.Equal(t, `"schema name"."table name"`, FullTableName("schema name", "table name").Sanitize()) +} + +func TestDerivePgDataType(t *testing.T) { + assert.Equal(t, PgDataType("boolean"), DerivePgDatatype(true)) + assert.Equal(t, PgDataType("int8"), DerivePgDatatype(uint64(1))) + assert.Equal(t, PgDataType("int8"), DerivePgDatatype(1)) + assert.Equal(t, PgDataType("int8"), DerivePgDatatype(uint(1))) + assert.Equal(t, PgDataType("int8"), DerivePgDatatype(int64(1))) + assert.Equal(t, PgDataType("int4"), DerivePgDatatype(uint32(1))) + assert.Equal(t, PgDataType("int4"), DerivePgDatatype(int32(1))) + assert.Equal(t, PgDataType("float8"), DerivePgDatatype(float64(1.0))) + assert.Equal(t, PgDataType("float8"), DerivePgDatatype(float32(1.0))) + assert.Equal(t, PgDataType("text"), DerivePgDatatype("")) + assert.Equal(t, PgDataType("timestamptz"), DerivePgDatatype(time.Now())) + assert.Equal(t, 
PgDataType("text"), DerivePgDatatype([]int{})) +} + +func TestLongToShortPgType(t *testing.T) { + assert.Equal(t, PgDataType("boolean"), LongToShortPgType("boolean")) + assert.Equal(t, PgDataType("int4"), LongToShortPgType("integer")) + assert.Equal(t, PgDataType("int8"), LongToShortPgType("bigint")) + assert.Equal(t, PgDataType("float8"), LongToShortPgType("double precision")) + assert.Equal(t, PgDataType("timestamptz"), LongToShortPgType("timestamp with time zone")) + assert.Equal(t, PgDataType("timestamp"), LongToShortPgType("timestamp without time zone")) + assert.Equal(t, PgDataType("jsonb"), LongToShortPgType("jsonb")) + assert.Equal(t, PgDataType("text"), LongToShortPgType("text")) + assert.Equal(t, PgDataType("unknown"), LongToShortPgType("unknown")) +} + +func TestPgTypeCanContain(t *testing.T) { + assert.True(t, PgTypeCanContain(PgDataType("bogus same"), PgDataType("bogus same"))) + assert.True(t, PgTypeCanContain(PgDataType("int8"), PgDataType("int4"))) + assert.False(t, PgTypeCanContain(PgDataType("int8"), PgDataType("float8"))) + assert.False(t, PgTypeCanContain(PgDataType("int8"), PgDataType("timestamptz"))) + + assert.True(t, PgTypeCanContain(PgDataType("int4"), PgDataType("serial"))) + assert.True(t, PgTypeCanContain(PgDataType("int8"), PgDataType("int4"))) + assert.False(t, PgTypeCanContain(PgDataType("int4"), PgDataType("int8"))) + + assert.False(t, PgTypeCanContain(PgDataType("float8"), PgDataType("int8"))) + assert.True(t, PgTypeCanContain(PgDataType("float8"), PgDataType("int4"))) + + assert.True(t, PgTypeCanContain(PgDataType("timestamptz"), PgDataType("timestamp"))) + + assert.False(t, PgTypeCanContain(PgDataType("text"), PgDataType("timestamp"))) +} + +func TestGroupMetricsByMeasurement(t *testing.T) { + m11, _ := metric.New("m", map[string]string{}, map[string]interface{}{}, time.Now()) + m12, _ := metric.New("m", map[string]string{"t1": "tv1"}, map[string]interface{}{"f1": 1}, time.Now()) + m13, _ := metric.New("m", map[string]string{}, map[string]interface{}{"f2": 2}, time.Now()) + + m21, _ := metric.New("m2", map[string]string{}, map[string]interface{}{}, time.Now()) + m22, _ := metric.New("m2", map[string]string{"t1": "tv1"}, map[string]interface{}{"f1": 1}, time.Now()) + m23, _ := metric.New("m2", map[string]string{}, map[string]interface{}{"f2": 2}, time.Now()) + in := []telegraf.Metric{m11, m12, m21, m22, m13, m23} + expected := map[string][]int{ + "m": {0, 1, 4}, + "m2": {2, 3, 5}, + } + got := GroupMetricsByMeasurement(in) + assert.Equal(t, expected, got) +} + +func TestGenerateInsert(t *testing.T) { + + sql := GenerateInsert(`"m"`, []string{"time", "f"}) + assert.Equal(t, `INSERT INTO "m"("time","f") VALUES($1,$2)`, sql) + + sql = GenerateInsert(`"m"`, []string{"time", "i"}) + assert.Equal(t, `INSERT INTO "m"("time","i") VALUES($1,$2)`, sql) + + sql = GenerateInsert(`"public"."m"`, []string{"time", "f", "i"}) + assert.Equal(t, `INSERT INTO "public"."m"("time","f","i") VALUES($1,$2,$3)`, sql) + + sql = GenerateInsert(`"public"."m n"`, []string{"time", "k", "i"}) + assert.Equal(t, `INSERT INTO "public"."m n"("time","k","i") VALUES($1,$2,$3)`, sql) + + sql = GenerateInsert("m", []string{"time", "k1", "k2", "i"}) + assert.Equal(t, `INSERT INTO m("time","k1","k2","i") VALUES($1,$2,$3,$4)`, sql) +} From e559ac59e6920d1116311008dacbb7ce8eb1d8bd Mon Sep 17 00:00:00 2001 From: Blagoj Atanasovski Date: Thu, 18 Jul 2019 11:37:41 +0200 Subject: [PATCH 057/121] Attempt reconnect to db if conn is lost and support PG env variables --- 
plugins/outputs/postgresql/README.md | 11 ++- .../postgresql/columns/column_mapper.go | 4 +- plugins/outputs/postgresql/db/db_wrapper.go | 40 +++++++- .../outputs/postgresql/db/db_wrapper_test.go | 21 ++++ plugins/outputs/postgresql/postgresql.go | 61 +++++++++--- .../postgresql/postgresql_integration_test.go | 4 +- plugins/outputs/postgresql/postgresql_test.go | 95 +++++++++++++------ .../outputs/postgresql/tables/manager_test.go | 2 + .../tables/{manager.go => table_manager.go} | 8 ++ plugins/outputs/postgresql/tags_cache.go | 5 + plugins/outputs/postgresql/utils/utils.go | 18 +--- 11 files changed, 206 insertions(+), 63 deletions(-) create mode 100644 plugins/outputs/postgresql/db/db_wrapper_test.go rename plugins/outputs/postgresql/tables/{manager.go => table_manager.go} (96%) diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md index 2a38b7a08ab5a..c3d40d3babd4c 100644 --- a/plugins/outputs/postgresql/README.md +++ b/plugins/outputs/postgresql/README.md @@ -8,7 +8,16 @@ The plugin manages the schema automatically updating missing columns, and checki ```toml # Send metrics to postgres [[outputs.postgresql]] - address = "host=localhost user=postgres sslmode=verify-full" + ## specify address via a url: + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. Also supported are PG environment vars + ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE + ## all supported vars here: https://www.postgresql.org/docs/current/libpq-envars.html + connection = "host=localhost user=postgres sslmode=verify-full" ## Update existing tables to match the incoming metrics. Default is true # do_schema_updates = true diff --git a/plugins/outputs/postgresql/columns/column_mapper.go b/plugins/outputs/postgresql/columns/column_mapper.go index 99692997ab904..78287998f5f08 100644 --- a/plugins/outputs/postgresql/columns/column_mapper.go +++ b/plugins/outputs/postgresql/columns/column_mapper.go @@ -7,7 +7,7 @@ import ( // Mapper knows how to generate the column details for the main and tags table in the db type Mapper interface { - // Iterate through an array of 'metrics' visiting only those indexed by 'indices' + // Target iterates through an array of 'metrics' visiting only those indexed by 'indices' // and depending on 'tagsAsFK', 'tagsAsJSON', and 'fieldsAsJSON' generate the // desired columns (their name, type and which role they play) for both the // main metrics table in the DB, and if tagsAsFK == true for the tags table. @@ -32,7 +32,7 @@ func NewMapper(tagsAsFK, tagsAsJSON, fieldsAsJSON bool) Mapper { } } -// Iterate through an array of 'metrics' visiting only those indexed by 'indices' +// Target iterates through an array of 'metrics' visiting only those indexed by 'indices' // and depending on 'tagsAsFK', 'tagsAsJSON', and 'fieldsAsJSON' generate the // desired columns (their name, type and which role they play) for both the // main metrics table in the DB, and if tagsAsFK == true for the tags table. 
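The db_wrapper change that follows pairs pgx's client-side IsAlive flag with a cheap round-trip
query, since the flag alone cannot notice a connection the server has already dropped. A minimal
sketch of that probe pattern, assuming a pgx v3 *pgx.Conn (the helper name is illustrative):

```go
package db

import "github.com/jackc/pgx"

// connAlive forces a round trip with a no-op query so that a dead
// connection is detected instead of trusting client-side state alone.
func connAlive(conn *pgx.Conn) bool {
	var one int64
	return conn.QueryRow("SELECT 1").Scan(&one) == nil
}
```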
diff --git a/plugins/outputs/postgresql/db/db_wrapper.go b/plugins/outputs/postgresql/db/db_wrapper.go index c6cf999b1bc0c..4a95d12b9cf09 100644 --- a/plugins/outputs/postgresql/db/db_wrapper.go +++ b/plugins/outputs/postgresql/db/db_wrapper.go @@ -8,6 +8,8 @@ import ( _ "github.com/jackc/pgx/stdlib" ) +const checkConnQuery = "SELECT 1" + // Wrapper defines an interface that encapsulates communication with a DB. type Wrapper interface { Exec(query string, args ...interface{}) (pgx.CommandTag, error) @@ -15,6 +17,7 @@ type Wrapper interface { Query(query string, args ...interface{}) (*pgx.Rows, error) QueryRow(query string, args ...interface{}) *pgx.Row Close() error + IsAlive() bool } type defaultDbWrapper struct { @@ -23,13 +26,12 @@ type defaultDbWrapper struct { // NewWrapper returns an implementation of the db.Wrapper interface // that issues queries to a PG database. -func NewWrapper(address string) (Wrapper, error) { - connConfig, err := pgx.ParseConnectionString(address) +func NewWrapper(connection string) (Wrapper, error) { + connConfig, err := parseConnectionString(connection) if err != nil { - log.Printf("E! Couldn't parse connection address: %s\n%v", address, err) return nil, err } - db, err := pgx.Connect(connConfig) + db, err := pgx.Connect(*connConfig) if err != nil { log.Printf("E! Couldn't connect to server\n%v", err) return nil, err @@ -63,3 +65,33 @@ func (d *defaultDbWrapper) Query(query string, args ...interface{}) (*pgx.Rows, func (d *defaultDbWrapper) QueryRow(query string, args ...interface{}) *pgx.Row { return d.db.QueryRow(query, args...) } + +func (d *defaultDbWrapper) IsAlive() bool { + if !d.db.IsAlive() { + return false + } + row := d.db.QueryRow(checkConnQuery) + var one int64 + if err := row.Scan(&one); err != nil { + log.Printf("W! Error given on 'is conn alive':\n%v", err) + return false + } + return true +} + +func parseConnectionString(connection string) (*pgx.ConnConfig, error) { + envConnConfig, err := pgx.ParseEnvLibpq() + if err != nil { + log.Println("E! couldn't check PG environment variables") + return nil, err + } + + connConfig, err := pgx.ParseConnectionString(connection) + if err != nil { + log.Printf("E! 
Couldn't parse connection string: %s\n%v", connection, err) + return nil, err + } + + connConfig = envConnConfig.Merge(connConfig) + return &connConfig, nil +} diff --git a/plugins/outputs/postgresql/db/db_wrapper_test.go b/plugins/outputs/postgresql/db/db_wrapper_test.go new file mode 100644 index 0000000000000..ca6865b7ea56c --- /dev/null +++ b/plugins/outputs/postgresql/db/db_wrapper_test.go @@ -0,0 +1,21 @@ +package db + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseConnectionStringPgEnvOverride(t *testing.T) { + config, err := parseConnectionString("dbname=test") + assert.NoError(t, err) + assert.Equal(t, "test", config.Database) + assert.Equal(t, "", config.Password) + + os.Setenv("PGPASSWORD", "pass") + config, err = parseConnectionString("dbname=test") + assert.NoError(t, err) + assert.Equal(t, "test", config.Database) + assert.Equal(t, "pass", config.Password) +} diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index c59f5cbb87cea..cfaa407d8b24a 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -2,6 +2,7 @@ package postgresql import ( "log" + "sync" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" @@ -12,8 +13,7 @@ import ( ) type Postgresql struct { - db db.Wrapper - Address string + Connection string Schema string DoSchemaUpdates bool TagsAsForeignkeys bool @@ -22,10 +22,16 @@ type Postgresql struct { FieldsAsJsonb bool TableTemplate string TagTableSuffix string - tables tables.Manager - tagCache tagsCache - rows transformer - columns columns.Mapper + + // lock for the assignment of the dbWrapper, + // table manager and tags cache + dbConnLock sync.Mutex + db db.Wrapper + tables tables.Manager + tagCache tagsCache + + rows transformer + columns columns.Mapper } func init() { @@ -44,12 +50,16 @@ func newPostgresql() *Postgresql { // Connect establishes a connection to the target database and prepares the cache func (p *Postgresql) Connect() error { - db, err := db.NewWrapper(p.Address) + p.dbConnLock.Lock() + defer p.dbConnLock.Unlock() + + // set p.db with a lock + db, err := db.NewWrapper(p.Connection) if err != nil { return err } p.db = db - p.tables = tables.NewManager(db, p.Schema, p.TableTemplate) + p.tables = tables.NewManager(p.db, p.Schema, p.TableTemplate) if p.TagsAsForeignkeys { p.tagCache = newTagsCache(p.CachedTagsetsPerMeasurement, p.TagsAsJsonb, p.TagTableSuffix, p.Schema, p.db) @@ -61,7 +71,8 @@ func (p *Postgresql) Connect() error { // Close closes the connection to the database func (p *Postgresql) Close() error { - p.tagCache = nil + p.dbConnLock.Lock() + defer p.dbConnLock.Unlock() p.tagCache = nil p.tables = nil return p.db.Close() @@ -74,14 +85,16 @@ var sampleConfig = ` ## or a simple string: ## host=localhost user=pqotest password=... sslmode=... dbname=app_production ## - ## All connection parameters are optional. + ## All connection parameters are optional. Also supported are PG environment vars + ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE + ## all supported vars here: https://www.postgresql.org/docs/current/libpq-envars.html ## ## Without the dbname parameter, the driver will default to a database ## with the same name as the user. This dbname is just for instantiating a ## connection with the server and doesn't restrict the databases we are trying ## to grab metrics for. 
## - address = "host=localhost user=postgres sslmode=verify-full" + connection = "host=localhost user=postgres sslmode=verify-full" ## Update existing tables to match the incoming metrics automatically. Default is true # do_schema_updates = true @@ -121,6 +134,14 @@ func (p *Postgresql) SampleConfig() string { return sampleConfig } func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } func (p *Postgresql) Write(metrics []telegraf.Metric) error { + if !p.checkConnection() { + log.Println("W! Connection is not alive, attempting reset") + if err := p.resetConnection(); err != nil { + log.Printf("E! Could not reset connection:\n%v", err) + return err + } + log.Println("I! Connection established again") + } metricsByMeasurement := utils.GroupMetricsByMeasurement(metrics) for measureName, indices := range metricsByMeasurement { err := p.writeMetricsFromMeasure(measureName, indices, metrics) @@ -182,3 +203,21 @@ func (p *Postgresql) prepareTable(tableName string, details *utils.TargetColumns } return p.tables.AddColumnsToTable(tableName, missingColumns, details) } + +func (p *Postgresql) checkConnection() bool { + p.dbConnLock.Lock() + defer p.dbConnLock.Unlock() + return p.db != nil && p.db.IsAlive() +} + +func (p *Postgresql) resetConnection() error { + p.dbConnLock.Lock() + defer p.dbConnLock.Unlock() + var err error + p.db, err = db.NewWrapper(p.Connection) + p.tables.SetConnection(p.db) + if p.tagCache != nil { + p.tagCache.setDb(p.db) + } + return err +} diff --git a/plugins/outputs/postgresql/postgresql_integration_test.go b/plugins/outputs/postgresql/postgresql_integration_test.go index 457665621c033..355cdc4cc352d 100644 --- a/plugins/outputs/postgresql/postgresql_integration_test.go +++ b/plugins/outputs/postgresql/postgresql_integration_test.go @@ -19,12 +19,12 @@ func prepareAndConnect(t *testing.T, foreignTags, jsonTags, jsonFields bool) (te t.Skip("Skipping integration test in short mode") } - testAddress := "postgres://postgres@localhost:5432/postgres?sslmode=disable" + testAddress := "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable" testMetric := testMetric("metric name", "tag1", int(1)) postgres := &Postgresql{ - Address: testAddress, + Connection: testAddress, Schema: "public", TagsAsForeignkeys: foreignTags, TagsAsJsonb: jsonTags, diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 3c988bba800b2..7efff2e1dcd77 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -1,47 +1,20 @@ package postgresql import ( - "fmt" + "sync" "testing" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/postgresql/columns" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/db" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" "github.com/jackc/pgx" _ "github.com/jackc/pgx/stdlib" "github.com/stretchr/testify/assert" ) -func TestWriteAllInOnePlace(t *testing.T) { - timestamp := time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC) - oneMetric, _ := metric.New("m", map[string]string{"t": "tv"}, map[string]interface{}{"f": 1}, timestamp) - twoMetric, _ := metric.New("m", map[string]string{"t2": "tv2"}, map[string]interface{}{"f2": 2}, timestamp) - threeMetric, _ := metric.New("m", map[string]string{"t": "tv", "t2": "tv2"}, map[string]interface{}{"f": 3, "f2": 4}, timestamp) - fourMetric, _ := 
metric.New("m2", map[string]string{"t": "tv", "t2": "tv2"}, map[string]interface{}{"f": 5, "f2": 6}, timestamp) - - p := &Postgresql{ - Schema: "public", - TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", - TagTableSuffix: "_tag", - DoSchemaUpdates: true, - Address: "host=localhost user=postgres password=postgres sslmode=disable dbname=postgres", - } - p.Connect() - err := p.Write([]telegraf.Metric{oneMetric, twoMetric, fourMetric, threeMetric}) - if err != nil { - fmt.Println(err.Error()) - t.Fail() - } - fiveMetric, _ := metric.New("m", map[string]string{"t": "tv", "t3": "tv3"}, map[string]interface{}{"f": 7, "f3": 8}, timestamp) - err = p.Write([]telegraf.Metric{fiveMetric}) - if err != nil { - fmt.Println(err.Error()) - t.Fail() - } -} - func TestPostgresqlMetricsFromMeasure(t *testing.T) { postgreSQL, metrics, metricIndices := prepareAllColumnsInOnePlaceNoJSON() err := postgreSQL.writeMetricsFromMeasure(metrics[0].Name(), metricIndices["m"], metrics) @@ -51,6 +24,46 @@ func TestPostgresqlMetricsFromMeasure(t *testing.T) { assert.NoError(t, err) } +func TestPostgresqlIsAliveCalledOnWrite(t *testing.T) { + postgreSQL, metrics, _ := prepareAllColumnsInOnePlaceNoJSON() + mockedDb := postgreSQL.db.(*mockDb) + mockedDb.isAliveResponses = []bool{true} + err := postgreSQL.Write(metrics[:1]) + assert.NoError(t, err) + assert.Equal(t, 1, mockedDb.currentIsAliveResponse) +} + +func TestPostgresqlDbAssignmentLock(t *testing.T) { + postgreSQL, metrics, _ := prepareAllColumnsInOnePlaceNoJSON() + mockedDb := postgreSQL.db.(*mockDb) + mockedDb.isAliveResponses = []bool{true} + mockedDb.secondsToSleepInIsAlive = 3 + var endOfWrite, startOfWrite, startOfReset, endOfReset time.Time + var wg sync.WaitGroup + wg.Add(2) + go func() { + startOfWrite = time.Now() + err := postgreSQL.Write(metrics[:1]) + assert.NoError(t, err) + endOfWrite = time.Now() + wg.Done() + }() + time.Sleep(time.Second) + + go func() { + startOfReset = time.Now() + postgreSQL.dbConnLock.Lock() + time.Sleep(time.Second) + postgreSQL.dbConnLock.Unlock() + endOfReset = time.Now() + wg.Done() + }() + wg.Wait() + assert.True(t, startOfWrite.Before(startOfReset)) + assert.True(t, startOfReset.Before(endOfWrite)) + assert.True(t, endOfWrite.Before(endOfReset)) +} + func prepareAllColumnsInOnePlaceNoJSON() (*Postgresql, []telegraf.Metric, map[string][]int) { oneMetric, _ := metric.New("m", map[string]string{"t": "tv"}, map[string]interface{}{"f": 1}, time.Now()) twoMetric, _ := metric.New("m", map[string]string{"t2": "tv2"}, map[string]interface{}{"f2": 2}, time.Now()) @@ -63,6 +76,7 @@ func prepareAllColumnsInOnePlaceNoJSON() (*Postgresql, []telegraf.Metric, map[st rows: &mockTransformer{rows: [][]interface{}{nil, nil, nil}}, columns: columns.NewMapper(false, false, false), db: &mockDb{}, + dbConnLock: sync.Mutex{}, }, []telegraf.Metric{ oneMetric, twoMetric, threeMetric, }, map[string][]int{ @@ -81,6 +95,7 @@ func prepareAllColumnsInOnePlaceTagsAndFieldsJSON() (*Postgresql, []telegraf.Met TagsAsForeignkeys: false, TagsAsJsonb: true, FieldsAsJsonb: true, + dbConnLock: sync.Mutex{}, tables: &mockTables{t: map[string]bool{"m": true}, missingCols: []int{}}, columns: columns.NewMapper(false, true, true), rows: &mockTransformer{rows: [][]interface{}{nil, nil, nil}}, @@ -116,6 +131,7 @@ func (m *mockTables) FindColumnMismatch(tableName string, colDetails *utils.Targ func (m *mockTables) AddColumnsToTable(tableName string, columnIndices []int, colDetails *utils.TargetColumns) error { return m.addColsErr } +func (m *mockTables) 
SetConnection(db db.Wrapper) {}

type mockTransformer struct {
 rows [][]interface{}
@@ -133,12 +149,16 @@
 }
 
 type mockDb struct {
- doCopyErr error
+ doCopyErr               error
+ isAliveResponses        []bool
+ currentIsAliveResponse  int
+ secondsToSleepInIsAlive int64
 }
 
 func (m *mockDb) Exec(query string, args ...interface{}) (pgx.CommandTag, error) {
 return "", nil
 }
+
 func (m *mockDb) DoCopy(fullTableName *pgx.Identifier, colNames []string, batch [][]interface{}) error {
 return m.doCopyErr
 }
@@ -151,3 +171,18 @@ func (m *mockDb) QueryRow(query string, args ...interface{}) *pgx.Row {
 func (m *mockDb) Close() error {
 return nil
 }
+
+func (m *mockDb) IsAlive() bool {
+ if m.secondsToSleepInIsAlive > 0 {
+ time.Sleep(time.Duration(m.secondsToSleepInIsAlive) * time.Second)
+ }
+ if m.isAliveResponses == nil {
+ return true
+ }
+ if m.currentIsAliveResponse >= len(m.isAliveResponses) {
+ return m.isAliveResponses[len(m.isAliveResponses)-1]
+ }
+ which := m.currentIsAliveResponse
+ m.currentIsAliveResponse++
+ return m.isAliveResponses[which]
+}
diff --git a/plugins/outputs/postgresql/tables/manager_test.go b/plugins/outputs/postgresql/tables/manager_test.go
index 54a4fbbb39e3b..cf17956adb247 100644
--- a/plugins/outputs/postgresql/tables/manager_test.go
+++ b/plugins/outputs/postgresql/tables/manager_test.go
@@ -31,6 +31,8 @@ func (m *mockDb) Close() error {
 return nil
 }
 
+func (m *mockDb) IsAlive() bool { return true }
+
 func TestNewManager(t *testing.T) {
 db := &mockDb{}
 res := NewManager(db, "schema", "table template").(*defTableManager)
diff --git a/plugins/outputs/postgresql/tables/manager.go b/plugins/outputs/postgresql/tables/table_manager.go
similarity index 96%
rename from plugins/outputs/postgresql/tables/manager.go
rename to plugins/outputs/postgresql/tables/table_manager.go
index f99ca0c5c72d0..eaf69c5ede742 100644
--- a/plugins/outputs/postgresql/tables/manager.go
+++ b/plugins/outputs/postgresql/tables/table_manager.go
@@ -41,6 +41,7 @@ type Manager interface {
 // From the column details (colDetails) of a given measurement, 'columnIndices' specifies which are missing in the DB.
 // this function will add the new columns with the required data type.
 AddColumnsToTable(tableName string, columnIndices []int, colDetails *utils.TargetColumns) error
+ SetConnection(db db.Wrapper)
 }
 
 type defTableManager struct {
@@ -61,6 +62,13 @@ func NewManager(db db.Wrapper, schema, tableTemplate string) Manager {
 }
 }
 
+// SetConnection sets a new db connection, used only when the previous one
+// was killed or restarted. It also clears the local cache of existing tables.
+func (t *defTableManager) SetConnection(db db.Wrapper) {
+ t.db = db
+ t.Tables = make(map[string]bool)
+}
+
 // Exists checks if a table with the given name already is present in the DB. 
func (t *defTableManager) Exists(tableName string) bool {
 if _, ok := t.Tables[tableName]; ok {
diff --git a/plugins/outputs/postgresql/tags_cache.go b/plugins/outputs/postgresql/tags_cache.go
index 6761a0d3ec43f..29640270d4d87 100644
--- a/plugins/outputs/postgresql/tags_cache.go
+++ b/plugins/outputs/postgresql/tags_cache.go
@@ -23,6 +23,7 @@ const (
 type tagsCache interface {
 getTagID(target *utils.TargetColumns, metric telegraf.Metric) (int, error)
 tagsTableName(measureName string) string
+ setDb(db db.Wrapper)
 }
 
 type defTagsCache struct {
@@ -46,6 +47,10 @@ func newTagsCache(numItemsInCachePerMetric int, tagsAsJSONb bool, tagTableSuffix
 }
 }
 
+func (c *defTagsCache) setDb(db db.Wrapper) {
+ c.db = db
+}
+
 // Checks the cache for the tag set of the metric, if present returns immediately.
 // Otherwise asks the database if that tag set has already been recorded.
 // If not recorded, inserts a new row to the tags table for the specific measurement.
diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go
index 1fbd90cc05526..649f4728460af 100644
--- a/plugins/outputs/postgresql/utils/utils.go
+++ b/plugins/outputs/postgresql/utils/utils.go
@@ -62,6 +62,8 @@ func FullTableName(schema, name string) *pgx.Identifier {
 return &pgx.Identifier{name}
 }
 
+// Constants for naming PostgreSQL data types both in
+// their short and long versions.
 const (
 PgBool = "boolean"
 PgInt8 = "int8"
@@ -85,21 +87,11 @@ func DerivePgDatatype(value interface{}) PgDataType {
 switch value.(type) {
 case bool:
 return PgBool
- case uint64:
+ case uint64, int64, int, uint:
 return PgInt8
- case int64:
- return PgInt8
- case int:
- return PgInt8
- case uint:
- return PgInt8
- case uint32:
+ case uint32, int32:
 return PgInt4
- case int32:
- return PgInt4
- case float64:
- return PgFloat8
- case float32:
+ case float64, float32:
 return PgFloat8
 case string:
 return PgText

From 44fabb84fe8b503901afcdf9eaddeb5ef54015f8 Mon Sep 17 00:00:00 2001
From: Blagoj Atanasovski
Date: Mon, 23 Dec 2019 15:07:55 +0100
Subject: [PATCH 058/121] error thrown on insufficient permissions

---
 plugins/outputs/postgresql/README.md | 5 ++
 plugins/outputs/postgresql/postgresql.go | 1 +
 .../postgresql/tables/table_manager.go | 71 ++++++++++---------
 3 files changed, 43 insertions(+), 34 deletions(-)

diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md
index c3d40d3babd4c..6e80c61d868cb 100644
--- a/plugins/outputs/postgresql/README.md
+++ b/plugins/outputs/postgresql/README.md
@@ -3,6 +3,11 @@
 This output plugin writes all metrics to PostgreSQL.
The plugin manages the schema automatically updating missing columns, and checking if existing ones are of the proper type.
+**_WARNING_**: In order to enable automatic schema updates, the connection to the database must
+be established with a user that has sufficient permissions: either an admin, or an owner of the
+target schema. 
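+For illustration, one way to provision such a user (the role and schema names here are
+examples only, adjust to your deployment):
+
+```sql
+-- "sufficient permissions" in practice means CREATE/USAGE on the target
+-- schema, or owning the schema outright; names below are hypothetical.
+CREATE ROLE telegraf WITH LOGIN PASSWORD 'changeme';
+GRANT USAGE, CREATE ON SCHEMA public TO telegraf;
+-- alternatively, make the role the schema owner:
+-- ALTER SCHEMA public OWNER TO telegraf;
+```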
+ + ### Configuration: ```toml diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index cfaa407d8b24a..569fd2cb73657 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -146,6 +146,7 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { for measureName, indices := range metricsByMeasurement { err := p.writeMetricsFromMeasure(measureName, indices, metrics) if err != nil { + log.Printf("copy error: %v", err) return err } } diff --git a/plugins/outputs/postgresql/tables/table_manager.go b/plugins/outputs/postgresql/tables/table_manager.go index eaf69c5ede742..e7aba2efe7156 100644 --- a/plugins/outputs/postgresql/tables/table_manager.go +++ b/plugins/outputs/postgresql/tables/table_manager.go @@ -1,8 +1,8 @@ package tables import ( - "database/sql" "fmt" + "github.com/pkg/errors" "log" "strings" @@ -11,11 +11,9 @@ import ( ) const ( - addColumnTemplate = "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;" - tableExistsTemplate = "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname = $2;" - findColumnPresenceTemplate = "WITH available AS (SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = $1 and table_name = $2)," + - "required AS (SELECT c FROM unnest(array [%s]) AS c) " + - "SELECT required.c as column_name, available.column_name IS NOT NULL as exists, available.data_type FROM required LEFT JOIN available ON required.c = available.column_name;" + addColumnTemplate = "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;" + tableExistsTemplate = "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname = $2;" + findExistingColumnsTemplate = "SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = $1 and table_name = $2" ) type columnInDbDef struct { @@ -114,7 +112,7 @@ func (t *defTableManager) FindColumnMismatch(tableName string, colDetails *utils return nil, err } - missingCols := []int{} + var missingCols []int for colIndex := range colDetails.Names { colStateInDb := columnPresence[colIndex] if !colStateInDb.exists { @@ -154,7 +152,7 @@ func (t *defTableManager) AddColumnsToTable(tableName string, columnIndices []in // The order, column names and data types are given in 'colDetails'. func (t *defTableManager) generateCreateTableSQL(tableName string, colDetails *utils.TargetColumns) string { colDefs := make([]string, len(colDetails.Names)) - pk := []string{} + var pk []string for colIndex, colName := range colDetails.Names { colDefs[colIndex] = utils.QuoteIdent(colName) + " " + string(colDetails.DataTypes[colIndex]) if colDetails.Roles[colIndex] != utils.FieldColType { @@ -174,43 +172,48 @@ func (t *defTableManager) generateCreateTableSQL(tableName string, colDetails *u // For a given table and an array of column names it checks the database if those columns exist, // and what's their data type. func (t *defTableManager) findColumnPresence(tableName string, columns []string) ([]*columnInDbDef, error) { - columnPresenseQuery := prepareColumnPresenceQuery(columns) - result, err := t.db.Query(columnPresenseQuery, t.schema, tableName) + existingCols, err := t.findExistingColumns(tableName) if err != nil { - log.Printf("E! 
Couldn't discover columns of table: %s\nQuery failed: %s\n%v", tableName, columnPresenseQuery, err) return nil, err } - defer result.Close() - columnStatus := make([]*columnInDbDef, len(columns)) - var exists bool - var columnName string - var pgLongType sql.NullString - currentColumn := 0 + if len(existingCols) == 0 { + log.Printf("E! Table exists, but no columns discovered, user doesn't have enough permissions") + return nil, errors.New("Table exists, but no columns discovered, user doesn't have enough permissions") + } - for result.Next() { - err := result.Scan(&columnName, &exists, &pgLongType) - if err != nil { - log.Printf("E! Couldn't discover columns of table: %s\n%v", tableName, err) - return nil, err - } - pgShortType := utils.PgDataType("") - if pgLongType.Valid { - pgShortType = utils.LongToShortPgType(pgLongType.String) + columnStatus := make([]*columnInDbDef, len(columns)) + for i := 0; i < len(columns); i++ { + currentColumn := columns[i] + colType, exists := existingCols[currentColumn] + if !exists { + colType = "" } - columnStatus[currentColumn] = &columnInDbDef{ + columnStatus[i] = &columnInDbDef{ exists: exists, - dataType: pgShortType, + dataType: colType, } - currentColumn++ } return columnStatus, nil } -func prepareColumnPresenceQuery(columns []string) string { - quotedColumns := make([]string, len(columns)) - for i, column := range columns { - quotedColumns[i] = utils.QuoteLiteral(column) +func (t *defTableManager) findExistingColumns(table string) (map[string]utils.PgDataType, error) { + rows, err := t.db.Query(findExistingColumnsTemplate, t.schema, table) + if err != nil { + log.Printf("E! Couldn't discover existing columns of table: %s\n%v", table, err) + return nil, errors.Wrap(err, "could not discover existing columns") + } + defer rows.Close() + cols := make(map[string]utils.PgDataType) + for rows.Next() { + var colName, colTypeStr string + err := rows.Scan(&colName, &colTypeStr) + if err != nil { + log.Printf("E! Couldn't discover columns of table: %s\n%v", table, err) + return nil, err + } + pgShortType := utils.LongToShortPgType(colTypeStr) + cols[colName] = pgShortType } - return fmt.Sprintf(findColumnPresenceTemplate, strings.Join(quotedColumns, ",")) + return cols, nil } From ac6369693f8a21e61a68ef4872cc1a08e2f4d74e Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 10 Nov 2020 23:12:26 -0500 Subject: [PATCH 059/121] outputs.postgresql: fix tag table creation to ignore template commit 69fea1c broke tags_as_foreign keys when used with table_template. The template was being used for creation of the tag table when it should not be. 
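Concretely, with a config like the sketch below (the COMMENT statement is purely
illustrative), only the metric tables go through the custom template after this fix;
the companion tag tables are always created with the plain built-in
`CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})` statement:

```toml
[[outputs.postgresql]]
  connection = "host=localhost user=postgres sslmode=verify-full"
  tags_as_foreignkeys = true
  ## applied to metric tables only; "cpu_tag" and friends use the built-in statement
  table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}); COMMENT ON TABLE {TABLE} IS 'telegraf metrics'"
```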
--- plugins/outputs/postgresql/postgresql.go | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 569fd2cb73657..7522144ae6487 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -28,7 +28,7 @@ type Postgresql struct { dbConnLock sync.Mutex db db.Wrapper tables tables.Manager - tagCache tagsCache + tagTables tables.Manager rows transformer columns columns.Mapper @@ -38,10 +38,12 @@ func init() { outputs.Add("postgresql", func() telegraf.Output { return newPostgresql() }) } +const createTableTemplate = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})" + func newPostgresql() *Postgresql { return &Postgresql{ Schema: "public", - TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", + TableTemplate: createTableTemplate, TagTableSuffix: "_tag", CachedTagsetsPerMeasurement: 1000, DoSchemaUpdates: true, @@ -63,6 +65,7 @@ func (p *Postgresql) Connect() error { if p.TagsAsForeignkeys { p.tagCache = newTagsCache(p.CachedTagsetsPerMeasurement, p.TagsAsJsonb, p.TagTableSuffix, p.Schema, p.db) + p.tagTables = tables.NewManager(p.db, p.Schema, createTableTemplate) } p.rows = newRowTransformer(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb, p.tagCache) p.columns = columns.NewMapper(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb) @@ -161,12 +164,12 @@ func (p *Postgresql) writeMetricsFromMeasure(measureName string, metricIndices [ targetColumns, targetTagColumns := p.columns.Target(metricIndices, metrics) if p.DoSchemaUpdates { - if err := p.prepareTable(measureName, targetColumns); err != nil { + if err := p.prepareTable(p.tables, measureName, targetColumns); err != nil { return err } if p.TagsAsForeignkeys { tagTableName := p.tagCache.tagsTableName(measureName) - if err := p.prepareTable(tagTableName, targetTagColumns); err != nil { + if err := p.prepareTable(p.tagTables, tagTableName, targetTagColumns); err != nil { return err } } @@ -188,21 +191,21 @@ func (p *Postgresql) writeMetricsFromMeasure(measureName string, metricIndices [ // Checks if a table exists in the db, and then validates if all the required columns // are present or some are missing (if metrics changed their field or tag sets). -func (p *Postgresql) prepareTable(tableName string, details *utils.TargetColumns) error { - tableExists := p.tables.Exists(tableName) +func (p *Postgresql) prepareTable(tableManager tables.Manager, tableName string, details *utils.TargetColumns) error { + tableExists := tableManager.Exists(tableName) if !tableExists { - return p.tables.CreateTable(tableName, details) + return tableManager.CreateTable(tableName, details) } - missingColumns, err := p.tables.FindColumnMismatch(tableName, details) + missingColumns, err := tableManager.FindColumnMismatch(tableName, details) if err != nil { return err } if len(missingColumns) == 0 { return nil } - return p.tables.AddColumnsToTable(tableName, missingColumns, details) + return tableManager.AddColumnsToTable(tableName, missingColumns, details) } func (p *Postgresql) checkConnection() bool { From bc10b0010141b190d7c15d2c00593980ff0b7118 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 10 Nov 2020 23:15:54 -0500 Subject: [PATCH 060/121] outputs.postgresql: Use a deterministic tag key The previous code used a postgres serial for the tag table key. This results in telegraf having to lookup from the table to obtain the key. 
While there was an LRU cache to keep telegraf from having to constantly
query, this posed a few problems.
1. The LRU cache is of a fixed size, but the performance can go from
perfectly fine to rock bottom as soon as you exceed the cache size.
There is no gradual degradation that would allow the admin to detect
the issue before it gets too bad. This has major repercussions as
telegraf won't be able to keep up with the ingest rate and will start
dropping data.
2. The default for this cache was really small (1000), which is likely
too small even for common use cases.
3. When telegraf starts up, the cache is cold, and so performance will
be terrible while it is repopulated, again resulting in data loss.

The new solution solves this by using a deterministic tag key that does
not require a lookup. The key is a 64-bit integer from the hash/maphash
package. This hash is specifically designed for creating a hash of a
string/bytes that can be used as a lookup key.
---
 plugins/outputs/postgresql/README.md | 4 -
 .../postgresql/columns/columns_initializer.go | 8 +-
 .../postgresql/columns/standard_columns.go | 3 +-
 plugins/outputs/postgresql/postgresql.go | 42 ++---
 plugins/outputs/postgresql/tags_cache.go | 164 ------------------
 plugins/outputs/postgresql/transformer.go | 10 +-
 plugins/outputs/postgresql/utils/utils.go | 13 ++
 7 files changed, 35 insertions(+), 209 deletions(-)
 delete mode 100644 plugins/outputs/postgresql/tags_cache.go

diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md
index 6e80c61d868cb..3acc1dd743585 100644
--- a/plugins/outputs/postgresql/README.md
+++ b/plugins/outputs/postgresql/README.md
@@ -29,10 +29,6 @@ target schema.
 
 ## Store tags as foreign keys in the metrics table. Default is false.
 # tags_as_foreignkeys = false
-
- ## If tags_as_foreignkeys is set to true you can choose the number of tag sets to cache
- ## per measurement (metric name). Default is 1000, if set to 0 => cache has no limit. 
- # cached_tagsets_per_measurement = 1000 ## Template to use for generating tables ## Available Variables: diff --git a/plugins/outputs/postgresql/columns/columns_initializer.go b/plugins/outputs/postgresql/columns/columns_initializer.go index 5391dabe93f27..68aa5eb07b83f 100644 --- a/plugins/outputs/postgresql/columns/columns_initializer.go +++ b/plugins/outputs/postgresql/columns/columns_initializer.go @@ -54,7 +54,7 @@ func tagsAsFKAndJSONAndFieldsAsJSONInit() (*utils.TargetColumns, *utils.TargetCo Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType, utils.FieldColType}, }, &utils.TargetColumns{ Names: []string{TagIDColumnName, TagsJSONColumn}, - DataTypes: []utils.PgDataType{TagIDColumnDataTypeAsPK, JSONColumnDataType}, + DataTypes: []utils.PgDataType{TagIDColumnDataType, JSONColumnDataType}, Target: map[string]int{TagIDColumnName: 0, TagsJSONColumn: 1}, Roles: []utils.ColumnRole{utils.TagsIDColType, utils.TagColType}, } @@ -68,7 +68,7 @@ func tagsAsFKAndJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType}, }, &utils.TargetColumns{ Names: []string{TagIDColumnName, TagsJSONColumn}, - DataTypes: []utils.PgDataType{TagIDColumnDataTypeAsPK, JSONColumnDataType}, + DataTypes: []utils.PgDataType{TagIDColumnDataType, JSONColumnDataType}, Target: map[string]int{TagIDColumnName: 0, TagsJSONColumn: 1}, Roles: []utils.ColumnRole{utils.TagsIDColType, utils.FieldColType}, } @@ -82,7 +82,7 @@ func tagsAsFKFieldsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType, utils.FieldColType}, }, &utils.TargetColumns{ Names: []string{TagIDColumnName}, - DataTypes: []utils.PgDataType{TagIDColumnDataTypeAsPK}, + DataTypes: []utils.PgDataType{TagIDColumnDataType}, Target: map[string]int{TagIDColumnName: 0}, Roles: []utils.ColumnRole{utils.TagsIDColType}, } @@ -96,7 +96,7 @@ func tagsAsFKInit() (*utils.TargetColumns, *utils.TargetColumns) { Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType}, }, &utils.TargetColumns{ Names: []string{TagIDColumnName}, - DataTypes: []utils.PgDataType{TagIDColumnDataTypeAsPK}, + DataTypes: []utils.PgDataType{TagIDColumnDataType}, Target: map[string]int{TagIDColumnName: 0}, Roles: []utils.ColumnRole{utils.TagsIDColType}, } diff --git a/plugins/outputs/postgresql/columns/standard_columns.go b/plugins/outputs/postgresql/columns/standard_columns.go index 75abe2ec6e869..9c542ea457096 100644 --- a/plugins/outputs/postgresql/columns/standard_columns.go +++ b/plugins/outputs/postgresql/columns/standard_columns.go @@ -8,8 +8,7 @@ const ( TimeColumnDataType = utils.PgTimestamptz TimeColumnDefinition = TimeColumnName + " " + utils.PgTimestamptz TagIDColumnName = "tag_id" - TagIDColumnDataType = utils.PgInt4 - TagIDColumnDataTypeAsPK = utils.PgSerial + TagIDColumnDataType = utils.PgBigInt TagsJSONColumn = "tags" FieldsJSONColumn = "fields" JSONColumnDataType = utils.PgJSONb diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 7522144ae6487..6eab44fb780ee 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -13,15 +13,14 @@ import ( ) type Postgresql struct { - Connection string - Schema string - DoSchemaUpdates bool - TagsAsForeignkeys bool - CachedTagsetsPerMeasurement int - TagsAsJsonb bool - FieldsAsJsonb bool - TableTemplate string - TagTableSuffix string + Connection string + Schema string + DoSchemaUpdates bool + 
TagsAsForeignkeys bool + TagsAsJsonb bool + FieldsAsJsonb bool + TableTemplate string + TagTableSuffix string // lock for the assignment of the dbWrapper, // table manager and tags cache @@ -42,11 +41,10 @@ const createTableTemplate = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})" func newPostgresql() *Postgresql { return &Postgresql{ - Schema: "public", - TableTemplate: createTableTemplate, - TagTableSuffix: "_tag", - CachedTagsetsPerMeasurement: 1000, - DoSchemaUpdates: true, + Schema: "public", + TableTemplate: createTableTemplate, + TagTableSuffix: "_tag", + DoSchemaUpdates: true, } } @@ -62,12 +60,11 @@ func (p *Postgresql) Connect() error { } p.db = db p.tables = tables.NewManager(p.db, p.Schema, p.TableTemplate) - if p.TagsAsForeignkeys { - p.tagCache = newTagsCache(p.CachedTagsetsPerMeasurement, p.TagsAsJsonb, p.TagTableSuffix, p.Schema, p.db) p.tagTables = tables.NewManager(p.db, p.Schema, createTableTemplate) } - p.rows = newRowTransformer(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb, p.tagCache) + + p.rows = newRowTransformer(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb) p.columns = columns.NewMapper(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb) return nil } @@ -76,7 +73,6 @@ func (p *Postgresql) Connect() error { func (p *Postgresql) Close() error { p.dbConnLock.Lock() defer p.dbConnLock.Unlock() - p.tagCache = nil p.tables = nil return p.db.Close() } @@ -104,11 +100,6 @@ var sampleConfig = ` ## Store tags as foreign keys in the metrics table. Default is false. # tags_as_foreignkeys = false - - ## If tags_as_foreignkeys is set to true you can choose the number of tag sets to cache - ## per measurement (metric name). Default is 1000, if set to 0 => cache has no limit. - ## Has no effect if tags_as_foreignkeys = false - # cached_tagsets_per_measurement = 1000 ## Template to use for generating tables ## Available Variables: @@ -168,7 +159,7 @@ func (p *Postgresql) writeMetricsFromMeasure(measureName string, metricIndices [ return err } if p.TagsAsForeignkeys { - tagTableName := p.tagCache.tagsTableName(measureName) + tagTableName := measureName + p.TagTableSuffix if err := p.prepareTable(p.tagTables, tagTableName, targetTagColumns); err != nil { return err } @@ -220,8 +211,5 @@ func (p *Postgresql) resetConnection() error { var err error p.db, err = db.NewWrapper(p.Connection) p.tables.SetConnection(p.db) - if p.tagCache != nil { - p.tagCache.setDb(p.db) - } return err } diff --git a/plugins/outputs/postgresql/tags_cache.go b/plugins/outputs/postgresql/tags_cache.go deleted file mode 100644 index 29640270d4d87..0000000000000 --- a/plugins/outputs/postgresql/tags_cache.go +++ /dev/null @@ -1,164 +0,0 @@ -package postgresql - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/golang/groupcache/lru" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/columns" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/db" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" -) - -const ( - selectTagIDTemplate = "SELECT tag_id FROM %s WHERE %s" -) - -// TagsCache retrieves the appropriate tagID based on the tag values -// from the database (used only when TagsAsForeignKey property selected). 
-// Also caches the LRU tagIDs -type tagsCache interface { - getTagID(target *utils.TargetColumns, metric telegraf.Metric) (int, error) - tagsTableName(measureName string) string - setDb(db db.Wrapper) -} - -type defTagsCache struct { - cache map[string]*lru.Cache - tagsAsJSONb bool - tagTableSuffix string - schema string - db db.Wrapper - itemsToCache int -} - -// newTagsCache returns a new implementation of the tags cache interface with LRU memoization -func newTagsCache(numItemsInCachePerMetric int, tagsAsJSONb bool, tagTableSuffix, schema string, db db.Wrapper) tagsCache { - return &defTagsCache{ - cache: map[string]*lru.Cache{}, - tagsAsJSONb: tagsAsJSONb, - tagTableSuffix: tagTableSuffix, - schema: schema, - db: db, - itemsToCache: numItemsInCachePerMetric, - } -} - -func (c *defTagsCache) setDb(db db.Wrapper) { - c.db = db -} - -// Checks the cache for the tag set of the metric, if present returns immediately. -// Otherwise asks the database if that tag set has already been recorded. -// If not recorded, inserts a new row to the tags table for the specific measurement. -// Re-caches the tagID after checking the DB. -func (c *defTagsCache) getTagID(target *utils.TargetColumns, metric telegraf.Metric) (int, error) { - measureName := metric.Name() - tags := metric.Tags() - cacheKey := constructCacheKey(tags) - tagID, isCached := c.checkTagCache(measureName, cacheKey) - if isCached { - return tagID, nil - } - - var whereParts []string - var whereValues []interface{} - if c.tagsAsJSONb { - whereParts = []string{utils.QuoteIdent(columns.TagsJSONColumn) + "= $1"} - numTags := len(tags) - if numTags > 0 { - d, err := utils.BuildJsonb(tags) - if err != nil { - return tagID, err - } - whereValues = []interface{}{d} - } else { - whereValues = []interface{}{nil} - } - } else { - whereParts = make([]string, len(target.Names)-1) - whereValues = make([]interface{}, len(target.Names)-1) - whereIndex := 1 - for columnIndex, tagName := range target.Names[1:] { - if val, ok := tags[tagName]; ok { - whereParts[columnIndex] = utils.QuoteIdent(tagName) + " = $" + strconv.Itoa(whereIndex) - whereValues[whereIndex-1] = val - } else { - whereParts[whereIndex-1] = tagName + " IS NULL" - } - whereIndex++ - } - } - - tagsTableName := c.tagsTableName(measureName) - tagsTableFullName := utils.FullTableName(c.schema, tagsTableName).Sanitize() - // SELECT tag_id FROM measure_tag WHERE t1 = v1 AND ... tN = vN - query := fmt.Sprintf(selectTagIDTemplate, tagsTableFullName, strings.Join(whereParts, " AND ")) - err := c.db.QueryRow(query, whereValues...).Scan(&tagID) - // tag set found in DB, cache it and return - if err == nil { - c.addToCache(measureName, cacheKey, tagID) - return tagID, nil - } - - // tag set is new, insert it, and cache the tagID - query = utils.GenerateInsert(tagsTableFullName, target.Names[1:]) + " RETURNING " + columns.TagIDColumnName - err = c.db.QueryRow(query, whereValues...).Scan(&tagID) - if err == nil { - c.addToCache(measureName, cacheKey, tagID) - } - return tagID, err -} - -func (c *defTagsCache) tagsTableName(measureName string) string { - return measureName + c.tagTableSuffix -} - -// check the cache for the given 'measure' if it contains the -// tagID value for a given tag-set key. If the cache for that measure -// doesn't exist, creates it. 
-func (c *defTagsCache) checkTagCache(measure, key string) (int, bool) { - if cacheForMeasure, ok := c.cache[measure]; ok { - tagID, exists := cacheForMeasure.Get(key) - if exists { - return tagID.(int), exists - } - return 0, exists - } - - c.cache[measure] = lru.New(c.itemsToCache) - return 0, false -} - -func (c *defTagsCache) addToCache(measure, key string, tagID int) { - c.cache[measure].Add(key, tagID) -} - -// cache key is constructed from the tag set as -// {tag_a:1, tag_c:2, tag_b:3}=>'tag_a 1;tag_b 3;tag_c 2;' -func constructCacheKey(tags map[string]string) string { - numTags := len(tags) - if numTags == 0 { - return "" - } - keys := make([]string, numTags) - i := 0 - for key := range tags { - keys[i] = key - i++ - } - - sort.Strings(keys) - var whereParts strings.Builder - for _, key := range keys { - val := tags[key] - whereParts.WriteString(key) - whereParts.WriteString(" ") - whereParts.WriteString(val) - whereParts.WriteString(";") - } - return whereParts.String() -} diff --git a/plugins/outputs/postgresql/transformer.go b/plugins/outputs/postgresql/transformer.go index 1a843a264d40d..9fe8862811077 100644 --- a/plugins/outputs/postgresql/transformer.go +++ b/plugins/outputs/postgresql/transformer.go @@ -14,15 +14,13 @@ type defTransformer struct { tagsAsFK bool tagsAsJSONb bool fieldsAsJSONb bool - tagsCache tagsCache } -func newRowTransformer(tagsAsFK, tagsAsJSONb, fieldsAsJSONb bool, tagsCache tagsCache) transformer { +func newRowTransformer(tagsAsFK, tagsAsJSONb, fieldsAsJSONb bool) transformer { return &defTransformer{ tagsAsFK: tagsAsFK, tagsAsJSONb: tagsAsJSONb, fieldsAsJSONb: fieldsAsJSONb, - tagsCache: tagsCache, } } @@ -32,11 +30,7 @@ func (dt *defTransformer) createRowFromMetric(numColumns int, metric telegraf.Me row[0] = metric.Time() // handle tags and tag id if dt.tagsAsFK { - tagID, err := dt.tagsCache.getTagID(targetTagColumns, metric) - if err != nil { - return nil, err - } - row[1] = tagID + row[1] = utils.GetTagID(metric) } else { if dt.tagsAsJSONb { jsonVal, err := utils.BuildJsonb(metric.Tags()) diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index 649f4728460af..8e0c40d45aad5 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -3,6 +3,7 @@ package utils import ( "encoding/json" "fmt" + "hash/maphash" "log" "strings" "time" @@ -158,3 +159,15 @@ func GenerateInsert(fullSanitizedTableName string, columns []string) string { values := strings.Join(valuePlaceholders, ",") return fmt.Sprintf(insertIntoSQLTemplate, fullSanitizedTableName, columnNames, values) } + +func GetTagID(metric telegraf.Metric) int64 { + var hash maphash.Hash + for _, tag := range metric.TagList() { + _, _ = hash.WriteString(tag.Key) + _ = hash.WriteByte(0) + _, _ = hash.WriteString(tag.Value) + _ = hash.WriteByte(0) + } + // Convert to int64 as postgres does not support uint64 + return int64(hash.Sum64()) +} From 680ea4b1f55771265024f0baee15a422d56bde77 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 10 Nov 2020 23:37:22 -0500 Subject: [PATCH 061/121] outputs.postgresql: Sort columns on table creation This is just a convenience so that when doing `select *`, you don't get a random order. This is not meant to be a perfect solution as any additional columns that are added to the table definition will not be sorted (this is just how postgres works). It's just meant so that the column ordering isn't a complete mess, making the life of the DBA somewhat better. 
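A sketch of the intended effect, assuming the ColumnRole declaration order from
utils (time, then tag id, then tag, then field) and purely illustrative column
values:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
)

func main() {
	// Roles compare before names, so the time column sorts first, tags next,
	// and fields last, case-insensitively by name within each group.
	cols := utils.TargetColumns{
		Names:     []string{"usage", "host", "time"},
		DataTypes: []utils.PgDataType{"float8", "text", "timestamptz"},
		Target:    map[string]int{"usage": 0, "host": 1, "time": 2},
		Roles:     []utils.ColumnRole{utils.FieldColType, utils.TagColType, utils.TimeColType},
	}
	cols.Sort()
	fmt.Println(cols.Names) // [time host usage]
}
```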
--- .../postgresql/tables/table_manager.go | 1 + plugins/outputs/postgresql/utils/types.go | 27 +++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/plugins/outputs/postgresql/tables/table_manager.go b/plugins/outputs/postgresql/tables/table_manager.go index e7aba2efe7156..2c06b8a7c0f45 100644 --- a/plugins/outputs/postgresql/tables/table_manager.go +++ b/plugins/outputs/postgresql/tables/table_manager.go @@ -89,6 +89,7 @@ func (t *defTableManager) Exists(tableName string) bool { // Creates a table in the database with the column names and types specified in 'colDetails' func (t *defTableManager) CreateTable(tableName string, colDetails *utils.TargetColumns) error { + colDetails.Sort() sql := t.generateCreateTableSQL(tableName, colDetails) if _, err := t.db.Exec(sql); err != nil { log.Printf("E! Couldn't create table: %s\nSQL: %s\n%v", tableName, sql, err) diff --git a/plugins/outputs/postgresql/utils/types.go b/plugins/outputs/postgresql/utils/types.go index a44017399d601..3e11d236560b5 100644 --- a/plugins/outputs/postgresql/utils/types.go +++ b/plugins/outputs/postgresql/utils/types.go @@ -1,5 +1,10 @@ package utils +import ( + "sort" + "strings" +) + // ColumnRole specifies the role of a column in a metric. // It helps map the columns to the DB. type ColumnRole int @@ -28,3 +33,25 @@ type TargetColumns struct { // the role each column has, helps properly map the metric to the db Roles []ColumnRole } + +func (tcs TargetColumns) Len() int { + return len(tcs.Names) +} + +func (tcs TargetColumns) Less(i, j int) bool { + if tcs.Roles[i] != tcs.Roles[j] { + return tcs.Roles[i] < tcs.Roles[j] + } + return strings.ToLower(tcs.Names[i]) < strings.ToLower(tcs.Names[j]) +} + +func (tcs TargetColumns) Swap(i, j int) { + tcs.Names[i], tcs.Names[j] = tcs.Names[j], tcs.Names[i] + tcs.Target[tcs.Names[i]], tcs.Target[tcs.Names[j]] = tcs.Target[tcs.Names[j]], tcs.Target[tcs.Names[i]] + tcs.DataTypes[i], tcs.DataTypes[j] = tcs.DataTypes[j], tcs.DataTypes[i] + tcs.Roles[i], tcs.Roles[j] = tcs.Roles[j], tcs.Roles[i] +} + +func (tcs TargetColumns) Sort() { + sort.Sort(tcs) +} From 5966abdf9cacbeca4b0bef501a231aab9eaba243 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 11 Nov 2020 00:12:41 -0500 Subject: [PATCH 062/121] outputs.postgresql: Use standard postgres data types Previously the code used a mix of standard data types and extension types. This changes it to always use the standard postgres types. This addresses any inconsistencies that may arise due to the code not understanding that 2 types are really the same. 
--- .../postgresql/columns/standard_columns.go | 4 +- .../postgresql/tables/table_manager.go | 3 +- plugins/outputs/postgresql/utils/utils.go | 69 +++++++------------ 3 files changed, 28 insertions(+), 48 deletions(-) diff --git a/plugins/outputs/postgresql/columns/standard_columns.go b/plugins/outputs/postgresql/columns/standard_columns.go index 9c542ea457096..9297eef3f3f0c 100644 --- a/plugins/outputs/postgresql/columns/standard_columns.go +++ b/plugins/outputs/postgresql/columns/standard_columns.go @@ -5,8 +5,8 @@ import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" // Column names and data types for standard fields (time, tag_id, tags, and fields) const ( TimeColumnName = "time" - TimeColumnDataType = utils.PgTimestamptz - TimeColumnDefinition = TimeColumnName + " " + utils.PgTimestamptz + TimeColumnDataType = utils.PgTimestampWithTimeZone + TimeColumnDefinition = TimeColumnName + " " + utils.PgTimestampWithTimeZone TagIDColumnName = "tag_id" TagIDColumnDataType = utils.PgBigInt TagsJSONColumn = "tags" diff --git a/plugins/outputs/postgresql/tables/table_manager.go b/plugins/outputs/postgresql/tables/table_manager.go index 2c06b8a7c0f45..4b0fabd27d810 100644 --- a/plugins/outputs/postgresql/tables/table_manager.go +++ b/plugins/outputs/postgresql/tables/table_manager.go @@ -213,8 +213,7 @@ func (t *defTableManager) findExistingColumns(table string) (map[string]utils.Pg log.Printf("E! Couldn't discover columns of table: %s\n%v", table, err) return nil, err } - pgShortType := utils.LongToShortPgType(colTypeStr) - cols[colName] = pgShortType + cols[colName] = utils.PgDataType(colTypeStr) } return cols, nil } diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index 8e0c40d45aad5..6e9f876bd1bfc 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -67,16 +67,13 @@ func FullTableName(schema, name string) *pgx.Identifier { // their short and long versions. const ( PgBool = "boolean" - PgInt8 = "int8" - PgInt4 = "int4" + PgSmallInt = "smallint" PgInteger = "integer" PgBigInt = "bigint" - PgFloat8 = "float8" + PgReal = "real" PgDoublePrecision = "double precision" PgText = "text" - PgTimestamptz = "timestamptz" PgTimestampWithTimeZone = "timestamp with time zone" - PgTimestamp = "timestamp" PgTimestampWithoutTimeZone = "timestamp without time zone" PgSerial = "serial" PgJSONb = "jsonb" @@ -89,60 +86,44 @@ func DerivePgDatatype(value interface{}) PgDataType { case bool: return PgBool case uint64, int64, int, uint: - return PgInt8 + return PgBigInt case uint32, int32: - return PgInt4 - case float64, float32: - return PgFloat8 + return PgInteger + case int16, int8: + return PgSmallInt + case float64: + return PgDoublePrecision + case float32: + return PgReal case string: return PgText case time.Time: - return PgTimestamptz + return PgTimestampWithTimeZone default: log.Printf("E! Unknown datatype %T(%v)", value, value) return PgText } } -// LongToShortPgType returns a PostgreSQL datatype in it's short -// notation form. -func LongToShortPgType(longPgType string) PgDataType { - switch longPgType { - case PgInteger: - return PgInt4 - case PgBigInt: - return PgInt8 - case PgDoublePrecision: - return PgFloat8 - case PgTimestampWithTimeZone: - return PgTimestamptz - case PgTimestampWithoutTimeZone: - return PgTimestamp - default: - return PgDataType(longPgType) - } -} - // PgTypeCanContain tells you if one PostgreSQL data type can contain // the values of another without data loss. 
func PgTypeCanContain(canThis PgDataType, containThis PgDataType) bool { - if canThis == containThis { + switch canThis { + case containThis: return true + case PgBigInt: + return containThis == PgInteger || containThis == PgSmallInt + case PgInteger: + return containThis == PgSmallInt + case PgDoublePrecision: + return containThis == PgReal || containThis == PgBigInt || containThis == PgInteger || containThis == PgSmallInt + case PgReal: + return containThis == PgBigInt || containThis == PgInteger || containThis == PgSmallInt + case PgTimestampWithTimeZone: + return containThis == PgTimestampWithoutTimeZone + default: + return false } - if canThis == PgInt8 { - return containThis == PgInt4 - } - if canThis == PgInt4 { - return containThis == PgSerial - } - if canThis == PgFloat8 { - return containThis == PgInt4 - } - if canThis == PgTimestamptz { - return containThis == PgTimestamp - } - - return false } // GenerateInsert returns a SQL statement to insert values in a table From 7a9d8d7abca369e589b441c00cfcf7b8c0b7f632 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 11 Nov 2020 15:59:37 -0500 Subject: [PATCH 063/121] outputs.postgresql: use connection pool This switches the output to use a connection pool for managing the connection. Previously connection management was being done manually, accomplishing the same thing as if you had a pool size of 1. So this makes it much simpler. This also removes the dbWrapper abstraction layer. There's no point to it when there's only one database driver. Plus having access to the underlying pool will make future work easier (without having to expand the abstraction, at which point it's no longer abstracting). --- plugins/outputs/postgresql/db/db_wrapper.go | 97 ---------------- .../outputs/postgresql/db/db_wrapper_test.go | 21 ---- plugins/outputs/postgresql/postgresql.go | 104 +++++++++--------- .../postgresql/tables/table_manager.go | 47 ++++---- plugins/outputs/postgresql/utils/utils.go | 8 +- 5 files changed, 82 insertions(+), 195 deletions(-) delete mode 100644 plugins/outputs/postgresql/db/db_wrapper.go delete mode 100644 plugins/outputs/postgresql/db/db_wrapper_test.go diff --git a/plugins/outputs/postgresql/db/db_wrapper.go b/plugins/outputs/postgresql/db/db_wrapper.go deleted file mode 100644 index 4a95d12b9cf09..0000000000000 --- a/plugins/outputs/postgresql/db/db_wrapper.go +++ /dev/null @@ -1,97 +0,0 @@ -package db - -import ( - "log" - - "github.com/jackc/pgx" - // pgx driver for sql connections - _ "github.com/jackc/pgx/stdlib" -) - -const checkConnQuery = "SELECT 1" - -// Wrapper defines an interface that encapsulates communication with a DB. -type Wrapper interface { - Exec(query string, args ...interface{}) (pgx.CommandTag, error) - DoCopy(fullTableName *pgx.Identifier, colNames []string, batch [][]interface{}) error - Query(query string, args ...interface{}) (*pgx.Rows, error) - QueryRow(query string, args ...interface{}) *pgx.Row - Close() error - IsAlive() bool -} - -type defaultDbWrapper struct { - db *pgx.Conn -} - -// NewWrapper returns an implementation of the db.Wrapper interface -// that issues queries to a PG database. -func NewWrapper(connection string) (Wrapper, error) { - connConfig, err := parseConnectionString(connection) - if err != nil { - return nil, err - } - db, err := pgx.Connect(*connConfig) - if err != nil { - log.Printf("E! 
Couldn't connect to server\n%v", err) - return nil, err - } - - return &defaultDbWrapper{ - db: db, - }, nil -} - -func (d *defaultDbWrapper) Exec(query string, args ...interface{}) (pgx.CommandTag, error) { - return d.db.Exec(query, args...) -} - -func (d *defaultDbWrapper) DoCopy(fullTableName *pgx.Identifier, colNames []string, batch [][]interface{}) error { - source := pgx.CopyFromRows(batch) - _, err := d.db.CopyFrom(*fullTableName, colNames, source) - if err != nil { - log.Printf("E! Could not insert batch of rows in output db\n%v", err) - } - - return err -} - -func (d *defaultDbWrapper) Close() error { return d.db.Close() } - -func (d *defaultDbWrapper) Query(query string, args ...interface{}) (*pgx.Rows, error) { - return d.db.Query(query, args...) -} - -func (d *defaultDbWrapper) QueryRow(query string, args ...interface{}) *pgx.Row { - return d.db.QueryRow(query, args...) -} - -func (d *defaultDbWrapper) IsAlive() bool { - if !d.db.IsAlive() { - return false - } - row := d.db.QueryRow(checkConnQuery) - var one int64 - if err := row.Scan(&one); err != nil { - log.Printf("W! Error given on 'is conn alive':\n%v", err) - return false - } - return true -} - -func parseConnectionString(connection string) (*pgx.ConnConfig, error) { - envConnConfig, err := pgx.ParseEnvLibpq() - if err != nil { - log.Println("E! couldn't check PG environment variables") - return nil, err - } - - connConfig, err := pgx.ParseConnectionString(connection) - if err != nil { - log.Printf("E! Couldn't parse connection string: %s\n%v", connection, err) - return nil, err - } - - connConfig = envConnConfig.Merge(connConfig) - return &connConfig, nil -} diff --git a/plugins/outputs/postgresql/db/db_wrapper_test.go b/plugins/outputs/postgresql/db/db_wrapper_test.go deleted file mode 100644 index ca6865b7ea56c..0000000000000 --- a/plugins/outputs/postgresql/db/db_wrapper_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package db - -import ( - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestParseConnectionStringPgEnvOverride(t *testing.T) { - config, err := parseConnectionString("dbname=test") - assert.NoError(t, err) - assert.Equal(t, "test", config.Database) - assert.Equal(t, "", config.Password) - - os.Setenv("PGPASSWORD", "pass") - config, err = parseConnectionString("dbname=test") - assert.NoError(t, err) - assert.Equal(t, "test", config.Database) - assert.Equal(t, "pass", config.Password) -} diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 6eab44fb780ee..4e5f4d5af34b8 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -1,13 +1,15 @@ package postgresql import ( + "context" "log" - "sync" + + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/pgxpool" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs/postgresql/columns" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/db" "github.com/influxdata/telegraf/plugins/outputs/postgresql/tables" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) @@ -21,13 +23,13 @@ type Postgresql struct { FieldsAsJsonb bool TableTemplate string TagTableSuffix string + PoolSize int - // lock for the assignment of the dbWrapper, - // table manager and tags cache - dbConnLock sync.Mutex - db db.Wrapper - tables tables.Manager - tagTables tables.Manager + dbContext context.Context + dbContextCancel func() + db *pgxpool.Pool + tables 
tables.Manager + tagTables tables.Manager rows transformer columns columns.Mapper @@ -50,15 +52,20 @@ func newPostgresql() *Postgresql { // Connect establishes a connection to the target database and prepares the cache func (p *Postgresql) Connect() error { - p.dbConnLock.Lock() - defer p.dbConnLock.Unlock() + poolConfig, err := pgxpool.ParseConfig(p.Connection) + if err != nil { + return err + } - // set p.db with a lock - db, err := db.NewWrapper(p.Connection) + poolConfig.AfterConnect = p.dbConnectedHook + + // Yes, we're not supposed to store the context. However since we don't receive a context, we have to. + p.dbContext, p.dbContextCancel = context.WithCancel(context.Background()) + p.db, err = pgxpool.ConnectConfig(p.dbContext, poolConfig) if err != nil { + log.Printf("E! Couldn't connect to server\n%v", err) return err } - p.db = db p.tables = tables.NewManager(p.db, p.Schema, p.TableTemplate) if p.TagsAsForeignkeys { p.tagTables = tables.NewManager(p.db, p.Schema, createTableTemplate) @@ -69,12 +76,33 @@ func (p *Postgresql) Connect() error { return nil } +// dbConnectHook checks to see whether we lost all connections, and if so resets any known state of the database (e.g. cached tables). +func (p *Postgresql) dbConnectedHook(ctx context.Context, conn *pgx.Conn) error { + if p.db == nil || p.tables == nil { + // This will happen on the initial connect since we haven't set it yet. + // Also meaning there is no state to reset. + return nil + } + + stat := p.db.Stat() + if stat.AcquiredConns()+stat.IdleConns() > 0 { + return nil + } + + p.tables.ClearTableCache() + if p.tagTables != nil { + p.tagTables.ClearTableCache() + } + + return nil +} + // Close closes the connection to the database func (p *Postgresql) Close() error { - p.dbConnLock.Lock() - defer p.dbConnLock.Unlock() p.tables = nil - return p.db.Close() + p.dbContextCancel() + p.db.Close() + return nil } var sampleConfig = ` @@ -128,17 +156,9 @@ func (p *Postgresql) SampleConfig() string { return sampleConfig } func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } func (p *Postgresql) Write(metrics []telegraf.Metric) error { - if !p.checkConnection() { - log.Println("W! Connection is not alive, attempting reset") - if err := p.resetConnection(); err != nil { - log.Printf("E! Could not reset connection:\n%v", err) - return err - } - log.Println("I! Connection established again") - } metricsByMeasurement := utils.GroupMetricsByMeasurement(metrics) for measureName, indices := range metricsByMeasurement { - err := p.writeMetricsFromMeasure(measureName, indices, metrics) + err := p.writeMetricsFromMeasure(p.dbContext, measureName, indices, metrics) if err != nil { log.Printf("copy error: %v", err) return err @@ -151,16 +171,16 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { // of the metrics that belong to the selected 'measureName' for faster lookup. // If schema updates are enabled the target db tables are updated to be able // to hold the new values. 
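The AfterConnect wiring above is the piece that detects a full reconnect. Reduced to a standalone sketch, assuming only the pgx/v4 pgxpool API; the connection string and log text here are placeholders, not the plugin's:

```go
package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v4"
	"github.com/jackc/pgx/v4/pgxpool"
)

func main() {
	cfg, err := pgxpool.ParseConfig("host=localhost database=postgres")
	if err != nil {
		log.Fatal(err)
	}

	var pool *pgxpool.Pool
	// AfterConnect runs on every new physical connection. If the pool reports
	// zero acquired and zero idle connections, every previous connection was
	// lost, so any state cached from the server is stale and must be dropped.
	cfg.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error {
		if pool == nil {
			return nil // still inside the initial ConnectConfig call
		}
		stat := pool.Stat()
		if stat.AcquiredConns()+stat.IdleConns() == 0 {
			log.Println("all connections lost; clearing cached table state")
		}
		return nil
	}

	pool, err = pgxpool.ConnectConfig(context.Background(), cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Close()
}
```

The nil check mirrors the plugin's own guard: the hook fires for the very first connection too, before there is any cached state to reset.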
-func (p *Postgresql) writeMetricsFromMeasure(measureName string, metricIndices []int, metrics []telegraf.Metric) error { +func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, measureName string, metricIndices []int, metrics []telegraf.Metric) error { targetColumns, targetTagColumns := p.columns.Target(metricIndices, metrics) if p.DoSchemaUpdates { - if err := p.prepareTable(p.tables, measureName, targetColumns); err != nil { + if err := p.prepareTable(ctx, p.tables, measureName, targetColumns); err != nil { return err } if p.TagsAsForeignkeys { tagTableName := measureName + p.TagTableSuffix - if err := p.prepareTable(p.tagTables, tagTableName, targetTagColumns); err != nil { + if err := p.prepareTable(ctx, p.tagTables, tagTableName, targetTagColumns); err != nil { return err } } @@ -177,39 +197,25 @@ func (p *Postgresql) writeMetricsFromMeasure(measureName string, metricIndices [ } fullTableName := utils.FullTableName(p.Schema, measureName) - return p.db.DoCopy(fullTableName, targetColumns.Names, values) + _, err := p.db.CopyFrom(ctx, fullTableName, targetColumns.Names, pgx.CopyFromRows(values)) + return err } // Checks if a table exists in the db, and then validates if all the required columns // are present or some are missing (if metrics changed their field or tag sets). -func (p *Postgresql) prepareTable(tableManager tables.Manager, tableName string, details *utils.TargetColumns) error { - tableExists := tableManager.Exists(tableName) +func (p *Postgresql) prepareTable(ctx context.Context, tableManager tables.Manager, tableName string, details *utils.TargetColumns) error { + tableExists := tableManager.Exists(ctx, tableName) if !tableExists { - return tableManager.CreateTable(tableName, details) + return tableManager.CreateTable(ctx, tableName, details) } - missingColumns, err := tableManager.FindColumnMismatch(tableName, details) + missingColumns, err := tableManager.FindColumnMismatch(ctx, tableName, details) if err != nil { return err } if len(missingColumns) == 0 { return nil } - return tableManager.AddColumnsToTable(tableName, missingColumns, details) -} - -func (p *Postgresql) checkConnection() bool { - p.dbConnLock.Lock() - defer p.dbConnLock.Unlock() - return p.db != nil && p.db.IsAlive() -} - -func (p *Postgresql) resetConnection() error { - p.dbConnLock.Lock() - defer p.dbConnLock.Unlock() - var err error - p.db, err = db.NewWrapper(p.Connection) - p.tables.SetConnection(p.db) - return err + return tableManager.AddColumnsToTable(ctx, tableName, missingColumns, details) } diff --git a/plugins/outputs/postgresql/tables/table_manager.go b/plugins/outputs/postgresql/tables/table_manager.go index 4b0fabd27d810..0d73143d99833 100644 --- a/plugins/outputs/postgresql/tables/table_manager.go +++ b/plugins/outputs/postgresql/tables/table_manager.go @@ -1,12 +1,13 @@ package tables import ( + "context" "fmt" + "github.com/jackc/pgx/v4/pgxpool" "github.com/pkg/errors" "log" "strings" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/db" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) @@ -25,9 +26,9 @@ type columnInDbDef struct { // database, create, and update them. type Manager interface { // Exists checks if a table with the given name already is present in the DB. 
- Exists(tableName string) bool + Exists(ctx context.Context, tableName string) bool // Creates a table in the database with the column names and types specified in 'colDetails' - CreateTable(tableName string, colDetails *utils.TargetColumns) error + CreateTable(ctx context.Context, tableName string, colDetails *utils.TargetColumns) error // This function queries a table in the DB if the required columns in 'colDetails' are present and what is their // data type. For existing columns it checks if the data type in the DB can safely hold the data from the metrics. // It returns: @@ -35,23 +36,23 @@ type Manager interface { // - or an error if // = it couldn't discover the columns of the table in the db // = the existing column types are incompatible with the required column types - FindColumnMismatch(tableName string, colDetails *utils.TargetColumns) ([]int, error) + FindColumnMismatch(ctx context.Context, tableName string, colDetails *utils.TargetColumns) ([]int, error) // From the column details (colDetails) of a given measurement, 'columnIndices' specifies which are missing in the DB. // this function will add the new columns with the required data type. - AddColumnsToTable(tableName string, columnIndices []int, colDetails *utils.TargetColumns) error - SetConnection(db db.Wrapper) + AddColumnsToTable(ctx context.Context, tableName string, columnIndices []int, colDetails *utils.TargetColumns) error + ClearTableCache() } type defTableManager struct { Tables map[string]bool - db db.Wrapper + db *pgxpool.Pool schema string tableTemplate string } // NewManager returns an instance of the tables.Manager interface // that can handle checking and updating the state of tables in the PG database. -func NewManager(db db.Wrapper, schema, tableTemplate string) Manager { +func NewManager(db *pgxpool.Pool, schema, tableTemplate string) Manager { return &defTableManager{ Tables: make(map[string]bool), db: db, @@ -60,20 +61,18 @@ func NewManager(db db.Wrapper, schema, tableTemplate string) Manager { } } -// SetConnection to db, used only when previous was killed or restarted. -// It will also clear the local cache of which table exists. -func (t *defTableManager) SetConnection(db db.Wrapper) { - t.db = db +// ClearTableCache clear the local cache of which table exists. +func (t *defTableManager) ClearTableCache() { t.Tables = make(map[string]bool) } // Exists checks if a table with the given name already is present in the DB. -func (t *defTableManager) Exists(tableName string) bool { +func (t *defTableManager) Exists(ctx context.Context, tableName string) bool { if _, ok := t.Tables[tableName]; ok { return true } - commandTag, err := t.db.Exec(tableExistsTemplate, tableName, t.schema) + commandTag, err := t.db.Exec(ctx, tableExistsTemplate, tableName, t.schema) if err != nil { log.Printf("E! Error checking for existence of metric table: %s\nSQL: %s\n%v", tableName, tableExistsTemplate, err) return false @@ -88,10 +87,10 @@ func (t *defTableManager) Exists(tableName string) bool { } // Creates a table in the database with the column names and types specified in 'colDetails' -func (t *defTableManager) CreateTable(tableName string, colDetails *utils.TargetColumns) error { +func (t *defTableManager) CreateTable(ctx context.Context, tableName string, colDetails *utils.TargetColumns) error { colDetails.Sort() sql := t.generateCreateTableSQL(tableName, colDetails) - if _, err := t.db.Exec(sql); err != nil { + if _, err := t.db.Exec(ctx, sql); err != nil { log.Printf("E! 
Couldn't create table: %s\nSQL: %s\n%v", tableName, sql, err) return err } @@ -107,8 +106,8 @@ func (t *defTableManager) CreateTable(tableName string, colDetails *utils.Target // - or an error if // = it couldn't discover the columns of the table in the db // = the existing column types are incompatible with the required column types -func (t *defTableManager) FindColumnMismatch(tableName string, colDetails *utils.TargetColumns) ([]int, error) { - columnPresence, err := t.findColumnPresence(tableName, colDetails.Names) +func (t *defTableManager) FindColumnMismatch(ctx context.Context, tableName string, colDetails *utils.TargetColumns) ([]int, error) { + columnPresence, err := t.findColumnPresence(ctx, tableName, colDetails.Names) if err != nil { return nil, err } @@ -132,13 +131,13 @@ func (t *defTableManager) FindColumnMismatch(tableName string, colDetails *utils // From the column details (colDetails) of a given measurement, 'columnIndices' specifies which are missing in the DB. // this function will add the new columns with the required data type. -func (t *defTableManager) AddColumnsToTable(tableName string, columnIndices []int, colDetails *utils.TargetColumns) error { +func (t *defTableManager) AddColumnsToTable(ctx context.Context, tableName string, columnIndices []int, colDetails *utils.TargetColumns) error { fullTableName := utils.FullTableName(t.schema, tableName).Sanitize() for _, colIndex := range columnIndices { name := colDetails.Names[colIndex] dataType := colDetails.DataTypes[colIndex] addColumnQuery := fmt.Sprintf(addColumnTemplate, fullTableName, utils.QuoteIdent(name), dataType) - _, err := t.db.Exec(addColumnQuery) + _, err := t.db.Exec(ctx, addColumnQuery) if err != nil { log.Printf("E! Couldn't add missing columns to the table: %s\nError executing: %s\n%v", tableName, addColumnQuery, err) return err @@ -172,8 +171,8 @@ func (t *defTableManager) generateCreateTableSQL(tableName string, colDetails *u // For a given table and an array of column names it checks the database if those columns exist, // and what's their data type. -func (t *defTableManager) findColumnPresence(tableName string, columns []string) ([]*columnInDbDef, error) { - existingCols, err := t.findExistingColumns(tableName) +func (t *defTableManager) findColumnPresence(ctx context.Context, tableName string, columns []string) ([]*columnInDbDef, error) { + existingCols, err := t.findExistingColumns(ctx, tableName) if err != nil { return nil, err } @@ -198,8 +197,8 @@ func (t *defTableManager) findColumnPresence(tableName string, columns []string) return columnStatus, nil } -func (t *defTableManager) findExistingColumns(table string) (map[string]utils.PgDataType, error) { - rows, err := t.db.Query(findExistingColumnsTemplate, t.schema, table) +func (t *defTableManager) findExistingColumns(ctx context.Context, table string) (map[string]utils.PgDataType, error) { + rows, err := t.db.Query(ctx, findExistingColumnsTemplate, t.schema, table) if err != nil { log.Printf("E! 
Couldn't discover existing columns of table: %s\n%v", table, err) return nil, errors.Wrap(err, "could not discover existing columns") diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index 6e9f876bd1bfc..aa91234b34090 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -9,7 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/jackc/pgx" + "github.com/jackc/pgx/v4" ) const ( @@ -55,12 +55,12 @@ func QuoteLiteral(name string) string { } // FullTableName returns a sanitized table name with it's schema (if supplied) -func FullTableName(schema, name string) *pgx.Identifier { +func FullTableName(schema, name string) pgx.Identifier { if schema != "" { - return &pgx.Identifier{schema, name} + return pgx.Identifier{schema, name} } - return &pgx.Identifier{name} + return pgx.Identifier{name} } // Constants for naming PostgreSQL data types both in From a4b8a81076af2ca75c13de80279e6452ad67e71d Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 11 Nov 2020 22:15:34 -0500 Subject: [PATCH 064/121] outputs.postgresql: add write concurrency support --- .../postgresql/columns/column_mapper.go | 7 +- plugins/outputs/postgresql/postgresql.go | 94 ++++++++++++++++--- plugins/outputs/postgresql/utils/utils.go | 19 ++-- 3 files changed, 93 insertions(+), 27 deletions(-) diff --git a/plugins/outputs/postgresql/columns/column_mapper.go b/plugins/outputs/postgresql/columns/column_mapper.go index 78287998f5f08..3d799e3e2e886 100644 --- a/plugins/outputs/postgresql/columns/column_mapper.go +++ b/plugins/outputs/postgresql/columns/column_mapper.go @@ -11,7 +11,7 @@ type Mapper interface { // and depending on 'tagsAsFK', 'tagsAsJSON', and 'fieldsAsJSON' generate the // desired columns (their name, type and which role they play) for both the // main metrics table in the DB, and if tagsAsFK == true for the tags table. - Target(indices []int, metrics []telegraf.Metric) (*utils.TargetColumns, *utils.TargetColumns) + Target(metrics []telegraf.Metric) (*utils.TargetColumns, *utils.TargetColumns) } type defMapper struct { @@ -36,7 +36,7 @@ func NewMapper(tagsAsFK, tagsAsJSON, fieldsAsJSON bool) Mapper { // and depending on 'tagsAsFK', 'tagsAsJSON', and 'fieldsAsJSON' generate the // desired columns (their name, type and which role they play) for both the // main metrics table in the DB, and if tagsAsFK == true for the tags table. 
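A side note on the FullTableName change in the utils.go hunk above: pgx/v4's Identifier is a plain []string taken by value, and Sanitize() handles the quoting (including doubling embedded quotes). A minimal sketch of the same shape; the helper name is illustrative, not the plugin's export:

```go
package main

import (
	"fmt"

	"github.com/jackc/pgx/v4"
)

// fullTableName returns a schema-qualified identifier when a schema is set,
// matching what pgx v4's CopyFrom expects to receive by value.
func fullTableName(schema, name string) pgx.Identifier {
	if schema != "" {
		return pgx.Identifier{schema, name}
	}
	return pgx.Identifier{name}
}

func main() {
	fmt.Println(fullTableName("public", "cpu").Sanitize())  // "public"."cpu"
	fmt.Println(fullTableName("", `weird"name`).Sanitize()) // "weird""name"
}
```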
-func (d *defMapper) Target(indices []int, metrics []telegraf.Metric) (*utils.TargetColumns, *utils.TargetColumns) { +func (d *defMapper) Target(metrics []telegraf.Metric) (*utils.TargetColumns, *utils.TargetColumns) { columns, tagColumns := d.initTargetColumns() if d.tagsAsJSON && d.fieldsAsJSON { // if json is used for both, that's all the columns you need @@ -51,8 +51,7 @@ func (d *defMapper) Target(indices []int, metrics []telegraf.Metric) (*utils.Tar // columns = [time, tagID, f1, f2, f3], tagColumns = [tagID, t1, t2] // if tagsAsFK == true && fieldsAsJSON = true // cols = [time, tagID, fields], tagCols = [tagID, t1, t2] - for _, index := range indices { - metric := metrics[index] + for _, metric := range metrics { if !d.tagsAsJSON { whichColumns := columns if d.tagsAsFK { diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 4e5f4d5af34b8..d9520d750dae2 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -3,6 +3,7 @@ package postgresql import ( "context" "log" + "time" "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/pgxpool" @@ -31,6 +32,8 @@ type Postgresql struct { tables tables.Manager tagTables tables.Manager + writeChan chan []telegraf.Metric + rows transformer columns columns.Mapper } @@ -52,7 +55,7 @@ func newPostgresql() *Postgresql { // Connect establishes a connection to the target database and prepares the cache func (p *Postgresql) Connect() error { - poolConfig, err := pgxpool.ParseConfig(p.Connection) + poolConfig, err := pgxpool.ParseConfig("pool_max_conns=1 " + p.Connection) if err != nil { return err } @@ -73,6 +76,15 @@ func (p *Postgresql) Connect() error { p.rows = newRowTransformer(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb) p.columns = columns.NewMapper(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb) + + maxConns := int(p.db.Stat().MaxConns()) + if maxConns > 1 { + p.writeChan = make(chan []telegraf.Metric, maxConns) + for i := 0; i < maxConns; i++ { + go p.writeWorker(p.dbContext) + } + } + return nil } @@ -157,8 +169,17 @@ func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" func (p *Postgresql) Write(metrics []telegraf.Metric) error { metricsByMeasurement := utils.GroupMetricsByMeasurement(metrics) - for measureName, indices := range metricsByMeasurement { - err := p.writeMetricsFromMeasure(p.dbContext, measureName, indices, metrics) + + if p.db.Stat().MaxConns() > 1 { + return p.writeConcurrent(metricsByMeasurement) + } else { + return p.writeSequential(metricsByMeasurement) + } +} + +func (p *Postgresql) writeSequential(metricsByMeasurement map[string][]telegraf.Metric) error { + for _, metrics := range metricsByMeasurement { + err := p.writeMetricsFromMeasure(p.dbContext, metrics) if err != nil { log.Printf("copy error: %v", err) return err @@ -167,12 +188,61 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { return nil } -// Writes only the metrics from a specified measure. 'metricIndices' is an array -// of the metrics that belong to the selected 'measureName' for faster lookup. -// If schema updates are enabled the target db tables are updated to be able -// to hold the new values. 
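The concurrency model this patch introduces, boiled down: one worker goroutine per pool connection, all fed from a single channel, stopped by cancelling a shared context. A sketch with a stand-in task type instead of telegraf.Metric; the sleep exists only to make the toy program deterministic enough to print:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type batch []string // stand-in for []telegraf.Metric

func worker(ctx context.Context, ch <-chan batch, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case b := <-ch:
			fmt.Println("writing batch of", len(b), "metrics")
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	const maxConns = 4
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan batch, maxConns) // buffered, one slot per connection

	var wg sync.WaitGroup
	for i := 0; i < maxConns; i++ {
		wg.Add(1)
		go worker(ctx, ch, &wg)
	}

	ch <- batch{"cpu", "mem"} // Write() just enqueues and returns
	time.Sleep(100 * time.Millisecond)

	cancel() // Close() cancels the shared context to stop all workers
	wg.Wait()
}
```

The hand-off is also why the concurrent Write path can return before a batch is on disk; delivery responsibility shifts to the worker.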
-func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, measureName string, metricIndices []int, metrics []telegraf.Metric) error { - targetColumns, targetTagColumns := p.columns.Target(metricIndices, metrics) +func (p *Postgresql) writeConcurrent(metricsByMeasurement map[string][]telegraf.Metric) error { + for _, metrics := range metricsByMeasurement { + select { + case p.writeChan <- metrics: + case <-p.dbContext.Done(): + return nil + } + } + return nil +} + +var backoffInit = time.Millisecond * 250 +var backoffMax = time.Second * 15 + +func (p *Postgresql) writeWorker(ctx context.Context) { + for { + select { + case metrics := <-p.writeChan: + backoff := time.Duration(0) + for { + err := p.writeMetricsFromMeasure(ctx, metrics) + if err == nil { + break + } + + if !isTempError(err) { + log.Printf("write error (permanent): %v", err) + break + } + log.Printf("write error (retry in %s): %v", backoff, err) + time.Sleep(backoff) + + if backoff == 0 { + backoff = backoffInit + } else { + backoff *= 2 + if backoff > backoffMax { + backoff = backoffMax + } + } + } + case <-p.dbContext.Done(): + return + } + } +} + +func isTempError(err error) bool { + return false +} + +// Writes the metrics from a specified measure. All the provided metrics must belong to the same measurement. +func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, metrics []telegraf.Metric) error { + targetColumns, targetTagColumns := p.columns.Target(metrics) + measureName := metrics[0].Name() if p.DoSchemaUpdates { if err := p.prepareTable(ctx, p.tables, measureName, targetColumns); err != nil { @@ -186,10 +256,10 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, measureName st } } numColumns := len(targetColumns.Names) - values := make([][]interface{}, len(metricIndices)) + values := make([][]interface{}, len(metrics)) var rowTransformErr error - for rowNum, metricIndex := range metricIndices { - values[rowNum], rowTransformErr = p.rows.createRowFromMetric(numColumns, metrics[metricIndex], targetColumns, targetTagColumns) + for rowNum, metric := range metrics { + values[rowNum], rowTransformErr = p.rows.createRowFromMetric(numColumns, metric, targetColumns, targetTagColumns) if rowTransformErr != nil { log.Printf("E! Could not transform metric to proper row\n%v", rowTransformErr) return rowTransformErr diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index aa91234b34090..2a4237f259595 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -17,21 +17,18 @@ const ( ) // GroupMetricsByMeasurement groups the list of metrics by the measurement name. -// But the values are the index of the measure from the input list of measures. 
-// [m, m, m2, m2, m] => {m:[0,1,4], m2:[2,3]} -func GroupMetricsByMeasurement(m []telegraf.Metric) map[string][]int { - toReturn := make(map[string][]int) - for i, metric := range m { - var metricLocations []int +func GroupMetricsByMeasurement(m []telegraf.Metric) map[string][]telegraf.Metric { + groups := make(map[string][]telegraf.Metric) + for _, metric := range m { + var group []telegraf.Metric var ok bool name := metric.Name() - if metricLocations, ok = toReturn[name]; !ok { - metricLocations = []int{} - toReturn[name] = metricLocations + if group, ok = groups[name]; !ok { + group = []telegraf.Metric{} } - toReturn[name] = append(metricLocations, i) + groups[name] = append(group, metric) } - return toReturn + return groups } // BuildJsonb returns a byte array of the json representation From 2659ac58b65cba1e2ca772539097fe647e1256eb Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 12 Nov 2020 12:22:33 -0500 Subject: [PATCH 065/121] outputs.postgresql: cache table structure Previous code was querying the table structure prior to every insert. We should instead cache the table structure, and just assume the admin isn't going to do something silly like dropping columns or deleting the table while we're using it. --- plugins/outputs/postgresql/postgresql.go | 87 ++++---- .../outputs/postgresql/tables/manager_test.go | 6 +- .../postgresql/tables/table_manager.go | 211 +++++++----------- plugins/outputs/postgresql/utils/utils.go | 15 +- 4 files changed, 132 insertions(+), 187 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index d9520d750dae2..2bb5d2ef67dfe 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -29,8 +29,8 @@ type Postgresql struct { dbContext context.Context dbContextCancel func() db *pgxpool.Pool - tables tables.Manager - tagTables tables.Manager + tables *tables.TableManager + tagTables *tables.TableManager writeChan chan []telegraf.Metric @@ -181,7 +181,10 @@ func (p *Postgresql) writeSequential(metricsByMeasurement map[string][]telegraf. for _, metrics := range metricsByMeasurement { err := p.writeMetricsFromMeasure(p.dbContext, metrics) if err != nil { - log.Printf("copy error: %v", err) + if !isTempError(err) { + log.Printf("write error (permanent): %v", err) + } + //TODO use a transaction so that we don't end up with a partial write, and end up retrying metrics we've already written return err } } @@ -199,35 +202,12 @@ func (p *Postgresql) writeConcurrent(metricsByMeasurement map[string][]telegraf. 
return nil } -var backoffInit = time.Millisecond * 250 -var backoffMax = time.Second * 15 - func (p *Postgresql) writeWorker(ctx context.Context) { for { select { case metrics := <-p.writeChan: - backoff := time.Duration(0) - for { - err := p.writeMetricsFromMeasure(ctx, metrics) - if err == nil { - break - } - - if !isTempError(err) { - log.Printf("write error (permanent): %v", err) - break - } - log.Printf("write error (retry in %s): %v", backoff, err) - time.Sleep(backoff) - - if backoff == 0 { - backoff = backoffInit - } else { - backoff *= 2 - if backoff > backoffMax { - backoff = backoffMax - } - } + if err := p.writeRetry(ctx, metrics); err != nil { + log.Printf("write error (permanent): %v", err) } case <-p.dbContext.Done(): return @@ -239,18 +219,46 @@ func isTempError(err error) bool { return false } +var backoffInit = time.Millisecond * 250 +var backoffMax = time.Second * 15 + +func (p *Postgresql) writeRetry(ctx context.Context, metrics []telegraf.Metric) error { + backoff := time.Duration(0) + for { + err := p.writeMetricsFromMeasure(ctx, metrics) + if err == nil { + return nil + } + + if !isTempError(err) { + return err + } + log.Printf("write error (retry in %s): %v", backoff, err) + time.Sleep(backoff) + + if backoff == 0 { + backoff = backoffInit + } else { + backoff *= 2 + if backoff > backoffMax { + backoff = backoffMax + } + } + } +} + // Writes the metrics from a specified measure. All the provided metrics must belong to the same measurement. func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, metrics []telegraf.Metric) error { targetColumns, targetTagColumns := p.columns.Target(metrics) measureName := metrics[0].Name() if p.DoSchemaUpdates { - if err := p.prepareTable(ctx, p.tables, measureName, targetColumns); err != nil { + if err := p.tables.EnsureStructure(ctx, measureName, targetColumns); err != nil { return err } if p.TagsAsForeignkeys { tagTableName := measureName + p.TagTableSuffix - if err := p.prepareTable(ctx, p.tagTables, tagTableName, targetTagColumns); err != nil { + if err := p.tagTables.EnsureStructure(ctx, tagTableName, targetTagColumns); err != nil { return err } } @@ -270,22 +278,3 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, metrics []tele _, err := p.db.CopyFrom(ctx, fullTableName, targetColumns.Names, pgx.CopyFromRows(values)) return err } - -// Checks if a table exists in the db, and then validates if all the required columns -// are present or some are missing (if metrics changed their field or tag sets). 
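The retry schedule writeRetry implements, shown in isolation: no wait before the first retry, then 250ms doubling up to a 15s cap. Note that isTempError still returns false unconditionally at this point in the series, so every error is effectively permanent until that stub is filled in. A standalone sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	backoffInit := 250 * time.Millisecond
	backoffMax := 15 * time.Second

	backoff := time.Duration(0)
	for attempt := 1; attempt <= 10; attempt++ {
		fmt.Printf("attempt %d: wait %s\n", attempt, backoff)
		// Same update order as writeRetry: sleep with the current value,
		// then grow it for the next round.
		if backoff == 0 {
			backoff = backoffInit
		} else {
			backoff *= 2
			if backoff > backoffMax {
				backoff = backoffMax
			}
		}
	}
	// prints waits of 0s, 250ms, 500ms, 1s, 2s, 4s, 8s, 15s, 15s, 15s
}
```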
-func (p *Postgresql) prepareTable(ctx context.Context, tableManager tables.Manager, tableName string, details *utils.TargetColumns) error { - tableExists := tableManager.Exists(ctx, tableName) - - if !tableExists { - return tableManager.CreateTable(ctx, tableName, details) - } - - missingColumns, err := tableManager.FindColumnMismatch(ctx, tableName, details) - if err != nil { - return err - } - if len(missingColumns) == 0 { - return nil - } - return tableManager.AddColumnsToTable(ctx, tableName, missingColumns, details) -} diff --git a/plugins/outputs/postgresql/tables/manager_test.go b/plugins/outputs/postgresql/tables/manager_test.go index cf17956adb247..35d0513b9f689 100644 --- a/plugins/outputs/postgresql/tables/manager_test.go +++ b/plugins/outputs/postgresql/tables/manager_test.go @@ -35,7 +35,7 @@ func (m *mockDb) IsAlive() bool { return true } func TestNewManager(t *testing.T) { db := &mockDb{} - res := NewManager(db, "schema", "table template").(*defTableManager) + res := NewManager(db, "schema", "table template").(*TableManager) assert.Equal(t, "table template", res.tableTemplate) assert.Equal(t, "schema", res.schema) assert.Equal(t, db, res.db) @@ -77,7 +77,7 @@ func TestExists(t *testing.T) { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - manager := &defTableManager{ + manager := &TableManager{ Tables: tc.cache, db: tc.db, } @@ -126,7 +126,7 @@ func TestCreateTable(t *testing.T) { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - manager := &defTableManager{ + manager := &TableManager{ Tables: map[string]bool{}, db: tc.db, tableTemplate: tc.template, diff --git a/plugins/outputs/postgresql/tables/table_manager.go b/plugins/outputs/postgresql/tables/table_manager.go index 0d73143d99833..4d31f90e56d33 100644 --- a/plugins/outputs/postgresql/tables/table_manager.go +++ b/plugins/outputs/postgresql/tables/table_manager.go @@ -13,7 +13,6 @@ import ( const ( addColumnTemplate = "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;" - tableExistsTemplate = "SELECT tablename FROM pg_tables WHERE tablename = $1 AND schemaname = $2;" findExistingColumnsTemplate = "SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = $1 and table_name = $2" ) @@ -22,29 +21,8 @@ type columnInDbDef struct { exists bool } -// Manager defines an abstraction that can check the state of tables in a PG -// database, create, and update them. -type Manager interface { - // Exists checks if a table with the given name already is present in the DB. - Exists(ctx context.Context, tableName string) bool - // Creates a table in the database with the column names and types specified in 'colDetails' - CreateTable(ctx context.Context, tableName string, colDetails *utils.TargetColumns) error - // This function queries a table in the DB if the required columns in 'colDetails' are present and what is their - // data type. For existing columns it checks if the data type in the DB can safely hold the data from the metrics. - // It returns: - // - the indices of the missing columns (from colDetails) - // - or an error if - // = it couldn't discover the columns of the table in the db - // = the existing column types are incompatible with the required column types - FindColumnMismatch(ctx context.Context, tableName string, colDetails *utils.TargetColumns) ([]int, error) - // From the column details (colDetails) of a given measurement, 'columnIndices' specifies which are missing in the DB. - // this function will add the new columns with the required data type. 
- AddColumnsToTable(ctx context.Context, tableName string, columnIndices []int, colDetails *utils.TargetColumns) error - ClearTableCache() -} - -type defTableManager struct { - Tables map[string]bool +type TableManager struct { + Tables map[string]map[string]utils.PgDataType db *pgxpool.Pool schema string tableTemplate string @@ -52,96 +30,53 @@ type defTableManager struct { // NewManager returns an instance of the tables.Manager interface // that can handle checking and updating the state of tables in the PG database. -func NewManager(db *pgxpool.Pool, schema, tableTemplate string) Manager { - return &defTableManager{ - Tables: make(map[string]bool), +func NewManager(db *pgxpool.Pool, schema, tableTemplate string) *TableManager { + return &TableManager{ + Tables: make(map[string]map[string]utils.PgDataType), db: db, tableTemplate: tableTemplate, schema: schema, } } -// ClearTableCache clear the local cache of which table exists. -func (t *defTableManager) ClearTableCache() { - t.Tables = make(map[string]bool) -} - -// Exists checks if a table with the given name already is present in the DB. -func (t *defTableManager) Exists(ctx context.Context, tableName string) bool { - if _, ok := t.Tables[tableName]; ok { - return true - } - - commandTag, err := t.db.Exec(ctx, tableExistsTemplate, tableName, t.schema) - if err != nil { - log.Printf("E! Error checking for existence of metric table: %s\nSQL: %s\n%v", tableName, tableExistsTemplate, err) - return false - } - - if commandTag.RowsAffected() == 1 { - t.Tables[tableName] = true - return true - } - - return false +// ClearTableCache clear the table structure cache. +func (tm *TableManager) ClearTableCache() { + tm.Tables = make(map[string]map[string]utils.PgDataType) } // Creates a table in the database with the column names and types specified in 'colDetails' -func (t *defTableManager) CreateTable(ctx context.Context, tableName string, colDetails *utils.TargetColumns) error { +func (tm *TableManager) createTable(ctx context.Context, tableName string, colDetails *utils.TargetColumns) error { colDetails.Sort() - sql := t.generateCreateTableSQL(tableName, colDetails) - if _, err := t.db.Exec(ctx, sql); err != nil { - log.Printf("E! Couldn't create table: %s\nSQL: %s\n%v", tableName, sql, err) + sql := tm.generateCreateTableSQL(tableName, colDetails) + if _, err := tm.db.Exec(ctx, sql); err != nil { return err } - t.Tables[tableName] = true - return nil -} - -// This function queries a table in the DB if the required columns in 'colDetails' are present and what is their -// data type. For existing columns it checks if the data type in the DB can safely hold the data from the metrics. -// It returns: -// - the indices of the missing columns (from colDetails) -// - or an error if -// = it couldn't discover the columns of the table in the db -// = the existing column types are incompatible with the required column types -func (t *defTableManager) FindColumnMismatch(ctx context.Context, tableName string, colDetails *utils.TargetColumns) ([]int, error) { - columnPresence, err := t.findColumnPresence(ctx, tableName, colDetails.Names) - if err != nil { - return nil, err - } - - var missingCols []int - for colIndex := range colDetails.Names { - colStateInDb := columnPresence[colIndex] - if !colStateInDb.exists { - missingCols = append(missingCols, colIndex) - continue - } - typeInDb := colStateInDb.dataType - typeInMetric := colDetails.DataTypes[colIndex] - if !utils.PgTypeCanContain(typeInDb, typeInMetric) { - return nil, fmt.Errorf("E! 
A column exists in '%s' of type '%s' required type '%s'", tableName, typeInDb, typeInMetric) - } + columns := make(map[string]utils.PgDataType, len(colDetails.Names)) + for i, colName := range colDetails.Names { + columns[colName] = colDetails.DataTypes[i] } - return missingCols, nil + tm.Tables[tableName] = columns + return nil } -// From the column details (colDetails) of a given measurement, 'columnIndices' specifies which are missing in the DB. -// this function will add the new columns with the required data type. -func (t *defTableManager) AddColumnsToTable(ctx context.Context, tableName string, columnIndices []int, colDetails *utils.TargetColumns) error { - fullTableName := utils.FullTableName(t.schema, tableName).Sanitize() +// addColumnsToTable adds the indicated columns to the table in the database. +// This is an idempotent operation, so attempting to add a column which already exists is a silent no-op. +func (tm *TableManager) addColumnsToTable(ctx context.Context, tableName string, columnIndices []int, colDetails *utils.TargetColumns) error { + fullTableName := utils.FullTableName(tm.schema, tableName).Sanitize() for _, colIndex := range columnIndices { name := colDetails.Names[colIndex] dataType := colDetails.DataTypes[colIndex] addColumnQuery := fmt.Sprintf(addColumnTemplate, fullTableName, utils.QuoteIdent(name), dataType) - _, err := t.db.Exec(ctx, addColumnQuery) + _, err := tm.db.Exec(ctx, addColumnQuery) if err != nil { - log.Printf("E! Couldn't add missing columns to the table: %s\nError executing: %s\n%v", tableName, addColumnQuery, err) - return err + return fmt.Errorf("adding '%s': %w", name, err) } + + //FIXME if the column exists, but is a different type, we won't get an error, but we need to ensure the type is one + // we can use, and not just assume it's correct. + tm.Tables[tableName][name] = dataType } return nil @@ -150,7 +85,7 @@ func (t *defTableManager) AddColumnsToTable(ctx context.Context, tableName strin // Populate the 'tableTemplate' (supplied as config option to the plugin) with the details of // the required columns for the measurement to create a 'CREATE TABLE' SQL statement. // The order, column names and data types are given in 'colDetails'. -func (t *defTableManager) generateCreateTableSQL(tableName string, colDetails *utils.TargetColumns) string { +func (tm *TableManager) generateCreateTableSQL(tableName string, colDetails *utils.TargetColumns) string { colDefs := make([]string, len(colDetails.Names)) var pk []string for colIndex, colName := range colDetails.Names { @@ -160,8 +95,8 @@ func (t *defTableManager) generateCreateTableSQL(tableName string, colDetails *u } } - fullTableName := utils.FullTableName(t.schema, tableName).Sanitize() - query := strings.Replace(t.tableTemplate, "{TABLE}", fullTableName, -1) + fullTableName := utils.FullTableName(tm.schema, tableName).Sanitize() + query := strings.Replace(tm.tableTemplate, "{TABLE}", fullTableName, -1) query = strings.Replace(query, "{TABLELITERAL}", utils.QuoteLiteral(fullTableName), -1) query = strings.Replace(query, "{COLUMNS}", strings.Join(colDefs, ","), -1) query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) @@ -169,50 +104,70 @@ func (t *defTableManager) generateCreateTableSQL(tableName string, colDetails *u return query } -// For a given table and an array of column names it checks the database if those columns exist, -// and what's their data type. 
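generateCreateTableSQL above is plain placeholder substitution over the configured tableTemplate. A sketch of the expansion using an assumed template of the kind a user might configure (the plugin's real default lives in its sample config); placeholders that don't appear in the template are simply no-ops:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	tableTemplate := "CREATE TABLE IF NOT EXISTS {TABLE} ({COLUMNS})"

	fullTableName := `"public"."cpu"`
	colDefs := []string{`"time" timestamp with time zone`, `"host" text`, `"usage" double precision`}
	pk := []string{`"time"`, `"host"`}

	query := strings.Replace(tableTemplate, "{TABLE}", fullTableName, -1)
	query = strings.Replace(query, "{COLUMNS}", strings.Join(colDefs, ","), -1)
	query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) // absent here, so unchanged
	fmt.Println(query)
	// CREATE TABLE IF NOT EXISTS "public"."cpu" ("time" timestamp with time zone,"host" text,"usage" double precision)
}
```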
-func (t *defTableManager) findColumnPresence(ctx context.Context, tableName string, columns []string) ([]*columnInDbDef, error) { - existingCols, err := t.findExistingColumns(ctx, tableName) +func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName string) error { + rows, err := tm.db.Query(ctx, findExistingColumnsTemplate, tm.schema, tableName) if err != nil { - return nil, err + log.Printf("E! Couldn't discover existing columns of table: %s\n%v", tableName, err) + return errors.Wrap(err, "could not discover existing columns") } - if len(existingCols) == 0 { - log.Printf("E! Table exists, but no columns discovered, user doesn't have enough permissions") - return nil, errors.New("Table exists, but no columns discovered, user doesn't have enough permissions") + defer rows.Close() + cols := make(map[string]utils.PgDataType) + for rows.Next() { + var colName, colTypeStr string + err := rows.Scan(&colName, &colTypeStr) + if err != nil { + log.Printf("E! Couldn't discover columns of table: %s\n%v", tableName, err) + return err + } + cols[colName] = utils.PgDataType(colTypeStr) } - columnStatus := make([]*columnInDbDef, len(columns)) - for i := 0; i < len(columns); i++ { - currentColumn := columns[i] - colType, exists := existingCols[currentColumn] - if !exists { - colType = "" + tm.Tables[tableName] = cols + return nil +} + +func (tm *TableManager) EnsureStructure(ctx context.Context, tableName string, columns *utils.TargetColumns) error { + structure, ok := tm.Tables[tableName] + if !ok { + // We don't know about the table. First try to query it. + if err := tm.refreshTableStructure(ctx, tableName); err != nil { + return fmt.Errorf("querying table structure: %w", err) } - columnStatus[i] = &columnInDbDef{ - exists: exists, - dataType: colType, + structure, ok = tm.Tables[tableName] + if !ok { + // Ok, table doesn't exist, now we can create it. + if err := tm.createTable(ctx, tableName, columns); err != nil { + return fmt.Errorf("creating table: %w", err) + } + structure = tm.Tables[tableName] } } - return columnStatus, nil -} - -func (t *defTableManager) findExistingColumns(ctx context.Context, table string) (map[string]utils.PgDataType, error) { - rows, err := t.db.Query(ctx, findExistingColumnsTemplate, t.schema, table) + missingColumns, err := tm.checkColumns(structure, columns) if err != nil { - log.Printf("E! Couldn't discover existing columns of table: %s\n%v", table, err) - return nil, errors.Wrap(err, "could not discover existing columns") + return fmt.Errorf("column validation: %w", err) } - defer rows.Close() - cols := make(map[string]utils.PgDataType) - for rows.Next() { - var colName, colTypeStr string - err := rows.Scan(&colName, &colTypeStr) - if err != nil { - log.Printf("E! 
Couldn't discover columns of table: %s\n%v", table, err) - return nil, err + if len(missingColumns) == 0 { + return nil + } + + if err := tm.addColumnsToTable(ctx, tableName, missingColumns, columns); err != nil { + return fmt.Errorf("adding columns: %w", err) + } + + return nil +} + +func (tm *TableManager) checkColumns(structure map[string]utils.PgDataType, columns *utils.TargetColumns) ([]int, error) { + var missingColumns []int + for i, colName := range columns.Names { + colType, ok := structure[colName] + if !ok { + missingColumns = append(missingColumns, i) + } + if !utils.PgTypeCanContain(colType, columns.DataTypes[i]) { + return nil, fmt.Errorf("column type '%s' cannot store '%s'", colType, columns.DataTypes[i]) } - cols[colName] = utils.PgDataType(colTypeStr) } - return cols, nil + return missingColumns, nil } diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index 2a4237f259595..f80cbf4767a8f 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -3,13 +3,14 @@ package utils import ( "encoding/json" "fmt" - "hash/maphash" + "hash/fnv" "log" "strings" "time" - "github.com/influxdata/telegraf" "github.com/jackc/pgx/v4" + + "github.com/influxdata/telegraf" ) const ( @@ -139,12 +140,12 @@ func GenerateInsert(fullSanitizedTableName string, columns []string) string { } func GetTagID(metric telegraf.Metric) int64 { - var hash maphash.Hash + hash := fnv.New64a() for _, tag := range metric.TagList() { - _, _ = hash.WriteString(tag.Key) - _ = hash.WriteByte(0) - _, _ = hash.WriteString(tag.Value) - _ = hash.WriteByte(0) + _, _ = hash.Write([]byte(tag.Key)) + _, _ = hash.Write([]byte{0}) + _, _ = hash.Write([]byte(tag.Value)) + _, _ = hash.Write([]byte{0}) } // Convert to int64 as postgres does not support uint64 return int64(hash.Sum64()) From adcef5ff9cf8848e57a2c935b2002e5705933237 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 13 Nov 2020 20:43:01 -0500 Subject: [PATCH 066/121] outputs.postgresql: refactor table structure management Previous code was performing a table structure lookup before every insert, which has an obvious performance impact. However instead of just adding caching, the whole thing was ripped out and replaced. Bunch of reasons, but the new code is simpler, more memory efficient, and faster. 
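Condensed, the cache-and-validate flow these two patches converge on looks like the sketch below, with plain strings standing in for the plugin's PgDataType values and a fake catalog lookup in place of information_schema. Note the continue that keeps a missing column from being type-checked against an empty type:

```go
package main

import "fmt"

type pgDataType string

// tableCache caches table -> column -> type so the catalog is only
// consulted on a miss, instead of before every insert.
type tableCache struct {
	tables  map[string]map[string]pgDataType
	queries int
}

func (c *tableCache) structure(name string) map[string]pgDataType {
	if cols, ok := c.tables[name]; ok {
		return cols
	}
	c.queries++ // stand-in for querying information_schema.columns
	cols := map[string]pgDataType{"time": "timestamp with time zone"}
	c.tables[name] = cols
	return cols
}

// missing reports which required columns are absent. An absent column is
// recorded and skipped (continue) rather than compared against a zero type.
func missing(structure map[string]pgDataType, names []string, types []pgDataType) ([]string, error) {
	var out []string
	for i, name := range names {
		colType, ok := structure[name]
		if !ok {
			out = append(out, name)
			continue
		}
		if colType != types[i] { // stand-in for PgTypeCanContain
			return nil, fmt.Errorf("column %q is %q, need %q", name, colType, types[i])
		}
	}
	return out, nil
}

func main() {
	c := &tableCache{tables: map[string]map[string]pgDataType{}}
	for i := 0; i < 1000; i++ {
		s := c.structure("cpu")
		cols, _ := missing(s, []string{"time", "usage"}, []pgDataType{"timestamp with time zone", "double precision"})
		_ = cols // would drive ALTER TABLE ... ADD COLUMN for "usage"
	}
	fmt.Println("catalog queries:", c.queries) // 1, not 1000
}
```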
--- .../postgresql/columns/column_mapper.go | 67 ------ .../postgresql/columns/columns_initializer.go | 139 ----------- .../outputs/postgresql/columns/map_fields.go | 18 -- .../outputs/postgresql/columns/map_tags.go | 18 -- .../postgresql/columns/standard_columns.go | 27 ++- plugins/outputs/postgresql/postgresql.go | 79 +++---- .../postgresql/tables/table_manager.go | 100 ++++---- plugins/outputs/postgresql/transformer.go | 223 +++++++++++++++--- plugins/outputs/postgresql/utils/types.go | 55 +++-- plugins/outputs/postgresql/utils/utils.go | 28 +-- 10 files changed, 338 insertions(+), 416 deletions(-) delete mode 100644 plugins/outputs/postgresql/columns/column_mapper.go delete mode 100644 plugins/outputs/postgresql/columns/columns_initializer.go delete mode 100644 plugins/outputs/postgresql/columns/map_fields.go delete mode 100644 plugins/outputs/postgresql/columns/map_tags.go diff --git a/plugins/outputs/postgresql/columns/column_mapper.go b/plugins/outputs/postgresql/columns/column_mapper.go deleted file mode 100644 index 3d799e3e2e886..0000000000000 --- a/plugins/outputs/postgresql/columns/column_mapper.go +++ /dev/null @@ -1,67 +0,0 @@ -package columns - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" -) - -// Mapper knows how to generate the column details for the main and tags table in the db -type Mapper interface { - // Target iterates through an array of 'metrics' visiting only those indexed by 'indices' - // and depending on 'tagsAsFK', 'tagsAsJSON', and 'fieldsAsJSON' generate the - // desired columns (their name, type and which role they play) for both the - // main metrics table in the DB, and if tagsAsFK == true for the tags table. - Target(metrics []telegraf.Metric) (*utils.TargetColumns, *utils.TargetColumns) -} - -type defMapper struct { - initTargetColumns targetColumnInitializer - tagsAsFK bool - tagsAsJSON bool - fieldsAsJSON bool -} - -// NewMapper returns a new implementation of the columns.Mapper interface. -func NewMapper(tagsAsFK, tagsAsJSON, fieldsAsJSON bool) Mapper { - initializer := getInitialColumnsGenerator(tagsAsFK, tagsAsJSON, fieldsAsJSON) - return &defMapper{ - tagsAsFK: tagsAsFK, - tagsAsJSON: tagsAsJSON, - fieldsAsJSON: fieldsAsJSON, - initTargetColumns: initializer, - } -} - -// Target iterates through an array of 'metrics' visiting only those indexed by 'indices' -// and depending on 'tagsAsFK', 'tagsAsJSON', and 'fieldsAsJSON' generate the -// desired columns (their name, type and which role they play) for both the -// main metrics table in the DB, and if tagsAsFK == true for the tags table. -func (d *defMapper) Target(metrics []telegraf.Metric) (*utils.TargetColumns, *utils.TargetColumns) { - columns, tagColumns := d.initTargetColumns() - if d.tagsAsJSON && d.fieldsAsJSON { - // if json is used for both, that's all the columns you need - return columns, tagColumns - } - - alreadyMapped := map[string]bool{} - // Iterate the metrics indexed by 'indices' and populate all the resulting required columns - // e.g. 
metric1(tags:[t1], fields:[f1,f2]), metric2(tags:[t2],fields:[f2, f3]) - // => columns = [time, t1, f1, f2, t2, f3], tagColumns = nil - // if tagsAsFK == true - // columns = [time, tagID, f1, f2, f3], tagColumns = [tagID, t1, t2] - // if tagsAsFK == true && fieldsAsJSON = true - // cols = [time, tagID, fields], tagCols = [tagID, t1, t2] - for _, metric := range metrics { - if !d.tagsAsJSON { - whichColumns := columns - if d.tagsAsFK { - whichColumns = tagColumns - } - mapTags(metric.TagList(), alreadyMapped, whichColumns) - } - - mapFields(metric.FieldList(), alreadyMapped, columns) - } - - return columns, tagColumns -} diff --git a/plugins/outputs/postgresql/columns/columns_initializer.go b/plugins/outputs/postgresql/columns/columns_initializer.go deleted file mode 100644 index 68aa5eb07b83f..0000000000000 --- a/plugins/outputs/postgresql/columns/columns_initializer.go +++ /dev/null @@ -1,139 +0,0 @@ -package columns - -import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" - -// a function type that generates column details for the main, and tags table in the db -type targetColumnInitializer func() (*utils.TargetColumns, *utils.TargetColumns) - -// constants used for populating the 'targetColumnInit' map (for better readability) -const ( - cTagsAsFK = true - cTagsAsJSON = true - cFieldsAsJSON = true -) - -// Since some of the target columns for the tables in the database don't -// depend on the metrics received, but on the plugin config, we can have -// constant initializer functions. It is always known that the 'time' -// column goes first in the main table, then if the tags are kept in a -// separate table you need to add the 'tag_id' column... -// This map contains an initializer for all the combinations -// of (tagsAsFK, tagsAsJSON, fieldsAsJSON). 
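The "e.g." comment in the deleted Target above describes a column-union pass: the union of tag and field keys across all metrics of one measurement becomes the target column list, in first-seen order. The same merging behaviour as a sketch, with plain key lists standing in for tag and field sets:

```go
package main

import "fmt"

func targetColumns(metrics [][]string) []string {
	seen := map[string]bool{"time": true}
	cols := []string{"time"} // time always leads
	for _, keys := range metrics {
		for _, k := range keys {
			if !seen[k] {
				seen[k] = true
				cols = append(cols, k)
			}
		}
	}
	return cols
}

func main() {
	// metric1(tags:[t1], fields:[f1,f2]), metric2(tags:[t2], fields:[f2,f3])
	m1 := []string{"t1", "f1", "f2"}
	m2 := []string{"t2", "f2", "f3"}
	fmt.Println(targetColumns([][]string{m1, m2}))
	// [time t1 f1 f2 t2 f3]
}
```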
-func getInitialColumnsGenerator(tagsAsFK, tagsAsJSON, fieldsAsJSON bool) targetColumnInitializer { - return standardColumns[tagsAsFK][tagsAsJSON][fieldsAsJSON] -} - -var standardColumns = map[bool]map[bool]map[bool]targetColumnInitializer{ - cTagsAsFK: { - cTagsAsJSON: { - cFieldsAsJSON: tagsAsFKAndJSONAndFieldsAsJSONInit, - !cFieldsAsJSON: tagsAsFKAndJSONInit, - }, - !cTagsAsJSON: { - cFieldsAsJSON: tagsAsFKFieldsAsJSONInit, - !cFieldsAsJSON: tagsAsFKInit, - }, - }, - !cTagsAsFK: { - cTagsAsJSON: { - cFieldsAsJSON: tagsAndFieldsAsJSONInit, - !cFieldsAsJSON: tagsAsJSONInit, - }, - !cTagsAsJSON: { - cFieldsAsJSON: fieldsAsJSONInit, - !cFieldsAsJSON: vanillaColumns, - }, - }, -} - -func tagsAsFKAndJSONAndFieldsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { - return &utils.TargetColumns{ - Names: []string{TimeColumnName, TagIDColumnName, FieldsJSONColumn}, - DataTypes: []utils.PgDataType{TimeColumnDataType, TagIDColumnDataType, JSONColumnDataType}, - Target: map[string]int{TimeColumnName: 0, TagIDColumnName: 1, FieldsJSONColumn: 2}, - Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType, utils.FieldColType}, - }, &utils.TargetColumns{ - Names: []string{TagIDColumnName, TagsJSONColumn}, - DataTypes: []utils.PgDataType{TagIDColumnDataType, JSONColumnDataType}, - Target: map[string]int{TagIDColumnName: 0, TagsJSONColumn: 1}, - Roles: []utils.ColumnRole{utils.TagsIDColType, utils.TagColType}, - } -} - -func tagsAsFKAndJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { - return &utils.TargetColumns{ - Names: []string{TimeColumnName, TagIDColumnName}, - DataTypes: []utils.PgDataType{TimeColumnDataType, TagIDColumnDataType}, - Target: map[string]int{TimeColumnName: 0, TagIDColumnName: 1}, - Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType}, - }, &utils.TargetColumns{ - Names: []string{TagIDColumnName, TagsJSONColumn}, - DataTypes: []utils.PgDataType{TagIDColumnDataType, JSONColumnDataType}, - Target: map[string]int{TagIDColumnName: 0, TagsJSONColumn: 1}, - Roles: []utils.ColumnRole{utils.TagsIDColType, utils.FieldColType}, - } -} - -func tagsAsFKFieldsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { - return &utils.TargetColumns{ - Names: []string{TimeColumnName, TagIDColumnName, FieldsJSONColumn}, - DataTypes: []utils.PgDataType{TimeColumnDataType, TagIDColumnDataType, JSONColumnDataType}, - Target: map[string]int{TimeColumnName: 0, TagIDColumnName: 1, FieldsJSONColumn: 2}, - Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType, utils.FieldColType}, - }, &utils.TargetColumns{ - Names: []string{TagIDColumnName}, - DataTypes: []utils.PgDataType{TagIDColumnDataType}, - Target: map[string]int{TagIDColumnName: 0}, - Roles: []utils.ColumnRole{utils.TagsIDColType}, - } -} - -func tagsAsFKInit() (*utils.TargetColumns, *utils.TargetColumns) { - return &utils.TargetColumns{ - Names: []string{TimeColumnName, TagIDColumnName}, - DataTypes: []utils.PgDataType{TimeColumnDataType, TagIDColumnDataType}, - Target: map[string]int{TimeColumnName: 0, TagIDColumnName: 1}, - Roles: []utils.ColumnRole{utils.TimeColType, utils.TagsIDColType}, - }, &utils.TargetColumns{ - Names: []string{TagIDColumnName}, - DataTypes: []utils.PgDataType{TagIDColumnDataType}, - Target: map[string]int{TagIDColumnName: 0}, - Roles: []utils.ColumnRole{utils.TagsIDColType}, - } -} - -func tagsAndFieldsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { - return &utils.TargetColumns{ - Names: []string{TimeColumnName, TagsJSONColumn, FieldsJSONColumn}, - DataTypes: 
[]utils.PgDataType{TimeColumnDataType, JSONColumnDataType, JSONColumnDataType}, - Target: map[string]int{TimeColumnName: 0, TagsJSONColumn: 1, FieldsJSONColumn: 2}, - Roles: []utils.ColumnRole{utils.TimeColType, utils.TagColType, utils.FieldColType}, - }, nil -} - -func tagsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { - return &utils.TargetColumns{ - Names: []string{TimeColumnName, TagsJSONColumn}, - DataTypes: []utils.PgDataType{TimeColumnDataType, JSONColumnDataType}, - Target: map[string]int{TimeColumnName: 0, TagsJSONColumn: 1}, - Roles: []utils.ColumnRole{utils.TimeColType, utils.TagColType}, - }, nil -} - -func fieldsAsJSONInit() (*utils.TargetColumns, *utils.TargetColumns) { - return &utils.TargetColumns{ - Names: []string{TimeColumnName, FieldsJSONColumn}, - DataTypes: []utils.PgDataType{TimeColumnDataType, JSONColumnDataType}, - Target: map[string]int{TimeColumnName: 0, FieldsJSONColumn: 1}, - Roles: []utils.ColumnRole{utils.TimeColType, utils.FieldColType}, - }, nil -} - -func vanillaColumns() (*utils.TargetColumns, *utils.TargetColumns) { - return &utils.TargetColumns{ - Names: []string{TimeColumnName}, - DataTypes: []utils.PgDataType{TimeColumnDataType}, - Target: map[string]int{TimeColumnName: 0}, - Roles: []utils.ColumnRole{utils.TimeColType}, - }, nil -} diff --git a/plugins/outputs/postgresql/columns/map_fields.go b/plugins/outputs/postgresql/columns/map_fields.go deleted file mode 100644 index e905d5ddf66a0..0000000000000 --- a/plugins/outputs/postgresql/columns/map_fields.go +++ /dev/null @@ -1,18 +0,0 @@ -package columns - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" -) - -func mapFields(fieldList []*telegraf.Field, alreadyMapped map[string]bool, columns *utils.TargetColumns) { - for _, field := range fieldList { - if _, ok := alreadyMapped[field.Key]; !ok { - alreadyMapped[field.Key] = true - columns.Target[field.Key] = len(columns.Names) - columns.Names = append(columns.Names, field.Key) - columns.DataTypes = append(columns.DataTypes, utils.DerivePgDatatype(field.Value)) - columns.Roles = append(columns.Roles, utils.FieldColType) - } - } -} diff --git a/plugins/outputs/postgresql/columns/map_tags.go b/plugins/outputs/postgresql/columns/map_tags.go deleted file mode 100644 index 7bb575c0da768..0000000000000 --- a/plugins/outputs/postgresql/columns/map_tags.go +++ /dev/null @@ -1,18 +0,0 @@ -package columns - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" -) - -func mapTags(tagList []*telegraf.Tag, alreadyMapped map[string]bool, columns *utils.TargetColumns) { - for _, tag := range tagList { - if _, ok := alreadyMapped[tag.Key]; !ok { - alreadyMapped[tag.Key] = true - columns.Target[tag.Key] = len(columns.Names) - columns.Names = append(columns.Names, tag.Key) - columns.DataTypes = append(columns.DataTypes, utils.PgText) - columns.Roles = append(columns.Roles, utils.TagColType) - } - } -} diff --git a/plugins/outputs/postgresql/columns/standard_columns.go b/plugins/outputs/postgresql/columns/standard_columns.go index 9297eef3f3f0c..43c71b3df38a4 100644 --- a/plugins/outputs/postgresql/columns/standard_columns.go +++ b/plugins/outputs/postgresql/columns/standard_columns.go @@ -4,12 +4,23 @@ import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" // Column names and data types for standard fields (time, tag_id, tags, and fields) const ( - TimeColumnName = "time" - TimeColumnDataType = 
utils.PgTimestampWithTimeZone - TimeColumnDefinition = TimeColumnName + " " + utils.PgTimestampWithTimeZone - TagIDColumnName = "tag_id" - TagIDColumnDataType = utils.PgBigInt - TagsJSONColumn = "tags" - FieldsJSONColumn = "fields" - JSONColumnDataType = utils.PgJSONb + TimeColumnName = "time" + TimeColumnDataType = utils.PgTimestampWithTimeZone + TagIDColumnName = "tag_id" + TagIDColumnDataType = utils.PgBigInt + TagsJSONColumnName = "tags" + FieldsJSONColumnName = "fields" + JSONColumnDataType = utils.PgJSONb ) + +var TimeColumn = utils.Column{TimeColumnName, TimeColumnDataType, utils.TimeColType} +var TagIDColumn = utils.Column{TagIDColumnName, TagIDColumnDataType, utils.TagsIDColType} +var FieldsJSONColumn = utils.Column{FieldsJSONColumnName, JSONColumnDataType, utils.FieldColType} +var TagsJSONColumn = utils.Column{TagsJSONColumnName, JSONColumnDataType, utils.TagColType} + +func ColumnFromTag(key string, value interface{}) utils.Column { + return utils.Column{key, utils.DerivePgDatatype(value), utils.TagColType} +} +func ColumnFromField(key string, value interface{}) utils.Column { + return utils.Column{key, utils.DerivePgDatatype(value), utils.FieldColType} +} diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 2bb5d2ef67dfe..1d019bbdd952b 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -2,6 +2,7 @@ package postgresql import ( "context" + "fmt" "log" "time" @@ -10,7 +11,6 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/columns" "github.com/influxdata/telegraf/plugins/outputs/postgresql/tables" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) @@ -32,10 +32,7 @@ type Postgresql struct { tables *tables.TableManager tagTables *tables.TableManager - writeChan chan []telegraf.Metric - - rows transformer - columns columns.Mapper + writeChan chan *RowSource } func init() { @@ -74,12 +71,9 @@ func (p *Postgresql) Connect() error { p.tagTables = tables.NewManager(p.db, p.Schema, createTableTemplate) } - p.rows = newRowTransformer(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb) - p.columns = columns.NewMapper(p.TagsAsForeignkeys, p.TagsAsJsonb, p.FieldsAsJsonb) - maxConns := int(p.db.Stat().MaxConns()) if maxConns > 1 { - p.writeChan = make(chan []telegraf.Metric, maxConns) + p.writeChan = make(chan *RowSource) for i := 0; i < maxConns; i++ { go p.writeWorker(p.dbContext) } @@ -168,18 +162,18 @@ func (p *Postgresql) SampleConfig() string { return sampleConfig } func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } func (p *Postgresql) Write(metrics []telegraf.Metric) error { - metricsByMeasurement := utils.GroupMetricsByMeasurement(metrics) + rowSources := p.splitRowSources(metrics) if p.db.Stat().MaxConns() > 1 { - return p.writeConcurrent(metricsByMeasurement) + return p.writeConcurrent(rowSources) } else { - return p.writeSequential(metricsByMeasurement) + return p.writeSequential(rowSources) } } -func (p *Postgresql) writeSequential(metricsByMeasurement map[string][]telegraf.Metric) error { - for _, metrics := range metricsByMeasurement { - err := p.writeMetricsFromMeasure(p.dbContext, metrics) +func (p *Postgresql) writeSequential(rowSources map[string]*RowSource) error { + for _, rowSource := range rowSources { + err := p.writeMetricsFromMeasure(p.dbContext, rowSource) if err != nil { if !isTempError(err) { 
log.Printf("write error (permanent): %v", err) @@ -191,10 +185,10 @@ func (p *Postgresql) writeSequential(metricsByMeasurement map[string][]telegraf. return nil } -func (p *Postgresql) writeConcurrent(metricsByMeasurement map[string][]telegraf.Metric) error { - for _, metrics := range metricsByMeasurement { +func (p *Postgresql) writeConcurrent(rowSources map[string]*RowSource) error { + for _, rowSource := range rowSources { select { - case p.writeChan <- metrics: + case p.writeChan <- rowSource: case <-p.dbContext.Done(): return nil } @@ -205,8 +199,8 @@ func (p *Postgresql) writeConcurrent(metricsByMeasurement map[string][]telegraf. func (p *Postgresql) writeWorker(ctx context.Context) { for { select { - case metrics := <-p.writeChan: - if err := p.writeRetry(ctx, metrics); err != nil { + case rowSource := <-p.writeChan: + if err := p.writeRetry(ctx, rowSource); err != nil { log.Printf("write error (permanent): %v", err) } case <-p.dbContext.Done(): @@ -222,10 +216,10 @@ func isTempError(err error) bool { var backoffInit = time.Millisecond * 250 var backoffMax = time.Second * 15 -func (p *Postgresql) writeRetry(ctx context.Context, metrics []telegraf.Metric) error { +func (p *Postgresql) writeRetry(ctx context.Context, rowSource *RowSource) error { backoff := time.Duration(0) for { - err := p.writeMetricsFromMeasure(ctx, metrics) + err := p.writeMetricsFromMeasure(ctx, rowSource) if err == nil { return nil } @@ -234,6 +228,7 @@ func (p *Postgresql) writeRetry(ctx context.Context, metrics []telegraf.Metric) return err } log.Printf("write error (retry in %s): %v", backoff, err) + rowSource.Reset() time.Sleep(backoff) if backoff == 0 { @@ -248,33 +243,29 @@ func (p *Postgresql) writeRetry(ctx context.Context, metrics []telegraf.Metric) } // Writes the metrics from a specified measure. All the provided metrics must belong to the same measurement. -func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, metrics []telegraf.Metric) error { - targetColumns, targetTagColumns := p.columns.Target(metrics) - measureName := metrics[0].Name() +func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, rowSource *RowSource) error { + targetColumns := rowSource.Columns() + targetTagColumns := rowSource.TagColumns() + measureName := rowSource.Name() - if p.DoSchemaUpdates { - if err := p.tables.EnsureStructure(ctx, measureName, targetColumns); err != nil { - return err - } - if p.TagsAsForeignkeys { - tagTableName := measureName + p.TagTableSuffix - if err := p.tagTables.EnsureStructure(ctx, tagTableName, targetTagColumns); err != nil { - return err - } - } + if err := p.tables.EnsureStructure(ctx, measureName, targetColumns, p.DoSchemaUpdates); err != nil { + return fmt.Errorf("validating table '%s': %w", measureName, err) } - numColumns := len(targetColumns.Names) - values := make([][]interface{}, len(metrics)) - var rowTransformErr error - for rowNum, metric := range metrics { - values[rowNum], rowTransformErr = p.rows.createRowFromMetric(numColumns, metric, targetColumns, targetTagColumns) - if rowTransformErr != nil { - log.Printf("E! 
Could not transform metric to proper row\n%v", rowTransformErr) - return rowTransformErr + if p.TagsAsForeignkeys { + tagTableName := measureName + p.TagTableSuffix + if err := p.tagTables.EnsureStructure(ctx, tagTableName, targetTagColumns, p.DoSchemaUpdates); err != nil { + return fmt.Errorf("validating table '%s': %w", tagTableName, err) } } + columnPositions := make(map[string]int, len(targetColumns)) + columnNames := make([]string, len(targetColumns)) + for i, col := range targetColumns { + columnPositions[col.Name] = i + columnNames[i] = col.Name + } + fullTableName := utils.FullTableName(p.Schema, measureName) - _, err := p.db.CopyFrom(ctx, fullTableName, targetColumns.Names, pgx.CopyFromRows(values)) + _, err := p.db.CopyFrom(ctx, fullTableName, columnNames, rowSource) return err } diff --git a/plugins/outputs/postgresql/tables/table_manager.go b/plugins/outputs/postgresql/tables/table_manager.go index 4d31f90e56d33..d1d07dc82b1da 100644 --- a/plugins/outputs/postgresql/tables/table_manager.go +++ b/plugins/outputs/postgresql/tables/table_manager.go @@ -3,10 +3,10 @@ package tables import ( "context" "fmt" - "github.com/jackc/pgx/v4/pgxpool" - "github.com/pkg/errors" - "log" "strings" + "sync" + + "github.com/jackc/pgx/v4/pgxpool" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) @@ -16,13 +16,9 @@ const ( findExistingColumnsTemplate = "SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = $1 and table_name = $2" ) -type columnInDbDef struct { - dataType utils.PgDataType - exists bool -} - type TableManager struct { Tables map[string]map[string]utils.PgDataType + tablesMutex sync.RWMutex db *pgxpool.Pool schema string tableTemplate string @@ -41,42 +37,47 @@ func NewManager(db *pgxpool.Pool, schema, tableTemplate string) *TableManager { // ClearTableCache clear the table structure cache. func (tm *TableManager) ClearTableCache() { + tm.tablesMutex.Lock() tm.Tables = make(map[string]map[string]utils.PgDataType) + tm.tablesMutex.Unlock() } // Creates a table in the database with the column names and types specified in 'colDetails' -func (tm *TableManager) createTable(ctx context.Context, tableName string, colDetails *utils.TargetColumns) error { - colDetails.Sort() +func (tm *TableManager) createTable(ctx context.Context, tableName string, colDetails []utils.Column) error { + utils.ColumnList(colDetails).Sort() sql := tm.generateCreateTableSQL(tableName, colDetails) if _, err := tm.db.Exec(ctx, sql); err != nil { return err } - columns := make(map[string]utils.PgDataType, len(colDetails.Names)) - for i, colName := range colDetails.Names { - columns[colName] = colDetails.DataTypes[i] + structure := map[string]utils.PgDataType{} + for _, col := range colDetails { + structure[col.Name] = col.Type } - tm.Tables[tableName] = columns + tm.tablesMutex.Lock() + tm.Tables[tableName] = structure + tm.tablesMutex.Unlock() return nil } // addColumnsToTable adds the indicated columns to the table in the database. // This is an idempotent operation, so attempting to add a column which already exists is a silent no-op. 
-func (tm *TableManager) addColumnsToTable(ctx context.Context, tableName string, columnIndices []int, colDetails *utils.TargetColumns) error { +func (tm *TableManager) addColumnsToTable(ctx context.Context, tableName string, colDetails []utils.Column) error { + utils.ColumnList(colDetails).Sort() fullTableName := utils.FullTableName(tm.schema, tableName).Sanitize() - for _, colIndex := range columnIndices { - name := colDetails.Names[colIndex] - dataType := colDetails.DataTypes[colIndex] - addColumnQuery := fmt.Sprintf(addColumnTemplate, fullTableName, utils.QuoteIdent(name), dataType) + for _, col := range colDetails { + addColumnQuery := fmt.Sprintf(addColumnTemplate, fullTableName, utils.QuoteIdent(col.Name), col.Type) _, err := tm.db.Exec(ctx, addColumnQuery) if err != nil { - return fmt.Errorf("adding '%s': %w", name, err) + return fmt.Errorf("adding '%s': %w", col.Name, err) } //FIXME if the column exists, but is a different type, we won't get an error, but we need to ensure the type is one // we can use, and not just assume it's correct. - tm.Tables[tableName][name] = dataType + tm.tablesMutex.Lock() + tm.Tables[tableName][col.Name] = col.Type + tm.tablesMutex.Unlock() } return nil @@ -85,13 +86,13 @@ func (tm *TableManager) addColumnsToTable(ctx context.Context, tableName string, // Populate the 'tableTemplate' (supplied as config option to the plugin) with the details of // the required columns for the measurement to create a 'CREATE TABLE' SQL statement. // The order, column names and data types are given in 'colDetails'. -func (tm *TableManager) generateCreateTableSQL(tableName string, colDetails *utils.TargetColumns) string { - colDefs := make([]string, len(colDetails.Names)) +func (tm *TableManager) generateCreateTableSQL(tableName string, colDetails []utils.Column) string { + colDefs := make([]string, len(colDetails)) var pk []string - for colIndex, colName := range colDetails.Names { - colDefs[colIndex] = utils.QuoteIdent(colName) + " " + string(colDetails.DataTypes[colIndex]) - if colDetails.Roles[colIndex] != utils.FieldColType { - pk = append(pk, colName) + for i, col := range colDetails { + colDefs[i] = utils.QuoteIdent(col.Name) + " " + string(col.Type) + if col.Role != utils.FieldColType { + pk = append(pk, col.Name) } } @@ -107,8 +108,7 @@ func (tm *TableManager) generateCreateTableSQL(tableName string, colDetails *uti func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName string) error { rows, err := tm.db.Query(ctx, findExistingColumnsTemplate, tm.schema, tableName) if err != nil { - log.Printf("E! Couldn't discover existing columns of table: %s\n%v", tableName, err) - return errors.Wrap(err, "could not discover existing columns") + return err } defer rows.Close() cols := make(map[string]utils.PgDataType) @@ -116,30 +116,39 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName str var colName, colTypeStr string err := rows.Scan(&colName, &colTypeStr) if err != nil { - log.Printf("E! 
Couldn't discover columns of table: %s\n%v", tableName, err) return err } cols[colName] = utils.PgDataType(colTypeStr) } - tm.Tables[tableName] = cols + if len(cols) > 0 { + tm.tablesMutex.Lock() + tm.Tables[tableName] = cols + tm.tablesMutex.Unlock() + } return nil } -func (tm *TableManager) EnsureStructure(ctx context.Context, tableName string, columns *utils.TargetColumns) error { +func (tm *TableManager) EnsureStructure(ctx context.Context, tableName string, columns []utils.Column, doSchemaUpdate bool) error { + tm.tablesMutex.RLock() structure, ok := tm.Tables[tableName] + tm.tablesMutex.RUnlock() if !ok { // We don't know about the table. First try to query it. if err := tm.refreshTableStructure(ctx, tableName); err != nil { return fmt.Errorf("querying table structure: %w", err) } + tm.tablesMutex.RLock() structure, ok = tm.Tables[tableName] + tm.tablesMutex.RUnlock() if !ok { // Ok, table doesn't exist, now we can create it. if err := tm.createTable(ctx, tableName, columns); err != nil { return fmt.Errorf("creating table: %w", err) } + tm.tablesMutex.RLock() structure = tm.Tables[tableName] + tm.tablesMutex.RUnlock() } } @@ -151,22 +160,31 @@ func (tm *TableManager) EnsureStructure(ctx context.Context, tableName string, c return nil } - if err := tm.addColumnsToTable(ctx, tableName, missingColumns, columns); err != nil { - return fmt.Errorf("adding columns: %w", err) + if doSchemaUpdate { + if err := tm.addColumnsToTable(ctx, tableName, missingColumns); err != nil { + return fmt.Errorf("adding columns: %w", err) + } + } else { + colSpecs := make([]string, len(missingColumns)) + for i, col := range missingColumns { + colSpecs[i] = col.Name + " " + string(col.Type) + } + return fmt.Errorf("missing columns: %s", strings.Join(colSpecs, ", ")) } return nil } -func (tm *TableManager) checkColumns(structure map[string]utils.PgDataType, columns *utils.TargetColumns) ([]int, error) { - var missingColumns []int - for i, colName := range columns.Names { - colType, ok := structure[colName] +func (tm *TableManager) checkColumns(structure map[string]utils.PgDataType, columns []utils.Column) ([]utils.Column, error) { + var missingColumns []utils.Column + for _, col := range columns { + dbColType, ok := structure[col.Name] if !ok { - missingColumns = append(missingColumns, i) + missingColumns = append(missingColumns, col) + continue } - if !utils.PgTypeCanContain(colType, columns.DataTypes[i]) { - return nil, fmt.Errorf("column type '%s' cannot store '%s'", colType, columns.DataTypes[i]) + if !utils.PgTypeCanContain(dbColType, col.Type) { + return nil, fmt.Errorf("column type '%s' cannot store '%s'", dbColType, col.Type) } } return missingColumns, nil diff --git a/plugins/outputs/postgresql/transformer.go b/plugins/outputs/postgresql/transformer.go index 9fe8862811077..e34171e23c530 100644 --- a/plugins/outputs/postgresql/transformer.go +++ b/plugins/outputs/postgresql/transformer.go @@ -1,66 +1,213 @@ package postgresql import ( + "fmt" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs/postgresql/columns" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) -type transformer interface { - createRowFromMetric(numColumns int, metric telegraf.Metric, targetColumns, targetTagColumns *utils.TargetColumns) ([]interface{}, error) +func (p *Postgresql) splitRowSources(metrics []telegraf.Metric) map[string]*RowSource { + rowSources := map[string]*RowSource{} + + for _, m := range metrics { + rs := rowSources[m.Name()] + if rs == nil { + rs = 
NewRowSource(p) + rowSources[m.Name()] = rs + } + rs.AddMetric(m) + } + + return rowSources } -type defTransformer struct { - tagsAsFK bool - tagsAsJSONb bool - fieldsAsJSONb bool +type RowSource struct { + postgresql *Postgresql + metrics []telegraf.Metric + cursor int + + tagPositions map[string]int + tagColumns []utils.Column + + fieldPositions map[string]int + fieldColumns []utils.Column + + // Technically this will only contain strings, since all tag values are strings. But this is a restriction of telegraf + // metrics, and not postgres. It might be nice to support somehow converting tag values into native times. + tagSets map[int64][]interface{} } -func newRowTransformer(tagsAsFK, tagsAsJSONb, fieldsAsJSONb bool) transformer { - return &defTransformer{ - tagsAsFK: tagsAsFK, - tagsAsJSONb: tagsAsJSONb, - fieldsAsJSONb: fieldsAsJSONb, +func NewRowSource(postgresql *Postgresql) *RowSource { + rs := &RowSource{ + postgresql: postgresql, + cursor: -1, + tagSets: make(map[int64][]interface{}), + } + if !postgresql.FieldsAsJsonb { + rs.tagPositions = map[string]int{} + rs.fieldPositions = map[string]int{} } + return rs } -func (dt *defTransformer) createRowFromMetric(numColumns int, metric telegraf.Metric, targetColumns, targetTagColumns *utils.TargetColumns) ([]interface{}, error) { - row := make([]interface{}, numColumns) - // handle time - row[0] = metric.Time() - // handle tags and tag id - if dt.tagsAsFK { - row[1] = utils.GetTagID(metric) +func (rs *RowSource) AddMetric(metric telegraf.Metric) { + if rs.postgresql.TagsAsForeignkeys { + tagID := utils.GetTagID(metric) + if _, ok := rs.tagSets[tagID]; !ok { + tags := metric.TagList() + values := make([]interface{}, len(tags)) + for i, tag := range tags { + values[i] = columns.ColumnFromTag(tag.Key, tag.Value) + } + rs.tagSets[tagID] = values + } + } + + if !rs.postgresql.TagsAsJsonb { + for _, t := range metric.TagList() { + if _, ok := rs.tagPositions[t.Key]; !ok { + rs.tagPositions[t.Key] = len(rs.tagPositions) + rs.tagColumns = append(rs.tagColumns, columns.ColumnFromTag(t.Key, t.Value)) + } + } + } + + if !rs.postgresql.FieldsAsJsonb { + for _, f := range metric.FieldList() { + if _, ok := rs.fieldPositions[f.Key]; !ok { + rs.fieldPositions[f.Key] = len(rs.fieldPositions) + rs.fieldColumns = append(rs.fieldColumns, columns.ColumnFromField(f.Key, f.Value)) + } + } + } + + rs.metrics = append(rs.metrics, metric) +} + +func (rs *RowSource) Name() string { + if len(rs.metrics) == 0 { + return "" + } + return rs.metrics[0].Name() +} + +func (rs *RowSource) TagColumns() []utils.Column { + var cols []utils.Column + + if rs.postgresql.TagsAsForeignkeys { + cols = append(cols, columns.TagIDColumn) + } + + if rs.postgresql.TagsAsJsonb { + cols = append(cols, columns.TagsJSONColumn) } else { - if dt.tagsAsJSONb { - jsonVal, err := utils.BuildJsonb(metric.Tags()) - if err != nil { - return nil, err + cols = append(cols, rs.tagColumns...) + } + + return cols +} + +func (rs *RowSource) Columns() []utils.Column { + cols := []utils.Column{ + columns.TimeColumn, + } + + if rs.postgresql.TagsAsForeignkeys { + cols = append(cols, columns.TagIDColumn) + } else { + cols = append(cols, rs.TagColumns()...) + } + + if rs.postgresql.FieldsAsJsonb { + cols = append(cols, columns.FieldsJSONColumn) + } else { + cols = append(cols, rs.fieldColumns...) + } + + return cols +} + +func (rs *RowSource) DropFieldColumn(col utils.Column) { + if col.Role != utils.FieldColType || rs.postgresql.FieldsAsJsonb { + panic(fmt.Sprintf("Tried to perform an invalid field drop. 
This should not have happened. measurement=%s field=%s", rs.Name(), col.Name)) + } + + delete(rs.fieldPositions, col.Name) + for i, fc := range rs.fieldColumns { + if fc.Name != col.Name { + continue + } + rs.fieldColumns = append(rs.fieldColumns[:i], rs.fieldColumns[i+1:]...) + break + } +} + +func (rs *RowSource) Next() bool { + if rs.cursor+1 >= len(rs.metrics) { + return false + } + rs.cursor += 1 + return true +} + +func (rs *RowSource) Reset() { + rs.cursor = -1 +} + +func (rs *RowSource) Values() ([]interface{}, error) { + metric := rs.metrics[rs.cursor] + tags := metric.TagList() + fields := metric.FieldList() + + values := []interface{}{} + + values = append(values, metric.Time()) + + if !rs.postgresql.TagsAsForeignkeys { + if !rs.postgresql.TagsAsJsonb { + // tags_as_foreignkey=false, tags_as_json=false + tagValues := make([]interface{}, len(rs.tagPositions)) + for _, tag := range tags { + tagValues[rs.tagPositions[tag.Key]] = tag.Value } - targetIndex := targetColumns.Target[columns.TagsJSONColumn] - row[targetIndex] = jsonVal + values = append(values, tagValues...) } else { - for _, tag := range metric.TagList() { - targetIndex := targetColumns.Target[tag.Key] - row[targetIndex] = tag.Value + // tags_as_foreign_key=false, tags_as_json=true + value, err := utils.BuildJsonb(metric.Tags()) + if err != nil { + return nil, err } + values = append(values, value) } + } else { + // tags_as_foreignkey=true + values = append(values, utils.GetTagID(metric)) } - // handle fields - if dt.fieldsAsJSONb { - jsonVal, err := utils.BuildJsonb(metric.Fields()) - if err != nil { - return nil, err + if !rs.postgresql.FieldsAsJsonb { + // fields_as_json=false + fieldValues := make([]interface{}, len(rs.fieldPositions)) + for _, field := range fields { + // we might have dropped the field due to the table missing the column & schema updates being turned off + if fPos, ok := rs.fieldPositions[field.Key]; ok { + fieldValues[fPos] = field.Value + } } - targetIndex := targetColumns.Target[columns.FieldsJSONColumn] - row[targetIndex] = jsonVal + values = append(values, fieldValues...) } else { - for _, field := range metric.FieldList() { - targetIndex := targetColumns.Target[field.Key] - row[targetIndex] = field.Value + // fields_as_json=true + value, err := utils.BuildJsonb(metric.Fields()) + if err != nil { + return nil, err } + values = append(values, value) } - return row, nil + return values, nil +} + +func (rs *RowSource) Err() error { + return nil } diff --git a/plugins/outputs/postgresql/utils/types.go b/plugins/outputs/postgresql/utils/types.go index 3e11d236560b5..ad0ed3a4a275e 100644 --- a/plugins/outputs/postgresql/utils/types.go +++ b/plugins/outputs/postgresql/utils/types.go @@ -19,39 +19,48 @@ const ( // PgDataType defines a string that represents a PostgreSQL data type. type PgDataType string -// TargetColumns contains all the information needed to map a collection of -// metrics who belong to the same Measurement. -type TargetColumns struct { - // the names the columns will have in the database - Names []string - // column name -> order number. where to place each column in rows - // batched to the db - Target map[string]int +type Column struct { + Name string // the data type of each column should have in the db. 
used when checking // if the schema matches or it needs updates - DataTypes []PgDataType + Type PgDataType // the role each column has, helps properly map the metric to the db - Roles []ColumnRole + Role ColumnRole } -func (tcs TargetColumns) Len() int { - return len(tcs.Names) +//// TargetColumns contains all the information needed to map a collection of +//// metrics who belong to the same Measurement. +//type TargetColumns struct { +// // the names the columns will have in the database +// Names []string +// // column name -> order number. where to place each column in rows +// // batched to the db +// Target map[string]int +// // the data type of each column should have in the db. used when checking +// // if the schema matches or it needs updates +// DataTypes []PgDataType +// // the role each column has, helps properly map the metric to the db +// Roles []ColumnRole +//} +// + +type ColumnList []Column + +func (cl ColumnList) Len() int { + return len(cl) } -func (tcs TargetColumns) Less(i, j int) bool { - if tcs.Roles[i] != tcs.Roles[j] { - return tcs.Roles[i] < tcs.Roles[j] +func (cl ColumnList) Less(i, j int) bool { + if cl[i].Role != cl[j].Role { + return cl[i].Role < cl[j].Role } - return strings.ToLower(tcs.Names[i]) < strings.ToLower(tcs.Names[j]) + return strings.ToLower(cl[i].Name) < strings.ToLower(cl[j].Name) } -func (tcs TargetColumns) Swap(i, j int) { - tcs.Names[i], tcs.Names[j] = tcs.Names[j], tcs.Names[i] - tcs.Target[tcs.Names[i]], tcs.Target[tcs.Names[j]] = tcs.Target[tcs.Names[j]], tcs.Target[tcs.Names[i]] - tcs.DataTypes[i], tcs.DataTypes[j] = tcs.DataTypes[j], tcs.DataTypes[i] - tcs.Roles[i], tcs.Roles[j] = tcs.Roles[j], tcs.Roles[i] +func (cl ColumnList) Swap(i, j int) { + cl[i], cl[j] = cl[j], cl[i] } -func (tcs TargetColumns) Sort() { - sort.Sort(tcs) +func (cl ColumnList) Sort() { + sort.Sort(cl) } diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index f80cbf4767a8f..8e659d89fdaf3 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -17,21 +17,6 @@ const ( insertIntoSQLTemplate = "INSERT INTO %s(%s) VALUES(%s)" ) -// GroupMetricsByMeasurement groups the list of metrics by the measurement name. -func GroupMetricsByMeasurement(m []telegraf.Metric) map[string][]telegraf.Metric { - groups := make(map[string][]telegraf.Metric) - for _, metric := range m { - var group []telegraf.Metric - var ok bool - name := metric.Name() - if group, ok = groups[name]; !ok { - group = []telegraf.Metric{} - } - groups[name] = append(group, metric) - } - return groups -} - // BuildJsonb returns a byte array of the json representation // of the passed object. 
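 // It is used to serialize a metric's tag map or field map into a single
 // jsonb column when the TagsAsJsonb or FieldsAsJsonb options are enabled.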
func BuildJsonb(data interface{}) ([]byte, error) { @@ -70,6 +55,7 @@ const ( PgBigInt = "bigint" PgReal = "real" PgDoublePrecision = "double precision" + PgNumeric = "numeric" PgText = "text" PgTimestampWithTimeZone = "timestamp with time zone" PgTimestampWithoutTimeZone = "timestamp without time zone" @@ -83,9 +69,11 @@ func DerivePgDatatype(value interface{}) PgDataType { switch value.(type) { case bool: return PgBool - case uint64, int64, int, uint: + case uint64: + return PgNumeric + case int64, int, uint, uint32: return PgBigInt - case uint32, int32: + case int32: return PgInteger case int16, int8: return PgSmallInt @@ -113,10 +101,10 @@ func PgTypeCanContain(canThis PgDataType, containThis PgDataType) bool { return containThis == PgInteger || containThis == PgSmallInt case PgInteger: return containThis == PgSmallInt - case PgDoublePrecision: + case PgDoublePrecision, PgReal: // You can store a real in a double, you just lose precision return containThis == PgReal || containThis == PgBigInt || containThis == PgInteger || containThis == PgSmallInt - case PgReal: - return containThis == PgBigInt || containThis == PgInteger || containThis == PgSmallInt + case PgNumeric: + return containThis == PgBigInt || containThis == PgSmallInt || containThis == PgInteger || containThis == PgReal || containThis == PgDoublePrecision case PgTimestampWithTimeZone: return containThis == PgTimestampWithoutTimeZone default: From 79146a8d0b867f8b203baef3fe54a838acf12124 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sat, 14 Nov 2020 00:18:58 -0500 Subject: [PATCH 067/121] outputs.postgresql: drop unsupported fields when schema updates disabled --- plugins/outputs/postgresql/postgresql.go | 24 ++++++++++++++++--- .../postgresql/tables/table_manager.go | 21 +++++++--------- plugins/outputs/postgresql/transformer.go | 10 ++++++++ 3 files changed, 39 insertions(+), 16 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 1d019bbdd952b..7e3a734169817 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log" + "strings" "time" "github.com/jackc/pgx/v4" @@ -248,16 +249,33 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, rowSource *Row targetTagColumns := rowSource.TagColumns() measureName := rowSource.Name() - if err := p.tables.EnsureStructure(ctx, measureName, targetColumns, p.DoSchemaUpdates); err != nil { + missingColumns, err := p.tables.EnsureStructure(ctx, measureName, targetColumns, p.DoSchemaUpdates) + if err != nil { return fmt.Errorf("validating table '%s': %w", measureName, err) } if p.TagsAsForeignkeys { tagTableName := measureName + p.TagTableSuffix - if err := p.tagTables.EnsureStructure(ctx, tagTableName, targetTagColumns, p.DoSchemaUpdates); err != nil { + if _, err := p.tagTables.EnsureStructure(ctx, tagTableName, targetTagColumns, true); err != nil { return fmt.Errorf("validating table '%s': %w", tagTableName, err) } } + if missingColumns != nil { + for _, col := range missingColumns { + if col.Role != utils.FieldColType { + return fmt.Errorf("table '%s' missing critical column: %s %s", measureName, col.Name, col.Type) + } + rowSource.DropFieldColumn(col) + targetColumns = rowSource.Columns() + } + + colSpecs := make([]string, len(missingColumns)) + for i, col := range missingColumns { + colSpecs[i] = col.Name + " " + string(col.Type) + } + log.Printf("[outputs.postgresql] Error: Table '%s' missing columns (fields 
dropped): %s", measureName, strings.Join(colSpecs, ", ")) + } + columnPositions := make(map[string]int, len(targetColumns)) columnNames := make([]string, len(targetColumns)) for i, col := range targetColumns { @@ -266,6 +284,6 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, rowSource *Row } fullTableName := utils.FullTableName(p.Schema, measureName) - _, err := p.db.CopyFrom(ctx, fullTableName, columnNames, rowSource) + _, err = p.db.CopyFrom(ctx, fullTableName, columnNames, rowSource) return err } diff --git a/plugins/outputs/postgresql/tables/table_manager.go b/plugins/outputs/postgresql/tables/table_manager.go index d1d07dc82b1da..2e526b79eb4d9 100644 --- a/plugins/outputs/postgresql/tables/table_manager.go +++ b/plugins/outputs/postgresql/tables/table_manager.go @@ -129,14 +129,14 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName str return nil } -func (tm *TableManager) EnsureStructure(ctx context.Context, tableName string, columns []utils.Column, doSchemaUpdate bool) error { +func (tm *TableManager) EnsureStructure(ctx context.Context, tableName string, columns []utils.Column, doSchemaUpdate bool) ([]utils.Column, error) { tm.tablesMutex.RLock() structure, ok := tm.Tables[tableName] tm.tablesMutex.RUnlock() if !ok { // We don't know about the table. First try to query it. if err := tm.refreshTableStructure(ctx, tableName); err != nil { - return fmt.Errorf("querying table structure: %w", err) + return nil, fmt.Errorf("querying table structure: %w", err) } tm.tablesMutex.RLock() structure, ok = tm.Tables[tableName] @@ -144,7 +144,7 @@ func (tm *TableManager) EnsureStructure(ctx context.Context, tableName string, c if !ok { // Ok, table doesn't exist, now we can create it. if err := tm.createTable(ctx, tableName, columns); err != nil { - return fmt.Errorf("creating table: %w", err) + return nil, fmt.Errorf("creating table: %w", err) } tm.tablesMutex.RLock() structure = tm.Tables[tableName] @@ -154,25 +154,20 @@ func (tm *TableManager) EnsureStructure(ctx context.Context, tableName string, c missingColumns, err := tm.checkColumns(structure, columns) if err != nil { - return fmt.Errorf("column validation: %w", err) + return nil, fmt.Errorf("column validation: %w", err) } if len(missingColumns) == 0 { - return nil + return nil, nil } if doSchemaUpdate { if err := tm.addColumnsToTable(ctx, tableName, missingColumns); err != nil { - return fmt.Errorf("adding columns: %w", err) + return nil, fmt.Errorf("adding columns: %w", err) } - } else { - colSpecs := make([]string, len(missingColumns)) - for i, col := range missingColumns { - colSpecs[i] = col.Name + " " + string(col.Type) - } - return fmt.Errorf("missing columns: %s", strings.Join(colSpecs, ", ")) + return nil, nil } - return nil + return missingColumns, nil } func (tm *TableManager) checkColumns(structure map[string]utils.PgDataType, columns []utils.Column) ([]utils.Column, error) { diff --git a/plugins/outputs/postgresql/transformer.go b/plugins/outputs/postgresql/transformer.go index e34171e23c530..f3efcc7d9d455 100644 --- a/plugins/outputs/postgresql/transformer.go +++ b/plugins/outputs/postgresql/transformer.go @@ -134,7 +134,17 @@ func (rs *RowSource) DropFieldColumn(col utils.Column) { panic(fmt.Sprintf("Tried to perform an invalid field drop. This should not have happened. 
measurement=%s field=%s", rs.Name(), col.Name)) } + pos, ok := rs.fieldPositions[col.Name] + if !ok { + return + } delete(rs.fieldPositions, col.Name) + for n, p := range rs.fieldPositions { + if p > pos { + rs.fieldPositions[n] -= 1 + } + } + for i, fc := range rs.fieldColumns { if fc.Name != col.Name { continue From aff0c3e93eb579dda3ca4b0327866dc3b6045961 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sun, 15 Nov 2020 00:20:13 -0500 Subject: [PATCH 068/121] outputs.postgresql: give more control over table templating Previously the plugin only allowed very minor control over table templating. Could only adjust the table creation SQL for the main metrics table, and not the tag table. User also had no control over the ALTER TABLE statements. This changes it so that all table CREATE & ALTER statements are configurable. Additionally the templating was switched out from a rigid string substitution to Go templates. This allows a lot more flexibility in what can be done. Lastly instead of only having a single statement per operation, requiring `;` to delimit multiple statements, the configuration now allows a list of statements, all run within a transaction. This is because pgx uses prepared statements, and postgres does not allow more than 1 command (command, not query) inside a prepared statement. --- .../standard_columns.go => columns.go} | 2 +- plugins/outputs/postgresql/postgresql.go | 83 ++---- plugins/outputs/postgresql/postgresql_test.go | 4 +- .../{transformer.go => row_source.go} | 20 +- plugins/outputs/postgresql/table_manager.go | 270 ++++++++++++++++++ .../outputs/postgresql/tables/manager_test.go | 10 +- .../postgresql/tables/table_manager.go | 186 ------------ plugins/outputs/postgresql/template.go | 192 +++++++++++++ 8 files changed, 508 insertions(+), 259 deletions(-) rename plugins/outputs/postgresql/{columns/standard_columns.go => columns.go} (98%) rename plugins/outputs/postgresql/{transformer.go => row_source.go} (87%) create mode 100644 plugins/outputs/postgresql/table_manager.go delete mode 100644 plugins/outputs/postgresql/tables/table_manager.go create mode 100644 plugins/outputs/postgresql/template.go diff --git a/plugins/outputs/postgresql/columns/standard_columns.go b/plugins/outputs/postgresql/columns.go similarity index 98% rename from plugins/outputs/postgresql/columns/standard_columns.go rename to plugins/outputs/postgresql/columns.go index 43c71b3df38a4..ec438e8582f3b 100644 --- a/plugins/outputs/postgresql/columns/standard_columns.go +++ b/plugins/outputs/postgresql/columns.go @@ -1,4 +1,4 @@ -package columns +package postgresql import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 7e3a734169817..550ae9c94c072 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -2,9 +2,7 @@ package postgresql import ( "context" - "fmt" "log" - "strings" "time" "github.com/jackc/pgx/v4" @@ -12,26 +10,26 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/tables" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) type Postgresql struct { - Connection string - Schema string - DoSchemaUpdates bool - TagsAsForeignkeys bool - TagsAsJsonb bool - FieldsAsJsonb bool - TableTemplate string - TagTableSuffix string - PoolSize int + Connection string + Schema string + 
TagsAsForeignkeys bool + TagsAsJsonb bool + FieldsAsJsonb bool + CreateTemplates []*Template + AddColumnTemplates []*Template + TagTableCreateTemplates []*Template + TagTableAddColumnTemplates []*Template + TagTableSuffix string + PoolSize int dbContext context.Context dbContextCancel func() db *pgxpool.Pool - tables *tables.TableManager - tagTables *tables.TableManager + tableManager *TableManager writeChan chan *RowSource } @@ -44,10 +42,12 @@ const createTableTemplate = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})" func newPostgresql() *Postgresql { return &Postgresql{ - Schema: "public", - TableTemplate: createTableTemplate, - TagTableSuffix: "_tag", - DoSchemaUpdates: true, + Schema: "public", + CreateTemplates: []*Template{TableCreateTemplate}, + AddColumnTemplates: []*Template{TableAddColumnTemplate}, + TagTableCreateTemplates: []*Template{TableCreateTemplate}, + TagTableAddColumnTemplates: []*Template{TableAddColumnTemplate}, + TagTableSuffix: "_tag", } } @@ -58,8 +58,6 @@ func (p *Postgresql) Connect() error { return err } - poolConfig.AfterConnect = p.dbConnectedHook - // Yes, we're not supposed to store the context. However since we don't receive a context, we have to. p.dbContext, p.dbContextCancel = context.WithCancel(context.Background()) p.db, err = pgxpool.ConnectConfig(p.dbContext, poolConfig) @@ -67,10 +65,7 @@ func (p *Postgresql) Connect() error { log.Printf("E! Couldn't connect to server\n%v", err) return err } - p.tables = tables.NewManager(p.db, p.Schema, p.TableTemplate) - if p.TagsAsForeignkeys { - p.tagTables = tables.NewManager(p.db, p.Schema, createTableTemplate) - } + p.tableManager = NewTableManager(p) maxConns := int(p.db.Stat().MaxConns()) if maxConns > 1 { @@ -85,7 +80,7 @@ func (p *Postgresql) Connect() error { // dbConnectHook checks to see whether we lost all connections, and if so resets any known state of the database (e.g. cached tables). func (p *Postgresql) dbConnectedHook(ctx context.Context, conn *pgx.Conn) error { - if p.db == nil || p.tables == nil { + if p.db == nil || p.tableManager == nil { // This will happen on the initial connect since we haven't set it yet. // Also meaning there is no state to reset. return nil @@ -96,17 +91,14 @@ func (p *Postgresql) dbConnectedHook(ctx context.Context, conn *pgx.Conn) error return nil } - p.tables.ClearTableCache() - if p.tagTables != nil { - p.tagTables.ClearTableCache() - } + p.tableManager.ClearTableCache() return nil } // Close closes the connection to the database func (p *Postgresql) Close() error { - p.tables = nil + p.tableManager = nil p.dbContextCancel() p.db.Close() return nil @@ -245,36 +237,13 @@ func (p *Postgresql) writeRetry(ctx context.Context, rowSource *RowSource) error // Writes the metrics from a specified measure. All the provided metrics must belong to the same measurement. 
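 // Field columns that are missing from the table (and could not be added) are dropped from each row before the copy; see TableManager.MatchSource.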
func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, rowSource *RowSource) error { - targetColumns := rowSource.Columns() - targetTagColumns := rowSource.TagColumns() - measureName := rowSource.Name() - - missingColumns, err := p.tables.EnsureStructure(ctx, measureName, targetColumns, p.DoSchemaUpdates) + err := p.tableManager.MatchSource(ctx, rowSource) if err != nil { - return fmt.Errorf("validating table '%s': %w", measureName, err) - } - if p.TagsAsForeignkeys { - tagTableName := measureName + p.TagTableSuffix - if _, err := p.tagTables.EnsureStructure(ctx, tagTableName, targetTagColumns, true); err != nil { - return fmt.Errorf("validating table '%s': %w", tagTableName, err) - } + return err } - if missingColumns != nil { - for _, col := range missingColumns { - if col.Role != utils.FieldColType { - return fmt.Errorf("table '%s' missing critical column: %s %s", measureName, col.Name, col.Type) - } - rowSource.DropFieldColumn(col) - targetColumns = rowSource.Columns() - } - - colSpecs := make([]string, len(missingColumns)) - for i, col := range missingColumns { - colSpecs[i] = col.Name + " " + string(col.Type) - } - log.Printf("[outputs.postgresql] Error: Table '%s' missing columns (fields dropped): %s", measureName, strings.Join(colSpecs, ", ")) - } + targetColumns := rowSource.Columns() + measureName := rowSource.Name() columnPositions := make(map[string]int, len(targetColumns)) columnNames := make([]string, len(targetColumns)) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 7efff2e1dcd77..b342bf10cb1a5 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -72,7 +72,7 @@ func prepareAllColumnsInOnePlaceNoJSON() (*Postgresql, []telegraf.Metric, map[st return &Postgresql{ TagTableSuffix: "_tag", DoSchemaUpdates: true, - tables: &mockTables{t: map[string]bool{"m": true}, missingCols: []int{}}, + tableManager: &mockTables{t: map[string]bool{"m": true}, missingCols: []int{}}, rows: &mockTransformer{rows: [][]interface{}{nil, nil, nil}}, columns: columns.NewMapper(false, false, false), db: &mockDb{}, @@ -96,7 +96,7 @@ func prepareAllColumnsInOnePlaceTagsAndFieldsJSON() (*Postgresql, []telegraf.Met TagsAsJsonb: true, FieldsAsJsonb: true, dbConnLock: sync.Mutex{}, - tables: &mockTables{t: map[string]bool{"m": true}, missingCols: []int{}}, + tableManager: &mockTables{t: map[string]bool{"m": true}, missingCols: []int{}}, columns: columns.NewMapper(false, true, true), rows: &mockTransformer{rows: [][]interface{}{nil, nil, nil}}, db: &mockDb{}, diff --git a/plugins/outputs/postgresql/transformer.go b/plugins/outputs/postgresql/row_source.go similarity index 87% rename from plugins/outputs/postgresql/transformer.go rename to plugins/outputs/postgresql/row_source.go index f3efcc7d9d455..116e5e6bb99d9 100644 --- a/plugins/outputs/postgresql/transformer.go +++ b/plugins/outputs/postgresql/row_source.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/columns" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) @@ -59,7 +58,7 @@ func (rs *RowSource) AddMetric(metric telegraf.Metric) { tags := metric.TagList() values := make([]interface{}, len(tags)) for i, tag := range tags { - values[i] = columns.ColumnFromTag(tag.Key, tag.Value) + values[i] = ColumnFromTag(tag.Key, tag.Value) } rs.tagSets[tagID] = values } @@ -69,7 +68,7 @@ func (rs *RowSource) AddMetric(metric 
telegraf.Metric) {
 	for _, t := range metric.TagList() {
 		if _, ok := rs.tagPositions[t.Key]; !ok {
 			rs.tagPositions[t.Key] = len(rs.tagPositions)
-			rs.tagColumns = append(rs.tagColumns, columns.ColumnFromTag(t.Key, t.Value))
+			rs.tagColumns = append(rs.tagColumns, ColumnFromTag(t.Key, t.Value))
 		}
 	}
@@ -78,7 +77,7 @@ func (rs *RowSource) AddMetric(metric telegraf.Metric) {
 		for _, f := range metric.FieldList() {
 			if _, ok := rs.fieldPositions[f.Key]; !ok {
 				rs.fieldPositions[f.Key] = len(rs.fieldPositions)
-				rs.fieldColumns = append(rs.fieldColumns, columns.ColumnFromField(f.Key, f.Value))
+				rs.fieldColumns = append(rs.fieldColumns, ColumnFromField(f.Key, f.Value))
 			}
 		}
 	}
@@ -97,11 +96,11 @@ func (rs *RowSource) TagColumns() []utils.Column {
 	var cols []utils.Column
 
 	if rs.postgresql.TagsAsForeignkeys {
-		cols = append(cols, columns.TagIDColumn)
+		cols = append(cols, TagIDColumn)
 	}
 
 	if rs.postgresql.TagsAsJsonb {
-		cols = append(cols, columns.TagsJSONColumn)
+		cols = append(cols, TagsJSONColumn)
 	} else {
 		cols = append(cols, rs.tagColumns...)
 	}
@@ -111,17 +110,17 @@ func (rs *RowSource) TagColumns() []utils.Column {
 
 func (rs *RowSource) Columns() []utils.Column {
 	cols := []utils.Column{
-		columns.TimeColumn,
+		TimeColumn,
 	}
 
 	if rs.postgresql.TagsAsForeignkeys {
-		cols = append(cols, columns.TagIDColumn)
+		cols = append(cols, TagIDColumn)
 	} else {
 		cols = append(cols, rs.TagColumns()...)
 	}
 
 	if rs.postgresql.FieldsAsJsonb {
-		cols = append(cols, columns.FieldsJSONColumn)
+		cols = append(cols, FieldsJSONColumn)
 	} else {
 		cols = append(cols, rs.fieldColumns...)
 	}
@@ -158,6 +157,9 @@ func (rs *RowSource) Next() bool {
 	if rs.cursor+1 >= len(rs.metrics) {
 		return false
 	}
+	//TODO check if all fields were dropped and if so, skip to next.
+	// This can happen if schema updates are disabled and the table is missing all the requisite columns.
+	// We could also use this to skip when any of the tag columns are missing. Currently this is detected in MatchSource and we throw an error.
 	rs.cursor += 1
 	return true
 }
diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go
new file mode 100644
index 0000000000000..5054fe75ca6e1
--- /dev/null
+++ b/plugins/outputs/postgresql/table_manager.go
@@ -0,0 +1,270 @@
+package postgresql
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"strings"
+	"sync"
+
+	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
+)
+
+const (
+	refreshTableStructureStatement = "SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = $1 and table_name = $2"
+)
+
+type TableManager struct {
+	*Postgresql
+
+	Tables      map[string]map[string]utils.Column
+	tablesMutex sync.RWMutex
+}
+
+// NewTableManager returns a TableManager that handles checking and updating
+// the state of tables in the PG database.
+func NewTableManager(postgresql *Postgresql) *TableManager {
+	return &TableManager{
+		Postgresql: postgresql,
+		Tables:     make(map[string]map[string]utils.Column),
+	}
+}
+
+// ClearTableCache clears the table structure cache.
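+// Clearing forces the table structures to be re-queried from the database
+// the next time they are needed.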
+func (tm *TableManager) ClearTableCache() { + tm.tablesMutex.Lock() + tm.Tables = make(map[string]map[string]utils.Column) + tm.tablesMutex.Unlock() +} + +func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName string) error { + rows, err := tm.db.Query(ctx, refreshTableStructureStatement, tm.Schema, tableName) + if err != nil { + return err + } + defer rows.Close() + + cols := make(map[string]utils.Column) + for rows.Next() { + var colName, colTypeStr string + err := rows.Scan(&colName, &colTypeStr) + if err != nil { + return err + } + cols[colName] = utils.Column{ + Name: colName, + Type: utils.PgDataType(colTypeStr), + Role: utils.FieldColType, //FIXME this isn't necessarily correct. could be some other role. But while it's a lie, I don't think it affect anything. + } + } + + if len(cols) > 0 { + tm.tablesMutex.Lock() + tm.Tables[tableName] = cols + tm.tablesMutex.Unlock() + } + + return nil +} + +func (tm *TableManager) EnsureStructure( + ctx context.Context, + tableName string, + columns []utils.Column, + createTemplates []*Template, + addColumnsTemplates []*Template, + metricsTableName string, + tagsTableName string, +) ([]utils.Column, error) { + tm.tablesMutex.RLock() + dbColumns, ok := tm.Tables[tableName] + tm.tablesMutex.RUnlock() + if !ok { + // We don't know about the table. First try to query it. + if err := tm.refreshTableStructure(ctx, tableName); err != nil { + return nil, fmt.Errorf("querying table structure: %w", err) + } + tm.tablesMutex.RLock() + dbColumns, ok = tm.Tables[tableName] + tm.tablesMutex.RUnlock() + if !ok { + // Ok, table doesn't exist, now we can create it. + if err := tm.executeTemplates(ctx, createTemplates, tableName, columns, metricsTableName, tagsTableName); err != nil { + return nil, fmt.Errorf("creating table: %w", err) + } + tm.tablesMutex.RLock() + dbColumns = tm.Tables[tableName] + tm.tablesMutex.RUnlock() + } + } + + missingColumns, err := tm.checkColumns(dbColumns, columns) + if err != nil { + return nil, fmt.Errorf("column validation: %w", err) + } + if len(missingColumns) == 0 { + return nil, nil + } + + if len(addColumnsTemplates) == 0 { + return missingColumns, nil + } + + if err := tm.executeTemplates(ctx, addColumnsTemplates, tableName, missingColumns, metricsTableName, tagsTableName); err != nil { + return nil, fmt.Errorf("adding columns: %w", err) + } + return tm.checkColumns(tm.Tables[tableName], columns) +} + +func (tm *TableManager) checkColumns(dbColumns map[string]utils.Column, srcColumns []utils.Column) ([]utils.Column, error) { + var missingColumns []utils.Column + for _, srcCol := range srcColumns { + dbCol, ok := dbColumns[srcCol.Name] + if !ok { + missingColumns = append(missingColumns, srcCol) + continue + } + if !utils.PgTypeCanContain(dbCol.Type, srcCol.Type) { + return nil, fmt.Errorf("column type '%s' cannot store '%s'", dbCol.Type, srcCol.Type) + } + } + if len(missingColumns) > 0 { + // Sort so that: + // * When we create/alter the table the columns are in a sane order (telegraf gives us the fields in random order) + // * When we display errors about missing columns, the order is also sane, and consistent + utils.ColumnList(missingColumns).Sort() + } + return missingColumns, nil +} + +func (tm *TableManager) executeTemplates( + ctx context.Context, + tmpls []*Template, + tableName string, + newColumns []utils.Column, + metricsTableName string, + tagsTableName string, +) error { + tmplTable := NewTemplateTable(tm.Schema, tableName, colMapToSlice(tm.Tables[tableName])) + metricsTmplTable := 
NewTemplateTable(tm.Schema, metricsTableName, colMapToSlice(tm.Tables[metricsTableName])) + tagsTmplTable := NewTemplateTable(tm.Schema, tagsTableName, colMapToSlice(tm.Tables[tagsTableName])) + + /* https://github.com/jackc/pgx/issues/872 + stmts := make([]string, len(tmpls)) + batch := &pgx.Batch{} + for i, tmpl := range tmpls { + sql, err := tmpl.Render(tmplTable, newColumns, metricsTmplTable, tagsTmplTable) + if err != nil { + return err + } + stmts[i] = string(sql) + batch.Queue(stmts[i]) + } + + batch.Queue(refreshTableStructureStatement, tm.Schema, tableName) + + batchResult := tm.db.SendBatch(ctx, batch) + defer batchResult.Close() + + for i := 0; i < len(tmpls); i++ { + if x, err := batchResult.Exec(); err != nil { + return fmt.Errorf("executing `%.40s...`: %v %w", stmts[i], x, err) + } + } + + rows, err := batchResult.Query() + if err != nil { + return fmt.Errorf("refreshing table: %w", err) + } + tm.refreshTableStructureResponse(tableName, rows) + */ + + tx, err := tm.db.Begin(ctx) + if err != nil { + return err + } + + for _, tmpl := range tmpls { + sql, err := tmpl.Render(tmplTable, newColumns, metricsTmplTable, tagsTmplTable) + if err != nil { + return err + } + if _, err := tx.Exec(ctx, string(sql)); err != nil { + _ = tx.Rollback(ctx) + return fmt.Errorf("executing `%.40s`: %w", sql, err) + } + } + + if err := tx.Commit(ctx); err != nil { + return err + } + + return tm.refreshTableStructure(ctx, tableName) +} + +func colMapToSlice(colMap map[string]utils.Column) []utils.Column { + if colMap == nil { + return nil + } + cols := make([]utils.Column, 0, len(colMap)) + for _, col := range colMap { + cols = append(cols, col) + } + return cols +} + +func (tm *TableManager) MatchSource(ctx context.Context, rowSource *RowSource) error { + metricsTableName := rowSource.Name() + var tagsTableName string + if tm.TagsAsForeignkeys { + tagsTableName = metricsTableName + tm.TagTableSuffix + + missingCols, err := tm.EnsureStructure( + ctx, + tagsTableName, + rowSource.TagColumns(), + tm.TagTableCreateTemplates, + tm.TagTableAddColumnTemplates, + metricsTableName, + tagsTableName, + ) + if err != nil { + return err + } + + if len(missingCols) > 0 { + colDefs := make([]string, len(missingCols)) + for i, col := range missingCols { + colDefs[i] = col.Name + " " + string(col.Type) + } + //TODO just drop the individual rows instead. 
See RowSource.Next() + return fmt.Errorf("missing tag columns: %s", strings.Join(colDefs, ", ")) + } + } + + missingCols, err := tm.EnsureStructure(ctx, + metricsTableName, + rowSource.Columns(), + tm.CreateTemplates, + tm.AddColumnTemplates, + metricsTableName, + tagsTableName, + ) + if err != nil { + return err + } + + if len(missingCols) > 0 { + colDefs := make([]string, len(missingCols)) + for i, col := range missingCols { + colDefs[i] = col.Name + " " + string(col.Type) + } + log.Printf("[outputs.postgresql] Error: table '%s' is missing columns (dropping fields): %s", metricsTableName, strings.Join(colDefs, ", ")) + + for _, col := range missingCols { + rowSource.DropFieldColumn(col) + } + } + + return nil +} diff --git a/plugins/outputs/postgresql/tables/manager_test.go b/plugins/outputs/postgresql/tables/manager_test.go index 35d0513b9f689..7f9759b4f0aac 100644 --- a/plugins/outputs/postgresql/tables/manager_test.go +++ b/plugins/outputs/postgresql/tables/manager_test.go @@ -5,9 +5,11 @@ import ( "testing" "github.com/influxdata/telegraf/plugins/outputs/postgresql/db" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" "github.com/jackc/pgx" "github.com/stretchr/testify/assert" + + "github.com/influxdata/telegraf/plugins/outputs/postgresql" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) type mockDb struct { @@ -35,7 +37,7 @@ func (m *mockDb) IsAlive() bool { return true } func TestNewManager(t *testing.T) { db := &mockDb{} - res := NewManager(db, "schema", "table template").(*TableManager) + res := postgresql.NewTableManager(db, "schema", "table template").(*postgresql.TableManager) assert.Equal(t, "table template", res.tableTemplate) assert.Equal(t, "schema", res.schema) assert.Equal(t, db, res.db) @@ -77,7 +79,7 @@ func TestExists(t *testing.T) { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - manager := &TableManager{ + manager := &postgresql.TableManager{ Tables: tc.cache, db: tc.db, } @@ -126,7 +128,7 @@ func TestCreateTable(t *testing.T) { for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - manager := &TableManager{ + manager := &postgresql.TableManager{ Tables: map[string]bool{}, db: tc.db, tableTemplate: tc.template, diff --git a/plugins/outputs/postgresql/tables/table_manager.go b/plugins/outputs/postgresql/tables/table_manager.go deleted file mode 100644 index 2e526b79eb4d9..0000000000000 --- a/plugins/outputs/postgresql/tables/table_manager.go +++ /dev/null @@ -1,186 +0,0 @@ -package tables - -import ( - "context" - "fmt" - "strings" - "sync" - - "github.com/jackc/pgx/v4/pgxpool" - - "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" -) - -const ( - addColumnTemplate = "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s %s;" - findExistingColumnsTemplate = "SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = $1 and table_name = $2" -) - -type TableManager struct { - Tables map[string]map[string]utils.PgDataType - tablesMutex sync.RWMutex - db *pgxpool.Pool - schema string - tableTemplate string -} - -// NewManager returns an instance of the tables.Manager interface -// that can handle checking and updating the state of tables in the PG database. -func NewManager(db *pgxpool.Pool, schema, tableTemplate string) *TableManager { - return &TableManager{ - Tables: make(map[string]map[string]utils.PgDataType), - db: db, - tableTemplate: tableTemplate, - schema: schema, - } -} - -// ClearTableCache clear the table structure cache. 
-func (tm *TableManager) ClearTableCache() { - tm.tablesMutex.Lock() - tm.Tables = make(map[string]map[string]utils.PgDataType) - tm.tablesMutex.Unlock() -} - -// Creates a table in the database with the column names and types specified in 'colDetails' -func (tm *TableManager) createTable(ctx context.Context, tableName string, colDetails []utils.Column) error { - utils.ColumnList(colDetails).Sort() - sql := tm.generateCreateTableSQL(tableName, colDetails) - if _, err := tm.db.Exec(ctx, sql); err != nil { - return err - } - - structure := map[string]utils.PgDataType{} - for _, col := range colDetails { - structure[col.Name] = col.Type - } - - tm.tablesMutex.Lock() - tm.Tables[tableName] = structure - tm.tablesMutex.Unlock() - return nil -} - -// addColumnsToTable adds the indicated columns to the table in the database. -// This is an idempotent operation, so attempting to add a column which already exists is a silent no-op. -func (tm *TableManager) addColumnsToTable(ctx context.Context, tableName string, colDetails []utils.Column) error { - utils.ColumnList(colDetails).Sort() - fullTableName := utils.FullTableName(tm.schema, tableName).Sanitize() - for _, col := range colDetails { - addColumnQuery := fmt.Sprintf(addColumnTemplate, fullTableName, utils.QuoteIdent(col.Name), col.Type) - _, err := tm.db.Exec(ctx, addColumnQuery) - if err != nil { - return fmt.Errorf("adding '%s': %w", col.Name, err) - } - - //FIXME if the column exists, but is a different type, we won't get an error, but we need to ensure the type is one - // we can use, and not just assume it's correct. - tm.tablesMutex.Lock() - tm.Tables[tableName][col.Name] = col.Type - tm.tablesMutex.Unlock() - } - - return nil -} - -// Populate the 'tableTemplate' (supplied as config option to the plugin) with the details of -// the required columns for the measurement to create a 'CREATE TABLE' SQL statement. -// The order, column names and data types are given in 'colDetails'. 
-func (tm *TableManager) generateCreateTableSQL(tableName string, colDetails []utils.Column) string { - colDefs := make([]string, len(colDetails)) - var pk []string - for i, col := range colDetails { - colDefs[i] = utils.QuoteIdent(col.Name) + " " + string(col.Type) - if col.Role != utils.FieldColType { - pk = append(pk, col.Name) - } - } - - fullTableName := utils.FullTableName(tm.schema, tableName).Sanitize() - query := strings.Replace(tm.tableTemplate, "{TABLE}", fullTableName, -1) - query = strings.Replace(query, "{TABLELITERAL}", utils.QuoteLiteral(fullTableName), -1) - query = strings.Replace(query, "{COLUMNS}", strings.Join(colDefs, ","), -1) - query = strings.Replace(query, "{KEY_COLUMNS}", strings.Join(pk, ","), -1) - - return query -} - -func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName string) error { - rows, err := tm.db.Query(ctx, findExistingColumnsTemplate, tm.schema, tableName) - if err != nil { - return err - } - defer rows.Close() - cols := make(map[string]utils.PgDataType) - for rows.Next() { - var colName, colTypeStr string - err := rows.Scan(&colName, &colTypeStr) - if err != nil { - return err - } - cols[colName] = utils.PgDataType(colTypeStr) - } - - if len(cols) > 0 { - tm.tablesMutex.Lock() - tm.Tables[tableName] = cols - tm.tablesMutex.Unlock() - } - return nil -} - -func (tm *TableManager) EnsureStructure(ctx context.Context, tableName string, columns []utils.Column, doSchemaUpdate bool) ([]utils.Column, error) { - tm.tablesMutex.RLock() - structure, ok := tm.Tables[tableName] - tm.tablesMutex.RUnlock() - if !ok { - // We don't know about the table. First try to query it. - if err := tm.refreshTableStructure(ctx, tableName); err != nil { - return nil, fmt.Errorf("querying table structure: %w", err) - } - tm.tablesMutex.RLock() - structure, ok = tm.Tables[tableName] - tm.tablesMutex.RUnlock() - if !ok { - // Ok, table doesn't exist, now we can create it. 
- if err := tm.createTable(ctx, tableName, columns); err != nil { - return nil, fmt.Errorf("creating table: %w", err) - } - tm.tablesMutex.RLock() - structure = tm.Tables[tableName] - tm.tablesMutex.RUnlock() - } - } - - missingColumns, err := tm.checkColumns(structure, columns) - if err != nil { - return nil, fmt.Errorf("column validation: %w", err) - } - if len(missingColumns) == 0 { - return nil, nil - } - - if doSchemaUpdate { - if err := tm.addColumnsToTable(ctx, tableName, missingColumns); err != nil { - return nil, fmt.Errorf("adding columns: %w", err) - } - return nil, nil - } - - return missingColumns, nil -} - -func (tm *TableManager) checkColumns(structure map[string]utils.PgDataType, columns []utils.Column) ([]utils.Column, error) { - var missingColumns []utils.Column - for _, col := range columns { - dbColType, ok := structure[col.Name] - if !ok { - missingColumns = append(missingColumns, col) - continue - } - if !utils.PgTypeCanContain(dbColType, col.Type) { - return nil, fmt.Errorf("column type '%s' cannot store '%s'", dbColType, col.Type) - } - } - return missingColumns, nil -} diff --git a/plugins/outputs/postgresql/template.go b/plugins/outputs/postgresql/template.go new file mode 100644 index 0000000000000..2c2a77d68d800 --- /dev/null +++ b/plugins/outputs/postgresql/template.go @@ -0,0 +1,192 @@ +package postgresql + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" + + "github.com/Masterminds/sprig" +) + +var TableCreateTemplate = newTemplate(`CREATE TABLE {{.table}} ({{.columns}})`) +var TableAddColumnTemplate = newTemplate(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`) + +var templateFuncs = map[string]interface{}{ + "quoteIdentifier": QuoteIdentifier, + "quoteLiteral": QuoteLiteral, +} + +func asString(obj interface{}) string { + switch obj := obj.(type) { + case string: + return obj + case []byte: + return string(obj) + case fmt.Stringer: + return obj.String() + default: + return fmt.Sprintf("%v", obj) + } +} +func QuoteIdentifier(name interface{}) string { + return `"` + strings.ReplaceAll(asString(name), `"`, `""`) + `"` +} +func QuoteLiteral(str fmt.Stringer) string { + return "'" + strings.ReplaceAll(asString(str), "'", "''") + "'" +} + +type TemplateTable struct { + Schema string + Name string + Columns TemplateColumns +} + +func NewTemplateTable(schemaName, tableName string, columns []utils.Column) *TemplateTable { + if tableName == "" { + return nil + } + return &TemplateTable{ + Schema: schemaName, + Name: tableName, + Columns: NewTemplateColumns(columns), + } +} + +//func (tt *TemplateTable) SetName(name string) { +// tt.Name = name +//} +//func (tt *TemplateTable) SetSchema(schema string) { +// tt.Schema = schema +//} +func (tt *TemplateTable) String() string { + return tt.Identifier() +} +func (tt *TemplateTable) Identifier() string { + return QuoteIdentifier(tt.Schema) + "." 
+ QuoteIdentifier(tt.Name) +} + +//func (tt *TemplateTable) Literal() string { +// return QuoteLiteral(tt.Identifier()) +//} + +type TemplateColumn utils.Column + +func (tc TemplateColumn) String() string { + return tc.Definition() +} +func (tc TemplateColumn) Definition() string { + return tc.Identifier() + " " + string(tc.Type) +} +func (tc TemplateColumn) Identifier() string { + return QuoteIdentifier(tc.Name) +} + +//func (tc TemplateColumn) Literal() string { +// return QuoteLiteral(tc.Name) +//} + +type TemplateColumns []TemplateColumn + +func NewTemplateColumns(cols []utils.Column) TemplateColumns { + tcs := make(TemplateColumns, len(cols)) + for i, col := range cols { + tcs[i] = TemplateColumn(col) + } + return tcs +} + +func (tcs TemplateColumns) List() []TemplateColumn { + return tcs +} + +func (tcs TemplateColumns) Definitions() []string { + defs := make([]string, len(tcs)) + for i, tc := range tcs { + defs[i] = tc.Definition() + } + return defs +} + +func (tcs TemplateColumns) Identifiers() []string { + idents := make([]string, len(tcs)) + for i, tc := range tcs { + idents[i] = QuoteIdentifier(tc.Name) + } + return idents +} + +func (tcs TemplateColumns) String() string { + colStrs := make([]string, len(tcs)) + for i, tc := range tcs { + colStrs[i] = tc.String() + } + return strings.Join(colStrs, ", ") +} + +func (tcs TemplateColumns) Keys() TemplateColumns { + var cols []TemplateColumn + for _, tc := range tcs { + if tc.Role != utils.FieldColType { + cols = append(cols, tc) + } + } + return cols +} + +func (tcs TemplateColumns) Tags() TemplateColumns { + var cols []TemplateColumn + for _, tc := range tcs { + if tc.Role == utils.TagColType || tc.Role == utils.TagsIDColType { + cols = append(cols, tc) + } + } + return cols +} + +func (tcs TemplateColumns) Fields() TemplateColumns { + var cols []TemplateColumn + for _, tc := range tcs { + if tc.Role == utils.FieldColType { + cols = append(cols, tc) + } + } + return cols +} + +type Template template.Template + +func newTemplate(templateString string) *Template { + t := &Template{} + if err := t.UnmarshalText([]byte(templateString)); err != nil { + panic(err) + } + return t +} + +func (t *Template) UnmarshalText(text []byte) error { + tmpl := template.New("") + tmpl.Funcs(templateFuncs) + tmpl.Funcs(sprig.TxtFuncMap()) + tt, err := tmpl.Parse(string(text)) + if err != nil { + return err + } + *t = Template(*tt) + return nil +} + +func (t *Template) Render(table *TemplateTable, newColumns []utils.Column, metricTable *TemplateTable, tagTable *TemplateTable) ([]byte, error) { + data := map[string]interface{}{ + "table": table, + "columns": NewTemplateColumns(newColumns), + "metricTable": metricTable, + "tagTable": tagTable, + } + + buf := bytes.NewBuffer(nil) + err := (*template.Template)(t).Execute(buf, data) + return buf.Bytes(), err +} From a344a3cab32f8fcb57f525ed06af5e7535dd3002 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sun, 15 Nov 2020 15:00:22 -0500 Subject: [PATCH 069/121] outputs.postgresql: Drop metrics with missing tag columns in DB When schema updates are disabled, drop any metrics with tags that aren't in the database. We don't want to just throw an error as the rest of the metrics might be fine. We also don't want to insert without that tag as this can result in a collision with other metrics. 
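

To make the collision concrete, here is a hedged sketch (the `cpu` table, the
`region` tag, and the sample values are hypothetical; the real DDL is generated
from the table templates). Suppose the table was created before metrics carried
a `region` tag, and schema updates are disabled:

```sql
-- Hypothetical table created before the "region" tag existed:
CREATE TABLE "cpu" ("time" timestamptz, "host" text, "usage" real);

-- Incoming metrics: cpu,host=a,region=us usage=1
--                   cpu,host=a,region=eu usage=2
-- Writing them without the missing "region" column would yield rows that can
-- no longer be told apart, so such metrics are dropped instead:
INSERT INTO "cpu" ("time", "host", "usage") VALUES (now(), 'a', 1);
INSERT INTO "cpu" ("time", "host", "usage") VALUES (now(), 'a', 2);
```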
--- plugins/outputs/postgresql/row_source.go | 132 ++++++++++++++++---- plugins/outputs/postgresql/table_manager.go | 9 +- plugins/outputs/postgresql/template.go | 6 + 3 files changed, 114 insertions(+), 33 deletions(-) diff --git a/plugins/outputs/postgresql/row_source.go b/plugins/outputs/postgresql/row_source.go index 116e5e6bb99d9..16ab7e92ee6d5 100644 --- a/plugins/outputs/postgresql/row_source.go +++ b/plugins/outputs/postgresql/row_source.go @@ -23,19 +23,29 @@ func (p *Postgresql) splitRowSources(metrics []telegraf.Metric) map[string]*RowS } type RowSource struct { - postgresql *Postgresql - metrics []telegraf.Metric - cursor int + postgresql *Postgresql + metrics []telegraf.Metric + cursor int + cursorValues []interface{} + cursorError error + // tagPositions is the position of each tag within the tag set. Regardless of whether tags are foreign keys or not tagPositions map[string]int - tagColumns []utils.Column + // tagColumns is the list of tags to emit. List is in order. + tagColumns []utils.Column + // tagSets is the list of tag IDs to tag values in use within the RowSource. The position of each value in the list + // corresponds to the key name in the tagColumns list. + // This data is used to build out the foreign tag table when enabled. + // Technically the tag values will only contain strings, since all tag values are strings. But this is a restriction of telegraf + // metrics, and not postgres. It might be nice to support somehow converting tag values into native times. + tagSets map[int64][]interface{} + // fieldPositions is the position of each field within the tag set. fieldPositions map[string]int - fieldColumns []utils.Column + // fieldColumns is the list of fields to emit. List is in order. + fieldColumns []utils.Column - // Technically this will only contain strings, since all tag values are strings. But this is a restriction of telegraf - // metrics, and not postgres. It might be nice to support somehow converting tag values into native times. - tagSets map[int64][]interface{} + droppedTagColumns []string } func NewRowSource(postgresql *Postgresql) *RowSource { @@ -92,6 +102,7 @@ func (rs *RowSource) Name() string { return rs.metrics[0].Name() } +// Returns the superset of all tags of all metrics. func (rs *RowSource) TagColumns() []utils.Column { var cols []utils.Column @@ -108,6 +119,7 @@ func (rs *RowSource) TagColumns() []utils.Column { return cols } +// Returns the superset of the union of all tags+fields of all metrics. func (rs *RowSource) Columns() []utils.Column { cols := []utils.Column{ TimeColumn, @@ -128,7 +140,51 @@ func (rs *RowSource) Columns() []utils.Column { return cols } -func (rs *RowSource) DropFieldColumn(col utils.Column) { +func (rs *RowSource) DropColumn(col utils.Column) { + switch col.Role { + case utils.TagColType: + rs.dropTagColumn(col) + case utils.FieldColType: + rs.dropFieldColumn(col) + default: + panic(fmt.Sprintf("Tried to perform an invalid column drop. This should not have happened. measurement=%s name=%s role=%v", rs.Name(), col.Name, col.Role)) + } +} + +// Drops the tag column from conversion. Any metrics containing this tag will be skipped. +func (rs *RowSource) dropTagColumn(col utils.Column) { + if col.Role != utils.TagColType || rs.postgresql.TagsAsJsonb { + panic(fmt.Sprintf("Tried to perform an invalid tag drop. This should not have happened. 
measurement=%s tag=%s", rs.Name(), col.Name)) + } + rs.droppedTagColumns = append(rs.droppedTagColumns, col.Name) + + pos, ok := rs.tagPositions[col.Name] + if !ok { + return + } + + delete(rs.tagPositions, col.Name) + for n, p := range rs.tagPositions { + if p > pos { + rs.tagPositions[n] -= 1 + } + } + + rs.tagColumns = append(rs.tagColumns[:pos], rs.tagColumns[pos+1:]...) + + for setID, set := range rs.tagSets { + if set[pos] != nil { + // The tag is defined, so drop the whole set + delete(rs.tagSets, setID) + } else { + // The tag is null, so keep the set, and just drop the column so we don't try to use it. + rs.tagSets[setID] = append(set, set[:pos], set[pos+1:]) + } + } +} + +// Drops the field column from conversion. Any metrics containing this field will have the field omitted. +func (rs *RowSource) dropFieldColumn(col utils.Column) { if col.Role != utils.FieldColType || rs.postgresql.FieldsAsJsonb { panic(fmt.Sprintf("Tried to perform an invalid field drop. This should not have happened. measurement=%s field=%s", rs.Name(), col.Name)) } @@ -137,6 +193,7 @@ func (rs *RowSource) DropFieldColumn(col utils.Column) { if !ok { return } + delete(rs.fieldPositions, col.Name) for n, p := range rs.fieldPositions { if p > pos { @@ -144,31 +201,32 @@ func (rs *RowSource) DropFieldColumn(col utils.Column) { } } - for i, fc := range rs.fieldColumns { - if fc.Name != col.Name { - continue - } - rs.fieldColumns = append(rs.fieldColumns[:i], rs.fieldColumns[i+1:]...) - break - } + rs.fieldColumns = append(rs.fieldColumns[:pos], rs.fieldColumns[pos+1:]...) } func (rs *RowSource) Next() bool { - if rs.cursor+1 >= len(rs.metrics) { - return false - } - //TODO check if all fields were dropped and if so, skip to next. - // This can happen if schema updates are disabled and the table is missing all the requisite columns. - // We could also use this to skip when any of the tag columns are missing. Currently this is detected in MatchSource and we throw an error. - rs.cursor += 1 - return true + for { + if rs.cursor+1 >= len(rs.metrics) { + rs.cursorValues = nil + rs.cursorError = nil + return false + } + rs.cursor += 1 + + rs.cursorValues, rs.cursorError = rs.values() + if rs.cursorValues != nil || rs.cursorError != nil { + return true + } + } } func (rs *RowSource) Reset() { rs.cursor = -1 } -func (rs *RowSource) Values() ([]interface{}, error) { +// values calculates the values for the metric at the cursor position. +// If the metric cannot be emitted, such as due to dropped tags, or all fields dropped, the return value is nil. +func (rs *RowSource) values() ([]interface{}, error) { metric := rs.metrics[rs.cursor] tags := metric.TagList() fields := metric.FieldList() @@ -182,7 +240,12 @@ func (rs *RowSource) Values() ([]interface{}, error) { // tags_as_foreignkey=false, tags_as_json=false tagValues := make([]interface{}, len(rs.tagPositions)) for _, tag := range tags { - tagValues[rs.tagPositions[tag.Key]] = tag.Value + tagPos, ok := rs.tagPositions[tag.Key] + if !ok { + // tag has been dropped, we can't emit or we risk collision with another metric + return nil, nil + } + tagValues[tagPos] = tag.Value } values = append(values, tagValues...) 
} else { @@ -195,18 +258,29 @@ func (rs *RowSource) Values() ([]interface{}, error) { } } else { // tags_as_foreignkey=true - values = append(values, utils.GetTagID(metric)) + tagID := utils.GetTagID(metric) + if _, ok := rs.tagSets[tagID]; !ok { + // tag has been dropped, we can't emit or we risk collision with another metric + return nil, nil + } + values = append(values, tagID) } if !rs.postgresql.FieldsAsJsonb { // fields_as_json=false fieldValues := make([]interface{}, len(rs.fieldPositions)) + fieldsEmpty := true for _, field := range fields { // we might have dropped the field due to the table missing the column & schema updates being turned off if fPos, ok := rs.fieldPositions[field.Key]; ok { fieldValues[fPos] = field.Value + fieldsEmpty = false } } + if fieldsEmpty { + // all fields have been dropped. Don't emit a metric with just tags and no fields. + return nil, nil + } values = append(values, fieldValues...) } else { // fields_as_json=true @@ -220,6 +294,10 @@ func (rs *RowSource) Values() ([]interface{}, error) { return values, nil } +func (rs *RowSource) Values() ([]interface{}, error) { + return rs.cursorValues, rs.cursorError +} + func (rs *RowSource) Err() error { return nil } diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index 5054fe75ca6e1..efeef0c0d5004 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -235,10 +235,10 @@ func (tm *TableManager) MatchSource(ctx context.Context, rowSource *RowSource) e if len(missingCols) > 0 { colDefs := make([]string, len(missingCols)) for i, col := range missingCols { + rowSource.DropColumn(col) colDefs[i] = col.Name + " " + string(col.Type) } - //TODO just drop the individual rows instead. See RowSource.Next() - return fmt.Errorf("missing tag columns: %s", strings.Join(colDefs, ", ")) + log.Printf("[outputs.postgresql] Error: table '%s' is missing tag columns (dropping metrics): %s", tagsTableName, strings.Join(colDefs, ", ")) } } @@ -257,13 +257,10 @@ func (tm *TableManager) MatchSource(ctx context.Context, rowSource *RowSource) e if len(missingCols) > 0 { colDefs := make([]string, len(missingCols)) for i, col := range missingCols { + rowSource.DropColumn(col) colDefs[i] = col.Name + " " + string(col.Type) } log.Printf("[outputs.postgresql] Error: table '%s' is missing columns (dropping fields): %s", metricsTableName, strings.Join(colDefs, ", ")) - - for _, col := range missingCols { - rowSource.DropFieldColumn(col) - } } return nil diff --git a/plugins/outputs/postgresql/template.go b/plugins/outputs/postgresql/template.go index 2c2a77d68d800..af76945579475 100644 --- a/plugins/outputs/postgresql/template.go +++ b/plugins/outputs/postgresql/template.go @@ -83,6 +83,12 @@ func (tc TemplateColumn) Definition() string { func (tc TemplateColumn) Identifier() string { return QuoteIdentifier(tc.Name) } +func (tc TemplateColumn) IsTag() bool { + return tc.Role == utils.TagColType +} +func (tc TemplateColumn) IsField() bool { + return tc.Role == utils.FieldColType +} //func (tc TemplateColumn) Literal() string { // return QuoteLiteral(tc.Name) From 7f464f1874aacb66d2f2f4f2ed04bbe28de8d6ca Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sun, 15 Nov 2020 22:35:24 -0500 Subject: [PATCH 070/121] outputs.postgresql: Re-add population of the tag table Had previously been removed while the code was being ripped apart. But now that the plugin is stabilizing, it's time to add it back. 
Still needs cleanup (note the TODO statements), but it's functional and moderately performant. --- plugins/outputs/postgresql/postgresql.go | 67 +++++++++++++++++---- plugins/outputs/postgresql/row_source.go | 67 ++++++++++++--------- plugins/outputs/postgresql/table_manager.go | 30 +++++---- plugins/outputs/postgresql/template.go | 1 + plugins/outputs/postgresql/utils/utils.go | 21 ++++--- 5 files changed, 126 insertions(+), 60 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 550ae9c94c072..58a8e574f40ee 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -2,7 +2,9 @@ package postgresql import ( "context" + "fmt" "log" + "strings" "time" "github.com/jackc/pgx/v4" @@ -45,7 +47,7 @@ func newPostgresql() *Postgresql { Schema: "public", CreateTemplates: []*Template{TableCreateTemplate}, AddColumnTemplates: []*Template{TableAddColumnTemplate}, - TagTableCreateTemplates: []*Template{TableCreateTemplate}, + TagTableCreateTemplates: []*Template{TagTableCreateTemplate}, TagTableAddColumnTemplates: []*Template{TableAddColumnTemplate}, TagTableSuffix: "_tag", } @@ -242,17 +244,60 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, rowSource *Row return err } - targetColumns := rowSource.Columns() - measureName := rowSource.Name() - - columnPositions := make(map[string]int, len(targetColumns)) - columnNames := make([]string, len(targetColumns)) - for i, col := range targetColumns { - columnPositions[col.Name] = i - columnNames[i] = col.Name + if err := p.WriteTagTable(ctx, rowSource); err != nil { + // log and continue. As the admin can correct the issue, and tags don't change over time, they can be added from + // future metrics after issue is corrected. + log.Printf("[outputs.postgresql] Error: Writing to tag table '%s': %w", rowSource.Name()+p.TagTableSuffix, err) } - fullTableName := utils.FullTableName(p.Schema, measureName) - _, err = p.db.CopyFrom(ctx, fullTableName, columnNames, rowSource) + fullTableName := utils.FullTableName(p.Schema, rowSource.Name()) + _, err = p.db.CopyFrom(ctx, fullTableName, rowSource.ColumnNames(), rowSource) return err } + +func (p *Postgresql) WriteTagTable(ctx context.Context, rowSource *RowSource) error { + tagCols := rowSource.TagTableColumns() + + columnNames := make([]string, len(tagCols)) + placeholders := make([]string, len(tagCols)) + for i, col := range tagCols { + columnNames[i] = QuoteIdentifier(col.Name) + placeholders[i] = fmt.Sprintf("$%d", i+1) + } + + // pgx batch code will automatically convert this into a prepared statement & cache it + sql := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (tag_id) DO NOTHING", + rowSource.Name()+p.TagTableSuffix, + strings.Join(columnNames, ","), + strings.Join(placeholders, ","), + ) + + batch := &pgx.Batch{} + //TODO rowSource should emit another source for the tags. We shouldn't have to dive into its private methods. + //TODO cache which tagSets we've already inserted and skip them. + //TODO copy into a temp table, and then `insert ... on conflict` from that into the tag table. + for tagID, tagSet := range rowSource.tagSets { + values := make([]interface{}, len(columnNames)) + values[0] = tagID + + if !p.TagsAsJsonb { + for _, tag := range tagSet { + values[rowSource.tagPositions[tag.Key]+1] = tag.Value // +1 to account for tag_id column + } + } else { + values[1] = utils.TagListToJSON(tagSet) + } + + batch.Queue(sql, values...) 
+ } + results := p.db.SendBatch(ctx, batch) + defer results.Close() + + for i := 0; i < len(rowSource.tagSets); i++ { + if _, err := results.Exec(); err != nil { + return err + } + } + + return nil +} diff --git a/plugins/outputs/postgresql/row_source.go b/plugins/outputs/postgresql/row_source.go index 16ab7e92ee6d5..1c1fd9478c801 100644 --- a/plugins/outputs/postgresql/row_source.go +++ b/plugins/outputs/postgresql/row_source.go @@ -36,9 +36,7 @@ type RowSource struct { // tagSets is the list of tag IDs to tag values in use within the RowSource. The position of each value in the list // corresponds to the key name in the tagColumns list. // This data is used to build out the foreign tag table when enabled. - // Technically the tag values will only contain strings, since all tag values are strings. But this is a restriction of telegraf - // metrics, and not postgres. It might be nice to support somehow converting tag values into native times. - tagSets map[int64][]interface{} + tagSets map[int64][]*telegraf.Tag // fieldPositions is the position of each field within the tag set. fieldPositions map[string]int @@ -52,7 +50,7 @@ func NewRowSource(postgresql *Postgresql) *RowSource { rs := &RowSource{ postgresql: postgresql, cursor: -1, - tagSets: make(map[int64][]interface{}), + tagSets: make(map[int64][]*telegraf.Tag), } if !postgresql.FieldsAsJsonb { rs.tagPositions = map[string]int{} @@ -65,12 +63,7 @@ func (rs *RowSource) AddMetric(metric telegraf.Metric) { if rs.postgresql.TagsAsForeignkeys { tagID := utils.GetTagID(metric) if _, ok := rs.tagSets[tagID]; !ok { - tags := metric.TagList() - values := make([]interface{}, len(tags)) - for i, tag := range tags { - values[i] = ColumnFromTag(tag.Key, tag.Value) - } - rs.tagSets[tagID] = values + rs.tagSets[tagID] = metric.TagList() } } @@ -106,10 +99,6 @@ func (rs *RowSource) Name() string { func (rs *RowSource) TagColumns() []utils.Column { var cols []utils.Column - if rs.postgresql.TagsAsForeignkeys { - cols = append(cols, TagIDColumn) - } - if rs.postgresql.TagsAsJsonb { cols = append(cols, TagsJSONColumn) } else { @@ -119,8 +108,13 @@ func (rs *RowSource) TagColumns() []utils.Column { return cols } -// Returns the superset of the union of all tags+fields of all metrics. -func (rs *RowSource) Columns() []utils.Column { +// Returns the superset of all fields of all metrics. +func (rs *RowSource) FieldColumns() []utils.Column { + return rs.fieldColumns +} + +// Returns the full column list, including time, tag id or tags, and fields. +func (rs *RowSource) MetricTableColumns() []utils.Column { cols := []utils.Column{ TimeColumn, } @@ -134,12 +128,31 @@ func (rs *RowSource) Columns() []utils.Column { if rs.postgresql.FieldsAsJsonb { cols = append(cols, FieldsJSONColumn) } else { - cols = append(cols, rs.fieldColumns...) + cols = append(cols, rs.FieldColumns()...) + } + + return cols +} + +func (rs *RowSource) TagTableColumns() []utils.Column { + cols := []utils.Column{ + TagIDColumn, } + cols = append(cols, rs.TagColumns()...) + return cols } +func (rs *RowSource) ColumnNames() []string { + cols := rs.MetricTableColumns() + names := make([]string, len(cols)) + for i, col := range cols { + names[i] = col.Name + } + return names +} + func (rs *RowSource) DropColumn(col utils.Column) { switch col.Role { case utils.TagColType: @@ -173,12 +186,12 @@ func (rs *RowSource) dropTagColumn(col utils.Column) { rs.tagColumns = append(rs.tagColumns[:pos], rs.tagColumns[pos+1:]...) 
for setID, set := range rs.tagSets { - if set[pos] != nil { - // The tag is defined, so drop the whole set - delete(rs.tagSets, setID) - } else { - // The tag is null, so keep the set, and just drop the column so we don't try to use it. - rs.tagSets[setID] = append(set, set[:pos], set[pos+1:]) + for _, tag := range set { + if tag.Key == col.Name { + // The tag is defined, so drop the whole set + delete(rs.tagSets, setID) + break + } } } } @@ -250,11 +263,7 @@ func (rs *RowSource) values() ([]interface{}, error) { values = append(values, tagValues...) } else { // tags_as_foreign_key=false, tags_as_json=true - value, err := utils.BuildJsonb(metric.Tags()) - if err != nil { - return nil, err - } - values = append(values, value) + values = append(values, utils.TagListToJSON(metric.TagList())) } } else { // tags_as_foreignkey=true @@ -284,7 +293,7 @@ func (rs *RowSource) values() ([]interface{}, error) { values = append(values, fieldValues...) } else { // fields_as_json=true - value, err := utils.BuildJsonb(metric.Fields()) + value, err := utils.FieldListToJSON(metric.FieldList()) if err != nil { return nil, err } diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index efeef0c0d5004..ade91e00e7e1f 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -213,20 +213,24 @@ func colMapToSlice(colMap map[string]utils.Column) []utils.Column { return cols } +// MatchSource scans through the metrics, determining what columns are needed for inserting, and ensuring the DB schema matches. +// If the schema does not match, and schema updates are disabled: +// If a field missing from the DB, the field is omitted. +// If a tag is missing from the DB, the metric is dropped. 
func (tm *TableManager) MatchSource(ctx context.Context, rowSource *RowSource) error { - metricsTableName := rowSource.Name() - var tagsTableName string + metricTableName := rowSource.Name() + var tagTableName string if tm.TagsAsForeignkeys { - tagsTableName = metricsTableName + tm.TagTableSuffix + tagTableName = metricTableName + tm.TagTableSuffix missingCols, err := tm.EnsureStructure( ctx, - tagsTableName, - rowSource.TagColumns(), + tagTableName, + rowSource.TagTableColumns(), tm.TagTableCreateTemplates, tm.TagTableAddColumnTemplates, - metricsTableName, - tagsTableName, + metricTableName, + tagTableName, ) if err != nil { return err @@ -238,17 +242,17 @@ func (tm *TableManager) MatchSource(ctx context.Context, rowSource *RowSource) e rowSource.DropColumn(col) colDefs[i] = col.Name + " " + string(col.Type) } - log.Printf("[outputs.postgresql] Error: table '%s' is missing tag columns (dropping metrics): %s", tagsTableName, strings.Join(colDefs, ", ")) + log.Printf("[outputs.postgresql] Error: table '%s' is missing tag columns (dropping metrics): %s", tagTableName, strings.Join(colDefs, ", ")) } } missingCols, err := tm.EnsureStructure(ctx, - metricsTableName, - rowSource.Columns(), + metricTableName, + rowSource.MetricTableColumns(), tm.CreateTemplates, tm.AddColumnTemplates, - metricsTableName, - tagsTableName, + metricTableName, + tagTableName, ) if err != nil { return err @@ -260,7 +264,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, rowSource *RowSource) e rowSource.DropColumn(col) colDefs[i] = col.Name + " " + string(col.Type) } - log.Printf("[outputs.postgresql] Error: table '%s' is missing columns (dropping fields): %s", metricsTableName, strings.Join(colDefs, ", ")) + log.Printf("[outputs.postgresql] Error: table '%s' is missing columns (dropping fields): %s", metricTableName, strings.Join(colDefs, ", ")) } return nil diff --git a/plugins/outputs/postgresql/template.go b/plugins/outputs/postgresql/template.go index af76945579475..a06f99d988411 100644 --- a/plugins/outputs/postgresql/template.go +++ b/plugins/outputs/postgresql/template.go @@ -13,6 +13,7 @@ import ( var TableCreateTemplate = newTemplate(`CREATE TABLE {{.table}} ({{.columns}})`) var TableAddColumnTemplate = newTemplate(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`) +var TagTableCreateTemplate = newTemplate(`CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))`) var templateFuncs = map[string]interface{}{ "quoteIdentifier": QuoteIdentifier, diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index 8e659d89fdaf3..06359d284921a 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -17,14 +17,21 @@ const ( insertIntoSQLTemplate = "INSERT INTO %s(%s) VALUES(%s)" ) -// BuildJsonb returns a byte array of the json representation -// of the passed object. 
-func BuildJsonb(data interface{}) ([]byte, error) { - d, err := json.Marshal(data) - if err != nil { - return nil, err +func TagListToJSON(tagList []*telegraf.Tag) []byte { + tags := make(map[string]string, len(tagList)) + for _, tag := range tagList { + tags[tag.Key] = tag.Value } - return d, nil + bs, _ := json.Marshal(tags) + return bs +} + +func FieldListToJSON(fieldList []*telegraf.Field) ([]byte, error) { + fields := make(map[string]interface{}, len(fieldList)) + for _, field := range fieldList { + fields[field.Key] = field.Value + } + return json.Marshal(fields) } // QuoteIdent returns a sanitized string safe to use in SQL as an identifier From c6df522c19f243b2396a6709713122b1b1ffb8e4 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 9 Dec 2020 08:23:51 -0500 Subject: [PATCH 071/121] outputs.postgresql: fix column sorting when creating new tables --- plugins/outputs/postgresql/table_manager.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index ade91e00e7e1f..ac2f2da34f7f2 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -76,6 +76,11 @@ func (tm *TableManager) EnsureStructure( metricsTableName string, tagsTableName string, ) ([]utils.Column, error) { + // Sort so that: + // * When we create/alter the table the columns are in a sane order (telegraf gives us the fields in random order) + // * When we display errors about missing columns, the order is also sane, and consistent + utils.ColumnList(columns).Sort() + tm.tablesMutex.RLock() dbColumns, ok := tm.Tables[tableName] tm.tablesMutex.RUnlock() @@ -128,12 +133,6 @@ func (tm *TableManager) checkColumns(dbColumns map[string]utils.Column, srcColum return nil, fmt.Errorf("column type '%s' cannot store '%s'", dbCol.Type, srcCol.Type) } } - if len(missingColumns) > 0 { - // Sort so that: - // * When we create/alter the table the columns are in a sane order (telegraf gives us the fields in random order) - // * When we display errors about missing columns, the order is also sane, and consistent - utils.ColumnList(missingColumns).Sort() - } return missingColumns, nil } From e33f46f2afa9fa9d46450684ae0681c3f320d96d Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 9 Dec 2020 08:32:09 -0500 Subject: [PATCH 072/121] outputs.postgresql: don't retry permanent errors when in single-connection mode --- plugins/outputs/postgresql/postgresql.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 58a8e574f40ee..ed3198957df0d 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -170,11 +170,11 @@ func (p *Postgresql) writeSequential(rowSources map[string]*RowSource) error { for _, rowSource := range rowSources { err := p.writeMetricsFromMeasure(p.dbContext, rowSource) if err != nil { - if !isTempError(err) { - log.Printf("write error (permanent): %v", err) + if isTempError(err) { + //TODO use a transaction so that we don't end up with a partial write, and end up retrying metrics we've already written + return err } - //TODO use a transaction so that we don't end up with a partial write, and end up retrying metrics we've already written - return err + log.Printf("write error (permanent, dropping sub-batch): %v", err) } } return nil @@ -196,7 +196,7 @@ func (p *Postgresql) writeWorker(ctx 
context.Context) { select { case rowSource := <-p.writeChan: if err := p.writeRetry(ctx, rowSource); err != nil { - log.Printf("write error (permanent): %v", err) + log.Printf("write error (permanent, dropping sub-batch): %v", err) } case <-p.dbContext.Done(): return From 9f206c5698c9a8d7e567a3e95efa311247b4546c Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 4 Jan 2021 11:25:16 -0500 Subject: [PATCH 073/121] identify tag columns using PG column comments Some template methods filter columns based on role. For some columns, we can tell by the name (e.g. 'time' or 'tag_id'). However tag & field columns look the same. So we need a way to differentiate them. This change accomplishes that by identifying tag columns by using a PG column comment. --- plugins/outputs/postgresql/table_manager.go | 44 +++++++++++++++++++-- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index ac2f2da34f7f2..6b61452487312 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -11,7 +11,7 @@ import ( ) const ( - refreshTableStructureStatement = "SELECT column_name, data_type FROM information_schema.columns WHERE table_schema = $1 and table_name = $2" + refreshTableStructureStatement = "SELECT column_name, data_type, col_description((table_schema||'.'||table_name)::regclass::oid, ordinal_position) FROM information_schema.columns WHERE table_schema = $1 and table_name = $2" ) type TableManager struct { @@ -47,14 +47,36 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName str cols := make(map[string]utils.Column) for rows.Next() { var colName, colTypeStr string - err := rows.Scan(&colName, &colTypeStr) + desc := new(string) + err := rows.Scan(&colName, &colTypeStr, &desc) if err != nil { return err } + + role := utils.FieldColType + switch colName { + case TimeColumnName: + role = utils.TimeColType + case TagIDColumnName: + role = utils.TagsIDColType + case TagsJSONColumnName: + role = utils.TagColType + case FieldsJSONColumnName: + role = utils.FieldColType + default: + // We don't want to monopolize the column comment (preventing user from storing other information there), so just look at the first word + if desc != nil { + descWords := strings.Split(*desc, " ") + if descWords[0] == "tag" { + role = utils.TagColType + } + } + } + cols[colName] = utils.Column{ Name: colName, Type: utils.PgDataType(colTypeStr), - Role: utils.FieldColType, //FIXME this isn't necessarily correct. could be some other role. But while it's a lie, I don't think it affect anything. + Role: role, } } @@ -190,7 +212,21 @@ func (tm *TableManager) executeTemplates( } if _, err := tx.Exec(ctx, string(sql)); err != nil { _ = tx.Rollback(ctx) - return fmt.Errorf("executing `%.40s`: %w", sql, err) + return fmt.Errorf("executing `%s`: %w", sql, err) + } + } + + // We need to be able to determine the role of the column when reading the structure back (because of the templates). + // For some columns we can determine this by the column name (time, tag_id, etc). However tags and fields can have any + // name, and look the same. So we add a comment to tag columns, and through process of elimination what remains are + // field columns. 
+	for _, col := range newColumns {
+		if col.Role != utils.TagColType {
+			continue
+		}
+		if _, err := tx.Exec(ctx, "COMMENT ON COLUMN "+tmplTable.String()+"."+QuoteIdentifier(col.Name)+" IS 'tag'"); err != nil {
+			_ = tx.Rollback(ctx)
+			return fmt.Errorf("setting column role comment: %s", err)
+		}
+	}

From fec5a952f3baf901d745ea175d2eac4caba859f2 Mon Sep 17 00:00:00 2001
From: Patrick Hemmer
Date: Mon, 4 Jan 2021 11:31:06 -0500
Subject: [PATCH 074/121] outputs/postgresql: split templating into its own
 package

This is so that when users are looking up documentation on the templating
functions, they're not bombarded by lots of docs on internal methods that they
don't care about.

A few other misc code cleanups are pulled in as well.
---
 plugins/outputs/postgresql/postgresql.go      | 53 ++++++-----
 plugins/outputs/postgresql/table_manager.go   | 15 ++--
 .../postgresql/{ => template}/template.go     | 90 +++++++++++++++++--
 3 files changed, 123 insertions(+), 35 deletions(-)
 rename plugins/outputs/postgresql/{ => template}/template.go (66%)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index ed3198957df0d..37a916cd01f73 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -2,7 +2,11 @@ package postgresql

 import (
 	"context"
+	"errors"
 	"fmt"
+	"github.com/influxdata/telegraf/internal"
+	"github.com/influxdata/telegraf/plugins/outputs/postgresql/template"
+	"github.com/jackc/pgconn"
 	"log"
 	"strings"
 	"time"
@@ -21,12 +25,13 @@ type Postgresql struct {
 	TagsAsForeignkeys          bool
 	TagsAsJsonb                bool
 	FieldsAsJsonb              bool
-	CreateTemplates            []*Template
-	AddColumnTemplates         []*Template
-	TagTableCreateTemplates    []*Template
-	TagTableAddColumnTemplates []*Template
+	CreateTemplates            []*template.Template
+	AddColumnTemplates         []*template.Template
+	TagTableCreateTemplates    []*template.Template
+	TagTableAddColumnTemplates []*template.Template
 	TagTableSuffix             string
 	PoolSize                   int
+	RetryMaxBackoff            internal.Duration

 	dbContext       context.Context
 	dbContextCancel func()
@@ -40,16 +45,15 @@ func init() {
 	outputs.Add("postgresql", func() telegraf.Output { return newPostgresql() })
 }

-const createTableTemplate = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})"
-
 func newPostgresql() *Postgresql {
 	return &Postgresql{
 		Schema:                     "public",
-		CreateTemplates:            []*Template{TableCreateTemplate},
-		AddColumnTemplates:         []*Template{TableAddColumnTemplate},
-		TagTableCreateTemplates:    []*Template{TagTableCreateTemplate},
-		TagTableAddColumnTemplates: []*Template{TableAddColumnTemplate},
+		CreateTemplates:            []*template.Template{template.TableCreateTemplate},
+		AddColumnTemplates:         []*template.Template{template.TableAddColumnTemplate},
+		TagTableCreateTemplates:    []*template.Template{template.TagTableCreateTemplate},
+		TagTableAddColumnTemplates: []*template.Template{template.TableAddColumnTemplate},
 		TagTableSuffix:             "_tag",
+		RetryMaxBackoff:            internal.Duration{time.Second * 15},
 	}
 }

@@ -205,12 +209,13 @@ func (p *Postgresql) writeWorker(ctx context.Context) {
 }

 func isTempError(err error) bool {
-	return false
+	var pgErr *pgconn.PgError
+	if errors.As(err, &pgErr); pgErr != nil {
+		return false
+	}
+	return true
 }

-var backoffInit = time.Millisecond * 250
-var backoffMax = time.Second * 15
-
 func (p *Postgresql) writeRetry(ctx context.Context, rowSource *RowSource) error {
 	backoff := time.Duration(0)
 	for {
@@ -227,11 +232,11 @@ func (p *Postgresql) writeRetry(ctx context.Context, rowSource *RowSource) error
 		time.Sleep(backoff)
 		if backoff == 0 {
-			backoff = 
backoffInit + backoff = time.Millisecond * 250 } else { backoff *= 2 - if backoff > backoffMax { - backoff = backoffMax + if backoff > p.RetryMaxBackoff.Duration { + backoff = p.RetryMaxBackoff.Duration } } } @@ -244,10 +249,12 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, rowSource *Row return err } - if err := p.WriteTagTable(ctx, rowSource); err != nil { - // log and continue. As the admin can correct the issue, and tags don't change over time, they can be added from - // future metrics after issue is corrected. - log.Printf("[outputs.postgresql] Error: Writing to tag table '%s': %w", rowSource.Name()+p.TagTableSuffix, err) + if p.TagsAsForeignkeys { + if err := p.WriteTagTable(ctx, rowSource); err != nil { + // log and continue. As the admin can correct the issue, and tags don't change over time, they can be added from + // future metrics after issue is corrected. + log.Printf("[outputs.postgresql] Error: Writing to tag table '%s': %s", rowSource.Name()+p.TagTableSuffix, err) + } } fullTableName := utils.FullTableName(p.Schema, rowSource.Name()) @@ -261,13 +268,13 @@ func (p *Postgresql) WriteTagTable(ctx context.Context, rowSource *RowSource) er columnNames := make([]string, len(tagCols)) placeholders := make([]string, len(tagCols)) for i, col := range tagCols { - columnNames[i] = QuoteIdentifier(col.Name) + columnNames[i] = template.QuoteIdentifier(col.Name) placeholders[i] = fmt.Sprintf("$%d", i+1) } // pgx batch code will automatically convert this into a prepared statement & cache it sql := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (tag_id) DO NOTHING", - rowSource.Name()+p.TagTableSuffix, + template.QuoteIdentifier(p.Schema)+"."+template.QuoteIdentifier(rowSource.Name()+p.TagTableSuffix), strings.Join(columnNames, ","), strings.Join(placeholders, ","), ) diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index 6b61452487312..affed5502ec4c 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -3,6 +3,7 @@ package postgresql import ( "context" "fmt" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" "log" "strings" "sync" @@ -93,8 +94,8 @@ func (tm *TableManager) EnsureStructure( ctx context.Context, tableName string, columns []utils.Column, - createTemplates []*Template, - addColumnsTemplates []*Template, + createTemplates []*template.Template, + addColumnsTemplates []*template.Template, metricsTableName string, tagsTableName string, ) ([]utils.Column, error) { @@ -160,15 +161,15 @@ func (tm *TableManager) checkColumns(dbColumns map[string]utils.Column, srcColum func (tm *TableManager) executeTemplates( ctx context.Context, - tmpls []*Template, + tmpls []*template.Template, tableName string, newColumns []utils.Column, metricsTableName string, tagsTableName string, ) error { - tmplTable := NewTemplateTable(tm.Schema, tableName, colMapToSlice(tm.Tables[tableName])) - metricsTmplTable := NewTemplateTable(tm.Schema, metricsTableName, colMapToSlice(tm.Tables[metricsTableName])) - tagsTmplTable := NewTemplateTable(tm.Schema, tagsTableName, colMapToSlice(tm.Tables[tagsTableName])) + tmplTable := template.NewTemplateTable(tm.Schema, tableName, colMapToSlice(tm.Tables[tableName])) + metricsTmplTable := template.NewTemplateTable(tm.Schema, metricsTableName, colMapToSlice(tm.Tables[metricsTableName])) + tagsTmplTable := template.NewTemplateTable(tm.Schema, tagsTableName, colMapToSlice(tm.Tables[tagsTableName])) /* 
https://github.com/jackc/pgx/issues/872 stmts := make([]string, len(tmpls)) @@ -224,7 +225,7 @@ func (tm *TableManager) executeTemplates( if col.Role != utils.TagColType { continue } - if _, err := tx.Exec(ctx, "COMMENT ON COLUMN "+tmplTable.String()+"."+QuoteIdentifier(col.Name)+" IS 'tag'"); err != nil { + if _, err := tx.Exec(ctx, "COMMENT ON COLUMN "+tmplTable.String()+"."+template.QuoteIdentifier(col.Name)+" IS 'tag'"); err != nil { _ = tx.Rollback(ctx) return fmt.Errorf("setting column role comment: %s", err) } diff --git a/plugins/outputs/postgresql/template.go b/plugins/outputs/postgresql/template/template.go similarity index 66% rename from plugins/outputs/postgresql/template.go rename to plugins/outputs/postgresql/template/template.go index a06f99d988411..a0a1ec4924988 100644 --- a/plugins/outputs/postgresql/template.go +++ b/plugins/outputs/postgresql/template/template.go @@ -1,10 +1,13 @@ -package postgresql +package template import ( "bytes" + "encoding/base32" "fmt" + "hash/fnv" "strings" "text/template" + "unsafe" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" @@ -35,7 +38,7 @@ func asString(obj interface{}) string { func QuoteIdentifier(name interface{}) string { return `"` + strings.ReplaceAll(asString(name), `"`, `""`) + `"` } -func QuoteLiteral(str fmt.Stringer) string { +func QuoteLiteral(str interface{}) string { return "'" + strings.ReplaceAll(asString(str), "'", "''") + "'" } @@ -66,8 +69,29 @@ func (tt *TemplateTable) String() string { return tt.Identifier() } func (tt *TemplateTable) Identifier() string { + if tt.Schema == "" { + return QuoteIdentifier(tt.Name) + } return QuoteIdentifier(tt.Schema) + "." + QuoteIdentifier(tt.Name) } +func (tt *TemplateTable) WithSchema(name string) *TemplateTable { + ttNew := &TemplateTable{} + *ttNew = *tt + ttNew.Schema = name + return ttNew +} +func (tt *TemplateTable) WithName(name string) *TemplateTable { + ttNew := &TemplateTable{} + *ttNew = *tt + ttNew.Name = name + return ttNew +} +func (tt *TemplateTable) WithSuffix(suffixes ...string) *TemplateTable { + ttNew := &TemplateTable{} + *ttNew = *tt + ttNew.Name += strings.Join(suffixes, "") + return ttNew +} //func (tt *TemplateTable) Literal() string { // return QuoteLiteral(tt.Identifier()) @@ -84,6 +108,12 @@ func (tc TemplateColumn) Definition() string { func (tc TemplateColumn) Identifier() string { return QuoteIdentifier(tc.Name) } +func (tc TemplateColumn) Selector() string { + if tc.Type != "" { + return tc.Identifier() + } + return "NULL as " + tc.Identifier() +} func (tc TemplateColumn) IsTag() bool { return tc.Role == utils.TagColType } @@ -120,11 +150,19 @@ func (tcs TemplateColumns) Definitions() []string { func (tcs TemplateColumns) Identifiers() []string { idents := make([]string, len(tcs)) for i, tc := range tcs { - idents[i] = QuoteIdentifier(tc.Name) + idents[i] = tc.Identifier() } return idents } +func (tcs TemplateColumns) Selectors() []string { + selectors := make([]string, len(tcs)) + for i, tc := range tcs { + selectors[i] = tc.Selector() + } + return selectors +} + func (tcs TemplateColumns) String() string { colStrs := make([]string, len(tcs)) for i, tc := range tcs { @@ -143,10 +181,40 @@ func (tcs TemplateColumns) Keys() TemplateColumns { return cols } +func (tcs TemplateColumns) Sorted() TemplateColumns { + cols := append([]TemplateColumn{}, tcs...) 
+ (*utils.ColumnList)(unsafe.Pointer(&cols)).Sort() + return cols +} + +func (tcs TemplateColumns) Concat(tcsList ...TemplateColumns) TemplateColumns { + tcsNew := append(TemplateColumns{}, tcs...) + for _, tcs := range tcsList { + tcsNew = append(tcsNew, tcs...) + } + return tcsNew +} + +// Generates a list of SQL selectors against the given columns. +// For each column in tcs, if the column also exist in tcsFrom, it will be selected. If the column does not exist NULL will be selected. +func (tcs TemplateColumns) Union(tcsFrom TemplateColumns) TemplateColumns { + tcsNew := append(TemplateColumns{}, tcs...) +TCS: + for i, tc := range tcs { + for _, tcFrom := range tcsFrom { + if tc.Name == tcFrom.Name { + continue TCS + } + } + tcsNew[i].Type = "" + } + return tcsNew +} + func (tcs TemplateColumns) Tags() TemplateColumns { var cols []TemplateColumn for _, tc := range tcs { - if tc.Role == utils.TagColType || tc.Role == utils.TagsIDColType { + if tc.Role == utils.TagColType { cols = append(cols, tc) } } @@ -163,6 +231,15 @@ func (tcs TemplateColumns) Fields() TemplateColumns { return cols } +func (tcs TemplateColumns) Hash() string { + hash := fnv.New32a() + for _, tc := range tcs.Sorted() { + hash.Write([]byte(tc.Name)) + hash.Write([]byte{0}) + } + return strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(hash.Sum(nil))) +} + type Template template.Template func newTemplate(templateString string) *Template { @@ -175,6 +252,7 @@ func newTemplate(templateString string) *Template { func (t *Template) UnmarshalText(text []byte) error { tmpl := template.New("") + tmpl.Option("missingkey=error") tmpl.Funcs(templateFuncs) tmpl.Funcs(sprig.TxtFuncMap()) tt, err := tmpl.Parse(string(text)) @@ -186,9 +264,11 @@ func (t *Template) UnmarshalText(text []byte) error { } func (t *Template) Render(table *TemplateTable, newColumns []utils.Column, metricTable *TemplateTable, tagTable *TemplateTable) ([]byte, error) { + tcs := NewTemplateColumns(newColumns).Sorted() data := map[string]interface{}{ "table": table, - "columns": NewTemplateColumns(newColumns), + "columns": tcs, + "allColumns": tcs.Concat(table.Columns).Sorted(), "metricTable": metricTable, "tagTable": tagTable, } From 9f1d8c17392ebf01f86e3f966094578553b3d955 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 4 Jan 2021 12:02:15 -0500 Subject: [PATCH 075/121] outputs/postgresql: rename row_source -> table_source --- plugins/outputs/postgresql/postgresql.go | 67 ++++--- plugins/outputs/postgresql/table_manager.go | 2 +- .../{row_source.go => table_source.go} | 179 ++++++++---------- 3 files changed, 121 insertions(+), 127 deletions(-) rename plugins/outputs/postgresql/{row_source.go => table_source.go} (52%) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 37a916cd01f73..150418989192c 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -38,7 +38,7 @@ type Postgresql struct { db *pgxpool.Pool tableManager *TableManager - writeChan chan *RowSource + writeChan chan *TableSource } func init() { @@ -75,7 +75,7 @@ func (p *Postgresql) Connect() error { maxConns := int(p.db.Stat().MaxConns()) if maxConns > 1 { - p.writeChan = make(chan *RowSource) + p.writeChan = make(chan *TableSource) for i := 0; i < maxConns; i++ { go p.writeWorker(p.dbContext) } @@ -161,18 +161,27 @@ func (p *Postgresql) SampleConfig() string { return sampleConfig } func (p *Postgresql) Description() string { return "Send metrics to 
PostgreSQL" } func (p *Postgresql) Write(metrics []telegraf.Metric) error { - rowSources := p.splitRowSources(metrics) + tableSources := map[string]*TableSource{} + + for _, m := range metrics { + rs := tableSources[m.Name()] + if rs == nil { + rs = NewTableSource(p) + tableSources[m.Name()] = rs + } + rs.AddMetric(m) + } if p.db.Stat().MaxConns() > 1 { - return p.writeConcurrent(rowSources) + return p.writeConcurrent(tableSources) } else { - return p.writeSequential(rowSources) + return p.writeSequential(tableSources) } } -func (p *Postgresql) writeSequential(rowSources map[string]*RowSource) error { - for _, rowSource := range rowSources { - err := p.writeMetricsFromMeasure(p.dbContext, rowSource) +func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error { + for _, tableSource := range tableSources { + err := p.writeMetricsFromMeasure(p.dbContext, tableSource) if err != nil { if isTempError(err) { //TODO use a transaction so that we don't end up with a partial write, and end up retrying metrics we've already written @@ -184,10 +193,10 @@ func (p *Postgresql) writeSequential(rowSources map[string]*RowSource) error { return nil } -func (p *Postgresql) writeConcurrent(rowSources map[string]*RowSource) error { - for _, rowSource := range rowSources { +func (p *Postgresql) writeConcurrent(tableSources map[string]*TableSource) error { + for _, tableSource := range tableSources { select { - case p.writeChan <- rowSource: + case p.writeChan <- tableSource: case <-p.dbContext.Done(): return nil } @@ -198,8 +207,8 @@ func (p *Postgresql) writeConcurrent(rowSources map[string]*RowSource) error { func (p *Postgresql) writeWorker(ctx context.Context) { for { select { - case rowSource := <-p.writeChan: - if err := p.writeRetry(ctx, rowSource); err != nil { + case tableSource := <-p.writeChan: + if err := p.writeRetry(ctx, tableSource); err != nil { log.Printf("write error (permanent, dropping sub-batch): %v", err) } case <-p.dbContext.Done(): @@ -216,10 +225,10 @@ func isTempError(err error) bool { return true } -func (p *Postgresql) writeRetry(ctx context.Context, rowSource *RowSource) error { +func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) error { backoff := time.Duration(0) for { - err := p.writeMetricsFromMeasure(ctx, rowSource) + err := p.writeMetricsFromMeasure(ctx, tableSource) if err == nil { return nil } @@ -228,7 +237,7 @@ func (p *Postgresql) writeRetry(ctx context.Context, rowSource *RowSource) error return err } log.Printf("write error (retry in %s): %v", backoff, err) - rowSource.Reset() + tableSource.Reset() time.Sleep(backoff) if backoff == 0 { @@ -243,27 +252,27 @@ func (p *Postgresql) writeRetry(ctx context.Context, rowSource *RowSource) error } // Writes the metrics from a specified measure. All the provided metrics must belong to the same measurement. -func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, rowSource *RowSource) error { - err := p.tableManager.MatchSource(ctx, rowSource) +func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, tableSource *TableSource) error { + err := p.tableManager.MatchSource(ctx, tableSource) if err != nil { return err } if p.TagsAsForeignkeys { - if err := p.WriteTagTable(ctx, rowSource); err != nil { + if err := p.WriteTagTable(ctx, tableSource); err != nil { // log and continue. As the admin can correct the issue, and tags don't change over time, they can be added from // future metrics after issue is corrected. 
- log.Printf("[outputs.postgresql] Error: Writing to tag table '%s': %s", rowSource.Name()+p.TagTableSuffix, err) + log.Printf("[outputs.postgresql] Error: Writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err) } } - fullTableName := utils.FullTableName(p.Schema, rowSource.Name()) - _, err = p.db.CopyFrom(ctx, fullTableName, rowSource.ColumnNames(), rowSource) + fullTableName := utils.FullTableName(p.Schema, tableSource.Name()) + _, err = p.db.CopyFrom(ctx, fullTableName, tableSource.ColumnNames(), tableSource) return err } -func (p *Postgresql) WriteTagTable(ctx context.Context, rowSource *RowSource) error { - tagCols := rowSource.TagTableColumns() +func (p *Postgresql) WriteTagTable(ctx context.Context, tableSource *TableSource) error { + tagCols := tableSource.TagTableColumns() columnNames := make([]string, len(tagCols)) placeholders := make([]string, len(tagCols)) @@ -274,22 +283,22 @@ func (p *Postgresql) WriteTagTable(ctx context.Context, rowSource *RowSource) er // pgx batch code will automatically convert this into a prepared statement & cache it sql := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (tag_id) DO NOTHING", - template.QuoteIdentifier(p.Schema)+"."+template.QuoteIdentifier(rowSource.Name()+p.TagTableSuffix), + template.QuoteIdentifier(p.Schema)+"."+template.QuoteIdentifier(tableSource.Name()+p.TagTableSuffix), strings.Join(columnNames, ","), strings.Join(placeholders, ","), ) batch := &pgx.Batch{} - //TODO rowSource should emit another source for the tags. We shouldn't have to dive into its private methods. + //TODO tableSource should emit another source for the tags. We shouldn't have to dive into its private methods. //TODO cache which tagSets we've already inserted and skip them. //TODO copy into a temp table, and then `insert ... on conflict` from that into the tag table. - for tagID, tagSet := range rowSource.tagSets { + for tagID, tagSet := range tableSource.tagSets { values := make([]interface{}, len(columnNames)) values[0] = tagID if !p.TagsAsJsonb { for _, tag := range tagSet { - values[rowSource.tagPositions[tag.Key]+1] = tag.Value // +1 to account for tag_id column + values[tableSource.tagPositions[tag.Key]+1] = tag.Value // +1 to account for tag_id column } } else { values[1] = utils.TagListToJSON(tagSet) @@ -300,7 +309,7 @@ func (p *Postgresql) WriteTagTable(ctx context.Context, rowSource *RowSource) er results := p.db.SendBatch(ctx, batch) defer results.Close() - for i := 0; i < len(rowSource.tagSets); i++ { + for i := 0; i < len(tableSource.tagSets); i++ { if _, err := results.Exec(); err != nil { return err } diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index affed5502ec4c..dc81b6f677db5 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -253,7 +253,7 @@ func colMapToSlice(colMap map[string]utils.Column) []utils.Column { // If the schema does not match, and schema updates are disabled: // If a field missing from the DB, the field is omitted. // If a tag is missing from the DB, the metric is dropped. 
-func (tm *TableManager) MatchSource(ctx context.Context, rowSource *RowSource) error { +func (tm *TableManager) MatchSource(ctx context.Context, rowSource *TableSource) error { metricTableName := rowSource.Name() var tagTableName string if tm.TagsAsForeignkeys { diff --git a/plugins/outputs/postgresql/row_source.go b/plugins/outputs/postgresql/table_source.go similarity index 52% rename from plugins/outputs/postgresql/row_source.go rename to plugins/outputs/postgresql/table_source.go index 1c1fd9478c801..6a9abf78ff454 100644 --- a/plugins/outputs/postgresql/row_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -7,22 +7,7 @@ import ( "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) -func (p *Postgresql) splitRowSources(metrics []telegraf.Metric) map[string]*RowSource { - rowSources := map[string]*RowSource{} - - for _, m := range metrics { - rs := rowSources[m.Name()] - if rs == nil { - rs = NewRowSource(p) - rowSources[m.Name()] = rs - } - rs.AddMetric(m) - } - - return rowSources -} - -type RowSource struct { +type TableSource struct { postgresql *Postgresql metrics []telegraf.Metric cursor int @@ -33,7 +18,7 @@ type RowSource struct { tagPositions map[string]int // tagColumns is the list of tags to emit. List is in order. tagColumns []utils.Column - // tagSets is the list of tag IDs to tag values in use within the RowSource. The position of each value in the list + // tagSets is the list of tag IDs to tag values in use within the TableSource. The position of each value in the list // corresponds to the key name in the tagColumns list. // This data is used to build out the foreign tag table when enabled. tagSets map[int64][]*telegraf.Tag @@ -46,106 +31,106 @@ type RowSource struct { droppedTagColumns []string } -func NewRowSource(postgresql *Postgresql) *RowSource { - rs := &RowSource{ +func NewTableSource(postgresql *Postgresql) *TableSource { + tsrc := &TableSource{ postgresql: postgresql, cursor: -1, tagSets: make(map[int64][]*telegraf.Tag), } if !postgresql.FieldsAsJsonb { - rs.tagPositions = map[string]int{} - rs.fieldPositions = map[string]int{} + tsrc.tagPositions = map[string]int{} + tsrc.fieldPositions = map[string]int{} } - return rs + return tsrc } -func (rs *RowSource) AddMetric(metric telegraf.Metric) { - if rs.postgresql.TagsAsForeignkeys { +func (tsrc *TableSource) AddMetric(metric telegraf.Metric) { + if tsrc.postgresql.TagsAsForeignkeys { tagID := utils.GetTagID(metric) - if _, ok := rs.tagSets[tagID]; !ok { - rs.tagSets[tagID] = metric.TagList() + if _, ok := tsrc.tagSets[tagID]; !ok { + tsrc.tagSets[tagID] = metric.TagList() } } - if !rs.postgresql.TagsAsJsonb { + if !tsrc.postgresql.TagsAsJsonb { for _, t := range metric.TagList() { - if _, ok := rs.tagPositions[t.Key]; !ok { - rs.tagPositions[t.Key] = len(rs.tagPositions) - rs.tagColumns = append(rs.tagColumns, ColumnFromTag(t.Key, t.Value)) + if _, ok := tsrc.tagPositions[t.Key]; !ok { + tsrc.tagPositions[t.Key] = len(tsrc.tagPositions) + tsrc.tagColumns = append(tsrc.tagColumns, ColumnFromTag(t.Key, t.Value)) } } } - if !rs.postgresql.FieldsAsJsonb { + if !tsrc.postgresql.FieldsAsJsonb { for _, f := range metric.FieldList() { - if _, ok := rs.fieldPositions[f.Key]; !ok { - rs.fieldPositions[f.Key] = len(rs.fieldPositions) - rs.fieldColumns = append(rs.fieldColumns, ColumnFromField(f.Key, f.Value)) + if _, ok := tsrc.fieldPositions[f.Key]; !ok { + tsrc.fieldPositions[f.Key] = len(tsrc.fieldPositions) + tsrc.fieldColumns = append(tsrc.fieldColumns, ColumnFromField(f.Key, f.Value)) } 
} } - rs.metrics = append(rs.metrics, metric) + tsrc.metrics = append(tsrc.metrics, metric) } -func (rs *RowSource) Name() string { - if len(rs.metrics) == 0 { +func (tsrc *TableSource) Name() string { + if len(tsrc.metrics) == 0 { return "" } - return rs.metrics[0].Name() + return tsrc.metrics[0].Name() } // Returns the superset of all tags of all metrics. -func (rs *RowSource) TagColumns() []utils.Column { +func (tsrc *TableSource) TagColumns() []utils.Column { var cols []utils.Column - if rs.postgresql.TagsAsJsonb { + if tsrc.postgresql.TagsAsJsonb { cols = append(cols, TagsJSONColumn) } else { - cols = append(cols, rs.tagColumns...) + cols = append(cols, tsrc.tagColumns...) } return cols } // Returns the superset of all fields of all metrics. -func (rs *RowSource) FieldColumns() []utils.Column { - return rs.fieldColumns +func (tsrc *TableSource) FieldColumns() []utils.Column { + return tsrc.fieldColumns } // Returns the full column list, including time, tag id or tags, and fields. -func (rs *RowSource) MetricTableColumns() []utils.Column { +func (tsrc *TableSource) MetricTableColumns() []utils.Column { cols := []utils.Column{ TimeColumn, } - if rs.postgresql.TagsAsForeignkeys { + if tsrc.postgresql.TagsAsForeignkeys { cols = append(cols, TagIDColumn) } else { - cols = append(cols, rs.TagColumns()...) + cols = append(cols, tsrc.TagColumns()...) } - if rs.postgresql.FieldsAsJsonb { + if tsrc.postgresql.FieldsAsJsonb { cols = append(cols, FieldsJSONColumn) } else { - cols = append(cols, rs.FieldColumns()...) + cols = append(cols, tsrc.FieldColumns()...) } return cols } -func (rs *RowSource) TagTableColumns() []utils.Column { +func (tsrc *TableSource) TagTableColumns() []utils.Column { cols := []utils.Column{ TagIDColumn, } - cols = append(cols, rs.TagColumns()...) + cols = append(cols, tsrc.TagColumns()...) return cols } -func (rs *RowSource) ColumnNames() []string { - cols := rs.MetricTableColumns() +func (tsrc *TableSource) ColumnNames() []string { + cols := tsrc.MetricTableColumns() names := make([]string, len(cols)) for i, col := range cols { names[i] = col.Name @@ -153,43 +138,43 @@ func (rs *RowSource) ColumnNames() []string { return names } -func (rs *RowSource) DropColumn(col utils.Column) { +func (tsrc *TableSource) DropColumn(col utils.Column) { switch col.Role { case utils.TagColType: - rs.dropTagColumn(col) + tsrc.dropTagColumn(col) case utils.FieldColType: - rs.dropFieldColumn(col) + tsrc.dropFieldColumn(col) default: - panic(fmt.Sprintf("Tried to perform an invalid column drop. This should not have happened. measurement=%s name=%s role=%v", rs.Name(), col.Name, col.Role)) + panic(fmt.Sprintf("Tried to perform an invalid column drop. This should not have happened. measurement=%s name=%s role=%v", tsrc.Name(), col.Name, col.Role)) } } // Drops the tag column from conversion. Any metrics containing this tag will be skipped. -func (rs *RowSource) dropTagColumn(col utils.Column) { - if col.Role != utils.TagColType || rs.postgresql.TagsAsJsonb { - panic(fmt.Sprintf("Tried to perform an invalid tag drop. This should not have happened. measurement=%s tag=%s", rs.Name(), col.Name)) +func (tsrc *TableSource) dropTagColumn(col utils.Column) { + if col.Role != utils.TagColType || tsrc.postgresql.TagsAsJsonb { + panic(fmt.Sprintf("Tried to perform an invalid tag drop. This should not have happened. 
measurement=%s tag=%s", tsrc.Name(), col.Name)) } - rs.droppedTagColumns = append(rs.droppedTagColumns, col.Name) + tsrc.droppedTagColumns = append(tsrc.droppedTagColumns, col.Name) - pos, ok := rs.tagPositions[col.Name] + pos, ok := tsrc.tagPositions[col.Name] if !ok { return } - delete(rs.tagPositions, col.Name) - for n, p := range rs.tagPositions { + delete(tsrc.tagPositions, col.Name) + for n, p := range tsrc.tagPositions { if p > pos { - rs.tagPositions[n] -= 1 + tsrc.tagPositions[n] -= 1 } } - rs.tagColumns = append(rs.tagColumns[:pos], rs.tagColumns[pos+1:]...) + tsrc.tagColumns = append(tsrc.tagColumns[:pos], tsrc.tagColumns[pos+1:]...) - for setID, set := range rs.tagSets { + for setID, set := range tsrc.tagSets { for _, tag := range set { if tag.Key == col.Name { // The tag is defined, so drop the whole set - delete(rs.tagSets, setID) + delete(tsrc.tagSets, setID) break } } @@ -197,50 +182,50 @@ func (rs *RowSource) dropTagColumn(col utils.Column) { } // Drops the field column from conversion. Any metrics containing this field will have the field omitted. -func (rs *RowSource) dropFieldColumn(col utils.Column) { - if col.Role != utils.FieldColType || rs.postgresql.FieldsAsJsonb { - panic(fmt.Sprintf("Tried to perform an invalid field drop. This should not have happened. measurement=%s field=%s", rs.Name(), col.Name)) +func (tsrc *TableSource) dropFieldColumn(col utils.Column) { + if col.Role != utils.FieldColType || tsrc.postgresql.FieldsAsJsonb { + panic(fmt.Sprintf("Tried to perform an invalid field drop. This should not have happened. measurement=%s field=%s", tsrc.Name(), col.Name)) } - pos, ok := rs.fieldPositions[col.Name] + pos, ok := tsrc.fieldPositions[col.Name] if !ok { return } - delete(rs.fieldPositions, col.Name) - for n, p := range rs.fieldPositions { + delete(tsrc.fieldPositions, col.Name) + for n, p := range tsrc.fieldPositions { if p > pos { - rs.fieldPositions[n] -= 1 + tsrc.fieldPositions[n] -= 1 } } - rs.fieldColumns = append(rs.fieldColumns[:pos], rs.fieldColumns[pos+1:]...) + tsrc.fieldColumns = append(tsrc.fieldColumns[:pos], tsrc.fieldColumns[pos+1:]...) } -func (rs *RowSource) Next() bool { +func (tsrc *TableSource) Next() bool { for { - if rs.cursor+1 >= len(rs.metrics) { - rs.cursorValues = nil - rs.cursorError = nil + if tsrc.cursor+1 >= len(tsrc.metrics) { + tsrc.cursorValues = nil + tsrc.cursorError = nil return false } - rs.cursor += 1 + tsrc.cursor += 1 - rs.cursorValues, rs.cursorError = rs.values() - if rs.cursorValues != nil || rs.cursorError != nil { + tsrc.cursorValues, tsrc.cursorError = tsrc.values() + if tsrc.cursorValues != nil || tsrc.cursorError != nil { return true } } } -func (rs *RowSource) Reset() { - rs.cursor = -1 +func (tsrc *TableSource) Reset() { + tsrc.cursor = -1 } // values calculates the values for the metric at the cursor position. // If the metric cannot be emitted, such as due to dropped tags, or all fields dropped, the return value is nil. 
-func (rs *RowSource) values() ([]interface{}, error) {
-	metric := rs.metrics[rs.cursor]
+func (tsrc *TableSource) values() ([]interface{}, error) {
+	metric := tsrc.metrics[tsrc.cursor]
 	tags := metric.TagList()
 	fields := metric.FieldList()
@@ -248,12 +233,12 @@
 	values = append(values, metric.Time())

-	if !rs.postgresql.TagsAsForeignkeys {
-		if !rs.postgresql.TagsAsJsonb {
+	if !tsrc.postgresql.TagsAsForeignkeys {
+		if !tsrc.postgresql.TagsAsJsonb {
 			// tags_as_foreignkeys=false, tags_as_jsonb=false
-			tagValues := make([]interface{}, len(rs.tagPositions))
+			tagValues := make([]interface{}, len(tsrc.tagPositions))
 			for _, tag := range tags {
-				tagPos, ok := rs.tagPositions[tag.Key]
+				tagPos, ok := tsrc.tagPositions[tag.Key]
 				if !ok {
 					// tag has been dropped, we can't emit or we risk collision with another metric
 					return nil, nil
@@ -268,20 +253,20 @@
 	} else {
 		// tags_as_foreignkeys=true
 		tagID := utils.GetTagID(metric)
-		if _, ok := rs.tagSets[tagID]; !ok {
+		if _, ok := tsrc.tagSets[tagID]; !ok {
 			// tag has been dropped, we can't emit or we risk collision with another metric
 			return nil, nil
 		}
 		values = append(values, tagID)
 	}

-	if !rs.postgresql.FieldsAsJsonb {
+	if !tsrc.postgresql.FieldsAsJsonb {
 		// fields_as_jsonb=false
-		fieldValues := make([]interface{}, len(rs.fieldPositions))
+		fieldValues := make([]interface{}, len(tsrc.fieldPositions))
 		fieldsEmpty := true
 		for _, field := range fields {
 			// we might have dropped the field due to the table missing the column & schema updates being turned off
-			if fPos, ok := rs.fieldPositions[field.Key]; ok {
+			if fPos, ok := tsrc.fieldPositions[field.Key]; ok {
 				fieldValues[fPos] = field.Value
 				fieldsEmpty = false
 			}
@@ -303,10 +288,10 @@
 	return values, nil
 }

-func (rs *RowSource) Values() ([]interface{}, error) {
-	return rs.cursorValues, rs.cursorError
+func (tsrc *TableSource) Values() ([]interface{}, error) {
+	return tsrc.cursorValues, tsrc.cursorError
 }

-func (rs *RowSource) Err() error {
+func (tsrc *TableSource) Err() error {
 	return nil
 }
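The values() method above assembles each row by position: tags and fields are looked up in first-seen position maps, anything absent stays nil and lands in PostgreSQL as a NULL, and a dropped tag aborts the whole row to avoid colliding with another metric's column set. A minimal, self-contained sketch of that position-map technique (the map contents and field names here are illustrative, not the plugin's):

```go
package main

import "fmt"

func main() {
	// Column order is fixed by first-seen position, as in TableSource.fieldPositions.
	fieldPositions := map[string]int{"usage_user": 0, "usage_system": 1}

	// A metric that carries only one of the two known fields.
	fields := map[string]interface{}{"usage_system": 12.5}

	// Slots that stay nil are emitted as SQL NULLs by the COPY machinery.
	row := make([]interface{}, len(fieldPositions))
	for key, value := range fields {
		if pos, ok := fieldPositions[key]; ok {
			row[pos] = value
		}
	}
	fmt.Println(row) // [<nil> 12.5]
}
```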

From 05af57e56b4afee10e2fdad72b6462d187c6435a Mon Sep 17 00:00:00 2001
From: Patrick Hemmer
Date: Mon, 4 Jan 2021 22:27:35 -0500
Subject: [PATCH 076/121] outputs/postgresql: use copy for loading tags

---
 plugins/outputs/postgresql/postgresql.go    | 54 +++++--------
 plugins/outputs/postgresql/table_manager.go |  3 +-
 plugins/outputs/postgresql/table_source.go  | 85 ++++++++++++++++++++-
 3 files changed, 102 insertions(+), 40 deletions(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 150418989192c..e7a4a9358b100 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -8,7 +8,6 @@ import (
 	"github.com/influxdata/telegraf/plugins/outputs/postgresql/template"
 	"github.com/jackc/pgconn"
 	"log"
-	"strings"
 	"time"

 	"github.com/jackc/pgx/v4"
@@ -272,48 +271,29 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, tableSource *T
 }

 func (p *Postgresql) WriteTagTable(ctx context.Context, tableSource *TableSource) error {
-	tagCols := tableSource.TagTableColumns()
+	//TODO cache which tagSets we've already inserted and skip them.
+	ttsrc := NewTagTableSource(tableSource)

-	columnNames := make([]string, len(tagCols))
-	placeholders := make([]string, len(tagCols))
-	for i, col := range tagCols {
-		columnNames[i] = template.QuoteIdentifier(col.Name)
-		placeholders[i] = fmt.Sprintf("$%d", i+1)
+	tx, err := p.db.Begin(ctx)
+	if err != nil {
+		return err
 	}
+	defer tx.Rollback(ctx)

-	// pgx batch code will automatically convert this into a prepared statement & cache it
-	sql := fmt.Sprintf("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (tag_id) DO NOTHING",
-		template.QuoteIdentifier(p.Schema)+"."+template.QuoteIdentifier(tableSource.Name()+p.TagTableSuffix),
-		strings.Join(columnNames, ","),
-		strings.Join(placeholders, ","),
-	)
-
-	batch := &pgx.Batch{}
-	//TODO tableSource should emit another source for the tags. We shouldn't have to dive into its private methods.
-	//TODO cache which tagSets we've already inserted and skip them.
-	//TODO copy into a temp table, and then `insert ... on conflict` from that into the tag table.
-	for tagID, tagSet := range tableSource.tagSets {
-		values := make([]interface{}, len(columnNames))
-		values[0] = tagID
-
-		if !p.TagsAsJsonb {
-			for _, tag := range tagSet {
-				values[tableSource.tagPositions[tag.Key]+1] = tag.Value // +1 to account for tag_id column
-			}
-		} else {
-			values[1] = utils.TagListToJSON(tagSet)
-		}
+	ident := pgx.Identifier{ttsrc.postgresql.Schema, ttsrc.Name()}
+	identTemp := pgx.Identifier{ttsrc.Name() + "_temp"}
+	_, err = tx.Exec(ctx, fmt.Sprintf("CREATE TEMP TABLE %s (LIKE %s) ON COMMIT DROP", identTemp.Sanitize(), ident.Sanitize()))
+	if err != nil {
+		return fmt.Errorf("creating tags temp table: %w", err)
+	}

-		batch.Queue(sql, values...)
+	if _, err := tx.CopyFrom(ctx, identTemp, ttsrc.ColumnNames(), ttsrc); err != nil {
+		return fmt.Errorf("copying into tags temp table: %w", err)
 	}

-	results := p.db.SendBatch(ctx, batch)
-	defer results.Close()
-	for i := 0; i < len(tableSource.tagSets); i++ {
-		if _, err := results.Exec(); err != nil {
-			return err
-		}
+	if _, err := tx.Exec(ctx, fmt.Sprintf("INSERT INTO %s SELECT * FROM %s ON CONFLICT (tag_id) DO NOTHING", ident.Sanitize(), identTemp.Sanitize())); err != nil {
+		return fmt.Errorf("inserting into tags table: %w", err)
 	}

-	return nil
+	return tx.Commit(ctx)
 }
diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go
index dc81b6f677db5..18c6ce075d6f6 100644
--- a/plugins/outputs/postgresql/table_manager.go
+++ b/plugins/outputs/postgresql/table_manager.go
@@ -205,6 +205,7 @@ func (tm *TableManager) executeTemplates(
 	if err != nil {
 		return err
 	}
+	defer tx.Rollback(ctx)

 	for _, tmpl := range tmpls {
 		sql, err := tmpl.Render(tmplTable, newColumns, metricsTmplTable, tagsTmplTable)
@@ -212,7 +213,6 @@
 			return err
 		}
 		if _, err := tx.Exec(ctx, string(sql)); err != nil {
-			_ = tx.Rollback(ctx)
 			return fmt.Errorf("executing `%s`: %w", sql, err)
 		}
 	}
@@ -226,7 +226,6 @@
 			continue
 		}
 		if _, err := tx.Exec(ctx, "COMMENT ON COLUMN "+tmplTable.String()+"."+template.QuoteIdentifier(col.Name)+" IS 'tag'"); err != nil {
-			_ = tx.Rollback(ctx)
 			return fmt.Errorf("setting column role comment: %s", err)
 		}
 	}
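The rewritten WriteTagTable above drops the per-tag-set batch of prepared INSERTs in favor of a bulk pattern: COPY all pending rows into a transaction-scoped temp table, then fold them into the real tag table with INSERT ... ON CONFLICT DO NOTHING so already-known tag_ids are skipped server-side in one statement. A minimal sketch of that pattern, assuming a pgx v4 connection; the table and column names ("cpu_tag", tag_id, host) are illustrative, not the plugin's:

```go
package main

import (
	"context"

	"github.com/jackc/pgx/v4"
)

// upsertTags bulk-loads tag rows and ignores duplicates, mirroring the
// temp-table + ON CONFLICT pattern adopted by this commit.
func upsertTags(ctx context.Context, conn *pgx.Conn, rows [][]interface{}) error {
	tx, err := conn.Begin(ctx)
	if err != nil {
		return err
	}
	defer tx.Rollback(ctx) // harmless once Commit has succeeded

	// ON COMMIT DROP scopes the staging table to this transaction.
	if _, err := tx.Exec(ctx,
		`CREATE TEMP TABLE "cpu_tag_temp" (LIKE "cpu_tag") ON COMMIT DROP`); err != nil {
		return err
	}

	// One COPY replaces N round trips of prepared INSERTs.
	if _, err := tx.CopyFrom(ctx, pgx.Identifier{"cpu_tag_temp"},
		[]string{"tag_id", "host"}, pgx.CopyFromRows(rows)); err != nil {
		return err
	}

	// Rows whose tag_id already exists are silently skipped.
	if _, err := tx.Exec(ctx,
		`INSERT INTO "cpu_tag" SELECT * FROM "cpu_tag_temp" ON CONFLICT (tag_id) DO NOTHING`); err != nil {
		return err
	}
	return tx.Commit(ctx)
}
```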
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) +// TableSource satisfies pgx.CopyFromSource type TableSource struct { postgresql *Postgresql metrics []telegraf.Metric @@ -295,3 +295,86 @@ func (tsrc *TableSource) Values() ([]interface{}, error) { func (tsrc *TableSource) Err() error { return nil } + +type TagTableSource struct { + *TableSource + tagIDs []int64 + + cursor int + cursorValues []interface{} + cursorError error +} + +func NewTagTableSource(tsrc *TableSource) *TagTableSource { + ttsrc := &TagTableSource{ + TableSource: tsrc, + cursor: -1, + } + + ttsrc.tagIDs = make([]int64, 0, len(tsrc.tagSets)) + for tagID := range tsrc.tagSets { + ttsrc.tagIDs = append(ttsrc.tagIDs, tagID) + } + + return ttsrc +} + +func (ttsrc *TagTableSource) Name() string { + return ttsrc.TableSource.Name() + ttsrc.postgresql.TagTableSuffix +} + +func (ttsrc *TagTableSource) ColumnNames() []string { + cols := ttsrc.TagTableColumns() + names := make([]string, len(cols)) + for i, col := range cols { + names[i] = col.Name + } + return names +} + +func (ttsrc *TagTableSource) Next() bool { + for { + if ttsrc.cursor+1 >= len(ttsrc.tagIDs) { + ttsrc.cursorValues = nil + ttsrc.cursorError = nil + return false + } + ttsrc.cursor += 1 + + ttsrc.cursorValues, ttsrc.cursorError = ttsrc.values() + if ttsrc.cursorValues != nil || ttsrc.cursorError != nil { + return true + } + } +} + +func (ttsrc *TagTableSource) Reset() { + ttsrc.cursor = -1 +} + +func (ttsrc *TagTableSource) values() ([]interface{}, error) { + tagID := ttsrc.tagIDs[ttsrc.cursor] + tagSet := ttsrc.tagSets[tagID] + + var values []interface{} + if !ttsrc.postgresql.TagsAsJsonb { + values = make([]interface{}, len(tagSet)+1) + for _, tag := range tagSet { + values[ttsrc.TableSource.tagPositions[tag.Key]+1] = tag.Value // +1 to account for tag_id column + } + } else { + values = make([]interface{}, 2) + values[1] = utils.TagListToJSON(tagSet) + } + values[0] = tagID + + return values, nil +} + +func (ttsrc *TagTableSource) Values() ([]interface{}, error) { + return ttsrc.cursorValues, ttsrc.cursorError +} + +func (ttsrc *TagTableSource) Err() error { + return nil +} From 07d5d46190c26c172f46893beab3a9a6b3c3d62c Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 4 Jan 2021 22:49:22 -0500 Subject: [PATCH 077/121] outputs/postgresql: update config documentation --- plugins/outputs/postgresql/postgresql.go | 30 ++++++++++++++---------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index e7a4a9358b100..1584fa0ea66c2 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -120,6 +120,13 @@ var sampleConfig = ` ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE ## all supported vars here: https://www.postgresql.org/docs/current/libpq-envars.html ## + ## Non-standard parameters: + ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. + ## pool_min_conns (default: 0) - Minimum size of connection pool. + ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. + ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. + ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. + ## ## Without the dbname parameter, the driver will default to a database ## with the same name as the user. 

From 07d5d46190c26c172f46893beab3a9a6b3c3d62c Mon Sep 17 00:00:00 2001
From: Patrick Hemmer
Date: Mon, 4 Jan 2021 22:49:22 -0500
Subject: [PATCH 077/121] outputs/postgresql: update config documentation

---
 plugins/outputs/postgresql/postgresql.go | 30 ++++++++++++++----------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index e7a4a9358b100..1584fa0ea66c2 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -120,6 +120,13 @@ var sampleConfig = `
   ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE
   ## all supported vars here: https://www.postgresql.org/docs/current/libpq-envars.html
   ##
+  ## Non-standard parameters:
+  ##   pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts.
+  ##   pool_min_conns (default: 0) - Minimum size of connection pool.
+  ##   pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing.
+  ##   pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing.
+  ##   pool_health_check_period (default: 0s) - Duration between health checks on idle connections.
+  ##
   ## Without the dbname parameter, the driver will default to a database
   ## with the same name as the user. This dbname is just for instantiating a
   ## connection with the server and doesn't restrict the databases we are trying
@@ -133,18 +140,6 @@ var sampleConfig = `
   ## Store tags as foreign keys in the metrics table. Default is false.
   # tags_as_foreignkeys = false

-  ## Template to use for generating tables
-  ## Available Variables:
-  ##   {TABLE} - tablename as identifier
-  ##   {TABLELITERAL} - tablename as string literal
-  ##   {COLUMNS} - column definitions
-  ##   {KEY_COLUMNS} - comma-separated list of key columns (time + tags)
-
-  ## Default template
-  # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})"
-  ## Example for timescaledb
-  # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval,if_not_exists := true);"
-
   ## Schema to create the tables into
   # schema = "public"

@@ -154,6 +149,17 @@ var sampleConfig = `
   ## Use jsonb datatype for fields
   # fields_as_jsonb = false

+  ## Templated statements to execute when creating a new table.
+  create_templates = ['CREATE TABLE {{.table}} ({{.columns}})']
+
+  ## Templated statements to execute when adding columns to a table.
+  add_column_templates = ['ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}']
+
+  ## Templated statements to execute when creating a new tag table.
+  tag_table_create_templates = ['CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))']
+
+  ## Templated statements to execute when adding columns to a tag table.
+  tag_table_add_column_templates = ['ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}']
 `

 func (p *Postgresql) SampleConfig() string { return sampleConfig }

From c39a091c52feff62c22a517c6599de14b734d18e Mon Sep 17 00:00:00 2001
From: Patrick Hemmer
Date: Mon, 4 Jan 2021 22:56:37 -0500
Subject: [PATCH 078/121] outputs/postgresql: update go modules

---
 go.mod                                             |  17 +-
 go.sum                                             | 125 +++++-
 .../postgresql/postgresql_integration_test.go      | 423 ------------------
 plugins/outputs/postgresql/postgresql_test.go      | 188 --------
 .../outputs/postgresql/tables/manager_test.go      | 143 ------
 5 files changed, 128 insertions(+), 768 deletions(-)
 delete mode 100644 plugins/outputs/postgresql/postgresql_integration_test.go
 delete mode 100644 plugins/outputs/postgresql/postgresql_test.go
 delete mode 100644 plugins/outputs/postgresql/tables/manager_test.go

diff --git a/go.mod b/go.mod
index 523e4fbdf4b0a..b66f75565a0d6 100644
--- a/go.mod
+++ b/go.mod
@@ -13,6 +13,9 @@ require (
 	github.com/Azure/go-autorest/autorest v0.11.17
 	github.com/Azure/go-autorest/autorest/azure/auth v0.5.6
 	github.com/BurntSushi/toml v0.3.1
+	github.com/Masterminds/goutils v1.1.0 // indirect
+	github.com/Masterminds/semver v1.5.0 // indirect
+	github.com/Masterminds/sprig v2.22.0+incompatible
 	github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee
 	github.com/Microsoft/ApplicationInsights-Go v0.4.2
 	github.com/Shopify/sarama v1.27.2
@@ -38,7 +41,6 @@
 	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869
 	github.com/caio/go-tdigest v3.1.0+incompatible
 	github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6
-	github.com/cockroachdb/apd v1.1.0 // indirect
 	github.com/containerd/containerd v1.4.1 // indirect
 	github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037
 	github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect
@@ -57,7 +59,7 @@
github.com/goburrow/modbus v0.1.0 github.com/goburrow/serial v0.1.0 // indirect github.com/gobwas/glob v0.2.3 - github.com/gofrs/uuid v2.1.0+incompatible + github.com/gofrs/uuid v3.2.0+incompatible github.com/gogo/protobuf v1.3.1 github.com/golang/geo v0.0.0-20190916061304-5b978397cfec github.com/golang/protobuf v1.5.1 @@ -72,21 +74,25 @@ require ( github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 github.com/hashicorp/consul/api v1.6.0 github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/huandu/xstrings v1.3.2 // indirect + github.com/imdario/mergo v0.3.11 // indirect github.com/influxdata/go-syslog/v2 v2.0.1 github.com/influxdata/tail v1.0.1-0.20200707181643-03a791b270e4 github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect - github.com/jackc/pgx v3.6.0+incompatible + github.com/jackc/pgconn v1.7.2 + github.com/jackc/pgx v3.6.2+incompatible + github.com/jackc/pgx/v4 v4.9.2 github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a github.com/jmespath/go-jmespath v0.4.0 github.com/kardianos/service v1.0.0 github.com/karrick/godirwalk v1.16.1 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 - github.com/lib/pq v1.3.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/mdlayher/apcupsd v0.0.0-20200608131503-2bf01da7bf1b github.com/miekg/dns v1.1.31 + github.com/mitchellh/copystructure v1.0.0 // indirect github.com/multiplay/go-ts3 v1.0.0 github.com/naoina/go-stringutil v0.1.0 // indirect github.com/nats-io/nats-server/v2 v2.1.4 @@ -105,7 +111,6 @@ require ( github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 github.com/sensu/sensu-go/api/core/v2 v2.6.0 github.com/shirou/gopsutil v3.20.11+incompatible - github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect github.com/signalfx/golib/v3 v3.3.0 github.com/sirupsen/logrus v1.6.0 github.com/soniah/gosnmp v1.25.0 @@ -150,4 +155,4 @@ require ( ) // replaced due to https://github.com/satori/go.uuid/issues/73 -replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible +// replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible diff --git a/go.sum b/go.sum index 795772ffccca6..5beaf95b2dce6 100644 --- a/go.sum +++ b/go.sum @@ -102,6 +102,12 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod 
h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg= @@ -249,6 +255,8 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ= github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= @@ -474,8 +482,6 @@ github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA= -github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -657,9 +663,14 @@ github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKEN github.com/hetznercloud/hcloud-go v1.21.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/flux 
v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY= github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s= @@ -678,10 +689,63 @@ github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q= github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q= -github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= +github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.7.2 h1:195tt17jkjy+FrFlY0pgyrul5kRLb7BGXY3JTrNxeXU= +github.com/jackc/pgconn v1.7.2/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod 
h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.6 h1:b1105ZGEMFe7aCvrT1Cca3VoVb4ZFMaFJLJcg/3zD+8= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= +github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= +github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= +github.com/jackc/pgtype v1.6.1 h1:CAtFD7TS95KrxRAh3bidgLwva48WYxk8YkbHZsSWfbI= +github.com/jackc/pgtype v1.6.1/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= +github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= +github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= +github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= +github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= +github.com/jackc/pgx/v4 v4.9.2 h1:1V7EAc5jvIqXwdzgk8+YyOK+4071hhePzBCAF6gxUUw= +github.com/jackc/pgx/v4 v4.9.2/go.mod h1:Jt/xJDqjUDUOMSv8VMWPQlCObVgF2XOgqKsW8S4ROYA= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.2 h1:mpQEXihFnWGDy6X98EOTh81JYuxn7txby8ilJ3iIPGM= +github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jaegertracing/jaeger v1.15.1 h1:7QzNAXq+4ko9GtCjozDNAp2uonoABu+B2Rk94hjQcp4= github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI= github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= @@ -749,6 +813,7 @@ github.com/klauspost/pgzip 
v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH6 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -758,6 +823,7 @@ github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -768,6 +834,8 @@ github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= @@ -786,12 +854,19 @@ github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty 
v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= @@ -818,6 +893,8 @@ github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE9 github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -830,6 +907,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -988,19 +1067,29 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize 
v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= +github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= +github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sensu/sensu-go/api/core/v2 v2.6.0 h1:hEKPHFZZNDuWTlKr7Kgm2yog65ZdkBUqNesE5qaWEGo= @@ -1010,10 +1099,9 @@ github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMT github.com/shirou/gopsutil v3.20.11+incompatible h1:LJr4ZQK4mPpIV5gOa4jCOKOGb4ty4DZO54I4FGqIpto= github.com/shirou/gopsutil v3.20.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= -github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= -github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= -github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoMJBbEoyE/RJoE8vz7Mb8AjvifMMwSyvY= +github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod 
h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= @@ -1029,8 +1117,10 @@ github.com/signalfx/sapm-proto v0.4.0 h1:5lQX++6FeIjUZEIcnSgBqhOpmSjMkRBW3y/4ZiK github.com/signalfx/sapm-proto v0.4.0/go.mod h1:x3gtwJ1GRejtkghB4nYpwixh2zqJrLbPU959ZNhM0Fk= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= @@ -1134,6 +1224,7 @@ github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4 github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -1151,18 +1242,26 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.starlark.net v0.0.0-20210406145628-7a1108eaa012 h1:4RGobP/iq7S22H0Bb92OEt+M8/cfBQnW+T+a2MC0sQo= go.starlark.net v0.0.0-20210406145628-7a1108eaa012/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr 
v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1170,6 +1269,7 @@ golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1177,6 +1277,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1185,6 +1287,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1322,6 +1425,7 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1396,6 +1500,7 @@ golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1408,6 +1513,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190906203814-12febf440ab1/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1439,6 +1545,8 @@ golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200822203824-307de81be3f4/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9 h1:sEvmEcJVKBNUvgCUClbUQeHOAa9U0I2Ce1BooMvVCY4= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1557,6 +1665,7 @@ gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1 gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= diff --git a/plugins/outputs/postgresql/postgresql_integration_test.go b/plugins/outputs/postgresql/postgresql_integration_test.go deleted file mode 100644 index 355cdc4cc352d..0000000000000 --- a/plugins/outputs/postgresql/postgresql_integration_test.go +++ /dev/null @@ -1,423 +0,0 @@ -package postgresql - -import ( - "database/sql" - "fmt" - "math/rand" - "strconv" - "testing" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - _ "github.com/jackc/pgx/stdlib" - "github.com/stretchr/testify/assert" -) - -func prepareAndConnect(t *testing.T, foreignTags, jsonTags, jsonFields bool) (telegraf.Metric, *sql.DB, *Postgresql) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - testAddress := "postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable" - - testMetric := testMetric("metric name", "tag1", int(1)) - - postgres := &Postgresql{ - Connection: testAddress, - Schema: "public", - TagsAsForeignkeys: foreignTags, - TagsAsJsonb: jsonTags, - FieldsAsJsonb: jsonFields, - DoSchemaUpdates: true, - TableTemplate: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})", - TagTableSuffix: "_tags", - } - - // drop metric tables if exists - - db, err := sql.Open("pgx", testAddress) - assert.NoError(t, err, "Could not connect to test db") - - _, err = db.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS "%s"`, testMetric.Name())) - assert.NoError(t, err, "Could not prepare db") - _, err = db.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS "%s%s"`, testMetric.Name(), postgres.TagTableSuffix)) - assert.NoError(t, err, "Could not prepare db") - - err = postgres.Connect() - assert.NoError(t, err, "Could not connect") - return testMetric, db, postgres -} - -// testMetric Returns a simple test point: -// measurement -> name -// tags -> "tag":tag -// value -> "value": value -// time -> time.Now().UTC() -func testMetric(name string, tag string, value interface{}) telegraf.Metric { - if value == nil { - panic("Cannot use a nil value") - } - tags := map[string]string{"tag": tag} - pt, _ := metric.New( - name, - tags, - map[string]interface{}{"value": value}, - time.Now().UTC(), - ) - return pt -} - -func TestWriteToPostgres(t *testing.T) { - testMetric, dbConn, postgres := prepareAndConnect(t, false, false, false) - writeAndAssertSingleMetricNoJSON(t, testMetric, dbConn, postgres) -} - -func TestWriteToPostgresJsonTags(t *testing.T) { - tagsAsForeignKey := false - tagsAsJSON := true - fieldsAsJSON := false - testMetric, dbConn, postgres := 
prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) - defer dbConn.Close() - - // insert first metric - err := postgres.Write([]telegraf.Metric{testMetric}) - assert.NoError(t, err, "Could not write") - - // should have created table, all columns in the same table - row := dbConn.QueryRow(fmt.Sprintf(`SELECT time, tags, value FROM "%s"`, testMetric.Name())) - var ts time.Time - var tags string - var value int64 - err = row.Scan(&ts, &tags, &value) - assert.NoError(t, err, "Could not check test results") - - sentTag, _ := testMetric.GetTag("tag") - sentTagJSON := fmt.Sprintf(`{"tag": "%s"}`, sentTag) - sentValue, _ := testMetric.GetField("value") - sentTs := testMetric.Time() - // postgres doesn't support nano seconds in timestamp - sentTsNanoSecondOffset := sentTs.Nanosecond() - nanoSeconds := sentTsNanoSecondOffset % 1000 - sentTs = sentTs.Add(time.Duration(-nanoSeconds) * time.Nanosecond) - if !ts.UTC().Equal(sentTs) || tags != sentTagJSON || value != sentValue.(int64) { - assert.Fail(t, fmt.Sprintf("Expected: %v, %v, %v; Received: %v, %v, %v", - sentTs, sentTagJSON, sentValue, - ts.UTC(), tags, value)) - } -} - -func TestWriteToPostgresJsonTagsAsForeignTable(t *testing.T) { - tagsAsForeignKey := true - tagsAsJSON := true - fieldsAsJSON := false - testMetric, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) - defer dbConn.Close() - - // insert first metric - err := postgres.Write([]telegraf.Metric{testMetric}) - assert.NoError(t, err, "Could not write") - - // should have created table, all columns in the same table - row := dbConn.QueryRow(fmt.Sprintf(`SELECT time, tag_id, value FROM "%s"`, testMetric.Name())) - var ts time.Time - var tagID int64 - var value int64 - err = row.Scan(&ts, &tagID, &value) - assert.NoError(t, err, "Could not check test results") - - sentValue, _ := testMetric.GetField("value") - sentTs := testMetric.Time() - // postgres doesn't support nano seconds in timestamp - sentTsNanoSecondOffset := sentTs.Nanosecond() - nanoSeconds := sentTsNanoSecondOffset % 1000 - sentTs = sentTs.Add(time.Duration(-nanoSeconds) * time.Nanosecond) - if !ts.UTC().Equal(sentTs) || tagID != 1 || value != sentValue.(int64) { - assert.Fail(t, fmt.Sprintf("Expected: %v, %v, %v; Received: %v, %v, %v", - sentTs, 1, sentValue, - ts.UTC(), tagID, value)) - } - - sentTag, _ := testMetric.GetTag("tag") - sentTagJSON := fmt.Sprintf(`{"tag": "%s"}`, sentTag) - row = dbConn.QueryRow(fmt.Sprintf(`SELECT tag_id, tags FROM "%s%s"`, testMetric.Name(), postgres.TagTableSuffix)) - tagID = 0 - var tags string - err = row.Scan(&tagID, &tags) - assert.NoError(t, err, "Could not check test results") - assert.Equal(t, int64(1), tagID) - assert.Equal(t, sentTagJSON, tags) -} - -func TestWriteToPostgresMultipleRowsOneTag(t *testing.T) { - tagsAsForeignKey := true - tagsAsJSON := true - fieldsAsJSON := false - testMetric, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) - defer dbConn.Close() - - // insert first metric - err := postgres.Write([]telegraf.Metric{testMetric, testMetric}) - assert.NoError(t, err, "Could not write") - - // should have two rows - row := dbConn.QueryRow(fmt.Sprintf(`SELECT count(*) FROM "%s"`, testMetric.Name())) - var count int64 - err = row.Scan(&count) - assert.NoError(t, err, "Could not check test results") - assert.Equal(t, int64(2), count) - - sentTag, _ := testMetric.GetTag("tag") - sentTagJSON := fmt.Sprintf(`{"tag": "%s"}`, sentTag) - row = dbConn.QueryRow(fmt.Sprintf(`SELECT tag_id, tags FROM 
"%s%s"`, testMetric.Name(), postgres.TagTableSuffix)) - var tagID int64 - var tags string - err = row.Scan(&tagID, &tags) - assert.NoError(t, err, "Could not check test results") - assert.Equal(t, int64(1), tagID) - assert.Equal(t, sentTagJSON, tags) -} - -func TestWriteToPostgresAddNewTag(t *testing.T) { - tagsAsForeignKey := true - tagsAsJSON := true - fieldsAsJSON := false - testMetricWithOneTag, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) - defer dbConn.Close() - - testMetricWithOneMoreTag := testMetric("metric name", "tag1", int(2)) - testMetricWithOneMoreTag.AddTag("second_tag", "tag2") - // insert first two metric - err := postgres.Write([]telegraf.Metric{testMetricWithOneTag, testMetricWithOneMoreTag}) - assert.NoError(t, err, "Could not write") - - // should have two rows - row := dbConn.QueryRow(fmt.Sprintf(`SELECT count(*) FROM "%s"`, testMetricWithOneTag.Name())) - var count int64 - err = row.Scan(&count) - assert.NoError(t, err, "Could not check test results") - assert.Equal(t, int64(2), count) - - // and two tagsets - sentTag, _ := testMetricWithOneTag.GetTag("tag") - sentTagJSON := fmt.Sprintf(`{"tag": "%s"}`, sentTag) - row = dbConn.QueryRow(fmt.Sprintf(`SELECT tags FROM "%s%s" WHERE tag_id=1`, testMetricWithOneTag.Name(), postgres.TagTableSuffix)) - var tags string - err = row.Scan(&tags) - assert.NoError(t, err, "Could not check test results") - assert.Equal(t, sentTagJSON, tags) - - secondSentTagsJSON := `{"tag": "tag1", "second_tag": "tag2"}` - row = dbConn.QueryRow(fmt.Sprintf(`SELECT tags FROM "%s%s" WHERE tag_id=2`, testMetricWithOneMoreTag.Name(), postgres.TagTableSuffix)) - err = row.Scan(&tags) - assert.NoError(t, err, "Could not check test results") - assert.Equal(t, secondSentTagsJSON, tags) - - // insert new point with a third tagset - testMetricWithThirdTag := testMetric("metric name", "tag1", int(2)) - testMetricWithThirdTag.AddTag("third_tag", "tag3") - err = postgres.Write([]telegraf.Metric{testMetricWithThirdTag}) - assert.NoError(t, err, "Could not write") - thirdSentTagsJSON := `{"tag": "tag1", "third_tag": "tag3"}` - row = dbConn.QueryRow(fmt.Sprintf(`SELECT tags FROM "%s%s" WHERE tag_id=3`, testMetricWithThirdTag.Name(), postgres.TagTableSuffix)) - err = row.Scan(&tags) - assert.NoError(t, err, "Could not check test results") - assert.Equal(t, thirdSentTagsJSON, tags) -} - -func TestWriteToPostgresAddNewField(t *testing.T) { - testMetric, dbConn, postgres := prepareAndConnect(t, false, false, false) - defer dbConn.Close() - - // insert first metric - writeAndAssertSingleMetricNoJSON(t, testMetric, dbConn, postgres) - - //insert second metric with one more field - testMetric.AddField("field2", 1.0) - testMetric.SetTime(time.Now()) - err := postgres.Write([]telegraf.Metric{testMetric}) - assert.NoError(t, err, "Could not write") - - rows, err := dbConn.Query(fmt.Sprintf(`SELECT time, tag, value, field2 FROM "%s" ORDER BY time ASC`, testMetric.Name())) - assert.NoError(t, err, "Could not check written results") - var ts time.Time - var tag string - var value sql.NullInt64 - var field2 sql.NullFloat64 - rowNum := 1 - for rows.Next() { - rows.Scan(&ts, &tag, &value, &field2) - if rowNum == 1 { - assert.False(t, field2.Valid) - } else if rowNum == 2 { - assert.Equal(t, 1.0, field2.Float64) - } else { - assert.FailNow(t, "more rows than expected") - } - rowNum++ - } - -} - -func writeAndAssertSingleMetricNoJSON(t *testing.T, testMetric telegraf.Metric, dbConn *sql.DB, postgres *Postgresql) { - err := 
postgres.Write([]telegraf.Metric{testMetric}) - assert.NoError(t, err, "Could not write") - - // should have created table, all columns in the same table - row := dbConn.QueryRow(fmt.Sprintf(`SELECT time, tag, value FROM "%s"`, testMetric.Name())) - var ts time.Time - var tag string - var value int64 - err = row.Scan(&ts, &tag, &value) - assert.NoError(t, err, "Could not check test results") - - sentTag, _ := testMetric.GetTag("tag") - sentValue, _ := testMetric.GetField("value") - sentTs := testMetric.Time() - // postgres doesn't support nano seconds in timestamp - sentTsNanoSecondOffset := sentTs.Nanosecond() - nanoSeconds := sentTsNanoSecondOffset % 1000 - sentTs = sentTs.Add(time.Duration(-nanoSeconds) * time.Nanosecond) - if !ts.UTC().Equal(sentTs) || tag != sentTag || value != sentValue.(int64) { - assert.Fail(t, fmt.Sprintf("Expected: %v, %v, %v; Received: %v, %v, %v", - sentTs, sentTag, sentValue, - ts.UTC(), tag, value)) - } -} - -func TestWriteToPostgresMultipleMetrics(t *testing.T) { - tagsAsForeignKey := true - tagsAsJSON := true - fieldsAsJSON := false - testMetric, dbConn, postgres := prepareAndConnect(t, tagsAsForeignKey, tagsAsJSON, fieldsAsJSON) - defer dbConn.Close() - dbConn.Exec(`DROP TABLE IF EXISTS "` + testMetric.Name() + `2"`) - dbConn.Exec(`DROP TABLE IF EXISTS "` + testMetric.Name() + `2_tag"`) - testMetricInSecondMeasurement, _ := metric.New(testMetric.Name()+"2", testMetric.Tags(), testMetric.Fields(), testMetric.Time().Add(time.Second)) - // insert first metric - err := postgres.Write([]telegraf.Metric{testMetric, testMetric, testMetricInSecondMeasurement}) - assert.NoError(t, err, "Could not write") - - // should have created table, all columns in the same table - rows, _ := dbConn.Query(fmt.Sprintf(`SELECT time, tag_id, value FROM "%s"`, testMetric.Name())) - // check results for testMetric if in db - for i := 0; i < 2; i++ { - var ts time.Time - var tagID int64 - var value int64 - rows.Next() - err = rows.Scan(&ts, &tagID, &value) - assert.NoError(t, err, "Could not check test results") - - sentValue, _ := testMetric.GetField("value") - sentTs := testMetric.Time() - // postgres doesn't support nano seconds in timestamp - sentTsNanoSecondOffset := sentTs.Nanosecond() - nanoSeconds := sentTsNanoSecondOffset % 1000 - sentTs = sentTs.Add(time.Duration(-nanoSeconds) * time.Nanosecond) - if !ts.UTC().Equal(sentTs.UTC()) { - assert.Fail(t, fmt.Sprintf("Expected: %v; Received: %v", sentTs, ts.UTC())) - } - - assert.Equal(t, int64(1), tagID) - assert.Equal(t, sentValue.(int64), value) - - sentTag, _ := testMetric.GetTag("tag") - sentTagJSON := fmt.Sprintf(`{"tag": "%s"}`, sentTag) - row := dbConn.QueryRow(fmt.Sprintf(`SELECT tag_id, tags FROM "%s%s"`, testMetric.Name(), postgres.TagTableSuffix)) - tagID = 0 - var tags string - err = row.Scan(&tagID, &tags) - assert.NoError(t, err, "Could not check test results") - assert.Equal(t, int64(1), tagID) - assert.Equal(t, sentTagJSON, tags) - } - // check results for second metric - row := dbConn.QueryRow(fmt.Sprintf(`SELECT time, tag_id, value FROM "%s"`, testMetricInSecondMeasurement.Name())) - var ts time.Time - var tagID int64 - var value int64 - err = row.Scan(&ts, &tagID, &value) - assert.NoError(t, err, "Could not check test results") - - sentValue, _ := testMetricInSecondMeasurement.GetField("value") - sentTs := testMetricInSecondMeasurement.Time() - // postgres doesn't support nano seconds in timestamp - sentTsNanoSecondOffset := sentTs.Nanosecond() - nanoSeconds := sentTsNanoSecondOffset % 1000 - sentTs = 
sentTs.Add(time.Duration(-nanoSeconds) * time.Nanosecond) - if !ts.UTC().Equal(sentTs.UTC()) { - assert.Fail(t, fmt.Sprintf("Expected: %v; Received: %v", sentTs, ts.UTC())) - } - - assert.Equal(t, int64(1), tagID) - assert.Equal(t, sentValue.(int64), value) -} - -func TestPerformanceIsAcceptable(t *testing.T) { - _, db, postgres := prepareAndConnect(t, false, false, false) - defer db.Close() - numMetricsPerMeasure := 10000 - numTags := 5 - numDiffValuesForEachTag := 5 - numFields := 10 - numMeasures := 2 - metrics := make([]telegraf.Metric, numMeasures*numMetricsPerMeasure) - for measureInd := 0; measureInd < numMeasures; measureInd++ { - for numMetric := 0; numMetric < numMetricsPerMeasure; numMetric++ { - tags := map[string]string{} - for tag := 0; tag < numTags; tag++ { - randNum := rand.Intn(numDiffValuesForEachTag) - tags[fmt.Sprintf("tag_%d", tag)] = strconv.Itoa(randNum) - } - fields := map[string]interface{}{} - for field := 0; field < numFields; field++ { - fields[fmt.Sprintf("field_%d", field)] = rand.Float64() - } - metricName := "m_" + strconv.Itoa(measureInd) - m, _ := metric.New(metricName, tags, fields, time.Now()) - metrics[measureInd*numMetricsPerMeasure+numMetric] = m - } - } - - start := time.Now() - err := postgres.Write(metrics) - assert.NoError(t, err) - end := time.Since(start) - t.Log("Wrote " + strconv.Itoa(numMeasures*numMetricsPerMeasure) + " metrics in " + end.String()) -} - -func TestPostgresBatching(t *testing.T) { - _, db, postgres := prepareAndConnect(t, false, false, false) - defer db.Close() - numMetricsPerMeasure := 5 - numMeasures := 2 - metrics := make([]telegraf.Metric, numMeasures*numMetricsPerMeasure) - for measureInd := 0; measureInd < numMeasures; measureInd++ { - metricName := "m_" + strconv.Itoa(measureInd) - db.Exec(fmt.Sprintf(`DROP TABLE IF EXISTS ` + metricName)) - for numMetric := 0; numMetric < numMetricsPerMeasure; numMetric++ { - tags := map[string]string{} - fields := map[string]interface{}{"f": 1} - m, _ := metric.New(metricName, tags, fields, time.Now()) - metrics[measureInd*numMetricsPerMeasure+numMetric] = m - } - } - - err := postgres.Write(metrics) - assert.NoError(t, err) - err = postgres.Write(metrics) - assert.NoError(t, err) - // check num rows inserted by transaction id should be 'numMetricsPerMeasure' for - // both transactions, for all measures - for measureInd := 0; measureInd < numMeasures; measureInd++ { - metricName := "m_" + strconv.Itoa(measureInd) - rows, err := db.Query(`select count(*) from ` + metricName + ` group by xmin`) - assert.NoError(t, err) - var count int64 - rows.Next() - rows.Scan(&count) - assert.Equal(t, int64(numMetricsPerMeasure), count) - rows.Close() - } -} diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go deleted file mode 100644 index b342bf10cb1a5..0000000000000 --- a/plugins/outputs/postgresql/postgresql_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package postgresql - -import ( - "sync" - "testing" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/columns" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/db" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" - "github.com/jackc/pgx" - _ "github.com/jackc/pgx/stdlib" - "github.com/stretchr/testify/assert" -) - -func TestPostgresqlMetricsFromMeasure(t *testing.T) { - postgreSQL, metrics, metricIndices := prepareAllColumnsInOnePlaceNoJSON() - 
err := postgreSQL.writeMetricsFromMeasure(metrics[0].Name(), metricIndices["m"], metrics) - assert.NoError(t, err) - postgreSQL, metrics, metricIndices = prepareAllColumnsInOnePlaceTagsAndFieldsJSON() - err = postgreSQL.writeMetricsFromMeasure(metrics[0].Name(), metricIndices["m"], metrics) - assert.NoError(t, err) -} - -func TestPostgresqlIsAliveCalledOnWrite(t *testing.T) { - postgreSQL, metrics, _ := prepareAllColumnsInOnePlaceNoJSON() - mockedDb := postgreSQL.db.(*mockDb) - mockedDb.isAliveResponses = []bool{true} - err := postgreSQL.Write(metrics[:1]) - assert.NoError(t, err) - assert.Equal(t, 1, mockedDb.currentIsAliveResponse) -} - -func TestPostgresqlDbAssignmentLock(t *testing.T) { - postgreSQL, metrics, _ := prepareAllColumnsInOnePlaceNoJSON() - mockedDb := postgreSQL.db.(*mockDb) - mockedDb.isAliveResponses = []bool{true} - mockedDb.secondsToSleepInIsAlive = 3 - var endOfWrite, startOfWrite, startOfReset, endOfReset time.Time - var wg sync.WaitGroup - wg.Add(2) - go func() { - startOfWrite = time.Now() - err := postgreSQL.Write(metrics[:1]) - assert.NoError(t, err) - endOfWrite = time.Now() - wg.Done() - }() - time.Sleep(time.Second) - - go func() { - startOfReset = time.Now() - postgreSQL.dbConnLock.Lock() - time.Sleep(time.Second) - postgreSQL.dbConnLock.Unlock() - endOfReset = time.Now() - wg.Done() - }() - wg.Wait() - assert.True(t, startOfWrite.Before(startOfReset)) - assert.True(t, startOfReset.Before(endOfWrite)) - assert.True(t, endOfWrite.Before(endOfReset)) -} - -func prepareAllColumnsInOnePlaceNoJSON() (*Postgresql, []telegraf.Metric, map[string][]int) { - oneMetric, _ := metric.New("m", map[string]string{"t": "tv"}, map[string]interface{}{"f": 1}, time.Now()) - twoMetric, _ := metric.New("m", map[string]string{"t2": "tv2"}, map[string]interface{}{"f2": 2}, time.Now()) - threeMetric, _ := metric.New("m", map[string]string{"t": "tv", "t2": "tv2"}, map[string]interface{}{"f": 3, "f2": 4}, time.Now()) - - return &Postgresql{ - TagTableSuffix: "_tag", - DoSchemaUpdates: true, - tableManager: &mockTables{t: map[string]bool{"m": true}, missingCols: []int{}}, - rows: &mockTransformer{rows: [][]interface{}{nil, nil, nil}}, - columns: columns.NewMapper(false, false, false), - db: &mockDb{}, - dbConnLock: sync.Mutex{}, - }, []telegraf.Metric{ - oneMetric, twoMetric, threeMetric, - }, map[string][]int{ - "m": []int{0, 1, 2}, - } -} - -func prepareAllColumnsInOnePlaceTagsAndFieldsJSON() (*Postgresql, []telegraf.Metric, map[string][]int) { - oneMetric, _ := metric.New("m", map[string]string{"t": "tv"}, map[string]interface{}{"f": 1}, time.Now()) - twoMetric, _ := metric.New("m", map[string]string{"t2": "tv2"}, map[string]interface{}{"f2": 2}, time.Now()) - threeMetric, _ := metric.New("m", map[string]string{"t": "tv", "t2": "tv2"}, map[string]interface{}{"f": 3, "f2": 4}, time.Now()) - - return &Postgresql{ - TagTableSuffix: "_tag", - DoSchemaUpdates: true, - TagsAsForeignkeys: false, - TagsAsJsonb: true, - FieldsAsJsonb: true, - dbConnLock: sync.Mutex{}, - tableManager: &mockTables{t: map[string]bool{"m": true}, missingCols: []int{}}, - columns: columns.NewMapper(false, true, true), - rows: &mockTransformer{rows: [][]interface{}{nil, nil, nil}}, - db: &mockDb{}, - }, []telegraf.Metric{ - oneMetric, twoMetric, threeMetric, - }, map[string][]int{ - "m": []int{0, 1, 2}, - } -} - -type mockTables struct { - t map[string]bool - createErr error - missingCols []int - mismatchErr error - addColsErr error -} - -func (m *mockTables) Exists(tableName string) bool { - return m.t[tableName] 
-} -func (m *mockTables) CreateTable(tableName string, colDetails *utils.TargetColumns) error { - if m.createErr != nil { - return m.createErr - } - m.t[tableName] = true - return nil -} -func (m *mockTables) FindColumnMismatch(tableName string, colDetails *utils.TargetColumns) ([]int, error) { - return m.missingCols, m.mismatchErr -} -func (m *mockTables) AddColumnsToTable(tableName string, columnIndices []int, colDetails *utils.TargetColumns) error { - return m.addColsErr -} -func (m *mockTables) SetConnection(db db.Wrapper) {} - -type mockTransformer struct { - rows [][]interface{} - current int - rowErr error -} - -func (mt *mockTransformer) createRowFromMetric(numColumns int, metric telegraf.Metric, targetColumns, targetTagColumns *utils.TargetColumns) ([]interface{}, error) { - if mt.rowErr != nil { - return nil, mt.rowErr - } - row := mt.rows[mt.current] - mt.current++ - return row, nil -} - -type mockDb struct { - doCopyErr error - isAliveResponses []bool - currentIsAliveResponse int - secondsToSleepInIsAlive int64 -} - -func (m *mockDb) Exec(query string, args ...interface{}) (pgx.CommandTag, error) { - return "", nil -} - -func (m *mockDb) DoCopy(fullTableName *pgx.Identifier, colNames []string, batch [][]interface{}) error { - return m.doCopyErr -} -func (m *mockDb) Query(query string, args ...interface{}) (*pgx.Rows, error) { - return nil, nil -} -func (m *mockDb) QueryRow(query string, args ...interface{}) *pgx.Row { - return nil -} -func (m *mockDb) Close() error { - return nil -} - -func (m *mockDb) IsAlive() bool { - if m.secondsToSleepInIsAlive > 0 { - time.Sleep(time.Duration(m.secondsToSleepInIsAlive) * time.Second) - } - if m.isAliveResponses == nil { - return true - } - if m.currentIsAliveResponse >= len(m.isAliveResponses) { - return m.isAliveResponses[len(m.isAliveResponses)] - } - which := m.currentIsAliveResponse - m.currentIsAliveResponse++ - return m.isAliveResponses[which] -} diff --git a/plugins/outputs/postgresql/tables/manager_test.go b/plugins/outputs/postgresql/tables/manager_test.go deleted file mode 100644 index 7f9759b4f0aac..0000000000000 --- a/plugins/outputs/postgresql/tables/manager_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package tables - -import ( - "errors" - "testing" - - "github.com/influxdata/telegraf/plugins/outputs/postgresql/db" - "github.com/jackc/pgx" - "github.com/stretchr/testify/assert" - - "github.com/influxdata/telegraf/plugins/outputs/postgresql" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" -) - -type mockDb struct { - exec pgx.CommandTag - execErr error -} - -func (m *mockDb) Exec(query string, args ...interface{}) (pgx.CommandTag, error) { - return m.exec, m.execErr -} -func (m *mockDb) DoCopy(fullTableName *pgx.Identifier, colNames []string, batch [][]interface{}) error { - return nil -} -func (m *mockDb) Query(query string, args ...interface{}) (*pgx.Rows, error) { - return nil, nil -} -func (m *mockDb) QueryRow(query string, args ...interface{}) *pgx.Row { - return nil -} -func (m *mockDb) Close() error { - return nil -} - -func (m *mockDb) IsAlive() bool { return true } - -func TestNewManager(t *testing.T) { - db := &mockDb{} - res := postgresql.NewTableManager(db, "schema", "table template").(*postgresql.TableManager) - assert.Equal(t, "table template", res.tableTemplate) - assert.Equal(t, "schema", res.schema) - assert.Equal(t, db, res.db) -} - -func TestExists(t *testing.T) { - testCases := []struct { - desc string - in string - out bool - db *mockDb - cache map[string]bool - }{ - { - 
desc: "table already cached", - in: "table", - db: &mockDb{execErr: errors.New("should not have called exec")}, - cache: map[string]bool{"table": true}, - out: true, - }, { - desc: "table not cached, error on check db", - cache: map[string]bool{}, - in: "table", - db: &mockDb{execErr: errors.New("error on exec")}, - }, { - desc: "table not cached, exists in db", - cache: map[string]bool{}, - in: "table", - db: &mockDb{exec: "0 1"}, - out: true, - }, { - desc: "table not cached, doesn't exist", - cache: map[string]bool{}, - in: "table", - db: &mockDb{exec: "0 0"}, - out: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - manager := &postgresql.TableManager{ - Tables: tc.cache, - db: tc.db, - } - - got := manager.Exists(tc.in) - assert.Equal(t, tc.out, got) - }) - } -} - -func TestCreateTable(t *testing.T) { - testCases := []struct { - desc string - inT string - inCD *utils.TargetColumns - db db.Wrapper - template string - out error - }{ - { - desc: "error on exec, no table cached", - inT: "t", - inCD: &utils.TargetColumns{ - Names: []string{"time", "t", "f"}, - Target: map[string]int{"time": 0, "t": 1, "f": 2}, - DataTypes: []utils.PgDataType{"timestamptz", "text", "float8"}, - Roles: []utils.ColumnRole{utils.TimeColType, utils.TagColType, utils.FieldColType}, - }, - db: &mockDb{execErr: errors.New("error on exec")}, - template: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}) ", - out: errors.New("error on exec"), - }, { - desc: "all good, table is cached", - inT: "t", - inCD: &utils.TargetColumns{ - Names: []string{"time", "t", "f"}, - Target: map[string]int{"time": 0, "t": 1, "f": 2}, - DataTypes: []utils.PgDataType{"timestamptz", "text", "float8"}, - Roles: []utils.ColumnRole{utils.TimeColType, utils.TagColType, utils.FieldColType}, - }, - db: &mockDb{}, - template: "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}) ", - out: nil, - }, - } - - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - manager := &postgresql.TableManager{ - Tables: map[string]bool{}, - db: tc.db, - tableTemplate: tc.template, - } - got := manager.CreateTable(tc.inT, tc.inCD) - assert.Equal(t, tc.out, got) - if tc.out == nil { - assert.True(t, manager.Tables[tc.inT]) - } - }) - } -} From 5759658af161db0ac607597734bac52ba0aeb47c Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 12 Jan 2021 22:52:04 -0500 Subject: [PATCH 079/121] output/postgresql: Use a transaction for sequential writes Since we send feedback to telegraf on error, it will retry the batch. Use a transaction to ensure that we don't insert the same data twice. 
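
To make the retry semantics concrete: if each sub-batch were written in its
own implicit transaction, a failure partway through the batch would leave the
earlier sub-batches committed, and the retried batch would insert them a
second time. Wrapping the whole write in one transaction makes the batch
atomic. A minimal sketch of the pattern (illustrative only: it uses
database/sql rather than the plugin's pgx wrapper, and writeBatch/subBatches
are hypothetical names):

```go
package example

import (
	"context"
	"database/sql"
	"fmt"
)

// writeBatch writes every sub-batch inside a single transaction. If any
// sub-batch fails, nothing is committed, so retrying the whole batch
// cannot insert rows that a previous attempt already wrote.
func writeBatch(ctx context.Context, db *sql.DB, subBatches [][]string) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("starting transaction: %w", err)
	}
	// Rollback is a no-op once Commit has succeeded.
	defer tx.Rollback()

	for _, stmts := range subBatches {
		for _, stmt := range stmts {
			if _, err := tx.ExecContext(ctx, stmt); err != nil {
				return err // abort the batch; the caller may safely retry it
			}
		}
	}
	return tx.Commit()
}
```

The diff below applies the same pattern with pgx transactions
(Begin/Rollback/Commit) around the sequential per-measurement writes.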
--- plugins/outputs/postgresql/postgresql.go | 34 +++++++++++++++------ plugins/outputs/postgresql/table_manager.go | 15 ++++++--- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 1584fa0ea66c2..b2d600a77e95c 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -18,6 +18,13 @@ import ( "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) +type dbh interface { + Begin(ctx context.Context) (pgx.Tx, error) + CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) + Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error) + Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) +} + type Postgresql struct { Connection string Schema string @@ -185,16 +192,25 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { } func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error { + tx, err := p.db.Begin(p.dbContext) + if err != nil { + return fmt.Errorf("starting transaction: %w", err) + } + defer tx.Rollback(p.dbContext) + for _, tableSource := range tableSources { - err := p.writeMetricsFromMeasure(p.dbContext, tableSource) + err := p.writeMetricsFromMeasure(p.dbContext, tx, tableSource) if err != nil { if isTempError(err) { - //TODO use a transaction so that we don't end up with a partial write, and end up retrying metrics we've already written return err } log.Printf("write error (permanent, dropping sub-batch): %v", err) } } + + if err := tx.Commit(p.dbContext); err != nil { + return fmt.Errorf("committing transaction: %w", err) + } return nil } @@ -233,7 +249,7 @@ func isTempError(err error) bool { func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) error { backoff := time.Duration(0) for { - err := p.writeMetricsFromMeasure(ctx, tableSource) + err := p.writeMetricsFromMeasure(ctx, p.db, tableSource) if err == nil { return nil } @@ -257,14 +273,14 @@ func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) e } // Writes the metrics from a specified measure. All the provided metrics must belong to the same measurement. -func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, tableSource *TableSource) error { - err := p.tableManager.MatchSource(ctx, tableSource) +func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableSource *TableSource) error { + err := p.tableManager.MatchSource(ctx, db, tableSource) if err != nil { return err } if p.TagsAsForeignkeys { - if err := p.WriteTagTable(ctx, tableSource); err != nil { + if err := p.WriteTagTable(ctx, db, tableSource); err != nil { // log and continue. As the admin can correct the issue, and tags don't change over time, they can be added from // future metrics after issue is corrected. 
log.Printf("[outputs.postgresql] Error: Writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err) @@ -272,15 +288,15 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, tableSource *T } fullTableName := utils.FullTableName(p.Schema, tableSource.Name()) - _, err = p.db.CopyFrom(ctx, fullTableName, tableSource.ColumnNames(), tableSource) + _, err = db.CopyFrom(ctx, fullTableName, tableSource.ColumnNames(), tableSource) return err } -func (p *Postgresql) WriteTagTable(ctx context.Context, tableSource *TableSource) error { +func (p *Postgresql) WriteTagTable(ctx context.Context, db dbh, tableSource *TableSource) error { //TODO cache which tagSets we've already inserted and skip them. ttsrc := NewTagTableSource(tableSource) - tx, err := p.db.Begin(ctx) + tx, err := db.Begin(ctx) if err != nil { return err } diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index 18c6ce075d6f6..664b8c40476a3 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -92,6 +92,7 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName str func (tm *TableManager) EnsureStructure( ctx context.Context, + db dbh, tableName string, columns []utils.Column, createTemplates []*template.Template, @@ -117,7 +118,7 @@ func (tm *TableManager) EnsureStructure( tm.tablesMutex.RUnlock() if !ok { // Ok, table doesn't exist, now we can create it. - if err := tm.executeTemplates(ctx, createTemplates, tableName, columns, metricsTableName, tagsTableName); err != nil { + if err := tm.executeTemplates(ctx, db, createTemplates, tableName, columns, metricsTableName, tagsTableName); err != nil { return nil, fmt.Errorf("creating table: %w", err) } tm.tablesMutex.RLock() @@ -138,7 +139,7 @@ func (tm *TableManager) EnsureStructure( return missingColumns, nil } - if err := tm.executeTemplates(ctx, addColumnsTemplates, tableName, missingColumns, metricsTableName, tagsTableName); err != nil { + if err := tm.executeTemplates(ctx, db, addColumnsTemplates, tableName, missingColumns, metricsTableName, tagsTableName); err != nil { return nil, fmt.Errorf("adding columns: %w", err) } return tm.checkColumns(tm.Tables[tableName], columns) @@ -161,6 +162,7 @@ func (tm *TableManager) checkColumns(dbColumns map[string]utils.Column, srcColum func (tm *TableManager) executeTemplates( ctx context.Context, + db dbh, tmpls []*template.Template, tableName string, newColumns []utils.Column, @@ -201,7 +203,7 @@ func (tm *TableManager) executeTemplates( tm.refreshTableStructureResponse(tableName, rows) */ - tx, err := tm.db.Begin(ctx) + tx, err := db.Begin(ctx) if err != nil { return err } @@ -252,7 +254,7 @@ func colMapToSlice(colMap map[string]utils.Column) []utils.Column { // If the schema does not match, and schema updates are disabled: // If a field missing from the DB, the field is omitted. // If a tag is missing from the DB, the metric is dropped. 
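// For example, with the add-column templates disabled, a metric that
// carries a field unknown to the table is inserted with that field
// omitted, while a metric that carries an unknown tag is dropped.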
-func (tm *TableManager) MatchSource(ctx context.Context, rowSource *TableSource) error { +func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *TableSource) error { metricTableName := rowSource.Name() var tagTableName string if tm.TagsAsForeignkeys { @@ -260,6 +262,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, rowSource *TableSource) missingCols, err := tm.EnsureStructure( ctx, + db, tagTableName, rowSource.TagTableColumns(), tm.TagTableCreateTemplates, @@ -281,7 +284,9 @@ func (tm *TableManager) MatchSource(ctx context.Context, rowSource *TableSource) } } - missingCols, err := tm.EnsureStructure(ctx, + missingCols, err := tm.EnsureStructure( + ctx, + db, metricTableName, rowSource.MetricTableColumns(), tm.CreateTemplates, From e87c9f699dc605bbecec66e009337d76a7e7f76c Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 12 Jan 2021 22:53:15 -0500 Subject: [PATCH 080/121] outputs/postgresql: slight documentation cleanup --- plugins/outputs/postgresql/postgresql.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index b2d600a77e95c..14d806f2ed018 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -141,9 +141,6 @@ var sampleConfig = ` ## connection = "host=localhost user=postgres sslmode=verify-full" - ## Update existing tables to match the incoming metrics automatically. Default is true - # do_schema_updates = true - ## Store tags as foreign keys in the metrics table. Default is false. # tags_as_foreignkeys = false @@ -160,13 +157,15 @@ var sampleConfig = ` create_templates = ['CREATE TABLE {{.table}} ({{.columns}})'] ## Templated statements to execute when adding columns to a table. + ## Set to an empty list to disable. When doing so, points will be inserted with the missing fields/columns omitted. add_column_templates = ['ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}'] ## Templated statements to execute when creating a new tag table. - tag_table_create_templates ['CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))'] + tag_table_create_templates = ['CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))'] ## Templated statements to execute when adding columns to a tag table. - tag_table_add_column_templates ['ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}'] + ## Set to an empty list to disable. When doing so, points containing the missing tags will be omitted. 
+  tag_table_add_column_templates = ['ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}']
 `

 func (p *Postgresql) SampleConfig() string { return sampleConfig }
@@ -243,6 +242,9 @@ func isTempError(err error) bool {
 	if errors.As(err, &pgErr); pgErr != nil {
 		return false
 	}
+	//TODO review:
+	// https://godoc.org/github.com/jackc/pgerrcode
+	// https://www.postgresql.org/docs/10/errcodes-appendix.html
 	return true
 }

From b51987f6b4d205269825cd2fa77966fb641a9f68 Mon Sep 17 00:00:00 2001
From: Patrick Hemmer 
Date: Sun, 11 Apr 2021 22:57:02 -0400
Subject: [PATCH 081/121] outputs.postgresql: add tests

---
 docker-compose.yml                            |   6 +
 go.sum                                        | 133 ++++
 plugins/outputs/postgresql/postgresql.go      | 163 +++--
 plugins/outputs/postgresql/postgresql_test.go | 646 ++++++++++++++++++
 plugins/outputs/postgresql/table_manager.go   |  30 +-
 .../outputs/postgresql/table_manager_test.go  | 159 +++++
 plugins/outputs/postgresql/table_source.go    |  40 +-
 .../outputs/postgresql/table_source_test.go   | 234 +++++++
 .../outputs/postgresql/utils/utils_test.go    | 138 ----
 9 files changed, 1353 insertions(+), 196 deletions(-)
 create mode 100644 plugins/outputs/postgresql/postgresql_test.go
 create mode 100644 plugins/outputs/postgresql/table_manager_test.go
 create mode 100644 plugins/outputs/postgresql/table_source_test.go
 delete mode 100644 plugins/outputs/postgresql/utils/utils_test.go

diff --git a/docker-compose.yml b/docker-compose.yml
index 3c929f656b7de..359312c5063d2 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -60,6 +60,12 @@ services:
       - POSTGRES_HOST_AUTH_METHOD=trust
     ports:
       - "5432:5432"
+  timescaledb:
+    image: timescale/timescaledb:2.1.1-pg12
+    environment:
+      - POSTGRES_HOST_AUTH_METHOD=trust
+    ports:
+      - "5433:5432"
   rabbitmq:
     image: rabbitmq:3-management
     ports:
diff --git a/go.sum b/go.sum
index 5beaf95b2dce6..f59d451d8d6fd 100644
--- a/go.sum
+++ b/go.sum
@@ -101,6 +101,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
@@ -117,10 +118,12 @@ github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jB
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -135,8 +138,12 @@ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrU
github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 h1:5sXbqlSomvdjlRbWyNqkPsJ3Fg+tQZCbgeX1VGljbQY=
github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE=
github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
@@ -168,17 +175,23 @@ github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm
github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY=
github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro=
github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYKI=
@@ -186,6 +199,7 @@ github.com/aws/aws-sdk-go v1.34.34 h1:5dC0ZU0xy25+UavGNEkQ/5MOQwxXDA2YXtjCL1HfYK
github.com/aws/aws-sdk-go v1.34.34/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.1.0 h1:sKP6QWxdN1oRYjl+k6S3bpgBI+XUx/0mqVOLIw4lR/Q=
github.com/aws/aws-sdk-go-v2 v1.1.0/go.mod h1:smfAbmpW+tcRVuNUjo3MOArSZmW72t62rkCzc2i0TWM=
github.com/aws/aws-sdk-go-v2/config v1.1.0 h1:f3QVGpAcKrWpYNhKB8hE/buMjcfei95buQ5xdr/xYcU=
@@ -224,7 +238,10 @@ github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOC
github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds=
github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -240,22 +257,31 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo=
github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY=
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/coreos/etcd v3.3.22+incompatible h1:AnRMUyVdVvh1k7lHe61YEd227+CLoNogQuAypztGSK4=
github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSa
github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9 h1:NAvZb7gqQfLSNBPzVsvI7eZMosXtg2g2kxXrei90CtU=
github.com/dropbox/godropbox v0.0.0-20180512210157-31879d3884b9/go.mod h1:glr97hP/JuXb+WMYCizc4PIFuzw1lCR97mwbe1VVXhQ=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/eclipse/paho.mqtt.golang v1.3.0 h1:MU79lqr3FKNKbSrGN7d7bNYqh8MwWW7Zcx0iG+VIw9I=
github.com/eclipse/paho.mqtt.golang v1.3.0/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A=
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg=
github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4 h1:fP04zlkPjAGpsduG7xN3rRkxjAqkJaIQnnkNYYw/pAk=
github.com/facebookgo/stackerr v0.0.0-20150612192056-c2fcf88613f4/go.mod h1:SBHk9aNQtiw4R4bEuzHjVmZikkUKCnO1v3lPQ21HZGk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@@ -341,6 +371,7 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/frankban/quicktest v1.10.2 h1:19ARM85nVi4xH7xPXuc5eM/udya5ieh7b/Sv+d844Tk=
github.com/frankban/quicktest v1.10.2/go.mod h1:K+q6oSqb0W0Ininfk863uOk1lMy69l/P6txr3mVT54s=
@@ -367,6 +398,7 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
@@ -386,6 +418,7 @@ github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
@@ -395,6 +428,7 @@ github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
@@ -415,6 +449,7 @@ github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2g
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
@@ -485,12 +520,16 @@ github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJA
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.3.1 h1:CzMaKrvF6Qa7XtRii064vKBQiyvmY8H8vG1xa1/W1JA=
github.com/gogo/googleapis v1.3.1/go.mod h1:d+q1s/xVJxZGKWwC/6UfPIF33J+G1Tq4GYv9Y+Tg/EU=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
@@ -571,6 +610,8 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
@@ -588,12 +629,15 @@ github.com/gopcua/opcua v0.1.13/go.mod h1:a6QH4F9XeODklCmWuvaOdL8v9H0d73CEKUHWVZ
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosnmp/gosnmp v1.31.0 h1:l18tqymKfReKBPr3kMK4mMM+n3DHlIpsZbBBSy8nuko=
@@ -601,7 +645,10 @@ github.com/gosnmp/gosnmp v1.31.0/go.mod h1:EIp+qkEpXoVsyZxXKy0AmXQx0mCHMMcIhXXvN
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
@@ -611,9 +658,11 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMW
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ=
github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.6.0 h1:SZB2hQW8AcTOpfDmiVblQbijxzsRuiyy0JpHfabvHio=
github.com/hashicorp/consul/api v1.6.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.6.0 h1:FfhMEkwvQl57CildXJyGHnwGGM4HMODGyfjGwNM1Vdw=
github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
@@ -634,10 +683,13 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh
github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
@@ -646,18 +698,23 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g=
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hashicorp/serf v0.9.3 h1:AVF6JDQQens6nMHT9OGERBvK0f8rPrAGILnsKLr6lzM=
github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hetznercloud/hcloud-go v1.21.1/go.mod h1:xng8lbDUg+xM1dgc0yGHX5EeqbwIq7UYlMWMTx3SQVg=
@@ -666,17 +723,22 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY=
github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s=
github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo=
github.com/influxdata/influxdb v1.8.2/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo=
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
@@ -751,10 +813,13 @@ github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOO
github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0=
github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a/go.mod h1:1qNVsDcmNQDsAXYfUuF/Z0rtK5eT8x9D6Pi7S3PjXAg=
github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -763,6 +828,7 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -782,6 +848,8 @@ github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfE github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea h1:g2k+8WR7cHch4g0tBDhfiEvAp7fXxTNBiD1oC1Oxj3E= @@ -789,6 +857,7 @@ github.com/juju/errors v0.0.0-20181012004132-a4583d0a56ea/go.mod h1:W54LbzXuIE0b github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/juju/testing v0.0.0-20191001232224-ce9dec17d28b/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= @@ -839,9 +908,13 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson 
v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -872,6 +945,7 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= @@ -892,6 +966,7 @@ github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7 github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -902,7 +977,10 @@ github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdI github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2 h1:dxe5oCinTXiTIcfgmZecdCzPmAJKd46KsCWc35r0TV4= @@ -949,6 +1027,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk= github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod 
h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -973,18 +1053,27 @@ github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g= github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g= github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -992,8 +1081,10 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pavius/impi v0.0.0-20180302134524-c1cbdcb8df2b/go.mod 
h1:x/hU0bfdWIhuOT1SKwiJg++yvkk6EuOtJk8WtDZqgr8= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= @@ -1009,10 +1100,14 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1034,6 +1129,7 @@ github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.13.0 h1:vJlpe9wPgDRM1Z+7Wj3zUUjY1nr6/1jNKyl7llliccg= @@ -1060,17 +1156,22 @@ github.com/robertkrimen/otto v0.0.0-20191219234010-c382bd3c16ff/go.mod h1:xvqspo github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod 
h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= @@ -1079,6 +1180,7 @@ github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiB github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/samuel/go-zookeeper v0.0.0-20190810000440-0ceca61e4d75/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= @@ -1104,6 +1206,7 @@ github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc h1:jUIKcSPO9MoM github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c/go.mod 
h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/signalfx/com_signalfx_metrics_protobuf v0.0.0-20190222193949-1fb69526e884 h1:KgLGEw137KEUtQnWBGzneCetphBj4+kKHRnhpAkXJC0= @@ -1128,31 +1231,47 @@ github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= +github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271 h1:WhxRHzgeVGETMlmVfqhRn8RIeeNoPr2Czh33I4Zdccw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1180,14 +1299,18 @@ github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDW github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330 h1:iBlTJosRsR70amr0zsmSPvaKNH8K/p3YlX/5SdPmSl8= github.com/vapourismo/knx-go v0.0.0-20201122213738-75fe09ace330/go.mod 
h1:7+aWBsUJCo9OQRCgTypRmIQW9KKKcPMjtrdnYIBsS70= @@ -1214,6 +1337,7 @@ github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHM github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1226,6 +1350,8 @@ github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29Xrm github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -1253,6 +1379,8 @@ go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -1464,6 +1592,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= @@ -1477,6 +1606,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1653,6 +1783,7 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/djherbis/times.v1 v1.2.0 h1:UCvDKl1L/fmBygl2Y7hubXCnY7t4Yj46ZrBFNUipFbM= gopkg.in/djherbis/times.v1 v1.2.0/go.mod h1:AQlg6unIsrsCEdQYhTzERy542dz6SFdQFZFv6mUY0P8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -1687,6 +1818,7 @@ gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3M gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI= gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78= gopkg.in/tomb.v1 v1.0.0-20140529071818-c131134a1947/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1766,6 +1898,7 @@ sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhY sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 14d806f2ed018..872a05d0e4842 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -4,12 +4,14 @@ import ( "context" "errors" "fmt" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" - "github.com/jackc/pgconn" - "log" "time" + "github.com/jackc/pgconn" + + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" + "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/pgxpool" @@ -28,7 +30,7 @@ type dbh interface { type Postgresql struct { Connection string Schema string - TagsAsForeignkeys bool + 
	TagsAsForeignKeys bool
 	TagsAsJsonb       bool
 	FieldsAsJsonb     bool
 	CreateTemplates   []*template.Template
@@ -36,8 +38,9 @@ type Postgresql struct {
 	TagTableCreateTemplates    []*template.Template
 	TagTableAddColumnTemplates []*template.Template
 	TagTableSuffix             string
-	PoolSize                   int
-	RetryMaxBackoff            internal.Duration
+	RetryMaxBackoff            config.Duration
+	ForeignTagConstraint       bool
+	LogLevel                   string
 
 	dbContext       context.Context
 	dbContextCancel func()
@@ -45,6 +48,8 @@ type Postgresql struct {
 	tableManager *TableManager
 	writeChan    chan *TableSource
+
+	Logger telegraf.Logger
 }
 
 func init() {
@@ -59,22 +64,58 @@ func newPostgresql() *Postgresql {
 		TagTableCreateTemplates:    []*template.Template{template.TagTableCreateTemplate},
 		TagTableAddColumnTemplates: []*template.Template{template.TableAddColumnTemplate},
 		TagTableSuffix:             "_tag",
-		RetryMaxBackoff:            internal.Duration{time.Second * 15},
+		RetryMaxBackoff:            config.Duration(time.Second * 15),
+		ForeignTagConstraint:       false,
+		Logger:                     models.NewLogger("outputs", "postgresql", ""),
+	}
+}
+
+// pgxLogger makes telegraf.Logger compatible with pgx.Logger
+type pgxLogger struct {
+	telegraf.Logger
+}
+
+func (l pgxLogger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
+	switch level {
+	case pgx.LogLevelError:
+		l.Errorf("PG %s - %+v", msg, data)
+	case pgx.LogLevelWarn:
+		l.Warnf("PG %s - %+v", msg, data)
+	case pgx.LogLevelInfo, pgx.LogLevelNone:
+		l.Infof("PG %s - %+v", msg, data)
+	case pgx.LogLevelDebug, pgx.LogLevelTrace:
+		l.Debugf("PG %s - %+v", msg, data)
+	default:
+		l.Debugf("PG %s - %+v", msg, data)
+	}
+}
 
 // Connect establishes a connection to the target database and prepares the cache
 func (p *Postgresql) Connect() error {
-	poolConfig, err := pgxpool.ParseConfig("pool_max_conns=1 " + p.Connection)
+	poolConfig, err := pgxpool.ParseConfig(p.Connection)
 	if err != nil {
 		return err
 	}
+
+	parsedConfig, _ := pgx.ParseConfig(p.Connection)
+	if _, ok := parsedConfig.Config.RuntimeParams["pool_max_conns"]; !ok {
+		// The pgx default for pool_max_conns is 4. However we want to default to 1.
+		poolConfig.MaxConns = 1
+	}
+
+	poolConfig.AfterConnect = p.dbConnectedHook
+
+	if p.LogLevel != "" {
+		poolConfig.ConnConfig.Logger = pgxLogger{p.Logger}
+		poolConfig.ConnConfig.LogLevel, err = pgx.LogLevelFromString(p.LogLevel)
+		if err != nil {
+			return fmt.Errorf("invalid log level %q: %w", p.LogLevel, err)
+		}
+	}
 
 	// Yes, we're not supposed to store the context. However since we don't receive a context, we have to.
 	p.dbContext, p.dbContextCancel = context.WithCancel(context.Background())
 	p.db, err = pgxpool.ConnectConfig(p.dbContext, poolConfig)
 	if err != nil {
-		log.Printf("E! Couldn't connect to server\n%v", err)
+		p.Logger.Errorf("Couldn't connect to server\n%v", err)
 		return err
 	}
 	p.tableManager = NewTableManager(p)
@@ -91,6 +132,7 @@ func (p *Postgresql) Connect() error {
 }
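// For illustration only (not part of this commit), a minimal sketch of how the
// pool sizing above behaves, assuming the Connection setting is passed through
// to pgxpool.ParseConfig unchanged:
//
//	p.Connection = "host=localhost user=postgres"                  // no pool_max_conns in DSN -> MaxConns forced to 1
//	p.Connection = "host=localhost user=postgres pool_max_conns=4" // explicit value -> pgxpool keeps 4
//
// With MaxConns of 1 the plugin writes sequentially; anything higher enables
// the concurrent write path (see Write below).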
 
 // dbConnectedHook checks to see whether we lost all connections, and if so resets any known state of the database (e.g. cached tables).
+// This is so that we handle failovers, where the new database might not have the same state as the previous.
 func (p *Postgresql) dbConnectedHook(ctx context.Context, conn *pgx.Conn) error {
 	if p.db == nil || p.tableManager == nil {
 		// This will happen on the initial connect since we haven't set it yet.
@@ -110,9 +152,9 @@ func (p *Postgresql) dbConnectedHook(ctx context.Context, conn *pgx.Conn) error
 
 // Close closes the connection to the database
 func (p *Postgresql) Close() error {
-	p.tableManager = nil
 	p.dbContextCancel()
 	p.db.Close()
+	p.tableManager = nil
 	return nil
 }
@@ -157,14 +199,15 @@ var sampleConfig = `
   create_templates = ['CREATE TABLE {{.table}} ({{.columns}})']
 
   ## Templated statements to execute when adding columns to a table.
-  ## Set to an empty list to disable. When doing so, points will be inserted with the missing fields/columns omitted.
+  ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points
+  ## containing fields for which there is no column will have the field omitted.
   add_column_templates = ['ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}']
 
   ## Templated statements to execute when creating a new tag table.
   tag_table_create_templates = ['CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))']
 
   ## Templated statements to execute when adding columns to a tag table.
-  ## Set to an empty list to disable. When doing so, points containing the missing tags will be omitted.
+  ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped.
   tag_table_add_column_templates = ['ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}']
 `
 
@@ -172,16 +215,7 @@ func (p *Postgresql) SampleConfig() string { return sampleConfig }
 func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" }
 
 func (p *Postgresql) Write(metrics []telegraf.Metric) error {
-	tableSources := map[string]*TableSource{}
-
-	for _, m := range metrics {
-		rs := tableSources[m.Name()]
-		if rs == nil {
-			rs = NewTableSource(p)
-			tableSources[m.Name()] = rs
-		}
-		rs.AddMetric(m)
-	}
+	tableSources := NewTableSources(p, metrics)
 
 	if p.db.Stat().MaxConns() > 1 {
 		return p.writeConcurrent(tableSources)
@@ -203,7 +237,7 @@ func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error
 			if isTempError(err) {
 				return err
 			}
-			log.Printf("write error (permanent, dropping sub-batch): %v", err)
+			p.Logger.Errorf("write error (permanent, dropping sub-batch): %v", err)
 		}
 	}
 
@@ -229,7 +263,7 @@ func (p *Postgresql) writeWorker(ctx context.Context) {
 		select {
 		case tableSource := <-p.writeChan:
 			if err := p.writeRetry(ctx, tableSource); err != nil {
-				log.Printf("write error (permanent, dropping sub-batch): %v", err)
+				p.Logger.Errorf("write error (permanent, dropping sub-batch): %v", err)
 			}
 		case <-p.dbContext.Done():
 			return
@@ -237,29 +271,59 @@
 	}
 }
 
+// This is a subset of net.Error
+type maybeTempError interface {
+	error
+	Temporary() bool
+}
+
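// For illustration (not part of this commit): a dropped connection typically
// surfaces as a *net.OpError, which implements Temporary() and therefore
// satisfies this interface, e.g.:
//
//	var err error = &net.OpError{Op: "read", Err: syscall.ECONNRESET}
//	var mtErr maybeTempError
//	_ = errors.As(err, &mtErr) // true; mtErr.Temporary() then decides retry vs drop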
+// isTempError reports whether the error received during a metric write operation is temporary or permanent.
+// A temporary error is one where the write might succeed if retried at a later time.
+// Note however that this applies to the transaction as a whole, not the individual operation. Meaning for example a
+// write might come in that needs a new table created, but another worker already created the table in between when we
+// checked for it, and tried to create it. In this case, the operation error is permanent, as we can try `CREATE TABLE`
+// again and it will still fail. But if we retry the transaction from scratch, when we perform the table check we'll
+// see it exists, so we consider the error temporary.
 func isTempError(err error) bool {
 	var pgErr *pgconn.PgError
 	if errors.As(err, &pgErr); pgErr != nil {
+		// https://www.postgresql.org/docs/12/errcodes-appendix.html
+		errClass := pgErr.Code[:2]
+		switch errClass {
+		case "42": // Syntax Error or Access Rule Violation
+			switch pgErr.Code {
+			case "42701": // duplicate_column
+				return true
+			case "42P07": // duplicate_table
+				return true
+			}
+		case "53": // Insufficient Resources
+			return true
+		case "57": // Operator Intervention
+			return true
+		}
+		// Assume that any other error that comes from postgres is a permanent error
 		return false
 	}
-	//TODO review:
-	// https://godoc.org/github.com/jackc/pgerrcode
-	// https://www.postgresql.org/docs/10/errcodes-appendix.html
-	return true
+
+	if mtErr := maybeTempError(nil); errors.As(err, &mtErr) {
+		return mtErr.Temporary()
+	}
+
+	// Assume that any other error is permanent.
+	// This may mean that we incorrectly discard data that could have been retried, but the alternative is that we get
+	// stuck retrying data that will never succeed, causing good data to be dropped because the buffer fills up.
+	return false
 }
 
 func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) error {
 	backoff := time.Duration(0)
 	for {
-		err := p.writeMetricsFromMeasure(ctx, p.db, tableSource)
-		if err == nil {
-			return nil
-		}
-
+		err := p.writeMetricsFromMeasureTx(ctx, tableSource)
 		if !isTempError(err) {
 			return err
 		}
-		log.Printf("write error (retry in %s): %v", backoff, err)
+		p.Logger.Errorf("write error (retry in %s): %v", backoff, err)
 
 		tableSource.Reset()
 		time.Sleep(backoff)
@@ -267,13 +331,27 @@ func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) e
 		if backoff == 0 {
 			backoff = time.Millisecond * 250
 		} else {
 			backoff *= 2
-			if backoff > p.RetryMaxBackoff.Duration {
-				backoff = p.RetryMaxBackoff.Duration
+			if backoff > time.Duration(p.RetryMaxBackoff) {
+				backoff = time.Duration(p.RetryMaxBackoff)
 			}
 		}
 	}
 }
 
+func (p *Postgresql) writeMetricsFromMeasureTx(ctx context.Context, tableSource *TableSource) error {
+	tx, err := p.db.Begin(ctx)
+	if err != nil {
+		return err
+	}
+	defer tx.Rollback(ctx)
+
+	if err := p.writeMetricsFromMeasure(ctx, tx, tableSource); err != nil {
+		return err
+	}
+
+	return tx.Commit(ctx)
+}
+
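// A minimal sketch (illustration only, not part of this commit) of how
// isTempError classifies a few representative PostgreSQL error codes:
//
//	isTempError(&pgconn.PgError{Code: "42P07"}) // true: duplicate_table, another worker won the CREATE TABLE race
//	isTempError(&pgconn.PgError{Code: "53300"}) // true: too_many_connections, class 53 Insufficient Resources
//	isTempError(&pgconn.PgError{Code: "22P02"}) // false: invalid_text_representation, retrying cannot help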
 // Writes the metrics from a specified measure. All the provided metrics must belong to the same measurement.
 func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableSource *TableSource) error {
 	err := p.tableManager.MatchSource(ctx, db, tableSource)
@@ -281,11 +359,15 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableS
 		return err
 	}
 
-	if p.TagsAsForeignkeys {
+	if p.TagsAsForeignKeys {
 		if err := p.WriteTagTable(ctx, db, tableSource); err != nil {
 			// log and continue. As the admin can correct the issue, and tags don't change over time, they can be added from
 			// future metrics after issue is corrected.
-			log.Printf("[outputs.postgresql] Error: Writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err)
+			if p.ForeignTagConstraint {
+				return fmt.Errorf("writing to tag table '%s': %w", tableSource.Name()+p.TagTableSuffix, err)
+			} else {
+				p.Logger.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err)
+			}
 		}
 	}
 
@@ -306,7 +388,8 @@ func (p *Postgresql) WriteTagTable(ctx context.Context, db dbh, tableSource *Tab
 	ident := pgx.Identifier{ttsrc.postgresql.Schema, ttsrc.Name()}
 	identTemp := pgx.Identifier{ttsrc.Name() + "_temp"}
 
-	_, err = tx.Exec(ctx, fmt.Sprintf("CREATE TEMP TABLE %s (LIKE %s) ON COMMIT DROP", identTemp.Sanitize(), ident.Sanitize()))
+	sql := fmt.Sprintf("CREATE TEMP TABLE %s (LIKE %s) ON COMMIT DROP", identTemp.Sanitize(), ident.Sanitize())
+	_, err = tx.Exec(ctx, sql)
 	if err != nil {
 		return fmt.Errorf("creating tags temp table: %w", err)
 	}
diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go
new file mode 100644
index 0000000000000..718b0b062208e
--- /dev/null
+++ b/plugins/outputs/postgresql/postgresql_test.go
@@ -0,0 +1,646 @@
+package postgresql
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/jackc/pgx/v4"
+	"github.com/jackc/pgx/v4/pgxpool"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
+)
+
+func timeout(t *testing.T, dur time.Duration) {
+	timer := time.AfterFunc(dur, func() {
+		t.Errorf("Test timed out after %s", dur)
+		t.FailNow()
+	})
+	t.Cleanup(func() { timer.Stop() })
+}
+
+type Log struct {
+	level  pgx.LogLevel
+	format string
+	args   []interface{}
+}
+
+func (l Log) String() string {
+	return fmt.Sprintf("%s: "+l.format, append([]interface{}{l.level}, l.args...)...)
+}
+
+// LogAccumulator is a log collector that satisfies telegraf.Logger.
+type LogAccumulator struct {
+	logs []Log
+	cond *sync.Cond
+	t    *testing.T
+}
+
+func NewLogAccumulator(t *testing.T) *LogAccumulator {
+	return &LogAccumulator{
+		cond: sync.NewCond(&sync.Mutex{}),
+		t:    t,
+	}
+}
+
+func (la *LogAccumulator) append(level pgx.LogLevel, format string, args []interface{}) {
+	la.cond.L.Lock()
+	log := Log{level, format, args}
+	la.logs = append(la.logs, log)
+	s := log.String()
+	la.t.Helper()
+	la.t.Log(s)
+	la.cond.Broadcast()
+	la.cond.L.Unlock()
+}
+
+func (la *LogAccumulator) WaitLen(n int) []Log {
+	la.cond.L.Lock()
+	defer la.cond.L.Unlock()
+	for len(la.logs) < n {
+		la.cond.Wait()
+	}
+	return la.logs[:]
+}
+
+// WaitFor waits for a log entry from pgx matching the given predicate to show up. If waitCommit is true, it also
+// waits for the matching connection's transaction to commit (a rollback restarts the search).
+func (la *LogAccumulator) WaitFor(f func(l Log) bool, waitCommit bool) {
+	la.cond.L.Lock()
+	defer la.cond.L.Unlock()
+	i := 0
+	var commitPid uint32
+	for {
+		for ; i < len(la.logs); i++ {
+			log := la.logs[i]
+			if commitPid == 0 {
+				if f(log) {
+					if !waitCommit {
+						return
+					}
+					commitPid = log.args[1].(MSI)["pid"].(uint32)
+				}
+			} else {
+				if len(log.args) < 2 {
+					continue
+				}
+				data, ok := log.args[1].(MSI)
+				if !ok || data["pid"] != commitPid {
+					continue
+				}
+				if log.args[0] == "Exec" && data["sql"] == "commit" {
+					return
+				} else if log.args[0] == "Exec" && data["sql"] == "rollback" {
+					// transaction aborted, start looking for another match
+					commitPid = 0
+				} else if log.level == pgx.LogLevelError {
+					commitPid = 0
+				}
+			}
+		}
+		la.cond.Wait()
+	}
+}
+
+func (la *LogAccumulator) WaitForQuery(str string, waitCommit bool) {
+	la.WaitFor(func(log Log) bool {
+		return log.format == "PG %s - %+v" &&
+			(log.args[0].(string) == "Query" || log.args[0].(string) == "Exec") &&
+			strings.Contains(log.args[1].(MSI)["sql"].(string), str)
+	}, waitCommit)
+}
+
+func (la *LogAccumulator) WaitForCopy(tableName string, waitCommit bool) {
+	la.WaitFor(func(log Log) bool {
+		return log.format == "PG %s - %+v" &&
+			log.args[0].(string) == "CopyFrom" &&
+			log.args[1].(MSI)["tableName"].(pgx.Identifier)[1] == tableName
+	}, waitCommit)
+}
+
+// Clear any stored logs.
+// Do not run this while any WaitFor* operations are in progress.
+func (la *LogAccumulator) Clear() {
+	la.cond.L.Lock()
+	if len(la.logs) > 0 {
+		la.logs = nil
+	}
+	la.cond.L.Unlock()
+}
+
+func (la *LogAccumulator) Logs() []Log {
+	la.cond.L.Lock()
+	defer la.cond.L.Unlock()
+	return la.logs[:]
+}
+
+func (la *LogAccumulator) Errorf(format string, args ...interface{}) {
+	la.t.Helper()
+	la.append(pgx.LogLevelError, format, args)
+}
+
+func (la *LogAccumulator) Error(args ...interface{}) {
+	la.t.Helper()
+	la.append(pgx.LogLevelError, "%v", args)
+}
+
+func (la *LogAccumulator) Debugf(format string, args ...interface{}) {
+	la.t.Helper()
+	la.append(pgx.LogLevelDebug, format, args)
+}
+
+func (la *LogAccumulator) Debug(args ...interface{}) {
+	la.t.Helper()
+	la.append(pgx.LogLevelDebug, "%v", args)
+}
+
+func (la *LogAccumulator) Warnf(format string, args ...interface{}) {
+	la.t.Helper()
+	la.append(pgx.LogLevelWarn, format, args)
+}
+
+func (la *LogAccumulator) Warn(args ...interface{}) {
+	la.t.Helper()
+	la.append(pgx.LogLevelWarn, "%v", args)
+}
+
+func (la *LogAccumulator) Infof(format string, args ...interface{}) {
+	la.t.Helper()
+	la.append(pgx.LogLevelInfo, format, args)
+}
+
+func (la *LogAccumulator) Info(args ...interface{}) {
+	la.t.Helper()
+	la.append(pgx.LogLevelInfo, "%v", args)
+}
+
+var ctx context.Context
+
+func TestMain(m *testing.M) {
+	if os.Getenv("PGHOST") == "" && os.Getenv("PGHOSTADDR") == "" && os.Getenv("PGPORT") == "" {
+		// User has not specified a server, use the default, which is the one defined by docker-compose.yml at the top
+		// of the repo.
+		os.Setenv("PGHOST", "127.0.0.1")
+		os.Setenv("PGPORT", "5433")
+		os.Setenv("PGUSER", "postgres")
+	}
+
+	ctx = context.Background()
+	if err := prepareDatabase("telegraf"); err != nil {
+		fmt.Fprintf(os.Stderr, "Error preparing database: %s\n", err)
+		os.Exit(1)
+	}
+	os.Exit(m.Run())
+}
+
+func prepareDatabase(name string) error {
+	db, err := pgx.Connect(ctx, "")
+	if err != nil {
+		return err
+	}
+	_, err = db.Exec(ctx, "DROP DATABASE IF EXISTS "+name)
+	if err != nil {
+		return err
+	}
+	_, err = db.Exec(ctx, "CREATE DATABASE "+name)
+	return err
+}
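// To run these tests without the repo's docker-compose setup, any reachable
// PostgreSQL server works; a rough sketch (assuming Docker is available):
//
//	docker run -d -p 5433:5432 -e POSTGRES_HOST_AUTH_METHOD=trust postgres
//	PGPORT=5433 PGUSER=postgres go test ./plugins/outputs/postgresql/
//
// TestMain only fills in PGHOST/PGPORT/PGUSER when none are already set, so
// exported libpq environment variables take precedence.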
+ metrics := []telegraf.Metric{
+ newMetric(t, "_a", MSS{}, MSI{"v": 1}),
+ }
+ require.NoError(t, p.Write(metrics))
+ p.Logger.WaitForCopy(t.Name()+"_a", true)
+ // Clear the log so that the WaitForCopy calls below don't pick up this copy.
+ p.Logger.Clear()
+
+ // Lock the table so that we ensure the write hangs and the plugin has to open another connection.
+ tx, err := p.db.Begin(ctx)
+ require.NoError(t, err)
+ defer tx.Rollback(ctx)
+ _, err = tx.Exec(ctx, "LOCK TABLE "+utils.QuoteIdent(t.Name()+"_a"))
+ require.NoError(t, err)
+
+ metrics = []telegraf.Metric{
+ newMetric(t, "_a", MSS{}, MSI{"v": 2}),
+ }
+ require.NoError(t, p.Write(metrics))
+
+ // Note: there is technically a possible race here, where the plugin doesn't try to insert into _a until after _b.
+ // However this should be practically impossible, and engineering a solution to account for it would add more
+ // complexity than it is worth.
+
+ metrics = []telegraf.Metric{
+ newMetric(t, "_b", MSS{}, MSI{"v": 3}),
+ }
+ require.NoError(t, p.Write(metrics))
+
+ p.Logger.WaitForCopy(t.Name()+"_b", true)
+ // release the lock on table _a
+ tx.Rollback(ctx)
+ p.Logger.WaitForCopy(t.Name()+"_a", true)
+
+ dumpA := dbTableDump(t, p.db, "_a")
+ dumpB := dbTableDump(t, p.db, "_b")
+
+ if assert.Len(t, dumpA, 2) {
+ assert.EqualValues(t, 1, dumpA[0]["v"])
+ assert.EqualValues(t, 2, dumpA[1]["v"])
+ }
+ if assert.Len(t, dumpB, 1) {
+ assert.EqualValues(t, 3, dumpB[0]["v"])
+ }
+
+ // We should have had 3 connections. One for the lock, and one for each table.
+ assert.EqualValues(t, 3, p.db.Stat().TotalConns())
+}
+
+// Test that the bad metric is dropped, and the rest of the batch succeeds.
+func TestWrite_sequentialPermError(t *testing.T) {
+ p := newPostgresqlTest(t)
+ require.NoError(t, p.Connect())
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "_a", MSS{}, MSI{"v": 1}),
+ newMetric(t, "_b", MSS{}, MSI{"v": 2}),
+ }
+ require.NoError(t, p.Write(metrics))
+
+ metrics = []telegraf.Metric{
+ newMetric(t, "_a", MSS{}, MSI{"v": "a"}),
+ newMetric(t, "_b", MSS{}, MSI{"v": 3}),
+ }
+ require.NoError(t, p.Write(metrics))
+
+ dumpA := dbTableDump(t, p.db, "_a")
+ dumpB := dbTableDump(t, p.db, "_b")
+ assert.Len(t, dumpA, 1)
+ assert.Len(t, dumpB, 2)
+
+ haveError := false
+ for _, l := range p.Logger.Logs() {
+ if strings.Contains(l.String(), "write error") {
+ haveError = true
+ break
+ }
+ }
+ assert.True(t, haveError, "write error not found in log")
+}
+
+// Test that the bad metric is dropped, and the rest of the batch succeeds.
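+// (Same scenario as the sequential case above, but with pool_max_conns=2 so the writes run through worker goroutines.)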
+func TestWrite_concurrentPermError(t *testing.T) {
+ p := newPostgresqlTest(t)
+ p.Connection += " pool_max_conns=2"
+ require.NoError(t, p.Connect())
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "_a", MSS{}, MSI{"v": 1}),
+ }
+ require.NoError(t, p.Write(metrics))
+ p.Logger.WaitForCopy(t.Name()+"_a", true)
+
+ metrics = []telegraf.Metric{
+ newMetric(t, "_a", MSS{}, MSI{"v": "a"}),
+ newMetric(t, "_b", MSS{}, MSI{"v": 2}),
+ }
+ require.NoError(t, p.Write(metrics))
+ p.Logger.WaitFor(func(l Log) bool {
+ return strings.Contains(l.String(), "write error")
+ }, false)
+ p.Logger.WaitForCopy(t.Name()+"_b", true)
+
+ dumpA := dbTableDump(t, p.db, "_a")
+ dumpB := dbTableDump(t, p.db, "_b")
+ assert.Len(t, dumpA, 1)
+ assert.Len(t, dumpB, 1)
+}
+
+// Verify that in sequential mode, errors are returned, allowing the telegraf agent to handle & retry
+func TestWrite_sequentialTempError(t *testing.T) {
+ p := newPostgresqlTest(t)
+ require.NoError(t, p.Connect())
+
+ // To avoid a race condition, we need to know when our goroutine has started listening to the log.
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ go func() {
+ // Wait for the CREATE TABLE, and then kill the connection.
+ // The WaitFor callback holds a lock on the log, meaning it blocks logging of the next action. So we trigger
+ // on CREATE TABLE so that there are a few statements still to go before the COMMIT.
+ p.Logger.WaitFor(func(log Log) bool {
+ if strings.Contains(log.String(), "release wg") {
+ wg.Done()
+ }
+
+ if !strings.Contains(log.String(), "CREATE TABLE") {
+ return false
+ }
+ pid := log.args[1].(MSI)["pid"].(uint32)
+
+ conf := p.db.Config().ConnConfig
+ conf.Logger = nil
+ c, err := pgx.ConnectConfig(context.Background(), conf)
+ if !assert.NoError(t, err) {
+ return true
+ }
+ _, err = c.Exec(context.Background(), "SELECT pg_terminate_backend($1)", pid)
+ assert.NoError(t, err)
+ return true
+ }, false)
+ }()
+
+ p.Logger.Infof("release wg")
+ wg.Wait()
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "_a", MSS{}, MSI{"v": 1}),
+ }
+ require.Error(t, p.Write(metrics))
+}
+
+// Verify that when using concurrency, errors are not returned, but instead logged and automatically retried
+func TestWrite_concurrentTempError(t *testing.T) {
+ p := newPostgresqlTest(t)
+ p.Connection += " pool_max_conns=2"
+ require.NoError(t, p.Connect())
+
+ // To avoid a race condition, we need to know when our goroutine has started listening to the log.
+ wg := sync.WaitGroup{}
+ wg.Add(1)
+
+ go func() {
+ // Wait for the CREATE TABLE, and then kill the connection.
+ // The WaitFor callback holds a lock on the log, meaning it blocks logging of the next action. So we trigger
+ // on CREATE TABLE so that there are a few statements still to go before the COMMIT.
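+ // The callback below terminates the server-side backend (via pg_terminate_backend) of the connection that
+ // issued the CREATE TABLE, simulating a transient connection failure mid-transaction.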
+ p.Logger.WaitFor(func(log Log) bool {
+ if strings.Contains(log.String(), "release wg") {
+ wg.Done()
+ }
+
+ if !strings.Contains(log.String(), "CREATE TABLE") {
+ return false
+ }
+ pid := log.args[1].(MSI)["pid"].(uint32)
+
+ conf := p.db.Config().ConnConfig
+ conf.Logger = nil
+ c, err := pgx.ConnectConfig(context.Background(), conf)
+ if !assert.NoError(t, err) {
+ return true
+ }
+ _, err = c.Exec(context.Background(), "SELECT pg_terminate_backend($1)", pid)
+ assert.NoError(t, err)
+ return true
+ }, false)
+ }()
+ p.Logger.Infof("release wg")
+ wg.Wait()
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "_a", MSS{}, MSI{"v": 1}),
+ }
+ require.NoError(t, p.Write(metrics))
+
+ p.Logger.WaitForCopy(t.Name()+"_a", true)
+ dumpA := dbTableDump(t, p.db, "_a")
+ assert.Len(t, dumpA, 1)
+
+ haveError := false
+ for _, l := range p.Logger.Logs() {
+ if strings.Contains(l.String(), "write error") {
+ haveError = true
+ break
+ }
+ }
+ assert.True(t, haveError, "write error not found in log")
+}
+
+func TestWriteTagTable(t *testing.T) {
+ p := newPostgresqlTest(t)
+ p.TagsAsForeignKeys = true
+ require.NoError(t, p.Connect())
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "", MSS{"tag": "foo"}, MSI{"v": 1}),
+ }
+ require.NoError(t, p.Write(metrics))
+
+ dump := dbTableDump(t, p.db, "")
+ require.Len(t, dump, 1)
+ assert.EqualValues(t, 1, dump[0]["v"])
+
+ dumpTags := dbTableDump(t, p.db, p.TagTableSuffix)
+ require.Len(t, dumpTags, 1)
+ assert.EqualValues(t, dump[0]["tag_id"], dumpTags[0]["tag_id"])
+ assert.EqualValues(t, "foo", dumpTags[0]["tag"])
+}
+
+// Verify that when using TagsAsForeignKeys and a tag can't be written, we still add the metrics.
+func TestWrite_tagError(t *testing.T) {
+ p := newPostgresqlTest(t)
+ p.TagsAsForeignKeys = true
+ require.NoError(t, p.Connect())
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "", MSS{"tag": "foo"}, MSI{"v": 1}),
+ }
+ require.NoError(t, p.Write(metrics))
+
+ // The plugin will have the table cached, so it won't know we dropped it; it will try the insert and get an error.
+ _, err := p.db.Exec(ctx, "DROP TABLE \""+t.Name()+"_tag\"")
+ require.NoError(t, err)
+
+ metrics = []telegraf.Metric{
+ newMetric(t, "", MSS{"tag": "foo"}, MSI{"v": 2}),
+ }
+ require.NoError(t, p.Write(metrics))
+
+ dump := dbTableDump(t, p.db, "")
+ require.Len(t, dump, 2)
+ assert.EqualValues(t, 1, dump[0]["v"])
+ assert.EqualValues(t, 2, dump[1]["v"])
+}
+
+// Verify that when using TagsAsForeignKeys and ForeignTagConstraint and a tag can't be written, we drop the metrics.
+func TestWrite_tagError_foreignConstraint(t *testing.T) {
+ p := newPostgresqlTest(t)
+ p.TagsAsForeignKeys = true
+ p.ForignTagConstraint = true
+ require.NoError(t, p.Connect())
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "", MSS{"tag": "foo"}, MSI{"v": 1}),
+ }
+ require.NoError(t, p.Write(metrics))
+
+ // The plugin will have the table cached, so it won't know we dropped it; it will try the insert and get an error.
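+ // (The table cache is only refreshed for tables the plugin doesn't know about, so the next write still targets
+ // the dropped tag table.)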
+ _, err := p.db.Exec(ctx, "DROP TABLE \""+t.Name()+"_tag\"") + require.NoError(t, err) + + metrics = []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"v": 2}), + } + assert.NoError(t, p.Write(metrics)) + haveError := false + for _, l := range p.Logger.Logs() { + if strings.Contains(l.String(), "write error") { + haveError = true + break + } + } + assert.True(t, haveError, "write error not found in log") + + dump := dbTableDump(t, p.db, "") + require.Len(t, dump, 1) + assert.EqualValues(t, 1, dump[0]["v"]) +} diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index 664b8c40476a3..c72eac10b7690 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" - "log" "strings" "sync" @@ -12,12 +11,17 @@ import ( ) const ( - refreshTableStructureStatement = "SELECT column_name, data_type, col_description((table_schema||'.'||table_name)::regclass::oid, ordinal_position) FROM information_schema.columns WHERE table_schema = $1 and table_name = $2" + refreshTableStructureStatement = ` + SELECT column_name, data_type, col_description(format('%I.%I', table_schema, table_name)::regclass::oid, ordinal_position) + FROM information_schema.columns + WHERE table_schema = $1 and table_name = $2 + ` ) type TableManager struct { *Postgresql + // map[tableName]map[columnName]utils.Column Tables map[string]map[string]utils.Column tablesMutex sync.RWMutex } @@ -38,8 +42,8 @@ func (tm *TableManager) ClearTableCache() { tm.tablesMutex.Unlock() } -func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName string) error { - rows, err := tm.db.Query(ctx, refreshTableStructureStatement, tm.Schema, tableName) +func (tm *TableManager) refreshTableStructure(ctx context.Context, db dbh, tableName string) error { + rows, err := db.Query(ctx, refreshTableStructureStatement, tm.Schema, tableName) if err != nil { return err } @@ -80,6 +84,9 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName str Role: role, } } + if err := rows.Err(); err != nil { + return err + } if len(cols) > 0 { tm.tablesMutex.Lock() @@ -90,6 +97,11 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, tableName str return nil } +// EnsureStructure ensures that the table identified by tableName contains the provided columns. +// +// createTemplates and addColumnTemplates are the templates which are executed in the event of table create or alter +// (respectively). +// metricsTableName and tagsTableName are passed to the templates. func (tm *TableManager) EnsureStructure( ctx context.Context, db dbh, @@ -110,7 +122,7 @@ func (tm *TableManager) EnsureStructure( tm.tablesMutex.RUnlock() if !ok { // We don't know about the table. First try to query it. 
- if err := tm.refreshTableStructure(ctx, tableName); err != nil { + if err := tm.refreshTableStructure(ctx, db, tableName); err != nil { return nil, fmt.Errorf("querying table structure: %w", err) } tm.tablesMutex.RLock() @@ -236,7 +248,7 @@ func (tm *TableManager) executeTemplates( return err } - return tm.refreshTableStructure(ctx, tableName) + return tm.refreshTableStructure(ctx, db, tableName) } func colMapToSlice(colMap map[string]utils.Column) []utils.Column { @@ -257,7 +269,7 @@ func colMapToSlice(colMap map[string]utils.Column) []utils.Column { func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *TableSource) error { metricTableName := rowSource.Name() var tagTableName string - if tm.TagsAsForeignkeys { + if tm.TagsAsForeignKeys { tagTableName = metricTableName + tm.TagTableSuffix missingCols, err := tm.EnsureStructure( @@ -280,7 +292,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl rowSource.DropColumn(col) colDefs[i] = col.Name + " " + string(col.Type) } - log.Printf("[outputs.postgresql] Error: table '%s' is missing tag columns (dropping metrics): %s", tagTableName, strings.Join(colDefs, ", ")) + tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s", tagTableName, strings.Join(colDefs, ", ")) } } @@ -304,7 +316,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl rowSource.DropColumn(col) colDefs[i] = col.Name + " " + string(col.Type) } - log.Printf("[outputs.postgresql] Error: table '%s' is missing columns (dropping fields): %s", metricTableName, strings.Join(colDefs, ", ")) + tm.Logger.Errorf("table '%s' is missing columns (dropping fields): %s", metricTableName, strings.Join(colDefs, ", ")) } return nil diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go new file mode 100644 index 0000000000000..f9a56416c6716 --- /dev/null +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -0,0 +1,159 @@ +package postgresql + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" +) + +func TestTableManager_EnsureStructure(t *testing.T) { + p := newPostgresqlTest(t) + require.NoError(t, p.Connect()) + + cols := []utils.Column{ + ColumnFromTag("foo", ""), + ColumnFromField("baz", 0), + } + missingCols, err := p.tableManager.EnsureStructure( + ctx, + p.db, + t.Name(), + cols, + p.CreateTemplates, + p.AddColumnTemplates, + t.Name(), + "", + ) + require.NoError(t, err) + require.Empty(t, missingCols) + + assert.EqualValues(t, cols[0], p.tableManager.Tables[t.Name()]["foo"]) + assert.EqualValues(t, cols[1], p.tableManager.Tables[t.Name()]["baz"]) +} + +func TestTableManager_refreshTableStructure(t *testing.T) { + p := newPostgresqlTest(t) + require.NoError(t, p.Connect()) + + cols := []utils.Column{ + ColumnFromTag("foo", ""), + ColumnFromField("baz", 0), + } + _, err := p.tableManager.EnsureStructure( + ctx, + p.db, + t.Name(), + cols, + p.CreateTemplates, + p.AddColumnTemplates, + t.Name(), + "", + ) + require.NoError(t, err) + + p.tableManager.ClearTableCache() + require.Empty(t, p.tableManager.Tables) + + require.NoError(t, p.tableManager.refreshTableStructure(ctx, p.db, t.Name())) + + assert.EqualValues(t, cols[0], p.tableManager.Tables[t.Name()]["foo"]) + assert.EqualValues(t, 
cols[1], p.tableManager.Tables[t.Name()]["baz"])
+}
+
+func TestTableManager_MatchSource(t *testing.T) {
+ p := newPostgresqlTest(t)
+ p.TagsAsForeignKeys = true
+ require.NoError(t, p.Connect())
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
+ }
+ tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()]
+
+ require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
+ assert.Contains(t, p.tableManager.Tables[t.Name()+p.TagTableSuffix], "tag")
+ assert.Contains(t, p.tableManager.Tables[t.Name()], "a")
+}
+
+// Verify that TableManager updates & caches the DB table structure unless the incoming metric can't fit.
+func TestTableManager_cache(t *testing.T) {
+ p := newPostgresqlTest(t)
+ p.TagsAsForeignKeys = true
+ require.NoError(t, p.Connect())
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
+ }
+ tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()]
+
+ require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
+}
+
+// Verify that when alter statements are disabled and a metric comes in with a new tag key, the tag is omitted.
+func TestTableSource_noAlterMissingTag(t *testing.T) {
+ p := newPostgresqlTest(t)
+ p.AddColumnTemplates = []*template.Template{}
+ require.NoError(t, p.Connect())
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
+ }
+ tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()]
+ require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
+
+ metrics = []telegraf.Metric{
+ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
+ newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}),
+ }
+ tsrc = NewTableSources(&p.Postgresql, metrics)[t.Name()]
+ require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
+ assert.NotContains(t, tsrc.ColumnNames(), "bar")
+}
+
+// Verify that when alter statements are disabled with foreign tags and a metric comes in with a new tag key, the
+// tag is omitted.
+func TestTableSource_noAlterMissingTagTableTag(t *testing.T) {
+ p := newPostgresqlTest(t)
+ p.TagsAsForeignKeys = true
+ p.TagTableAddColumnTemplates = []*template.Template{}
+ require.NoError(t, p.Connect())
+
+ metrics := []telegraf.Metric{
+ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}),
+ }
+ tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()]
+ require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
+
+ metrics = []telegraf.Metric{
+ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}),
+ newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}),
+ }
+ tsrc = NewTableSources(&p.Postgresql, metrics)[t.Name()]
+ ttsrc := NewTagTableSource(tsrc)
+ require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc))
+ assert.NotContains(t, ttsrc.ColumnNames(), "bar")
+}
+
+// Verify that when alter statements are disabled and a metric comes in with a new field key, the field is omitted.
+func TestTableSource_noAlterMissingField(t *testing.T) { + p := newPostgresqlTest(t) + p.AddColumnTemplates = []*template.Template{} + require.NoError(t, p.Connect()) + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), + } + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) + + metrics = []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}), + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 3, "b": 3}), + } + tsrc = NewTableSources(&p.Postgresql, metrics)[t.Name()] + require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) + assert.NotContains(t, tsrc.ColumnNames(), "b") +} diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index a23d7bfb9b460..0cab160f14e69 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -14,7 +14,7 @@ type TableSource struct { cursorValues []interface{} cursorError error - // tagPositions is the position of each tag within the tag set. Regardless of whether tags are foreign keys or not + // tagPositions is the position of each tag within the tag set, regardless of whether tags are foreign keys or not. tagPositions map[string]int // tagColumns is the list of tags to emit. List is in order. tagColumns []utils.Column @@ -23,7 +23,7 @@ type TableSource struct { // This data is used to build out the foreign tag table when enabled. tagSets map[int64][]*telegraf.Tag - // fieldPositions is the position of each field within the tag set. + // fieldPositions is the position of each field within the field list. fieldPositions map[string]int // fieldColumns is the list of fields to emit. List is in order. fieldColumns []utils.Column @@ -31,21 +31,38 @@ type TableSource struct { droppedTagColumns []string } +func NewTableSources(p *Postgresql, metrics []telegraf.Metric) map[string]*TableSource { + tableSources := map[string]*TableSource{} + + for _, m := range metrics { + tsrc := tableSources[m.Name()] + if tsrc == nil { + tsrc = NewTableSource(p) + tableSources[m.Name()] = tsrc + } + tsrc.AddMetric(m) + } + + return tableSources +} + func NewTableSource(postgresql *Postgresql) *TableSource { tsrc := &TableSource{ postgresql: postgresql, cursor: -1, tagSets: make(map[int64][]*telegraf.Tag), } - if !postgresql.FieldsAsJsonb { + if !postgresql.TagsAsJsonb { tsrc.tagPositions = map[string]int{} + } + if !postgresql.FieldsAsJsonb { tsrc.fieldPositions = map[string]int{} } return tsrc } func (tsrc *TableSource) AddMetric(metric telegraf.Metric) { - if tsrc.postgresql.TagsAsForeignkeys { + if tsrc.postgresql.TagsAsForeignKeys { tagID := utils.GetTagID(metric) if _, ok := tsrc.tagSets[tagID]; !ok { tsrc.tagSets[tagID] = metric.TagList() @@ -104,7 +121,7 @@ func (tsrc *TableSource) MetricTableColumns() []utils.Column { TimeColumn, } - if tsrc.postgresql.TagsAsForeignkeys { + if tsrc.postgresql.TagsAsForeignKeys { cols = append(cols, TagIDColumn) } else { cols = append(cols, tsrc.TagColumns()...) @@ -138,6 +155,9 @@ func (tsrc *TableSource) ColumnNames() []string { return names } +// Drops the specified column. +// If column is a tag column, any metrics containing the tag will be skipped. +// If column is a field column, any metrics containing the field will have it omitted. 
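+// (Dropping is used by the table manager when the database table cannot be altered to fit the incoming metrics.)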
func (tsrc *TableSource) DropColumn(col utils.Column) { switch col.Role { case utils.TagColType: @@ -233,7 +253,7 @@ func (tsrc *TableSource) values() ([]interface{}, error) { values = append(values, metric.Time()) - if !tsrc.postgresql.TagsAsForeignkeys { + if !tsrc.postgresql.TagsAsForeignKeys { if !tsrc.postgresql.TagsAsJsonb { // tags_as_foreignkey=false, tags_as_json=false tagValues := make([]interface{}, len(tsrc.tagPositions)) @@ -253,9 +273,11 @@ func (tsrc *TableSource) values() ([]interface{}, error) { } else { // tags_as_foreignkey=true tagID := utils.GetTagID(metric) - if _, ok := tsrc.tagSets[tagID]; !ok { - // tag has been dropped, we can't emit or we risk collision with another metric - return nil, nil + if tsrc.postgresql.ForignTagConstraint { + if _, ok := tsrc.tagSets[tagID]; !ok { + // tag has been dropped + return nil, nil + } } values = append(values, tagID) } diff --git a/plugins/outputs/postgresql/table_source_test.go b/plugins/outputs/postgresql/table_source_test.go new file mode 100644 index 0000000000000..4589f49857415 --- /dev/null +++ b/plugins/outputs/postgresql/table_source_test.go @@ -0,0 +1,234 @@ +package postgresql + +import ( + "encoding/json" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" + "github.com/jackc/pgx/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" + "time" +) + +func TestTableSource(t *testing.T) { +} + +func indexOfStr(list []string, target string) int { + for i, v := range list { + if v == target { + return i + } + } + return -1 +} + +type source interface { + pgx.CopyFromSource + ColumnNames() []string +} + +func tSrcRow(src source) MSI { + if !src.Next() { + return nil + } + row := MSI{} + vals, err := src.Values() + if err != nil { + panic(err) + } + for i, name := range src.ColumnNames() { + row[name] = vals[i] + } + return row +} + +func TestTableSource_tagJSONB(t *testing.T) { + p := newPostgresqlTest(t) + p.TagsAsJsonb = true + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), + } + + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + row := tSrcRow(tsrc) + require.NoError(t, tsrc.Err()) + + assert.IsType(t, time.Time{}, row["time"]) + var tags MSI + require.NoError(t, json.Unmarshal(row["tags"].([]byte), &tags)) + assert.EqualValues(t, MSI{"a": "one", "b": "two"}, tags) + assert.EqualValues(t, 1, row["v"]) +} + +func TestTableSource_tagTable(t *testing.T) { + p := newPostgresqlTest(t) + p.TagsAsForeignKeys = true + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), + } + + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + ttsrc := NewTagTableSource(tsrc) + ttrow := tSrcRow(ttsrc) + assert.EqualValues(t, "one", ttrow["a"]) + assert.EqualValues(t, "two", ttrow["b"]) + + row := tSrcRow(tsrc) + assert.Equal(t, row["tag_id"], ttrow["tag_id"]) +} + +func TestTableSource_tagTableJSONB(t *testing.T) { + p := newPostgresqlTest(t) + p.TagsAsForeignKeys = true + p.TagsAsJsonb = true + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), + } + + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + ttsrc := NewTagTableSource(tsrc) + ttrow := tSrcRow(ttsrc) + var tags MSI + require.NoError(t, json.Unmarshal(ttrow["tags"].([]byte), &tags)) + assert.EqualValues(t, MSI{"a": "one", "b": "two"}, tags) +} + +func TestTableSource_fieldsJSONB(t *testing.T) { + p := newPostgresqlTest(t) + 
p.FieldsAsJsonb = true + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1, "b": 2}), + } + + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + row := tSrcRow(tsrc) + var fields MSI + require.NoError(t, json.Unmarshal(row["fields"].([]byte), &fields)) + // json unmarshals numbers as floats + assert.EqualValues(t, MSI{"a": 1.0, "b": 2.0}, fields) +} + +// TagsAsForeignKeys=false +// Test that when a tag column is dropped, all metrics containing that tag are dropped. +func TestTableSource_DropColumn_tag(t *testing.T) { + p := newPostgresqlTest(t) + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), + newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}), + } + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + + // Drop column "b" + var col utils.Column + for _, c := range tsrc.TagColumns() { + if c.Name == "b" { + col = c + break + } + } + tsrc.DropColumn(col) + + row := tSrcRow(tsrc) + assert.EqualValues(t, "one", row["a"]) + assert.EqualValues(t, 2, row["v"]) + assert.False(t, tsrc.Next()) +} + +// TagsAsForeignKeys=true, ForeignTagConstraint=true +// Test that when a tag column is dropped, all metrics containing that tag are dropped. +func TestTableSource_DropColumn_tag_fkTrue_fcTrue(t *testing.T) { + p := newPostgresqlTest(t) + p.TagsAsForeignKeys = true + p.ForignTagConstraint = true + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), + newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}), + } + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + + // Drop column "b" + var col utils.Column + for _, c := range tsrc.TagColumns() { + if c.Name == "b" { + col = c + break + } + } + tsrc.DropColumn(col) + + ttsrc := NewTagTableSource(tsrc) + row := tSrcRow(ttsrc) + assert.EqualValues(t, "one", row["a"]) + assert.False(t, ttsrc.Next()) + + row = tSrcRow(tsrc) + assert.EqualValues(t, 2, row["v"]) + assert.False(t, tsrc.Next()) +} + +// TagsAsForeignKeys=true, ForeignTagConstraint=false +// Test that when a tag column is dropped, metrics are still added while the tag is not. +func TestTableSource_DropColumn_tag_fkTrue_fcFalse(t *testing.T) { + p := newPostgresqlTest(t) + p.TagsAsForeignKeys = true + p.ForignTagConstraint = false + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), + newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}), + } + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + + // Drop column "b" + var col utils.Column + for _, c := range tsrc.TagColumns() { + if c.Name == "b" { + col = c + break + } + } + tsrc.DropColumn(col) + + ttsrc := NewTagTableSource(tsrc) + row := tSrcRow(ttsrc) + assert.EqualValues(t, "one", row["a"]) + assert.False(t, ttsrc.Next()) + + row = tSrcRow(tsrc) + assert.EqualValues(t, 1, row["v"]) + row = tSrcRow(tsrc) + assert.EqualValues(t, 2, row["v"]) +} + +// Test that when a field is dropped, only the field is dropped, and all rows remain, unless it was the only field. 
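+// (A row whose only field was dropped would have nothing left to insert, so it is skipped entirely.)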
+func TestTableSource_DropColumn_field(t *testing.T) { + p := newPostgresqlTest(t) + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2, "b": 3}), + } + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + + // Drop column "a" + var col utils.Column + for _, c := range tsrc.FieldColumns() { + if c.Name == "a" { + col = c + break + } + } + tsrc.DropColumn(col) + + row := tSrcRow(tsrc) + assert.EqualValues(t, "foo", row["tag"]) + assert.EqualValues(t, 3, row["b"]) + assert.False(t, tsrc.Next()) +} diff --git a/plugins/outputs/postgresql/utils/utils_test.go b/plugins/outputs/postgresql/utils/utils_test.go deleted file mode 100644 index 040a7202d5c67..0000000000000 --- a/plugins/outputs/postgresql/utils/utils_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package utils - -import ( - "testing" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - "github.com/stretchr/testify/assert" -) - -func TestPostgresqlQuote(t *testing.T) { - assert.Equal(t, `"foo"`, QuoteIdent("foo")) - assert.Equal(t, `"fo'o"`, QuoteIdent("fo'o")) - assert.Equal(t, `"fo""o"`, QuoteIdent("fo\"o")) - - assert.Equal(t, "'foo'", QuoteLiteral("foo")) - assert.Equal(t, "'fo''o'", QuoteLiteral("fo'o")) - assert.Equal(t, "'fo\"o'", QuoteLiteral("fo\"o")) -} - -func TestBuildJsonb(t *testing.T) { - testCases := []struct { - desc string - in interface{} - out string - }{ - { - desc: "simple map", - in: map[string]int{"a": 1}, - out: `{"a":1}`, - }, { - desc: "single number", - in: 1, - out: `1`, - }, { - desc: "interface map", - in: map[int]interface{}{1: "a"}, - out: `{"1":"a"}`, - }, - } - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - res, err := BuildJsonb(tc.in) - assert.Nil(t, err) - assert.Equal(t, tc.out, string(res)) - - }) - } -} - -func TestFullTableName(t *testing.T) { - assert.Equal(t, `"tableName"`, FullTableName("", "tableName").Sanitize()) - assert.Equal(t, `"table name"`, FullTableName("", "table name").Sanitize()) - assert.Equal(t, `"table.name"`, FullTableName("", "table.name").Sanitize()) - assert.Equal(t, `"table"."name"`, FullTableName("table", "name").Sanitize()) - assert.Equal(t, `"schema name"."table name"`, FullTableName("schema name", "table name").Sanitize()) -} - -func TestDerivePgDataType(t *testing.T) { - assert.Equal(t, PgDataType("boolean"), DerivePgDatatype(true)) - assert.Equal(t, PgDataType("int8"), DerivePgDatatype(uint64(1))) - assert.Equal(t, PgDataType("int8"), DerivePgDatatype(1)) - assert.Equal(t, PgDataType("int8"), DerivePgDatatype(uint(1))) - assert.Equal(t, PgDataType("int8"), DerivePgDatatype(int64(1))) - assert.Equal(t, PgDataType("int4"), DerivePgDatatype(uint32(1))) - assert.Equal(t, PgDataType("int4"), DerivePgDatatype(int32(1))) - assert.Equal(t, PgDataType("float8"), DerivePgDatatype(float64(1.0))) - assert.Equal(t, PgDataType("float8"), DerivePgDatatype(float32(1.0))) - assert.Equal(t, PgDataType("text"), DerivePgDatatype("")) - assert.Equal(t, PgDataType("timestamptz"), DerivePgDatatype(time.Now())) - assert.Equal(t, PgDataType("text"), DerivePgDatatype([]int{})) -} - -func TestLongToShortPgType(t *testing.T) { - assert.Equal(t, PgDataType("boolean"), LongToShortPgType("boolean")) - assert.Equal(t, PgDataType("int4"), LongToShortPgType("integer")) - assert.Equal(t, PgDataType("int8"), LongToShortPgType("bigint")) - assert.Equal(t, PgDataType("float8"), LongToShortPgType("double precision")) - assert.Equal(t, 
PgDataType("timestamptz"), LongToShortPgType("timestamp with time zone")) - assert.Equal(t, PgDataType("timestamp"), LongToShortPgType("timestamp without time zone")) - assert.Equal(t, PgDataType("jsonb"), LongToShortPgType("jsonb")) - assert.Equal(t, PgDataType("text"), LongToShortPgType("text")) - assert.Equal(t, PgDataType("unknown"), LongToShortPgType("unknown")) -} - -func TestPgTypeCanContain(t *testing.T) { - assert.True(t, PgTypeCanContain(PgDataType("bogus same"), PgDataType("bogus same"))) - assert.True(t, PgTypeCanContain(PgDataType("int8"), PgDataType("int4"))) - assert.False(t, PgTypeCanContain(PgDataType("int8"), PgDataType("float8"))) - assert.False(t, PgTypeCanContain(PgDataType("int8"), PgDataType("timestamptz"))) - - assert.True(t, PgTypeCanContain(PgDataType("int4"), PgDataType("serial"))) - assert.True(t, PgTypeCanContain(PgDataType("int8"), PgDataType("int4"))) - assert.False(t, PgTypeCanContain(PgDataType("int4"), PgDataType("int8"))) - - assert.False(t, PgTypeCanContain(PgDataType("float8"), PgDataType("int8"))) - assert.True(t, PgTypeCanContain(PgDataType("float8"), PgDataType("int4"))) - - assert.True(t, PgTypeCanContain(PgDataType("timestamptz"), PgDataType("timestamp"))) - - assert.False(t, PgTypeCanContain(PgDataType("text"), PgDataType("timestamp"))) -} - -func TestGroupMetricsByMeasurement(t *testing.T) { - m11, _ := metric.New("m", map[string]string{}, map[string]interface{}{}, time.Now()) - m12, _ := metric.New("m", map[string]string{"t1": "tv1"}, map[string]interface{}{"f1": 1}, time.Now()) - m13, _ := metric.New("m", map[string]string{}, map[string]interface{}{"f2": 2}, time.Now()) - - m21, _ := metric.New("m2", map[string]string{}, map[string]interface{}{}, time.Now()) - m22, _ := metric.New("m2", map[string]string{"t1": "tv1"}, map[string]interface{}{"f1": 1}, time.Now()) - m23, _ := metric.New("m2", map[string]string{}, map[string]interface{}{"f2": 2}, time.Now()) - in := []telegraf.Metric{m11, m12, m21, m22, m13, m23} - expected := map[string][]int{ - "m": {0, 1, 4}, - "m2": {2, 3, 5}, - } - got := GroupMetricsByMeasurement(in) - assert.Equal(t, expected, got) -} - -func TestGenerateInsert(t *testing.T) { - - sql := GenerateInsert(`"m"`, []string{"time", "f"}) - assert.Equal(t, `INSERT INTO "m"("time","f") VALUES($1,$2)`, sql) - - sql = GenerateInsert(`"m"`, []string{"time", "i"}) - assert.Equal(t, `INSERT INTO "m"("time","i") VALUES($1,$2)`, sql) - - sql = GenerateInsert(`"public"."m"`, []string{"time", "f", "i"}) - assert.Equal(t, `INSERT INTO "public"."m"("time","f","i") VALUES($1,$2,$3)`, sql) - - sql = GenerateInsert(`"public"."m n"`, []string{"time", "k", "i"}) - assert.Equal(t, `INSERT INTO "public"."m n"("time","k","i") VALUES($1,$2,$3)`, sql) - - sql = GenerateInsert("m", []string{"time", "k1", "k2", "i"}) - assert.Equal(t, `INSERT INTO m("time","k1","k2","i") VALUES($1,$2,$3,$4)`, sql) -} From 1a507bc766226bc9ec5901750a8ff0af3417cb21 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sun, 11 Apr 2021 22:57:15 -0400 Subject: [PATCH 082/121] outputs.postgresql: add template documentation --- .../outputs/postgresql/template/template.go | 188 ++++++++++++++++-- 1 file changed, 171 insertions(+), 17 deletions(-) diff --git a/plugins/outputs/postgresql/template/template.go b/plugins/outputs/postgresql/template/template.go index a0a1ec4924988..a1c1f44d0989b 100644 --- a/plugins/outputs/postgresql/template/template.go +++ b/plugins/outputs/postgresql/template/template.go @@ -1,3 +1,113 @@ +/* + +Templates are used for creation of the SQL used when 
creating and modifying tables. These templates are specified within
+the configuration as the parameters 'create_templates', 'add_column_templates', 'tag_table_create_templates', and
+'tag_table_add_column_templates'.
+
+The templating functionality behaves the same in all cases. However, the variables will differ.
+
+
+Variables
+
+The following variables are available within all template executions:
+
+  * table - A TemplateTable object referring to the current table being
+    created/modified.
+
+  * columns - A TemplateColumns object of the new columns being added to the
+    table (all columns in the case of a new table, and new columns in the case
+    of an existing table).
+
+  * allColumns - A TemplateColumns object of all the columns (both old and new)
+    of the table. In the case of a new table, this is the same as `columns`.
+
+  * metricTable - A TemplateTable object referring to the table containing the
+    fields. When TagsAsForeignKeys is enabled and `table` is the tag table,
+    `metricTable` is the table using this one for its tags.
+
+  * tagTable - A TemplateTable object referring to the table containing the
+    tags. When TagsAsForeignKeys is enabled and `table` is the metrics table,
+    `tagTable` is the table containing the tags for this one.
+
+Each object has helper methods that may be used within the template. See the documentation for the appropriate type.
+
+When the object is interpolated without a helper, it is automatically converted to a string through its String() method.
+
+
+Functions
+
+All the functions provided by the Sprig library (http://masterminds.github.io/sprig/) are available within template executions.
+
+In addition, the following functions are also available:
+
+  * quoteIdentifier - Quotes the input string as a Postgres identifier.
+
+  * quoteLiteral - Quotes the input string as a Postgres literal.
+
+
+Examples
+
+The default templates show basic usage. When left unconfigured, it is the equivalent of:
+  [outputs.postgresql]
+  create_templates = [
+    '''CREATE TABLE {{.table}} ({{.columns}})''',
+  ]
+  add_column_templates = [
+    '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
+  ]
+  tag_table_create_templates = [
+    '''CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))'''
+  ]
+  tag_table_add_column_templates = [
+    '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
+  ]
+
+A simple example for usage with TimescaleDB would be:
+  [outputs.postgresql]
+  create_templates = [
+    '''CREATE TABLE {{ .table }} ({{ .allColumns }})''',
+    '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1d')''',
+    '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
+    '''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2h')''',
+  ]
+...where the defaults for the other templates would be automatically applied.
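+
+As a purely illustrative sketch (not one of the plugin's defaults), the create templates can also run additional DDL
+such as creating an index; the index statement here is an assumption about the target schema, not something the
+plugin requires:
+  [outputs.postgresql]
+  create_templates = [
+    '''CREATE TABLE {{ .table }} ({{ .columns }})''',
+    '''CREATE INDEX ON {{ .table }} (time)''',
+  ]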
+ +A very complex example for versions of TimescaleDB which don't support adding columns to compressed hypertables (v<2.1.0), using views and unions to emulate the functionality, would be: + [outputs.postgresql] + schema = "telegraf" + create_templates = [ + '''CREATE TABLE {{ .table }} ({{ .allColumns }})''', + '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1d')''', + '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''', + '''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2d')''', + '''CREATE VIEW {{ .table.WithSuffix "_data" }} AS + SELECT {{ .allColumns.Selectors | join "," }} FROM {{ .table }}''', + '''CREATE VIEW {{ .table.WithSchema "public" }} AS + SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} + FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt + WHERE t.tag_id = tt.tag_id''', + ] + add_column_templates = [ + '''ALTER TABLE {{ .table }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash).WithSchema "" }}''', + '''ALTER VIEW {{ .table.WithSuffix "_data" }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash "_data").WithSchema "" }}''', + '''DROP VIEW {{ .table.WithSchema "public" }}''', + + '''CREATE TABLE {{ .table }} ({{ .allColumns }})''', + '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1d')''', + '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''', + '''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2d')''', + '''CREATE VIEW {{ .table.WithSuffix "_data" }} AS + SELECT {{ .allColumns.Selectors | join "," }} + FROM {{ .table }} + UNION ALL + SELECT {{ (.allColumns.Union .table.Columns).Selectors | join "," }} + FROM {{ .table.WithSuffix "_" .table.Columns.Hash "_data" }}''', + '''CREATE VIEW {{ .table.WithSchema "public" }} + AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} + FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt + WHERE t.tag_id = tt.tag_id''', + ] +*/ package template import ( @@ -14,10 +124,15 @@ import ( "github.com/Masterminds/sprig" ) +// TableCreateTemplate is the default template used for creating new tables. var TableCreateTemplate = newTemplate(`CREATE TABLE {{.table}} ({{.columns}})`) -var TableAddColumnTemplate = newTemplate(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`) + +// TagTableCreateTemplate is the default template used when creating a new tag table. var TagTableCreateTemplate = newTemplate(`CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))`) +// TableAddColumnTemplate is the default template used when adding new columns to an existing table. +var TableAddColumnTemplate = newTemplate(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`) + var templateFuncs = map[string]interface{}{ "quoteIdentifier": QuoteIdentifier, "quoteLiteral": QuoteLiteral, @@ -35,13 +150,22 @@ func asString(obj interface{}) string { return fmt.Sprintf("%v", obj) } } + +// QuoteIdentifier quotes the given string as a Postgres identifier (double-quotes the value). +// +// QuoteIdentifier is accessible within templates as 'quoteIdentifier'. 
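+// For example, {{ "my table" | quoteIdentifier }} renders as "my table" (wrapped in double quotes, with any embedded
+// double quotes doubled).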
func QuoteIdentifier(name interface{}) string { return `"` + strings.ReplaceAll(asString(name), `"`, `""`) + `"` } + +// QuoteLiteral quotes the given string as a Postgres literal (single-quotes the value). +// +// QuoteLiteral is accessible within templates as 'quoteLiteral'. func QuoteLiteral(str interface{}) string { return "'" + strings.ReplaceAll(asString(str), "'", "''") + "'" } +// TemplateTable is an object which represents a Postgres table. type TemplateTable struct { Schema string Name string @@ -59,33 +183,38 @@ func NewTemplateTable(schemaName, tableName string, columns []utils.Column) *Tem } } -//func (tt *TemplateTable) SetName(name string) { -// tt.Name = name -//} -//func (tt *TemplateTable) SetSchema(schema string) { -// tt.Schema = schema -//} +// String returns the table's fully qualified & quoted identifier (schema+table). func (tt *TemplateTable) String() string { return tt.Identifier() } + +// Identifier returns the table's fully qualified & quoted identifier (schema+table). +// +// If schema is empty, it is omitted from the result. func (tt *TemplateTable) Identifier() string { if tt.Schema == "" { return QuoteIdentifier(tt.Name) } return QuoteIdentifier(tt.Schema) + "." + QuoteIdentifier(tt.Name) } + +// WithSchema returns a copy of the TemplateTable object, but with the schema replaced by the given value. func (tt *TemplateTable) WithSchema(name string) *TemplateTable { ttNew := &TemplateTable{} *ttNew = *tt ttNew.Schema = name return ttNew } + +// WithName returns a copy of the TemplateTable object, but with the name replaced by the given value. func (tt *TemplateTable) WithName(name string) *TemplateTable { ttNew := &TemplateTable{} *ttNew = *tt ttNew.Name = name return ttNew } + +// WithSuffix returns a copy of the TemplateTable object, but with the name suffixed with the given value. func (tt *TemplateTable) WithSuffix(suffixes ...string) *TemplateTable { ttNew := &TemplateTable{} *ttNew = *tt @@ -93,38 +222,46 @@ func (tt *TemplateTable) WithSuffix(suffixes ...string) *TemplateTable { return ttNew } -//func (tt *TemplateTable) Literal() string { -// return QuoteLiteral(tt.Identifier()) -//} - +// A TemplateColumn is an object which represents a Postgres column. type TemplateColumn utils.Column +// String returns the column's definition (as used in a CREATE TABLE statement). E.G: +// "my_column" bigint func (tc TemplateColumn) String() string { return tc.Definition() } + +// Definition returns the column's definition (as used in a CREATE TABLE statement). E.G: +// "my_column" bigint func (tc TemplateColumn) Definition() string { return tc.Identifier() + " " + string(tc.Type) } + +// Identifier returns the column's quoted identifier. func (tc TemplateColumn) Identifier() string { return QuoteIdentifier(tc.Name) } + +// Selector returns the selector for the column. For most cases this is the same as Identifier. However in some cases, such as a UNION, this may return a statement such as `NULL AS "foo"`. func (tc TemplateColumn) Selector() string { if tc.Type != "" { return tc.Identifier() } - return "NULL as " + tc.Identifier() + return "NULL AS " + tc.Identifier() } + +// IsTag returns true if the column is a tag column. Otherwise false. func (tc TemplateColumn) IsTag() bool { return tc.Role == utils.TagColType } + +// IsField returns true if the column is a field column. Otherwise false. 
func (tc TemplateColumn) IsField() bool { return tc.Role == utils.FieldColType } -//func (tc TemplateColumn) Literal() string { -// return QuoteLiteral(tc.Name) -//} - +// TemplateColumns represents an ordered list of TemplateColumn objects, with convenience methods for operating on the +// list. type TemplateColumns []TemplateColumn func NewTemplateColumns(cols []utils.Column) TemplateColumns { @@ -135,10 +272,12 @@ func NewTemplateColumns(cols []utils.Column) TemplateColumns { return tcs } +// List returns the TemplateColumns object as a slice of TemplateColumn. func (tcs TemplateColumns) List() []TemplateColumn { return tcs } +// Definitions returns the list of column definitions. func (tcs TemplateColumns) Definitions() []string { defs := make([]string, len(tcs)) for i, tc := range tcs { @@ -147,6 +286,7 @@ func (tcs TemplateColumns) Definitions() []string { return defs } +// Identifiers returns the list of quoted column identifiers. func (tcs TemplateColumns) Identifiers() []string { idents := make([]string, len(tcs)) for i, tc := range tcs { @@ -155,6 +295,7 @@ func (tcs TemplateColumns) Identifiers() []string { return idents } +// Selectors returns the list of column selectors. func (tcs TemplateColumns) Selectors() []string { selectors := make([]string, len(tcs)) for i, tc := range tcs { @@ -163,6 +304,7 @@ func (tcs TemplateColumns) Selectors() []string { return selectors } +// String returns the comma delimited list of column identifiers. func (tcs TemplateColumns) String() string { colStrs := make([]string, len(tcs)) for i, tc := range tcs { @@ -171,6 +313,7 @@ func (tcs TemplateColumns) String() string { return strings.Join(colStrs, ", ") } +// Keys returns a TemplateColumns list of the columns which are not fields (e.g. time, tag_id, & tags). func (tcs TemplateColumns) Keys() TemplateColumns { var cols []TemplateColumn for _, tc := range tcs { @@ -181,12 +324,17 @@ func (tcs TemplateColumns) Keys() TemplateColumns { return cols } +// Sorted returns a sorted copy of TemplateColumns. +// +// Columns are sorted so that they are in order as: [Time, Tags, Fields], with the columns within each group sorted +// alphabetically. func (tcs TemplateColumns) Sorted() TemplateColumns { cols := append([]TemplateColumn{}, tcs...) (*utils.ColumnList)(unsafe.Pointer(&cols)).Sort() return cols } +// Concat returns a copy of TemplateColumns with the given tcsList appended to the end. func (tcs TemplateColumns) Concat(tcsList ...TemplateColumns) TemplateColumns { tcsNew := append(TemplateColumns{}, tcs...) for _, tcs := range tcsList { @@ -195,7 +343,8 @@ func (tcs TemplateColumns) Concat(tcsList ...TemplateColumns) TemplateColumns { return tcsNew } -// Generates a list of SQL selectors against the given columns. +// Union generates a list of SQL selectors against the given columns. +// // For each column in tcs, if the column also exist in tcsFrom, it will be selected. If the column does not exist NULL will be selected. func (tcs TemplateColumns) Union(tcsFrom TemplateColumns) TemplateColumns { tcsNew := append(TemplateColumns{}, tcs...) @@ -211,6 +360,7 @@ TCS: return tcsNew } +// Tags returns a TemplateColumns list of the columns which are tags. func (tcs TemplateColumns) Tags() TemplateColumns { var cols []TemplateColumn for _, tc := range tcs { @@ -221,6 +371,7 @@ func (tcs TemplateColumns) Tags() TemplateColumns { return cols } +// Fields returns a TemplateColumns list of the columns which are fields. 
func (tcs TemplateColumns) Fields() TemplateColumns {
 var cols []TemplateColumn
 for _, tc := range tcs {
@@ -231,6 +382,9 @@ func (tcs TemplateColumns) Fields() TemplateColumns {
 return cols
}

+// Hash returns a hash of the column names. The hash is a base-32 encoded string, up to 7 characters long with no padding.
+//
+// This can be useful as an identifier for supporting table renaming + unions in the case of non-modifiable tables.
 func (tcs TemplateColumns) Hash() string {
 hash := fnv.New32a()
 for _, tc := range tcs.Sorted() {

From eb03fdab0b5e7bc5347846751a31e9517439d836 Mon Sep 17 00:00:00 2001
From: Patrick Hemmer
Date: Sun, 11 Apr 2021 23:03:36 -0400
Subject: [PATCH 083/121] outputs.postgresql: Reduce template type stutter

---
 plugins/outputs/postgresql/postgresql.go | 68 +++---
 plugins/outputs/postgresql/postgresql_test.go | 43 ++--
 plugins/outputs/postgresql/table_manager.go | 153 +++++++++-----
 .../outputs/postgresql/table_manager_test.go | 30 +--
 .../outputs/postgresql/template/template.go | 194 +++++++++---------
 plugins/outputs/postgresql/utils/utils.go | 36 ++++
 6 files changed, 319 insertions(+), 205 deletions(-)

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 872a05d0e4842..5b218b2983b83 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -4,6 +4,7 @@ import (
 "context"
 "errors"
 "fmt"
+ "strings"
 "time"

 "github.com/jackc/pgconn"
@@ -48,6 +49,7 @@ type Postgresql struct {
 tableManager *TableManager
 writeChan chan *TableSource
+ writeWaitGroup *utils.WaitGroup

 Logger telegraf.Logger
}
@@ -123,7 +125,9 @@ func (p *Postgresql) Connect() error {
 maxConns := int(p.db.Stat().MaxConns())
 if maxConns > 1 {
 p.writeChan = make(chan *TableSource)
+ p.writeWaitGroup = utils.NewWaitGroup()
 for i := 0; i < maxConns; i++ {
+ p.writeWaitGroup.Add(1)
 go p.writeWorker(p.dbContext)
 }
 }
@@ -152,6 +156,16 @@ func (p *Postgresql) dbConnectedHook(ctx context.Context, conn *pgx.Conn) error

 // Close closes the connection to the database
 func (p *Postgresql) Close() error {
+ if p.writeChan != nil {
+ // We're using async mode. Gracefully close with timeout.
+ close(p.writeChan)
+ select {
+ case <-p.writeWaitGroup.C():
+ case <-time.NewTimer(time.Second * 5).C:
+ }
+ }
+
+ // Die!
 p.dbContextCancel()
 p.db.Close()
 p.tableManager = nil
@@ -259,9 +273,13 @@ func (p *Postgresql) writeConcurrent(tableSources map[string]*TableSource) error
 }

 func (p *Postgresql) writeWorker(ctx context.Context) {
+ defer p.writeWaitGroup.Done()
 for {
 select {
- case tableSource := <-p.writeChan:
+ case tableSource, ok := <-p.writeChan:
+ if !ok {
+ return
+ }
 if err := p.writeRetry(ctx, tableSource); err != nil {
 p.Logger.Errorf("write error (permanent, dropping sub-batch): %v", err)
 }
@@ -301,6 +319,14 @@ func isTempError(err error) bool {
 return true
 case "57": // Operator Intervention
 return true
+ case "23": // Integrity Constraint Violation
+ switch pgErr.Code {
+ case "23505": // unique_violation
+ if strings.Contains(err.Error(), "pg_type_typname_nsp_index") {
+ // Happens when you try to create 2 tables simultaneously.
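+ // (Both sessions race to insert the new table's row into pg_type; the loser hits the unique index and can
+ // safely just retry.)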
+ return true
+ }
+ }
 }
 // Assume that any other error that comes from postgres is a permanent error
 return false
@@ -319,7 +345,7 @@ func isTempError(err error) bool {
 func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) error {
 backoff := time.Duration(0)
 for {
- err := p.writeMetricsFromMeasureTx(ctx, tableSource)
+ err := p.writeMetricsFromMeasure(ctx, p.db, tableSource)
 if !isTempError(err) {
 return err
 }
@@ -338,48 +364,45 @@ func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) e
 }
}

-func (p *Postgresql) writeMetricsFromMeasureTx(ctx context.Context, tableSource *TableSource) error {
- tx, err := p.db.Begin(ctx)
+// Writes the metrics from a specified measure. All the provided metrics must belong to the same measurement.
+func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableSource *TableSource) error {
+ err := p.tableManager.MatchSource(ctx, db, tableSource)
 if err != nil {
 return err
 }
- defer tx.Rollback(ctx)
-
- if err := p.writeMetricsFromMeasure(ctx, tx, tableSource); err != nil {
- return err
- }
- return tx.Commit(ctx)
-}
-
-// Writes the metrics from a specified measure. All the provided metrics must belong to the same measurement.
-func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableSource *TableSource) error {
- err := p.tableManager.MatchSource(ctx, db, tableSource)
+ tx, err := db.Begin(ctx)
 if err != nil {
 return err
 }
+ defer tx.Rollback(ctx)

 if p.TagsAsForeignKeys {
- if err := p.WriteTagTable(ctx, db, tableSource); err != nil {
- // log and continue. As the admin can correct the issue, and tags don't change over time, they can be added from
- // future metrics after issue is corrected.
+ if err := p.WriteTagTable(ctx, tx, tableSource); err != nil {
 if p.ForignTagConstraint {
 return fmt.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err)
 } else {
+ // Log and continue. As the admin can correct the issue, and tags don't change over time, they can be
+ // added from future metrics after the issue is corrected.
 p.Logger.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err)
 }
 }
 }

 fullTableName := utils.FullTableName(p.Schema, tableSource.Name())
- _, err = db.CopyFrom(ctx, fullTableName, tableSource.ColumnNames(), tableSource)
- return err
+ if _, err := tx.CopyFrom(ctx, fullTableName, tableSource.ColumnNames(), tableSource); err != nil {
+ return err
+ }
+
+ return tx.Commit(ctx)
}

 func (p *Postgresql) WriteTagTable(ctx context.Context, db dbh, tableSource *TableSource) error {
 //TODO cache which tagSets we've already inserted and skip them.
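+ // Strategy sketch: COPY the tag rows into a session-local temp table, then upsert into the real tag table with
+ // ON CONFLICT DO NOTHING so already-present (or concurrently inserted) tag sets are skipped.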
ttsrc := NewTagTableSource(tableSource) + // need a transaction so that if it errors, we don't roll back the parent transaction, just the tags tx, err := db.Begin(ctx) if err != nil { return err @@ -389,8 +412,7 @@ func (p *Postgresql) WriteTagTable(ctx context.Context, db dbh, tableSource *Tab ident := pgx.Identifier{ttsrc.postgresql.Schema, ttsrc.Name()} identTemp := pgx.Identifier{ttsrc.Name() + "_temp"} sql := fmt.Sprintf("CREATE TEMP TABLE %s (LIKE %s) ON COMMIT DROP", identTemp.Sanitize(), ident.Sanitize()) - _, err = tx.Exec(ctx, sql) - if err != nil { + if _, err := tx.Exec(ctx, sql); err != nil { return fmt.Errorf("creating tags temp table: %w", err) } @@ -398,7 +420,7 @@ func (p *Postgresql) WriteTagTable(ctx context.Context, db dbh, tableSource *Tab return fmt.Errorf("copying into tags temp table: %w", err) } - if _, err := tx.Exec(ctx, fmt.Sprintf("INSERT INTO %s SELECT * FROM %s ON CONFLICT (tag_id) DO NOTHING", ident.Sanitize(), identTemp.Sanitize())); err != nil { + if _, err := tx.Exec(ctx, fmt.Sprintf("INSERT INTO %s SELECT * FROM %s ORDER BY tag_id ON CONFLICT (tag_id) DO NOTHING", ident.Sanitize(), identTemp.Sanitize())); err != nil { return fmt.Errorf("inserting into tags table: %w", err) } diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 718b0b062208e..be67af4f9f2b1 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "os" - "reflect" "strings" "sync" "testing" @@ -42,13 +41,13 @@ func (l Log) String() string { type LogAccumulator struct { logs []Log cond *sync.Cond - t *testing.T + tb testing.TB } -func NewLogAccumulator(t *testing.T) *LogAccumulator { +func NewLogAccumulator(tb testing.TB) *LogAccumulator { return &LogAccumulator{ cond: sync.NewCond(&sync.Mutex{}), - t: t, + tb: tb, } } @@ -57,8 +56,8 @@ func (la *LogAccumulator) append(level pgx.LogLevel, format string, args []inter log := Log{level, format, args} la.logs = append(la.logs, log) s := log.String() - la.t.Helper() - la.t.Log(s) + la.tb.Helper() + la.tb.Log(s) la.cond.Broadcast() la.cond.L.Unlock() } @@ -177,42 +176,42 @@ func (la *LogAccumulator) Logs() []Log { } func (la *LogAccumulator) Errorf(format string, args ...interface{}) { - la.t.Helper() + la.tb.Helper() la.append(pgx.LogLevelError, format, args) } func (la *LogAccumulator) Error(args ...interface{}) { - la.t.Helper() + la.tb.Helper() la.append(pgx.LogLevelError, "%v", args) } func (la *LogAccumulator) Debugf(format string, args ...interface{}) { - la.t.Helper() + la.tb.Helper() la.append(pgx.LogLevelDebug, format, args) } func (la *LogAccumulator) Debug(args ...interface{}) { - la.t.Helper() + la.tb.Helper() la.append(pgx.LogLevelDebug, "%v", args) } func (la *LogAccumulator) Warnf(format string, args ...interface{}) { - la.t.Helper() + la.tb.Helper() la.append(pgx.LogLevelWarn, format, args) } func (la *LogAccumulator) Warn(args ...interface{}) { - la.t.Helper() + la.tb.Helper() la.append(pgx.LogLevelWarn, "%v", args) } func (la *LogAccumulator) Infof(format string, args ...interface{}) { - la.t.Helper() + la.tb.Helper() la.append(pgx.LogLevelInfo, format, args) } func (la *LogAccumulator) Info(args ...interface{}) { - la.t.Helper() + la.tb.Helper() la.append(pgx.LogLevelInfo, "%v", args) } @@ -252,9 +251,9 @@ type PostgresqlTest struct { Logger *LogAccumulator } -func newPostgresqlTest(t *testing.T) *PostgresqlTest { +func newPostgresqlTest(tb testing.TB) *PostgresqlTest { p := 
newPostgresql()
-	logger := NewLogAccumulator(t)
+	logger := NewLogAccumulator(tb)
 	p.Logger = logger
 	pt := &PostgresqlTest{Postgresql: *p}
 	pt.Logger = logger
@@ -280,19 +279,19 @@ func TestDBConnectedHook(t *testing.T) {
 	p := newPostgresqlTest(t)
 	require.NoError(t, p.Connect())
 
-	_, err := p.db.Exec(ctx, "SELECT 1")
-	require.NoError(t, err)
-	tmTables := p.tableManager.Tables
+	metrics := []telegraf.Metric{
+		newMetric(t, "", MSS{}, MSI{"v": 1}),
+	}
+	require.NoError(t, p.Write(metrics))
 
 	c, _ := p.db.Acquire(ctx)
 	c.Conn().Close(ctx)
 	c.Release()
 
-	_, err = p.db.Exec(ctx, "SELECT 1")
+	_, err := p.db.Exec(ctx, "SELECT 1")
 	require.NoError(t, err)
 
-	tmTables2 := p.tableManager.Tables
-	assert.NotEqual(t, reflect.ValueOf(tmTables).Pointer(), reflect.ValueOf(tmTables2).Pointer())
+	assert.Empty(t, p.tableManager.table(t.Name()).Columns())
 }
 
 func newMetric(
diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go
index c72eac10b7690..f473d64035051 100644
--- a/plugins/outputs/postgresql/table_manager.go
+++ b/plugins/outputs/postgresql/table_manager.go
@@ -6,6 +6,7 @@ import (
 	"github.com/influxdata/telegraf/plugins/outputs/postgresql/template"
 	"strings"
 	"sync"
+	"sync/atomic"
 
 	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
 )
@@ -18,12 +19,34 @@ const (
 `
 )
 
+type tableState struct {
+	name string
+	// The atomic.Value protects columns from simple data race corruption as columns can be read while the mutex is
+	// locked.
+	columns atomic.Value
+	// The mutex protects columns when doing a check-and-set operation. It prevents 2 goroutines from independently
+	// checking the table's schema, and both trying to modify it, whether inconsistently, or to the same result.
+	sync.Mutex
+}
+func (ts *tableState) Columns() map[string]utils.Column {
+	cols := ts.columns.Load()
+	if cols == nil {
+		return nil
+	}
+	return cols.(map[string]utils.Column)
+}
+func (ts *tableState) SetColumns(cols map[string]utils.Column) {
+	ts.columns.Store(cols)
+}
+
 type TableManager struct {
 	*Postgresql
 
 	// map[tableName]map[columnName]utils.Column
-	Tables      map[string]map[string]utils.Column
-	tablesMutex sync.RWMutex
+	tables      map[string]*tableState
+	tablesMutex sync.Mutex
+	// schemaMutex is used to prevent parallel table creations/alters in Postgres.
+	schemaMutex sync.Mutex
 }
 
 // NewTableManager returns an instance of the tables.Manager interface
@@ -31,19 +54,32 @@ type TableManager struct {
 func NewTableManager(postgresql *Postgresql) *TableManager {
 	return &TableManager{
 		Postgresql: postgresql,
-		Tables:     make(map[string]map[string]utils.Column),
+		tables:     make(map[string]*tableState),
 	}
 }
 
 // ClearTableCache clears the table structure cache.
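// The clear happens in place: each tableState's column map is set to nil rather than the tables map being
// replaced, so *tableState pointers held by in-flight writes stay valid and simply trigger a schema refresh
// on their next EnsureStructure call.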
func (tm *TableManager) ClearTableCache() { tm.tablesMutex.Lock() - tm.Tables = make(map[string]map[string]utils.Column) + for _, tbl := range tm.tables { + tbl.SetColumns(nil) + } + tm.tablesMutex.Unlock() +} + +func (tm *TableManager) table(name string) *tableState { + tm.tablesMutex.Lock() + tbl := tm.tables[name] + if tbl == nil { + tbl = &tableState{name: name} + tm.tables[name] = tbl + } tm.tablesMutex.Unlock() + return tbl } -func (tm *TableManager) refreshTableStructure(ctx context.Context, db dbh, tableName string) error { - rows, err := db.Query(ctx, refreshTableStructureStatement, tm.Schema, tableName) +func (tm *TableManager) refreshTableStructure(ctx context.Context, db dbh, tbl *tableState) error { + rows, err := db.Query(ctx, refreshTableStructureStatement, tm.Schema, tbl.name) if err != nil { return err } @@ -89,9 +125,7 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, db dbh, table } if len(cols) > 0 { - tm.tablesMutex.Lock() - tm.Tables[tableName] = cols - tm.tablesMutex.Unlock() + tbl.SetColumns(cols) } return nil @@ -102,44 +136,46 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, db dbh, table // createTemplates and addColumnTemplates are the templates which are executed in the event of table create or alter // (respectively). // metricsTableName and tagsTableName are passed to the templates. +// +// If the table cannot be modified, the returned column list is the columns which are missing from the table. func (tm *TableManager) EnsureStructure( ctx context.Context, db dbh, - tableName string, + tbl *tableState, columns []utils.Column, createTemplates []*template.Template, addColumnsTemplates []*template.Template, - metricsTableName string, - tagsTableName string, + metricsTable *tableState, + tagsTable *tableState, ) ([]utils.Column, error) { // Sort so that: // * When we create/alter the table the columns are in a sane order (telegraf gives us the fields in random order) // * When we display errors about missing columns, the order is also sane, and consistent utils.ColumnList(columns).Sort() - tm.tablesMutex.RLock() - dbColumns, ok := tm.Tables[tableName] - tm.tablesMutex.RUnlock() - if !ok { + tbl.Lock() + tblColumns := tbl.Columns() + if tblColumns == nil { // We don't know about the table. First try to query it. - if err := tm.refreshTableStructure(ctx, db, tableName); err != nil { + if err := tm.refreshTableStructure(ctx, db, tbl); err != nil { + tbl.Unlock() return nil, fmt.Errorf("querying table structure: %w", err) } - tm.tablesMutex.RLock() - dbColumns, ok = tm.Tables[tableName] - tm.tablesMutex.RUnlock() - if !ok { + tblColumns = tbl.Columns() + + if tblColumns == nil { // Ok, table doesn't exist, now we can create it. 
- if err := tm.executeTemplates(ctx, db, createTemplates, tableName, columns, metricsTableName, tagsTableName); err != nil { + if err := tm.executeTemplates(ctx, db, createTemplates, tbl, columns, metricsTable, tagsTable); err != nil { + tbl.Unlock() return nil, fmt.Errorf("creating table: %w", err) } - tm.tablesMutex.RLock() - dbColumns = tm.Tables[tableName] - tm.tablesMutex.RUnlock() + + tblColumns = tbl.Columns() } } + tbl.Unlock() - missingColumns, err := tm.checkColumns(dbColumns, columns) + missingColumns, err := tm.checkColumns(tblColumns, columns) if err != nil { return nil, fmt.Errorf("column validation: %w", err) } @@ -151,10 +187,21 @@ func (tm *TableManager) EnsureStructure( return missingColumns, nil } - if err := tm.executeTemplates(ctx, db, addColumnsTemplates, tableName, missingColumns, metricsTableName, tagsTableName); err != nil { + tbl.Lock() + // Check again in case someone else got it while table was unlocked. + tblColumns = tbl.Columns() + missingColumns, _ = tm.checkColumns(tblColumns, columns) + if len(missingColumns) == 0 { + tbl.Unlock() + return nil, nil + } + + if err := tm.executeTemplates(ctx, db, addColumnsTemplates, tbl, missingColumns, metricsTable, tagsTable); err != nil { + tbl.Unlock() return nil, fmt.Errorf("adding columns: %w", err) } - return tm.checkColumns(tm.Tables[tableName], columns) + tbl.Unlock() + return tm.checkColumns(tbl.Columns(), columns) } func (tm *TableManager) checkColumns(dbColumns map[string]utils.Column, srcColumns []utils.Column) ([]utils.Column, error) { @@ -176,14 +223,19 @@ func (tm *TableManager) executeTemplates( ctx context.Context, db dbh, tmpls []*template.Template, - tableName string, + tbl *tableState, newColumns []utils.Column, - metricsTableName string, - tagsTableName string, + metricsTable *tableState, + tagsTable *tableState, ) error { - tmplTable := template.NewTemplateTable(tm.Schema, tableName, colMapToSlice(tm.Tables[tableName])) - metricsTmplTable := template.NewTemplateTable(tm.Schema, metricsTableName, colMapToSlice(tm.Tables[metricsTableName])) - tagsTmplTable := template.NewTemplateTable(tm.Schema, tagsTableName, colMapToSlice(tm.Tables[tagsTableName])) + tmplTable := template.NewTable(tm.Schema, tbl.name, colMapToSlice(tbl.Columns())) + metricsTmplTable := template.NewTable(tm.Schema, metricsTable.name, colMapToSlice(metricsTable.Columns())) + var tagsTmplTable *template.Table + if tagsTable != nil { + tagsTmplTable = template.NewTable(tm.Schema, tagsTable.name, colMapToSlice(tagsTable.Columns())) + } else { + tagsTmplTable = template.NewTable("", "", nil) + } /* https://github.com/jackc/pgx/issues/872 stmts := make([]string, len(tmpls)) @@ -215,6 +267,10 @@ func (tm *TableManager) executeTemplates( tm.refreshTableStructureResponse(tableName, rows) */ + // Lock to prevent concurrency issues in postgres (pg_type_typname_nsp_index unique constraint; SQLSTATE 23505) + tm.schemaMutex.Lock() + defer tm.schemaMutex.Unlock() + tx, err := db.Begin(ctx) if err != nil { return err @@ -248,7 +304,7 @@ func (tm *TableManager) executeTemplates( return err } - return tm.refreshTableStructure(ctx, db, tableName) + return tm.refreshTableStructure(ctx, db, tbl) } func colMapToSlice(colMap map[string]utils.Column) []utils.Column { @@ -263,24 +319,25 @@ func colMapToSlice(colMap map[string]utils.Column) []utils.Column { } // MatchSource scans through the metrics, determining what columns are needed for inserting, and ensuring the DB schema matches. 
+//
 // If the schema does not match, and schema updates are disabled:
-//   If a field missing from the DB, the field is omitted.
-//   If a tag is missing from the DB, the metric is dropped.
+//	If a field is missing from the DB, the field is omitted.
+//	If a tag is missing from the DB, the metric is dropped.
 func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *TableSource) error {
-	metricTableName := rowSource.Name()
-	var tagTableName string
+	metricTable := tm.table(rowSource.Name())
+	var tagTable *tableState
 	if tm.TagsAsForeignKeys {
-		tagTableName = metricTableName + tm.TagTableSuffix
+		tagTable = tm.table(metricTable.name + tm.TagTableSuffix)
 
 		missingCols, err := tm.EnsureStructure(
 			ctx,
 			db,
-			tagTableName,
+			tagTable,
 			rowSource.TagTableColumns(),
 			tm.TagTableCreateTemplates,
 			tm.TagTableAddColumnTemplates,
-			metricTableName,
-			tagTableName,
+			metricTable,
+			tagTable,
 		)
 		if err != nil {
 			return err
@@ -292,19 +349,19 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl
 				rowSource.DropColumn(col)
 				colDefs[i] = col.Name + " " + string(col.Type)
 			}
-			tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s", tagTableName, strings.Join(colDefs, ", "))
+			tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s", tagTable.name, strings.Join(colDefs, ", "))
 		}
 	}
 
 	missingCols, err := tm.EnsureStructure(
 		ctx,
 		db,
-		metricTableName,
+		metricTable,
 		rowSource.MetricTableColumns(),
 		tm.CreateTemplates,
 		tm.AddColumnTemplates,
-		metricTableName,
-		tagTableName,
+		metricTable,
+		tagTable,
 	)
 	if err != nil {
 		return err
@@ -316,7 +373,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl
 			rowSource.DropColumn(col)
 			colDefs[i] = col.Name + " " + string(col.Type)
 		}
-		tm.Logger.Errorf("table '%s' is missing columns (dropping fields): %s", metricTableName, strings.Join(colDefs, ", "))
+		tm.Logger.Errorf("table '%s' is missing columns (dropping fields): %s", metricTable.name, strings.Join(colDefs, ", "))
 	}
 
 	return nil
diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go
index f9a56416c6716..6104e5cf69f09 100644
--- a/plugins/outputs/postgresql/table_manager_test.go
+++ b/plugins/outputs/postgresql/table_manager_test.go
@@ -20,18 +20,18 @@ func TestTableManager_EnsureStructure(t *testing.T) {
 	missingCols, err := p.tableManager.EnsureStructure(
 		ctx,
 		p.db,
-		t.Name(),
+		p.tableManager.table(t.Name()),
 		cols,
 		p.CreateTemplates,
 		p.AddColumnTemplates,
-		t.Name(),
-		"",
-	)
+		p.tableManager.table(t.Name()),
+		nil,
+	)
 	require.NoError(t, err)
 	require.Empty(t, missingCols)
 
-	assert.EqualValues(t, cols[0], p.tableManager.Tables[t.Name()]["foo"])
-	assert.EqualValues(t, cols[1], p.tableManager.Tables[t.Name()]["baz"])
+	assert.EqualValues(t, cols[0], p.tableManager.table(t.Name()).Columns()["foo"])
+	assert.EqualValues(t, cols[1], p.tableManager.table(t.Name()).Columns()["baz"])
 }
 
 func TestTableManager_refreshTableStructure(t *testing.T) {
@@ -45,22 +45,22 @@ func TestTableManager_refreshTableStructure(t *testing.T) {
 	_, err := p.tableManager.EnsureStructure(
 		ctx,
 		p.db,
-		t.Name(),
+		p.tableManager.table(t.Name()),
 		cols,
 		p.CreateTemplates,
 		p.AddColumnTemplates,
-		t.Name(),
-		"",
+		p.tableManager.table(t.Name()),
+		nil,
 	)
 	require.NoError(t, err)
 
 	p.tableManager.ClearTableCache()
-	require.Empty(t, p.tableManager.Tables)
+	require.Empty(t, p.tableManager.table(t.Name()).Columns())
 
-	require.NoError(t, p.tableManager.refreshTableStructure(ctx, p.db, t.Name()))
+ require.NoError(t, p.tableManager.refreshTableStructure(ctx, p.db, p.tableManager.table(t.Name()))) - assert.EqualValues(t, cols[0], p.tableManager.Tables[t.Name()]["foo"]) - assert.EqualValues(t, cols[1], p.tableManager.Tables[t.Name()]["baz"]) + assert.EqualValues(t, cols[0], p.tableManager.table(t.Name()).Columns()["foo"]) + assert.EqualValues(t, cols[1], p.tableManager.table(t.Name()).Columns()["baz"]) } func TestTableManager_MatchSource(t *testing.T) { @@ -74,8 +74,8 @@ func TestTableManager_MatchSource(t *testing.T) { tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) - assert.Contains(t, p.tableManager.Tables[t.Name()+p.TagTableSuffix], "tag") - assert.Contains(t, p.tableManager.Tables[t.Name()], "a") + assert.Contains(t, p.tableManager.table(t.Name() + p.TagTableSuffix).Columns(), "tag") + assert.Contains(t, p.tableManager.table(t.Name()).Columns(), "a") } // verify that TableManager updates & caches the DB table structure unless the incoming metric can't fit. diff --git a/plugins/outputs/postgresql/template/template.go b/plugins/outputs/postgresql/template/template.go index a1c1f44d0989b..0652087473a9b 100644 --- a/plugins/outputs/postgresql/template/template.go +++ b/plugins/outputs/postgresql/template/template.go @@ -11,21 +11,21 @@ Variables The following variables are available within all template executions: - * table - A TemplateTable object referring to the current table being + * table - A Table object referring to the current table being created/modified. - * columns - A TemplateColumns object of the new columns being added to the + * columns - A Columns object of the new columns being added to the table (all columns in the case of a new table, and new columns in the case of existing table). - * allColumns - A TemplateColumns object of all the columns (both old and new) + * allColumns - A Columns object of all the columns (both old and new) of the table. In the case of a new table, this is the same as `columns`. - * metricTable - A TemplateTable object referring to the table containing the + * metricTable - A Table object referring to the table containing the fields. In the case of TagsAsForeignKeys and `table` is the tag table, then `metricTable` is the table using this one for its tags. - * tagTable - A TemplateTable object referring to the table containing the + * tagTable - A Table object referring to the table containing the tags. In the case of TagsAsForeignKeys and `table` is the metrics table, then `tagTable` is the table containing the tags for this one. @@ -165,85 +165,85 @@ func QuoteLiteral(str interface{}) string { return "'" + strings.ReplaceAll(asString(str), "'", "''") + "'" } -// TemplateTable is an object which represents a Postgres table. -type TemplateTable struct { +// Table is an object which represents a Postgres table. +type Table struct { Schema string Name string - Columns TemplateColumns + Columns Columns } -func NewTemplateTable(schemaName, tableName string, columns []utils.Column) *TemplateTable { +func NewTable(schemaName, tableName string, columns []utils.Column) *Table { if tableName == "" { return nil } - return &TemplateTable{ + return &Table{ Schema: schemaName, Name: tableName, - Columns: NewTemplateColumns(columns), + Columns: NewColumns(columns), } } // String returns the table's fully qualified & quoted identifier (schema+table). 
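// For example, assuming the default double-quote identifier quoting, a Table with Schema "public" and Name
// "cpu" renders as "public"."cpu"; with an empty Schema, just "cpu".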
-func (tt *TemplateTable) String() string { - return tt.Identifier() +func (tbl *Table) String() string { + return tbl.Identifier() } // Identifier returns the table's fully qualified & quoted identifier (schema+table). // // If schema is empty, it is omitted from the result. -func (tt *TemplateTable) Identifier() string { - if tt.Schema == "" { - return QuoteIdentifier(tt.Name) +func (tbl *Table) Identifier() string { + if tbl.Schema == "" { + return QuoteIdentifier(tbl.Name) } - return QuoteIdentifier(tt.Schema) + "." + QuoteIdentifier(tt.Name) + return QuoteIdentifier(tbl.Schema) + "." + QuoteIdentifier(tbl.Name) } -// WithSchema returns a copy of the TemplateTable object, but with the schema replaced by the given value. -func (tt *TemplateTable) WithSchema(name string) *TemplateTable { - ttNew := &TemplateTable{} - *ttNew = *tt - ttNew.Schema = name - return ttNew +// WithSchema returns a copy of the Table object, but with the schema replaced by the given value. +func (tbl *Table) WithSchema(name string) *Table { + tblNew := &Table{} + *tblNew = *tbl + tblNew.Schema = name + return tblNew } -// WithName returns a copy of the TemplateTable object, but with the name replaced by the given value. -func (tt *TemplateTable) WithName(name string) *TemplateTable { - ttNew := &TemplateTable{} - *ttNew = *tt - ttNew.Name = name - return ttNew +// WithName returns a copy of the Table object, but with the name replaced by the given value. +func (tbl *Table) WithName(name string) *Table { + tblNew := &Table{} + *tblNew = *tbl + tblNew.Name = name + return tblNew } -// WithSuffix returns a copy of the TemplateTable object, but with the name suffixed with the given value. -func (tt *TemplateTable) WithSuffix(suffixes ...string) *TemplateTable { - ttNew := &TemplateTable{} - *ttNew = *tt - ttNew.Name += strings.Join(suffixes, "") - return ttNew +// WithSuffix returns a copy of the Table object, but with the name suffixed with the given value. +func (tbl *Table) WithSuffix(suffixes ...string) *Table { + tblNew := &Table{} + *tblNew = *tbl + tblNew.Name += strings.Join(suffixes, "") + return tblNew } -// A TemplateColumn is an object which represents a Postgres column. -type TemplateColumn utils.Column +// A Column is an object which represents a Postgres column. +type Column utils.Column // String returns the column's definition (as used in a CREATE TABLE statement). E.G: // "my_column" bigint -func (tc TemplateColumn) String() string { +func (tc Column) String() string { return tc.Definition() } // Definition returns the column's definition (as used in a CREATE TABLE statement). E.G: // "my_column" bigint -func (tc TemplateColumn) Definition() string { +func (tc Column) Definition() string { return tc.Identifier() + " " + string(tc.Type) } // Identifier returns the column's quoted identifier. -func (tc TemplateColumn) Identifier() string { +func (tc Column) Identifier() string { return QuoteIdentifier(tc.Name) } // Selector returns the selector for the column. For most cases this is the same as Identifier. However in some cases, such as a UNION, this may return a statement such as `NULL AS "foo"`. -func (tc TemplateColumn) Selector() string { +func (tc Column) Selector() string { if tc.Type != "" { return tc.Identifier() } @@ -251,92 +251,92 @@ func (tc TemplateColumn) Selector() string { } // IsTag returns true if the column is a tag column. Otherwise false. 
-func (tc TemplateColumn) IsTag() bool { +func (tc Column) IsTag() bool { return tc.Role == utils.TagColType } // IsField returns true if the column is a field column. Otherwise false. -func (tc TemplateColumn) IsField() bool { +func (tc Column) IsField() bool { return tc.Role == utils.FieldColType } -// TemplateColumns represents an ordered list of TemplateColumn objects, with convenience methods for operating on the +// Columns represents an ordered list of Column objects, with convenience methods for operating on the // list. -type TemplateColumns []TemplateColumn +type Columns []Column -func NewTemplateColumns(cols []utils.Column) TemplateColumns { - tcs := make(TemplateColumns, len(cols)) +func NewColumns(cols []utils.Column) Columns { + tcols := make(Columns, len(cols)) for i, col := range cols { - tcs[i] = TemplateColumn(col) + tcols[i] = Column(col) } - return tcs + return tcols } -// List returns the TemplateColumns object as a slice of TemplateColumn. -func (tcs TemplateColumns) List() []TemplateColumn { - return tcs +// List returns the Columns object as a slice of Column. +func (cols Columns) List() []Column { + return cols } // Definitions returns the list of column definitions. -func (tcs TemplateColumns) Definitions() []string { - defs := make([]string, len(tcs)) - for i, tc := range tcs { +func (cols Columns) Definitions() []string { + defs := make([]string, len(cols)) + for i, tc := range cols { defs[i] = tc.Definition() } return defs } // Identifiers returns the list of quoted column identifiers. -func (tcs TemplateColumns) Identifiers() []string { - idents := make([]string, len(tcs)) - for i, tc := range tcs { +func (cols Columns) Identifiers() []string { + idents := make([]string, len(cols)) + for i, tc := range cols { idents[i] = tc.Identifier() } return idents } // Selectors returns the list of column selectors. -func (tcs TemplateColumns) Selectors() []string { - selectors := make([]string, len(tcs)) - for i, tc := range tcs { +func (cols Columns) Selectors() []string { + selectors := make([]string, len(cols)) + for i, tc := range cols { selectors[i] = tc.Selector() } return selectors } // String returns the comma delimited list of column identifiers. -func (tcs TemplateColumns) String() string { - colStrs := make([]string, len(tcs)) - for i, tc := range tcs { +func (cols Columns) String() string { + colStrs := make([]string, len(cols)) + for i, tc := range cols { colStrs[i] = tc.String() } return strings.Join(colStrs, ", ") } -// Keys returns a TemplateColumns list of the columns which are not fields (e.g. time, tag_id, & tags). -func (tcs TemplateColumns) Keys() TemplateColumns { - var cols []TemplateColumn - for _, tc := range tcs { +// Keys returns a Columns list of the columns which are not fields (e.g. time, tag_id, & tags). +func (cols Columns) Keys() Columns { + var newCols []Column + for _, tc := range cols { if tc.Role != utils.FieldColType { - cols = append(cols, tc) + newCols = append(newCols, tc) } } - return cols + return newCols } -// Sorted returns a sorted copy of TemplateColumns. +// Sorted returns a sorted copy of Columns. // // Columns are sorted so that they are in order as: [Time, Tags, Fields], with the columns within each group sorted // alphabetically. -func (tcs TemplateColumns) Sorted() TemplateColumns { - cols := append([]TemplateColumn{}, tcs...) - (*utils.ColumnList)(unsafe.Pointer(&cols)).Sort() - return cols +func (cols Columns) Sorted() Columns { + newCols := append([]Column{}, cols...) 
+ (*utils.ColumnList)(unsafe.Pointer(&newCols)).Sort() + return newCols } -// Concat returns a copy of TemplateColumns with the given tcsList appended to the end. -func (tcs TemplateColumns) Concat(tcsList ...TemplateColumns) TemplateColumns { - tcsNew := append(TemplateColumns{}, tcs...) +// Concat returns a copy of Columns with the given tcsList appended to the end. +func (cols Columns) Concat(tcsList ...Columns) Columns { + tcsNew := append(Columns{}, cols...) for _, tcs := range tcsList { tcsNew = append(tcsNew, tcs...) } @@ -346,10 +346,10 @@ func (tcs TemplateColumns) Concat(tcsList ...TemplateColumns) TemplateColumns { // Union generates a list of SQL selectors against the given columns. // // For each column in tcs, if the column also exist in tcsFrom, it will be selected. If the column does not exist NULL will be selected. -func (tcs TemplateColumns) Union(tcsFrom TemplateColumns) TemplateColumns { - tcsNew := append(TemplateColumns{}, tcs...) +func (cols Columns) Union(tcsFrom Columns) Columns { + tcsNew := append(Columns{}, cols...) TCS: - for i, tc := range tcs { + for i, tc := range cols { for _, tcFrom := range tcsFrom { if tc.Name == tcFrom.Name { continue TCS @@ -360,34 +360,34 @@ TCS: return tcsNew } -// Tags returns a TemplateColumns list of the columns which are tags. -func (tcs TemplateColumns) Tags() TemplateColumns { - var cols []TemplateColumn - for _, tc := range tcs { +// Tags returns a Columns list of the columns which are tags. +func (cols Columns) Tags() Columns { + var newCols []Column + for _, tc := range cols { if tc.Role == utils.TagColType { - cols = append(cols, tc) + newCols = append(newCols, tc) } } - return cols + return newCols } -// Fields returns a TemplateColumns list of the columns which are fields. -func (tcs TemplateColumns) Fields() TemplateColumns { - var cols []TemplateColumn - for _, tc := range tcs { +// Fields returns a Columns list of the columns which are fields. +func (cols Columns) Fields() Columns { + var newCols []Column + for _, tc := range cols { if tc.Role == utils.FieldColType { - cols = append(cols, tc) + newCols = append(newCols, tc) } } - return cols + return newCols } // Hash returns a hash of the column names. The hash is base-32 encoded string, up to 7 characters long with no padding. // // This can be useful as an identifier for supporting table renaming + unions in the case of non-modifiable tables. 
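// The hash is FNV-32a over the sorted, NUL-separated column names (see the body below), so it is stable
// under column reordering; at 32 bits it is a convenience identifier rather than a collision-proof one.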
-func (tcs TemplateColumns) Hash() string {
+func (cols Columns) Hash() string {
 	hash := fnv.New32a()
-	for _, tc := range tcs.Sorted() {
+	for _, tc := range cols.Sorted() {
 		hash.Write([]byte(tc.Name))
 		hash.Write([]byte{0})
 	}
@@ -417,8 +417,8 @@ func (t *Template) UnmarshalText(text []byte) error {
 	return nil
 }
 
-func (t *Template) Render(table *TemplateTable, newColumns []utils.Column, metricTable *TemplateTable, tagTable *TemplateTable) ([]byte, error) {
-	tcs := NewTemplateColumns(newColumns).Sorted()
+func (t *Template) Render(table *Table, newColumns []utils.Column, metricTable *Table, tagTable *Table) ([]byte, error) {
+	tcs := NewColumns(newColumns).Sorted()
 	data := map[string]interface{}{
 		"table":   table,
 		"columns": tcs,
diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go
index 06359d284921a..acc55b98c1ec1 100644
--- a/plugins/outputs/postgresql/utils/utils.go
+++ b/plugins/outputs/postgresql/utils/utils.go
@@ -6,6 +6,7 @@ import (
 	"hash/fnv"
 	"log"
 	"strings"
+	"sync/atomic"
 	"time"
 
 	"github.com/jackc/pgx/v4"
@@ -145,3 +146,38 @@ func GetTagID(metric telegraf.Metric) int64 {
 	// Convert to int64 as postgres does not support uint64
 	return int64(hash.Sum64())
 }
+
+// WaitGroup is similar to sync.WaitGroup, but allows interruptible waiting (e.g. a timeout).
+type WaitGroup struct {
+	count int32
+	done  chan struct{}
+}
+
+func NewWaitGroup() *WaitGroup {
+	return &WaitGroup{
+		done: make(chan struct{}),
+	}
+}
+
+func (wg *WaitGroup) Add(i int32) {
+	select {
+	case <-wg.done:
+		panic("use of an already-done WaitGroup")
+	default:
+	}
+	atomic.AddInt32(&wg.count, i)
+}
+
+func (wg *WaitGroup) Done() {
+	i := atomic.AddInt32(&wg.count, -1)
+	if i == 0 {
+		close(wg.done)
+	}
+	if i < 0 {
+		panic("too many Done() calls")
+	}
+}
+
+func (wg *WaitGroup) C() <-chan struct{} {
+	return wg.done
+}

From cd49b3e15877de3cf6b09b39f7539f5f6fb9b43f Mon Sep 17 00:00:00 2001
From: Patrick Hemmer
Date: Sat, 17 Apr 2021 21:23:30 -0400
Subject: [PATCH 084/121] outputs.postgresql: Add benchmark

---
 plugins/outputs/postgresql/postgresql.go      |  2 +-
 .../postgresql/postgresql_bench_test.go       | 90 +++++++++++++++++++
 plugins/outputs/postgresql/table_manager.go   |  1 +
 .../outputs/postgresql/table_manager_test.go  |  4 +-
 4 files changed, 94 insertions(+), 3 deletions(-)
 create mode 100644 plugins/outputs/postgresql/postgresql_bench_test.go

diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go
index 5b218b2983b83..712b04511744f 100644
--- a/plugins/outputs/postgresql/postgresql.go
+++ b/plugins/outputs/postgresql/postgresql.go
@@ -48,7 +48,7 @@ type Postgresql struct {
 	db           *pgxpool.Pool
 	tableManager *TableManager
 
-	writeChan     chan *TableSource
+	writeChan      chan *TableSource
 	writeWaitGroup *utils.WaitGroup
 
 	Logger telegraf.Logger
diff --git a/plugins/outputs/postgresql/postgresql_bench_test.go b/plugins/outputs/postgresql/postgresql_bench_test.go
new file mode 100644
index 0000000000000..0e416c10799d4
--- /dev/null
+++ b/plugins/outputs/postgresql/postgresql_bench_test.go
@@ -0,0 +1,90 @@
+package postgresql
+
+import (
+	"context"
+	"fmt"
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/metric"
+	"math/rand"
+	"strconv"
+	"testing"
+	"time"
+)
+
+func BenchmarkPostgresql_sequential(b *testing.B) {
+	gen := batchGenerator(ctx, b, 1000, 3, 8, 12, 100, 2)
+	benchmarkPostgresql(b, gen, 1, true)
+}
+func BenchmarkPostgresql_concurrent(b *testing.B) {
+	gen := batchGenerator(ctx, b, 1000, 3, 8, 12, 100, 2)
+
benchmarkPostgresql(b, gen, 10, true) +} + +func benchmarkPostgresql(b *testing.B, gen <-chan []telegraf.Metric, concurrency int, foreignTags bool) { + p := newPostgresqlTest(b) + p.Connection += fmt.Sprintf(" pool_max_conns=%d", concurrency) + p.TagsAsForeignKeys = foreignTags + p.LogLevel = "" + if err := p.Connect(); err != nil { + b.Fatalf("Error: %s", err) + } + + metricCount := 0 + + b.ResetTimer() + tStart := time.Now() + for i := 0; i < b.N; i++ { + batch := <-gen + if err := p.Write(batch); err != nil { + b.Fatalf("Error: %s", err) + } + metricCount += len(batch) + } + p.Close() + b.StopTimer() + tStop := time.Now() + b.ReportMetric(float64(metricCount)/tStop.Sub(tStart).Seconds(), "metrics/s") +} + +// tagCardinality counts all the tag keys & values as one element. fieldCardinality counts all the field keys (not values) as one element. +func batchGenerator(ctx context.Context, b *testing.B, batchSize int, numTables int, numTags int, numFields int, tagCardinality int, fieldCardinality int) <-chan []telegraf.Metric { + tagSets := make([]MSS, tagCardinality) + for i := 0; i < tagCardinality; i++ { + tags := MSS{} + for j := 0; j < numTags; j++ { + tags[fmt.Sprintf("tag_%d", j)] = fmt.Sprintf("%d", rand.Int()) + } + tagSets[i] = tags + } + + metricChan := make(chan []telegraf.Metric, 32) + go func() { + for { + batch := make([]telegraf.Metric, batchSize) + for i := 0; i < batchSize; i++ { + tableName := b.Name() + "_" + strconv.Itoa(rand.Intn(numTables)) + + tags := tagSets[rand.Intn(len(tagSets))] + + m, _ := metric.New(tableName, tags, nil, time.Now()) + m.AddTag("tableName", tableName) // ensure the tag set is unique to this table. Just in case... + + // We do field cardinality by randomizing the name of the final field to an integer < cardinality. + for j := 0; j < numFields-1; j++ { // use -1 to reserve the last field for cardinality + m.AddField("f"+strconv.Itoa(j), rand.Int()) + } + m.AddField("f"+strconv.Itoa(rand.Intn(fieldCardinality)), rand.Int()) + + batch[i] = m + } + + select { + case metricChan <- batch: + case <-ctx.Done(): + return + } + } + }() + + return metricChan +} diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index f473d64035051..9e1d950f9f8d5 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -28,6 +28,7 @@ type tableState struct { // checking the table's schema, and both trying to modify it, whether inconsistently, or to the same result. 
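	// In practice: readers call Columns() (a lock-free atomic load) without the mutex, while writers take
	// the mutex, re-check Columns(), and only then mutate, i.e. double-checked locking.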
sync.Mutex } + func (ts *tableState) Columns() map[string]utils.Column { cols := ts.columns.Load() if cols == nil { diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go index 6104e5cf69f09..a5f2bcd6b465b 100644 --- a/plugins/outputs/postgresql/table_manager_test.go +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -26,7 +26,7 @@ func TestTableManager_EnsureStructure(t *testing.T) { p.AddColumnTemplates, p.tableManager.table(t.Name()), nil, - ) + ) require.NoError(t, err) require.Empty(t, missingCols) @@ -74,7 +74,7 @@ func TestTableManager_MatchSource(t *testing.T) { tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) - assert.Contains(t, p.tableManager.table(t.Name() + p.TagTableSuffix).Columns(), "tag") + assert.Contains(t, p.tableManager.table(t.Name()+p.TagTableSuffix).Columns(), "tag") assert.Contains(t, p.tableManager.table(t.Name()).Columns(), "a") } From ceb6d3584185f15f305bc0cfd6a4d2428cd8f36d Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sun, 18 Apr 2021 20:02:43 -0400 Subject: [PATCH 085/121] outputs.postgresql: add tag_id insert caching --- go.mod | 1 + go.sum | 5 +++ plugins/outputs/postgresql/postgresql.go | 44 ++++++++++++++----- plugins/outputs/postgresql/postgresql_test.go | 24 +++++++++- plugins/outputs/postgresql/table_manager.go | 4 ++ plugins/outputs/postgresql/table_source.go | 20 ++++++--- .../outputs/postgresql/table_source_test.go | 5 +++ 7 files changed, 85 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index b66f75565a0d6..2ea2a5a4fe4e3 100644 --- a/go.mod +++ b/go.mod @@ -42,6 +42,7 @@ require ( github.com/caio/go-tdigest v3.1.0+incompatible github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 github.com/containerd/containerd v1.4.1 // indirect + github.com/coocood/freecache v1.1.1 // indirect github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect diff --git a/go.sum b/go.sum index f59d451d8d6fd..d820cbc230cfb 100644 --- a/go.sum +++ b/go.sum @@ -123,6 +123,7 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -274,6 +275,9 @@ github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/coreos/etcd v3.3.22+incompatible h1:AnRMUyVdVvh1k7lHe61YEd227+CLoNogQuAypztGSK4= github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coocood/freecache v1.1.1 h1:uukNF7QKCZEdZ9gAV7WQzvh0SbjwdMF6m3x3rxEkaPc= +github.com/coocood/freecache 
v1.1.1/go.mod h1:OKrEjkGVoxZhyWAJoeFi5BMLUJm2Tit0kpGkIr7NGYY= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -1256,6 +1260,7 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 712b04511744f..23fc5bc1dd042 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/coocood/freecache" "strings" "time" @@ -47,6 +48,7 @@ type Postgresql struct { dbContextCancel func() db *pgxpool.Pool tableManager *TableManager + tagsCache *freecache.Cache writeChan chan *TableSource writeWaitGroup *utils.WaitGroup @@ -122,6 +124,10 @@ func (p *Postgresql) Connect() error { } p.tableManager = NewTableManager(p) + if p.TagsAsForeignKeys { + p.tagsCache = freecache.NewCache(5*1024*1024) // 5MB + } + maxConns := int(p.db.Stat().MaxConns()) if maxConns > 1 { p.writeChan = make(chan *TableSource) @@ -345,7 +351,18 @@ func isTempError(err error) bool { func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) error { backoff := time.Duration(0) for { - err := p.writeMetricsFromMeasure(ctx, p.db, tableSource) + tx, err := p.db.Begin(ctx) + if err != nil { + return err + } + + err = p.writeMetricsFromMeasure(ctx, tx, tableSource) + if err == nil { + tx.Commit(ctx) + return nil + } + + tx.Rollback(ctx) if !isTempError(err) { return err } @@ -371,14 +388,8 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableS return err } - tx, err := db.Begin(ctx) - if err != nil { - return err - } - defer tx.Rollback(ctx) - if p.TagsAsForeignKeys { - if err := p.WriteTagTable(ctx, tx, tableSource); err != nil { + if err := p.WriteTagTable(ctx, db, tableSource); err != nil { if p.ForignTagConstraint { return fmt.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err) } else { @@ -390,18 +401,22 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableS } fullTableName := utils.FullTableName(p.Schema, tableSource.Name()) - if _, err := tx.CopyFrom(ctx, fullTableName, tableSource.ColumnNames(), tableSource); err != nil { + if _, err := db.CopyFrom(ctx, fullTableName, tableSource.ColumnNames(), tableSource); err != nil { return err } - tx.Commit(ctx) return nil } func (p *Postgresql) WriteTagTable(ctx context.Context, db dbh, tableSource *TableSource) error { - //TODO cache which tagSets we've already inserted and skip them. 
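// The TODO removed above is what this patch implements: TagTableSource.Next() now consults tagsCache and
// skips tag IDs that were already written, and UpdateCache() records a batch's tag IDs only after the
// surrounding transaction commits, so a failed write cannot poison the cache.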
ttsrc := NewTagTableSource(tableSource) + // Check whether we have any tags to insert + if !ttsrc.Next() { + return nil + } + ttsrc.Reset() + // need a transaction so that if it errors, we don't roll back the parent transaction, just the tags tx, err := db.Begin(ctx) if err != nil { @@ -424,5 +439,10 @@ func (p *Postgresql) WriteTagTable(ctx context.Context, db dbh, tableSource *Tab return fmt.Errorf("inserting into tags table: %w", err) } - return tx.Commit(ctx) + if err := tx.Commit(ctx); err != nil { + return err + } + + ttsrc.UpdateCache() + return nil } diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index be67af4f9f2b1..cda46c218ace9 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -346,6 +346,17 @@ func TestWrite_sequential(t *testing.T) { if assert.Len(t, dumpB, 1) { assert.EqualValues(t, 2, dumpB[0]["v"]) } + + p.Logger.Clear() + require.NoError(t, p.Write(metrics)) + + stmtCount := 0 + for _, log := range p.Logger.Logs() { + if strings.Contains(log.String(), "info: PG ") { + stmtCount += 1 + } + } + assert.Equal(t, 4, stmtCount) // BEGIN, COPY table _a, COPY table _b, COMMIT } func TestWrite_concurrent(t *testing.T) { @@ -582,6 +593,17 @@ func TestWriteTagTable(t *testing.T) { require.Len(t, dumpTags, 1) assert.EqualValues(t, dump[0]["tag_id"], dumpTags[0]["tag_id"]) assert.EqualValues(t, "foo", dumpTags[0]["tag"]) + + p.Logger.Clear() + require.NoError(t, p.Write(metrics)) + + stmtCount := 0 + for _, log := range p.Logger.Logs() { + if strings.Contains(log.String(), "info: PG ") { + stmtCount += 1 + } + } + assert.Equal(t, 3, stmtCount) // BEGIN, COPY metrics table, COMMIT } // Verify that when using TagsAsForeignKeys and a tag can't be written, that we still add the metrics. 
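The statement counts asserted above fall out of the tag-ID cache added in this patch. The pattern is small enough to sketch on its own; a minimal, self-contained illustration assuming only the freecache calls the patch itself uses (the cache size mirrors Connect(), and the per-ID SetInt is a simplification, since the plugin batches its SetInt calls in UpdateCache after the transaction commits):

```go
package main

import (
	"fmt"

	"github.com/coocood/freecache"
)

func main() {
	// 5MB cache, the same size Connect() allocates. Keys are tag IDs (hashes of tag sets, see GetTagID).
	cache := freecache.NewCache(5 * 1024 * 1024)

	// 42 appears twice; the second occurrence is skipped, which is what saves the extra COPY + INSERT.
	tagIDs := []int64{42, 7, 42}

	for _, id := range tagIDs {
		if _, err := cache.GetInt(id); err == nil {
			fmt.Printf("tag_id %d already inserted, skipping\n", id)
			continue
		}
		// In the plugin, the COPY into the tag table happens here, inside a transaction.
		// A nil value suffices since only key presence matters; expireSeconds=0 disables expiry.
		if err := cache.SetInt(id, nil, 0); err != nil {
			fmt.Printf("cache full, tag_id %d not recorded: %v\n", id, err)
			continue
		}
		fmt.Printf("tag_id %d inserted\n", id)
	}
}
```

Since freecache evicts under memory pressure, an evicted tag ID only costs a redundant re-insert, which the tag table's ON CONFLICT (tag_id) DO NOTHING absorbs; correctness never depends on a cache hit.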
@@ -627,7 +649,7 @@ func TestWrite_tagError_foreignConstraint(t *testing.T) { require.NoError(t, err) metrics = []telegraf.Metric{ - newMetric(t, "", MSS{"tag": "foo"}, MSI{"v": 2}), + newMetric(t, "", MSS{"tag": "bar"}, MSI{"v": 2}), } assert.NoError(t, p.Write(metrics)) haveError := false diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index 9e1d950f9f8d5..09cc5000ddd82 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -66,6 +66,10 @@ func (tm *TableManager) ClearTableCache() { tbl.SetColumns(nil) } tm.tablesMutex.Unlock() + + if tm.tagsCache != nil { + tm.tagsCache.Clear() + } } func (tm *TableManager) table(name string) *tableState { diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index 0cab160f14e69..45bb01d165bd3 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -358,13 +358,17 @@ func (ttsrc *TagTableSource) Next() bool { for { if ttsrc.cursor+1 >= len(ttsrc.tagIDs) { ttsrc.cursorValues = nil - ttsrc.cursorError = nil return false } ttsrc.cursor += 1 - ttsrc.cursorValues, ttsrc.cursorError = ttsrc.values() - if ttsrc.cursorValues != nil || ttsrc.cursorError != nil { + if _, err := ttsrc.postgresql.tagsCache.GetInt(ttsrc.tagIDs[ttsrc.cursor]); err == nil { + // tag ID already inserted + continue + } + + ttsrc.cursorValues = ttsrc.values() + if ttsrc.cursorValues != nil { return true } } @@ -374,7 +378,7 @@ func (ttsrc *TagTableSource) Reset() { ttsrc.cursor = -1 } -func (ttsrc *TagTableSource) values() ([]interface{}, error) { +func (ttsrc *TagTableSource) values() ([]interface{}) { tagID := ttsrc.tagIDs[ttsrc.cursor] tagSet := ttsrc.tagSets[tagID] @@ -390,13 +394,19 @@ func (ttsrc *TagTableSource) values() ([]interface{}, error) { } values[0] = tagID - return values, nil + return values } func (ttsrc *TagTableSource) Values() ([]interface{}, error) { return ttsrc.cursorValues, ttsrc.cursorError } +func (ttsrc *TagTableSource) UpdateCache() { + for _, tagID := range ttsrc.tagIDs { + ttsrc.postgresql.tagsCache.SetInt(tagID, nil, 0) + } +} + func (ttsrc *TagTableSource) Err() error { return nil } diff --git a/plugins/outputs/postgresql/table_source_test.go b/plugins/outputs/postgresql/table_source_test.go index 4589f49857415..03f0243d9cd57 100644 --- a/plugins/outputs/postgresql/table_source_test.go +++ b/plugins/outputs/postgresql/table_source_test.go @@ -2,6 +2,7 @@ package postgresql import ( "encoding/json" + "github.com/coocood/freecache" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" "github.com/jackc/pgx/v4" @@ -65,6 +66,7 @@ func TestTableSource_tagJSONB(t *testing.T) { func TestTableSource_tagTable(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true + p.tagsCache = freecache.NewCache(5*1024*1024) metrics := []telegraf.Metric{ newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), @@ -84,6 +86,7 @@ func TestTableSource_tagTableJSONB(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true p.TagsAsJsonb = true + p.tagsCache = freecache.NewCache(5*1024*1024) metrics := []telegraf.Metric{ newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), @@ -146,6 +149,7 @@ func TestTableSource_DropColumn_tag_fkTrue_fcTrue(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true p.ForignTagConstraint = true + p.tagsCache = freecache.NewCache(5*1024*1024) 
metrics := []telegraf.Metric{ newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), @@ -179,6 +183,7 @@ func TestTableSource_DropColumn_tag_fkTrue_fcFalse(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true p.ForignTagConstraint = false + p.tagsCache = freecache.NewCache(5*1024*1024) metrics := []telegraf.Metric{ newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), From 56036411f8a8f07b2e69e3bba3e274149ec3ce04 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sun, 18 Apr 2021 21:19:35 -0400 Subject: [PATCH 086/121] outputs.postgresql: remove dbConnectedHook for clearing caches Remove the code that cleared caches on total DB connection loss. The intent was to be able to reset known state in the event of things like a database failover. But this is such an edge case, it's not worth the maintenance of the code. If we ever want it back, it's here, in the git history. --- plugins/outputs/postgresql/postgresql.go | 22 +------------------ plugins/outputs/postgresql/postgresql_test.go | 19 ---------------- plugins/outputs/postgresql/table_source.go | 2 +- .../outputs/postgresql/table_source_test.go | 8 +++---- 4 files changed, 6 insertions(+), 45 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 23fc5bc1dd042..ed1b5597f6a70 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -105,7 +105,6 @@ func (p *Postgresql) Connect() error { // The pgx default for pool_max_conns is 4. However we want to default to 1. poolConfig.MaxConns = 1 } - poolConfig.AfterConnect = p.dbConnectedHook if p.LogLevel != "" { poolConfig.ConnConfig.Logger = pgxLogger{p.Logger} @@ -125,7 +124,7 @@ func (p *Postgresql) Connect() error { p.tableManager = NewTableManager(p) if p.TagsAsForeignKeys { - p.tagsCache = freecache.NewCache(5*1024*1024) // 5MB + p.tagsCache = freecache.NewCache(5 * 1024 * 1024) // 5MB } maxConns := int(p.db.Stat().MaxConns()) @@ -141,25 +140,6 @@ func (p *Postgresql) Connect() error { return nil } -// dbConnectHook checks to see whether we lost all connections, and if so resets any known state of the database (e.g. cached tables). -// This is so that we handle failovers, where the new database might not have the same state as the previous. -func (p *Postgresql) dbConnectedHook(ctx context.Context, conn *pgx.Conn) error { - if p.db == nil || p.tableManager == nil { - // This will happen on the initial connect since we haven't set it yet. - // Also meaning there is no state to reset. 
- return nil - } - - stat := p.db.Stat() - if stat.AcquiredConns()+stat.IdleConns() > 0 { - return nil - } - - p.tableManager.ClearTableCache() - - return nil -} - // Close closes the connection to the database func (p *Postgresql) Close() error { if p.writeChan != nil { diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index cda46c218ace9..157af3a55dd89 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -275,25 +275,6 @@ func TestPostgresqlConnect(t *testing.T) { p.Close() } -func TestDBConnectedHook(t *testing.T) { - p := newPostgresqlTest(t) - require.NoError(t, p.Connect()) - - metrics := []telegraf.Metric{ - newMetric(t, "", MSS{}, MSI{"v": 1}), - } - require.NoError(t, p.Write(metrics)) - - c, _ := p.db.Acquire(ctx) - c.Conn().Close(ctx) - c.Release() - - _, err := p.db.Exec(ctx, "SELECT 1") - require.NoError(t, err) - - assert.Empty(t, p.tableManager.table(t.Name()).Columns()) -} - func newMetric( t *testing.T, suffix string, diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index 45bb01d165bd3..ccea55d104024 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -378,7 +378,7 @@ func (ttsrc *TagTableSource) Reset() { ttsrc.cursor = -1 } -func (ttsrc *TagTableSource) values() ([]interface{}) { +func (ttsrc *TagTableSource) values() []interface{} { tagID := ttsrc.tagIDs[ttsrc.cursor] tagSet := ttsrc.tagSets[tagID] diff --git a/plugins/outputs/postgresql/table_source_test.go b/plugins/outputs/postgresql/table_source_test.go index 03f0243d9cd57..5b661ff308d68 100644 --- a/plugins/outputs/postgresql/table_source_test.go +++ b/plugins/outputs/postgresql/table_source_test.go @@ -66,7 +66,7 @@ func TestTableSource_tagJSONB(t *testing.T) { func TestTableSource_tagTable(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true - p.tagsCache = freecache.NewCache(5*1024*1024) + p.tagsCache = freecache.NewCache(5 * 1024 * 1024) metrics := []telegraf.Metric{ newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), @@ -86,7 +86,7 @@ func TestTableSource_tagTableJSONB(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true p.TagsAsJsonb = true - p.tagsCache = freecache.NewCache(5*1024*1024) + p.tagsCache = freecache.NewCache(5 * 1024 * 1024) metrics := []telegraf.Metric{ newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), @@ -149,7 +149,7 @@ func TestTableSource_DropColumn_tag_fkTrue_fcTrue(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true p.ForignTagConstraint = true - p.tagsCache = freecache.NewCache(5*1024*1024) + p.tagsCache = freecache.NewCache(5 * 1024 * 1024) metrics := []telegraf.Metric{ newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), @@ -183,7 +183,7 @@ func TestTableSource_DropColumn_tag_fkTrue_fcFalse(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true p.ForignTagConstraint = false - p.tagsCache = freecache.NewCache(5*1024*1024) + p.tagsCache = freecache.NewCache(5 * 1024 * 1024) metrics := []telegraf.Metric{ newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), From 517f02cd4b40d827a83bea19b5060a2af56b632f Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sun, 18 Apr 2021 21:53:30 -0400 Subject: [PATCH 087/121] outputs.postgresql: simplify indexing of table columns --- plugins/outputs/postgresql/postgresql.go | 188 +++++++++--------- 
plugins/outputs/postgresql/postgresql_test.go | 2 +- plugins/outputs/postgresql/table_manager.go | 12 +- plugins/outputs/postgresql/table_source.go | 121 +++++------ .../outputs/postgresql/table_source_test.go | 4 +- plugins/outputs/postgresql/utils/utils.go | 21 ++ 6 files changed, 186 insertions(+), 162 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index ed1b5597f6a70..b8a4b9b758890 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -4,22 +4,21 @@ import ( "context" "errors" "fmt" - "github.com/coocood/freecache" "strings" "time" + "github.com/coocood/freecache" "github.com/jackc/pgconn" - - "github.com/influxdata/telegraf/config" - "github.com/influxdata/telegraf/models" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" - "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/pgxpool" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" + "github.com/influxdata/toml" ) type dbh interface { @@ -29,19 +28,93 @@ type dbh interface { Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error) } +var sampleConfig = ` + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. Also supported are PG environment vars + ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE + ## all supported vars here: https://www.postgresql.org/docs/current/libpq-envars.html + ## + ## Non-standard parameters: + ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. + ## pool_min_conns (default: 0) - Minimum size of connection pool. + ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. + ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. + ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. + ## + ## Without the dbname parameter, the driver will default to a database + ## with the same name as the user. This dbname is just for instantiating a + ## connection with the server and doesn't restrict the databases we are trying + ## to grab metrics for. + ## + #connection = "host=localhost user=postgres sslmode=verify-full" + + ## Postgres schema to use. + schema = "public" + + ## Store tags as foreign keys in the metrics table. Default is false. + tags_as_foreign_keys = false + + ## Suffix to append to table name (measurement name) for the foreign tag table. + tag_table_suffix = "_tag" + + ## Deny inserting metrics if the foreign tag can't be inserted. + foreign_tag_constraint = false + + ## Store all tags as a JSONB object in a single 'tags' column. + tags_as_jsonb = false + + ## Store all fields as a JSONB object in a single 'fields' column. + fields_as_jsonb = false + + ## Templated statements to execute when creating a new table. + create_templates = [ + '''CREATE TABLE {{.table}} ({{.columns}})''', + ] + + ## Templated statements to execute when adding columns to a table. 
+ ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points
+ ## containing fields for which there is no column will have the field omitted.
+ add_column_templates = [
+ '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
+ ]
+
+ ## Templated statements to execute when creating a new tag table.
+ tag_table_create_templates = [
+ '''CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))''',
+ ]
+
+ ## Templated statements to execute when adding columns to a tag table.
+ ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped.
+ tag_table_add_column_templates = [
+ '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
+ ]
+
+ ## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This
+ ## controls the maximum backoff duration.
+ retry_max_backoff = "15s"
+
+ ## Enable & set the log level for the Postgres driver.
+ # log_level = "info" # trace, debug, info, warn, error, none
+`

 type Postgresql struct {
 	Connection                  string
 	Schema                      string
 	TagsAsForeignKeys           bool
+	TagTableSuffix              string
+	ForeignTagConstraint        bool
 	TagsAsJsonb                 bool
 	FieldsAsJsonb               bool
 	CreateTemplates             []*template.Template
 	AddColumnTemplates          []*template.Template
 	TagTableCreateTemplates     []*template.Template
 	TagTableAddColumnTemplates  []*template.Template
-	TagTableSuffix              string
 	RetryMaxBackoff             config.Duration
-	ForignTagConstraint         bool
 	LogLevel                    string
 
 	dbContext       context.Context
@@ -61,39 +134,18 @@ func init() {
 }
 
 func newPostgresql() *Postgresql {
-	return &Postgresql{
-		Schema:                      "public",
-		CreateTemplates:             []*template.Template{template.TableCreateTemplate},
-		AddColumnTemplates:          []*template.Template{template.TableAddColumnTemplate},
-		TagTableCreateTemplates:     []*template.Template{template.TagTableCreateTemplate},
-		TagTableAddColumnTemplates:  []*template.Template{template.TableAddColumnTemplate},
-		TagTableSuffix:              "_tag",
-		RetryMaxBackoff:             config.Duration(time.Second * 15),
-		ForignTagConstraint:         false,
-		Logger:                      models.NewLogger("outputs", "postgresql", ""),
+	p := &Postgresql{
+		Logger: models.NewLogger("outputs", "postgresql", ""),
 	}
-}
-
-// pgxLogger makes telegraf.Logger compatible with pgx.Logger
-type pgxLogger struct {
-	telegraf.Logger
-}
-
-func (l pgxLogger) Log(ctx context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) {
-	switch level {
-	case pgx.LogLevelError:
-		l.Errorf("PG %s - %+v", msg, data)
-	case pgx.LogLevelWarn:
-		l.Warnf("PG %s - %+v", msg, data)
-	case pgx.LogLevelInfo, pgx.LogLevelNone:
-		l.Infof("PG %s - %+v", msg, data)
-	case pgx.LogLevelDebug, pgx.LogLevelTrace:
-		l.Debugf("PG %s - %+v", msg, data)
-	default:
-		l.Debugf("PG %s - %+v", msg, data)
+	if err := toml.Unmarshal([]byte(p.SampleConfig()), p); err != nil {
+		panic(err.Error())
 	}
+	return p
 }
 
+func (p *Postgresql) SampleConfig() string { return sampleConfig }
+func (p *Postgresql) Description() string  { return "Send metrics to PostgreSQL" }
+
 // Connect establishes a connection to the target database and prepares the cache
 func (p *Postgresql) Connect() error {
 	poolConfig, err := pgxpool.ParseConfig(p.Connection)
@@ -107,7 +159,7 @@ func (p *Postgresql) Connect() error {
 	}
 
 	if p.LogLevel != "" {
-		poolConfig.ConnConfig.Logger = pgxLogger{p.Logger}
+		poolConfig.ConnConfig.Logger = utils.PGXLogger{p.Logger}
 		poolConfig.ConnConfig.LogLevel, err =
pgx.LogLevelFromString(p.LogLevel) if err != nil { return fmt.Errorf("invalid log level") @@ -140,7 +192,7 @@ func (p *Postgresql) Connect() error { return nil } -// Close closes the connection to the database +// Close closes the connection(s) to the database. func (p *Postgresql) Close() error { if p.writeChan != nil { // We're using async mode. Gracefully close with timeout. @@ -158,62 +210,6 @@ func (p *Postgresql) Close() error { return nil } -var sampleConfig = ` - ## specify address via a url matching: - ## postgres://[pqgotest[:password]]@localhost[/dbname]\ - ## ?sslmode=[disable|verify-ca|verify-full] - ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production - ## - ## All connection parameters are optional. Also supported are PG environment vars - ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE - ## all supported vars here: https://www.postgresql.org/docs/current/libpq-envars.html - ## - ## Non-standard parameters: - ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. - ## pool_min_conns (default: 0) - Minimum size of connection pool. - ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. - ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. - ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. - ## - ## Without the dbname parameter, the driver will default to a database - ## with the same name as the user. This dbname is just for instantiating a - ## connection with the server and doesn't restrict the databases we are trying - ## to grab metrics for. - ## - connection = "host=localhost user=postgres sslmode=verify-full" - - ## Store tags as foreign keys in the metrics table. Default is false. - # tags_as_foreignkeys = false - - ## Schema to create the tables into - # schema = "public" - - ## Use jsonb datatype for tags - # tags_as_jsonb = false - - ## Use jsonb datatype for fields - # fields_as_jsonb = false - - ## Templated statements to execute when creating a new table. - create_templates = ['CREATE TABLE {{.table}} ({{.columns}})'] - - ## Templated statements to execute when adding columns to a table. - ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points - ## containing fields for which there is no column will have the field omitted. - add_column_templates = ['ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}'] - - ## Templated statements to execute when creating a new tag table. - tag_table_create_templates = ['CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))'] - - ## Templated statements to execute when adding columns to a tag table. - ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. 
- tag_table_add_column_templates = ['ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}'] -` - -func (p *Postgresql) SampleConfig() string { return sampleConfig } -func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } - func (p *Postgresql) Write(metrics []telegraf.Metric) error { tableSources := NewTableSources(p, metrics) @@ -370,7 +366,7 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableS if p.TagsAsForeignKeys { if err := p.WriteTagTable(ctx, db, tableSource); err != nil { - if p.ForignTagConstraint { + if p.ForeignTagConstraint { return fmt.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err) } else { // log and continue. As the admin can correct the issue, and tags don't change over time, they can be diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 157af3a55dd89..c4c9e4193806c 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -617,7 +617,7 @@ func TestWrite_tagError(t *testing.T) { func TestWrite_tagError_foreignConstraint(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true - p.ForignTagConstraint = true + p.ForeignTagConstraint = true require.NoError(t, p.Connect()) metrics := []telegraf.Metric{ diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index 09cc5000ddd82..df176e953103c 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -3,11 +3,11 @@ package postgresql import ( "context" "fmt" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" "strings" "sync" "sync/atomic" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) @@ -351,7 +351,9 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl if len(missingCols) > 0 { colDefs := make([]string, len(missingCols)) for i, col := range missingCols { - rowSource.DropColumn(col) + if err := rowSource.DropColumn(col); err != nil { + return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", tagTable.name, err) + } colDefs[i] = col.Name + " " + string(col.Type) } tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s", tagTable.name, strings.Join(colDefs, ", ")) @@ -375,10 +377,12 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl if len(missingCols) > 0 { colDefs := make([]string, len(missingCols)) for i, col := range missingCols { - rowSource.DropColumn(col) + if err := rowSource.DropColumn(col); err != nil { + return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", metricTable.name, err) + } colDefs[i] = col.Name + " " + string(col.Type) } - tm.Logger.Errorf("table '%s' is missing columns (dropping fields): %s", metricTable.name, strings.Join(colDefs, ", ")) + tm.Logger.Errorf("table \"%s\" is missing columns (omitting fields): %s", metricTable.name, strings.Join(colDefs, ", ")) } return nil diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index ccea55d104024..b8bdd08d7c4b7 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -2,10 +2,46 @@ package postgresql import ( "fmt" + 
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) +type columnList struct { + columns []utils.Column + indices map[string]int +} + +func newColumnList() *columnList { + return &columnList{ + indices: map[string]int{}, + } +} + +func (cl *columnList) Add(column utils.Column) bool { + if _, ok := cl.indices[column.Name]; ok { + return false + } + cl.columns = append(cl.columns, column) + cl.indices[column.Name] = len(cl.columns) - 1 + return true +} + +func (cl *columnList) Remove(name string) bool { + idx, ok := cl.indices[name] + if !ok { + return false + } + cl.columns = append(cl.columns[:idx], cl.columns[idx+1:]...) + delete(cl.indices, name) + + for idx, col := range cl.columns[idx:] { + cl.indices[col.Name] = idx + } + + return true +} + // TableSource satisfies pgx.CopyFromSource type TableSource struct { postgresql *Postgresql @@ -14,19 +50,13 @@ type TableSource struct { cursorValues []interface{} cursorError error - // tagPositions is the position of each tag within the tag set, regardless of whether tags are foreign keys or not. - tagPositions map[string]int - // tagColumns is the list of tags to emit. List is in order. - tagColumns []utils.Column + tagColumns *columnList // tagSets is the list of tag IDs to tag values in use within the TableSource. The position of each value in the list // corresponds to the key name in the tagColumns list. // This data is used to build out the foreign tag table when enabled. tagSets map[int64][]*telegraf.Tag - // fieldPositions is the position of each field within the field list. - fieldPositions map[string]int - // fieldColumns is the list of fields to emit. List is in order. - fieldColumns []utils.Column + fieldColumns *columnList droppedTagColumns []string } @@ -53,10 +83,10 @@ func NewTableSource(postgresql *Postgresql) *TableSource { tagSets: make(map[int64][]*telegraf.Tag), } if !postgresql.TagsAsJsonb { - tsrc.tagPositions = map[string]int{} + tsrc.tagColumns = newColumnList() } if !postgresql.FieldsAsJsonb { - tsrc.fieldPositions = map[string]int{} + tsrc.fieldColumns = newColumnList() } return tsrc } @@ -71,19 +101,13 @@ func (tsrc *TableSource) AddMetric(metric telegraf.Metric) { if !tsrc.postgresql.TagsAsJsonb { for _, t := range metric.TagList() { - if _, ok := tsrc.tagPositions[t.Key]; !ok { - tsrc.tagPositions[t.Key] = len(tsrc.tagPositions) - tsrc.tagColumns = append(tsrc.tagColumns, ColumnFromTag(t.Key, t.Value)) - } + tsrc.tagColumns.Add(ColumnFromTag(t.Key, t.Value)) } } if !tsrc.postgresql.FieldsAsJsonb { for _, f := range metric.FieldList() { - if _, ok := tsrc.fieldPositions[f.Key]; !ok { - tsrc.fieldPositions[f.Key] = len(tsrc.fieldPositions) - tsrc.fieldColumns = append(tsrc.fieldColumns, ColumnFromField(f.Key, f.Value)) - } + tsrc.fieldColumns.Add(ColumnFromField(f.Key, f.Value)) } } @@ -104,7 +128,7 @@ func (tsrc *TableSource) TagColumns() []utils.Column { if tsrc.postgresql.TagsAsJsonb { cols = append(cols, TagsJSONColumn) } else { - cols = append(cols, tsrc.tagColumns...) + cols = append(cols, tsrc.tagColumns.columns...) } return cols @@ -112,7 +136,7 @@ func (tsrc *TableSource) TagColumns() []utils.Column { // Returns the superset of all fields of all metrics. func (tsrc *TableSource) FieldColumns() []utils.Column { - return tsrc.fieldColumns + return tsrc.fieldColumns.columns } // Returns the full column list, including time, tag id or tags, and fields. @@ -158,15 +182,18 @@ func (tsrc *TableSource) ColumnNames() []string { // Drops the specified column. 
// If column is a tag column, any metrics containing the tag will be skipped. // If column is a field column, any metrics containing the field will have it omitted. -func (tsrc *TableSource) DropColumn(col utils.Column) { +func (tsrc *TableSource) DropColumn(col utils.Column) error { switch col.Role { case utils.TagColType: tsrc.dropTagColumn(col) case utils.FieldColType: tsrc.dropFieldColumn(col) + case utils.TimeColType, utils.TagsIDColType: + return fmt.Errorf("critical column \"%s\"", col.Name) default: - panic(fmt.Sprintf("Tried to perform an invalid column drop. This should not have happened. measurement=%s name=%s role=%v", tsrc.Name(), col.Name, col.Role)) + return fmt.Errorf("internal error: unknown column \"%s\"", col.Name) } + return nil } // Drops the tag column from conversion. Any metrics containing this tag will be skipped. @@ -176,20 +203,10 @@ func (tsrc *TableSource) dropTagColumn(col utils.Column) { } tsrc.droppedTagColumns = append(tsrc.droppedTagColumns, col.Name) - pos, ok := tsrc.tagPositions[col.Name] - if !ok { + if !tsrc.tagColumns.Remove(col.Name) { return } - delete(tsrc.tagPositions, col.Name) - for n, p := range tsrc.tagPositions { - if p > pos { - tsrc.tagPositions[n] -= 1 - } - } - - tsrc.tagColumns = append(tsrc.tagColumns[:pos], tsrc.tagColumns[pos+1:]...) - for setID, set := range tsrc.tagSets { for _, tag := range set { if tag.Key == col.Name { @@ -207,19 +224,7 @@ func (tsrc *TableSource) dropFieldColumn(col utils.Column) { panic(fmt.Sprintf("Tried to perform an invalid field drop. This should not have happened. measurement=%s field=%s", tsrc.Name(), col.Name)) } - pos, ok := tsrc.fieldPositions[col.Name] - if !ok { - return - } - - delete(tsrc.fieldPositions, col.Name) - for n, p := range tsrc.fieldPositions { - if p > pos { - tsrc.fieldPositions[n] -= 1 - } - } - - tsrc.fieldColumns = append(tsrc.fieldColumns[:pos], tsrc.fieldColumns[pos+1:]...) + tsrc.fieldColumns.Remove(col.Name) } func (tsrc *TableSource) Next() bool { @@ -246,19 +251,17 @@ func (tsrc *TableSource) Reset() { // If the metric cannot be emitted, such as due to dropped tags, or all fields dropped, the return value is nil. 
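 // (Editor's note, not part of the patch: the positions used below come from
 // the columnList indices maps, so each row is sized to the batch-wide
 // superset of columns rather than to the tags/fields on the individual metric.)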
func (tsrc *TableSource) values() ([]interface{}, error) { metric := tsrc.metrics[tsrc.cursor] - tags := metric.TagList() - fields := metric.FieldList() - values := []interface{}{} - - values = append(values, metric.Time()) + values := []interface{}{ + metric.Time(), + } if !tsrc.postgresql.TagsAsForeignKeys { if !tsrc.postgresql.TagsAsJsonb { // tags_as_foreignkey=false, tags_as_json=false - tagValues := make([]interface{}, len(tsrc.tagPositions)) - for _, tag := range tags { - tagPos, ok := tsrc.tagPositions[tag.Key] + tagValues := make([]interface{}, len(tsrc.tagColumns.columns)) + for _, tag := range metric.TagList() { + tagPos, ok := tsrc.tagColumns.indices[tag.Key] if !ok { // tag has been dropped, we can't emit or we risk collision with another metric return nil, nil @@ -273,7 +276,7 @@ func (tsrc *TableSource) values() ([]interface{}, error) { } else { // tags_as_foreignkey=true tagID := utils.GetTagID(metric) - if tsrc.postgresql.ForignTagConstraint { + if tsrc.postgresql.ForeignTagConstraint { if _, ok := tsrc.tagSets[tagID]; !ok { // tag has been dropped return nil, nil @@ -284,11 +287,11 @@ func (tsrc *TableSource) values() ([]interface{}, error) { if !tsrc.postgresql.FieldsAsJsonb { // fields_as_json=false - fieldValues := make([]interface{}, len(tsrc.fieldPositions)) + fieldValues := make([]interface{}, len(tsrc.fieldColumns.columns)) fieldsEmpty := true - for _, field := range fields { + for _, field := range metric.FieldList() { // we might have dropped the field due to the table missing the column & schema updates being turned off - if fPos, ok := tsrc.fieldPositions[field.Key]; ok { + if fPos, ok := tsrc.fieldColumns.indices[field.Key]; ok { fieldValues[fPos] = field.Value fieldsEmpty = false } @@ -386,7 +389,7 @@ func (ttsrc *TagTableSource) values() []interface{} { if !ttsrc.postgresql.TagsAsJsonb { values = make([]interface{}, len(tagSet)+1) for _, tag := range tagSet { - values[ttsrc.TableSource.tagPositions[tag.Key]+1] = tag.Value // +1 to account for tag_id column + values[ttsrc.TableSource.tagColumns.indices[tag.Key]+1] = tag.Value // +1 to account for tag_id column } } else { values = make([]interface{}, 2) diff --git a/plugins/outputs/postgresql/table_source_test.go b/plugins/outputs/postgresql/table_source_test.go index 5b661ff308d68..f313a4a87a7c7 100644 --- a/plugins/outputs/postgresql/table_source_test.go +++ b/plugins/outputs/postgresql/table_source_test.go @@ -148,7 +148,7 @@ func TestTableSource_DropColumn_tag(t *testing.T) { func TestTableSource_DropColumn_tag_fkTrue_fcTrue(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true - p.ForignTagConstraint = true + p.ForeignTagConstraint = true p.tagsCache = freecache.NewCache(5 * 1024 * 1024) metrics := []telegraf.Metric{ @@ -182,7 +182,7 @@ func TestTableSource_DropColumn_tag_fkTrue_fcTrue(t *testing.T) { func TestTableSource_DropColumn_tag_fkTrue_fcFalse(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true - p.ForignTagConstraint = false + p.ForeignTagConstraint = false p.tagsCache = freecache.NewCache(5 * 1024 * 1024) metrics := []telegraf.Metric{ diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index acc55b98c1ec1..c2919f7b97047 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -1,6 +1,7 @@ package utils import ( + "context" "encoding/json" "fmt" "hash/fnv" @@ -120,6 +121,26 @@ func PgTypeCanContain(canThis PgDataType, containThis PgDataType) bool { } } +// pgxLogger 
makes telegraf.Logger compatible with pgx.Logger +type PGXLogger struct { + telegraf.Logger +} + +func (l PGXLogger) Log(_ context.Context, level pgx.LogLevel, msg string, data map[string]interface{}) { + switch level { + case pgx.LogLevelError: + l.Errorf("PG %s - %+v", msg, data) + case pgx.LogLevelWarn: + l.Warnf("PG %s - %+v", msg, data) + case pgx.LogLevelInfo, pgx.LogLevelNone: + l.Infof("PG %s - %+v", msg, data) + case pgx.LogLevelDebug, pgx.LogLevelTrace: + l.Debugf("PG %s - %+v", msg, data) + default: + l.Debugf("PG %s - %+v", msg, data) + } +} + // GenerateInsert returns a SQL statement to insert values in a table // with $X placeholders for the values func GenerateInsert(fullSanitizedTableName string, columns []string) string { From 4fed55372aad09fb38c78827966df3b224fcba62 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 19 Apr 2021 23:07:40 -0400 Subject: [PATCH 088/121] outputs.postgresql: minor clean up tests --- plugins/outputs/postgresql/postgresql_test.go | 53 +++++-------------- .../outputs/postgresql/table_manager_test.go | 33 ++++++++++-- 2 files changed, 44 insertions(+), 42 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index c4c9e4193806c..d45a069fcb2aa 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -135,40 +135,6 @@ func (la *LogAccumulator) Clear() { la.cond.L.Unlock() } -//func (la *LogAccumulator) Chan() <-chan Log { -// ch := make(chan Log) -// rch := (<-chan Log)(ch) -// go func() { -// cond := la.cond -// logs := &la.logs -// runtime.SetFinalizer(rch, func(_ <-chan string) { -// cond.L.Lock() -// logs = nil -// cond.Broadcast() -// cond.L.Unlock() -// }) -// la = nil -// i := 0 -// cond.L.Lock() -// for { -// if logs == nil { -// break -// } -// if i == len(*logs) { -// cond.Wait() -// continue -// } -// log := (*logs)[i] -// i++ -// cond.L.Unlock() -// ch <- log -// cond.L.Lock() -// } -// cond.L.Unlock() -// }() -// return rch -//} - func (la *LogAccumulator) Logs() []Log { la.cond.L.Lock() defer la.cond.L.Unlock() @@ -215,17 +181,26 @@ func (la *LogAccumulator) Info(args ...interface{}) { la.append(pgx.LogLevelInfo, "%v", args) } -var ctx context.Context +var ctx = context.Background() func TestMain(m *testing.M) { - if os.Getenv("PGHOST") == "" && os.Getenv("PGHOSTADDR") == "" && os.Getenv("PGPORT") == "" { - // User has not specified a server, use the default, which is the one defined by docker-compose.yml at the top of the repo. + // Try and find the server. + // Try provided env vars & defaults first. 
+ if c, err := pgx.Connect(ctx, ""); err != nil { os.Setenv("PGHOST", "127.0.0.1") - os.Setenv("PGPORT", "5433") os.Setenv("PGUSER", "postgres") + // Try the port used in docker-compose.yml first + os.Setenv("PGPORT", "5433") + if c, err := pgx.Connect(ctx, ""); err != nil { + // Fall back to the default port + os.Setenv("PGPORT", "5432") + } else { + c.Close(ctx) + } + } else { + c.Close(ctx) } - ctx = context.Background() if err := prepareDatabase("telegraf"); err != nil { fmt.Fprintf(os.Stderr, "Error preparing database: %s\n", err) os.Exit(1) diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go index a5f2bcd6b465b..d89fe22bedbdb 100644 --- a/plugins/outputs/postgresql/table_manager_test.go +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -78,6 +78,33 @@ func TestTableManager_MatchSource(t *testing.T) { assert.Contains(t, p.tableManager.table(t.Name()).Columns(), "a") } +func TestTableManager_noCreateTable(t *testing.T) { + p := newPostgresqlTest(t) + p.CreateTemplates = nil + require.NoError(t, p.Connect()) + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), + } + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + + require.Error(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) +} + +func TestTableManager_noCreateTagTable(t *testing.T) { + p := newPostgresqlTest(t) + p.TagTableCreateTemplates = nil + p.TagsAsForeignKeys = true + require.NoError(t, p.Connect()) + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), + } + tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + + require.Error(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) +} + // verify that TableManager updates & caches the DB table structure unless the incoming metric can't fit. func TestTableManager_cache(t *testing.T) { p := newPostgresqlTest(t) @@ -93,7 +120,7 @@ func TestTableManager_cache(t *testing.T) { } // Verify that when alter statements are disabled and a metric comes in with a new tag key, that the tag is omitted. -func TestTableSource_noAlterMissingTag(t *testing.T) { +func TestTableManager_noAlterMissingTag(t *testing.T) { p := newPostgresqlTest(t) p.AddColumnTemplates = []*template.Template{} require.NoError(t, p.Connect()) @@ -115,7 +142,7 @@ func TestTableSource_noAlterMissingTag(t *testing.T) { // Verify that when alter statements are disabled with foreign tags and a metric comes in with a new tag key, that the // field is omitted. -func TestTableSource_noAlterMissingTagTableTag(t *testing.T) { +func TestTableManager_noAlterMissingTagTableTag(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true p.TagTableAddColumnTemplates = []*template.Template{} @@ -138,7 +165,7 @@ func TestTableSource_noAlterMissingTagTableTag(t *testing.T) { } // verify that when alter statements are disabled and a metric comes in with a new field key, that the field is omitted. 
-func TestTableSource_noAlterMissingField(t *testing.T) { +func TestTableManager_noAlterMissingField(t *testing.T) { p := newPostgresqlTest(t) p.AddColumnTemplates = []*template.Template{} require.NoError(t, p.Connect()) From 82c3170921a792ad7e780647a8a5787e8c792667 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 19 Apr 2021 23:43:08 -0400 Subject: [PATCH 089/121] update modules after rebase --- go.mod | 12 +++--- go.sum | 40 ++++++++++--------- .../postgresql/postgresql_bench_test.go | 2 +- 3 files changed, 30 insertions(+), 24 deletions(-) diff --git a/go.mod b/go.mod index 2ea2a5a4fe4e3..dd884f750486a 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/caio/go-tdigest v3.1.0+incompatible github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 github.com/containerd/containerd v1.4.1 // indirect - github.com/coocood/freecache v1.1.1 // indirect + github.com/coocood/freecache v1.1.1 github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect @@ -82,9 +82,10 @@ require ( github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect - github.com/jackc/pgconn v1.7.2 + github.com/jackc/pgconn v1.8.1 + github.com/jackc/pgproto3/v2 v2.0.7 // indirect github.com/jackc/pgx v3.6.2+incompatible - github.com/jackc/pgx/v4 v4.9.2 + github.com/jackc/pgx/v4 v4.11.0 github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a github.com/jmespath/go-jmespath v0.4.0 github.com/kardianos/service v1.0.0 @@ -132,11 +133,12 @@ require ( github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect go.starlark.net v0.0.0-20210406145628-7a1108eaa012 go.uber.org/multierr v1.6.0 // indirect - golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 + golang.org/x/crypto v0.0.0-20210415154028-4f45737414dc // indirect + golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 - golang.org/x/text v0.3.4 + golang.org/x/text v0.3.6 golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 google.golang.org/api v0.29.0 google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 diff --git a/go.sum b/go.sum index d820cbc230cfb..668885a6841e1 100644 --- a/go.sum +++ b/go.sum @@ -107,6 +107,7 @@ github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RP github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= @@ -119,11 +120,11 @@ github.com/NYTimes/gziphandler 
v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb0 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -272,12 +273,11 @@ github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPK github.com/containerd/containerd v1.4.1 h1:pASeJT3R3YyVn+94qEPk0SnU1OQ20Jd/T+SPKy9xehY= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/coocood/freecache v1.1.1 h1:uukNF7QKCZEdZ9gAV7WQzvh0SbjwdMF6m3x3rxEkaPc= +github.com/coocood/freecache v1.1.1/go.mod h1:OKrEjkGVoxZhyWAJoeFi5BMLUJm2Tit0kpGkIr7NGYY= github.com/coreos/etcd v3.3.22+incompatible h1:AnRMUyVdVvh1k7lHe61YEd227+CLoNogQuAypztGSK4= github.com/coreos/etcd v3.3.22+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coocood/freecache v1.1.1 h1:uukNF7QKCZEdZ9gAV7WQzvh0SbjwdMF6m3x3rxEkaPc= -github.com/coocood/freecache v1.1.1/go.mod h1:OKrEjkGVoxZhyWAJoeFi5BMLUJm2Tit0kpGkIr7NGYY= -github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -768,8 +768,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= -github.com/jackc/pgconn v1.7.2 h1:195tt17jkjy+FrFlY0pgyrul5kRLb7BGXY3JTrNxeXU= -github.com/jackc/pgconn v1.7.2/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.8.1 h1:ySBX7Q87vOMqKU2bbmKbUvtYhauDFclYbNDYIE1/h6s= +github.com/jackc/pgconn v1.8.1/go.mod h1:JV6m6b6jhjdmzchES0drzCcYcAHS1OPD5xu3OZ/lE2g= github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod 
h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA= @@ -783,8 +783,9 @@ github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= -github.com/jackc/pgproto3/v2 v2.0.6 h1:b1105ZGEMFe7aCvrT1Cca3VoVb4ZFMaFJLJcg/3zD+8= github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.7 h1:6Pwi1b3QdY65cuv6SyVO0FgPd5J3Bl7wf/nQQjinHMA= +github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= @@ -794,8 +795,8 @@ github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrU github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= -github.com/jackc/pgtype v1.6.1 h1:CAtFD7TS95KrxRAh3bidgLwva48WYxk8YkbHZsSWfbI= -github.com/jackc/pgtype v1.6.1/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= +github.com/jackc/pgtype v1.7.0 h1:6f4kVsW01QftE38ufBYxKciO6gyioXSC0ABIRLcZrGs= +github.com/jackc/pgtype v1.7.0/go.mod h1:ZnHF+rMePVqDKaOfJVI4Q8IVvAQMryDlDkZnKOI75BE= github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= @@ -804,14 +805,14 @@ github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQ github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= -github.com/jackc/pgx/v4 v4.9.2 h1:1V7EAc5jvIqXwdzgk8+YyOK+4071hhePzBCAF6gxUUw= -github.com/jackc/pgx/v4 v4.9.2/go.mod h1:Jt/xJDqjUDUOMSv8VMWPQlCObVgF2XOgqKsW8S4ROYA= +github.com/jackc/pgx/v4 v4.11.0 h1:J86tSWd3Y7nKjwT/43xZBvpi04keQWx8gNC2YkdJhZI= +github.com/jackc/pgx/v4 v4.11.0/go.mod h1:i62xJgdrtVDsnL3U8ekyrQXEwGNTRoG7/8r+CIdYfcc= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.1/go.mod 
h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jackc/puddle v1.1.2 h1:mpQEXihFnWGDy6X98EOTh81JYuxn7txby8ilJ3iIPGM= -github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jaegertracing/jaeger v1.15.1 h1:7QzNAXq+4ko9GtCjozDNAp2uonoABu+B2Rk94hjQcp4= github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI= github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a h1:JxcWget6X/VfBMKxPIc28Jel37LGREut2fpV+ObkwJ0= @@ -1255,12 +1256,12 @@ github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5Q github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1425,8 +1426,10 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210415154028-4f45737414dc h1:+q90ECDSAQirdykUN6sPEiBXBsp8Csjcca8Oy7bgLTA= +golang.org/x/crypto v0.0.0-20210415154028-4f45737414dc/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1509,8 +1512,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPehI0R024zxjDnw3esPA= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110 h1:qWPm9rbaAMKs8Bq/9LRpbMqxWRVUAQwMI9fVrssnTfw= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1608,8 +1611,9 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/plugins/outputs/postgresql/postgresql_bench_test.go b/plugins/outputs/postgresql/postgresql_bench_test.go index 0e416c10799d4..9d52bb3f4dd8f 100644 --- a/plugins/outputs/postgresql/postgresql_bench_test.go +++ b/plugins/outputs/postgresql/postgresql_bench_test.go @@ -66,7 +66,7 @@ func batchGenerator(ctx context.Context, b *testing.B, batchSize int, numTables tags := tagSets[rand.Intn(len(tagSets))] - m, _ := metric.New(tableName, tags, nil, time.Now()) + m := metric.New(tableName, tags, nil, time.Now()) m.AddTag("tableName", tableName) // ensure the tag set is unique to this table. Just in case... // We do field cardinality by randomizing the name of the final field to an integer < cardinality. 
From a7e2b51eedce4bfcc79d3791ed7de19218d46c11 Mon Sep 17 00:00:00 2001
From: Patrick Hemmer
Date: Sun, 11 Jul 2021 22:46:58 -0400
Subject: [PATCH 090/121] outputs.postgresql: fix index error on inconsistent
 tags

---
 plugins/outputs/postgresql/table_source.go    |  2 +-
 .../outputs/postgresql/table_source_test.go   | 39 +++++++++++++++++++
 2 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go
index b8bdd08d7c4b7..a720337b8f0aa 100644
--- a/plugins/outputs/postgresql/table_source.go
+++ b/plugins/outputs/postgresql/table_source.go
@@ -387,7 +387,7 @@ func (ttsrc *TagTableSource) values() []interface{} {
 
 	var values []interface{}
 	if !ttsrc.postgresql.TagsAsJsonb {
-		values = make([]interface{}, len(tagSet)+1)
+		values = make([]interface{}, len(ttsrc.TableSource.tagColumns.indices)+1)
 		for _, tag := range tagSet {
 			values[ttsrc.TableSource.tagColumns.indices[tag.Key]+1] = tag.Value // +1 to account for tag_id column
 		}
diff --git a/plugins/outputs/postgresql/table_source_test.go b/plugins/outputs/postgresql/table_source_test.go
index f313a4a87a7c7..c64171974906d 100644
--- a/plugins/outputs/postgresql/table_source_test.go
+++ b/plugins/outputs/postgresql/table_source_test.go
@@ -237,3 +237,42 @@ func TestTableSource_DropColumn_field(t *testing.T) {
 	assert.EqualValues(t, 3, row["b"])
 	assert.False(t, tsrc.Next())
 }
+
+func TestTableSource_InconsistentTags(t *testing.T) {
+	p := newPostgresqlTest(t)
+
+	metrics := []telegraf.Metric{
+		newMetric(t, "", MSS{"a": "1"}, MSI{"b": 2}),
+		newMetric(t, "", MSS{"c": "3"}, MSI{"d": 4}),
+	}
+	tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()]
+
+	trow := tSrcRow(tsrc)
+	assert.EqualValues(t, "1", trow["a"])
+	assert.EqualValues(t, nil, trow["c"])
+
+	trow = tSrcRow(tsrc)
+	assert.EqualValues(t, nil, trow["a"])
+	assert.EqualValues(t, "3", trow["c"])
+}
+
+func TestTagTableSource_InconsistentTags(t *testing.T) {
+	p := newPostgresqlTest(t)
+	p.TagsAsForeignKeys = true
+	p.tagsCache = freecache.NewCache(5 * 1024 * 1024)
+
+	metrics := []telegraf.Metric{
+		newMetric(t, "", MSS{"a": "1"}, MSI{"b": 2}),
+		newMetric(t, "", MSS{"c": "3"}, MSI{"d": 4}),
+	}
+	tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()]
+	ttsrc := NewTagTableSource(tsrc)
+
+	ttrow := tSrcRow(ttsrc)
+	assert.EqualValues(t, "1", ttrow["a"])
+	assert.EqualValues(t, nil, ttrow["c"])
+
+	ttrow = tSrcRow(ttsrc)
+	assert.EqualValues(t, nil, ttrow["a"])
+	assert.EqualValues(t, "3", ttrow["c"])
+}

From 46f31cdd8de70c838a63290758cfc0f560f32b3f Mon Sep 17 00:00:00 2001
From: Patrick Hemmer
Date: Tue, 13 Jul 2021 16:20:18 -0400
Subject: [PATCH 091/121] address 'go vet' unkeyed field complaints.

This is a gray area. The point behind the go vet complaint is to prevent
breakage when third-party packages update; however, this isn't a third-party
package.

Also adds some slight documentation cleanup on the utils package.
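
Editor's note: an illustration of the literal forms at issue in this patch, using a stand-in struct rather than the plugin's real `utils.Column` (whose field types differ). `go vet`'s composites check only flags unkeyed literals of structs imported from other packages, which is why the message above calls this a gray area; the hunks below convert to keyed literals anyway:

```go
package main

import "fmt"

// column is a stand-in for utils.Column, for illustration only.
type column struct {
	Name string
	Type string
	Role int
}

func main() {
	// Unkeyed composite literal: purely positional, so it silently changes
	// meaning if fields are ever added or reordered.
	unkeyed := column{"time", "timestamptz", 0}

	// Keyed literal: the form the patch converts columns.go to.
	keyed := column{Name: "time", Type: "timestamptz", Role: 0}

	fmt.Println(unkeyed == keyed) // true
}
```
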
--- plugins/outputs/postgresql/columns.go | 12 +++++----- plugins/outputs/postgresql/postgresql.go | 2 +- .../postgresql/utils/{types.go => column.go} | 22 +++++-------------- plugins/outputs/postgresql/utils/utils.go | 4 ++-- 4 files changed, 15 insertions(+), 25 deletions(-) rename plugins/outputs/postgresql/utils/{types.go => column.go} (62%) diff --git a/plugins/outputs/postgresql/columns.go b/plugins/outputs/postgresql/columns.go index ec438e8582f3b..ad4c778e0101e 100644 --- a/plugins/outputs/postgresql/columns.go +++ b/plugins/outputs/postgresql/columns.go @@ -13,14 +13,14 @@ const ( JSONColumnDataType = utils.PgJSONb ) -var TimeColumn = utils.Column{TimeColumnName, TimeColumnDataType, utils.TimeColType} -var TagIDColumn = utils.Column{TagIDColumnName, TagIDColumnDataType, utils.TagsIDColType} -var FieldsJSONColumn = utils.Column{FieldsJSONColumnName, JSONColumnDataType, utils.FieldColType} -var TagsJSONColumn = utils.Column{TagsJSONColumnName, JSONColumnDataType, utils.TagColType} +var TimeColumn = utils.Column{Name: TimeColumnName, Type: TimeColumnDataType, Role: utils.TimeColType} +var TagIDColumn = utils.Column{Name: TagIDColumnName, Type: TagIDColumnDataType, Role: utils.TagsIDColType} +var FieldsJSONColumn = utils.Column{Name: FieldsJSONColumnName, Type: JSONColumnDataType, Role: utils.FieldColType} +var TagsJSONColumn = utils.Column{Name: TagsJSONColumnName, Type: JSONColumnDataType, Role: utils.TagColType} func ColumnFromTag(key string, value interface{}) utils.Column { - return utils.Column{key, utils.DerivePgDatatype(value), utils.TagColType} + return utils.Column{Name: key, Type: utils.DerivePgDatatype(value), Role: utils.TagColType} } func ColumnFromField(key string, value interface{}) utils.Column { - return utils.Column{key, utils.DerivePgDatatype(value), utils.FieldColType} + return utils.Column{Name: key, Type: utils.DerivePgDatatype(value), Role: utils.FieldColType} } diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index b8a4b9b758890..74493e91dd47d 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -159,7 +159,7 @@ func (p *Postgresql) Connect() error { } if p.LogLevel != "" { - poolConfig.ConnConfig.Logger = utils.PGXLogger{p.Logger} + poolConfig.ConnConfig.Logger = utils.PGXLogger{Logger: p.Logger} poolConfig.ConnConfig.LogLevel, err = pgx.LogLevelFromString(p.LogLevel) if err != nil { return fmt.Errorf("invalid log level") diff --git a/plugins/outputs/postgresql/utils/types.go b/plugins/outputs/postgresql/utils/column.go similarity index 62% rename from plugins/outputs/postgresql/utils/types.go rename to plugins/outputs/postgresql/utils/column.go index ad0ed3a4a275e..e1428d2600a87 100644 --- a/plugins/outputs/postgresql/utils/types.go +++ b/plugins/outputs/postgresql/utils/column.go @@ -1,5 +1,8 @@ package utils +// This is split out from the 'postgresql' package as its depended upon by both the 'postgresql' and +// 'postgresql/template' packages. + import ( "sort" "strings" @@ -28,22 +31,9 @@ type Column struct { Role ColumnRole } -//// TargetColumns contains all the information needed to map a collection of -//// metrics who belong to the same Measurement. -//type TargetColumns struct { -// // the names the columns will have in the database -// Names []string -// // column name -> order number. where to place each column in rows -// // batched to the db -// Target map[string]int -// // the data type of each column should have in the db. 
used when checking -// // if the schema matches or it needs updates -// DataTypes []PgDataType -// // the role each column has, helps properly map the metric to the db -// Roles []ColumnRole -//} -// - +// ColumnList implements sort.Interface. +// Columns are sorted first into groups of time,tag_id,tags,fields, and then alphabetically within +// each group. type ColumnList []Column func (cl ColumnList) Len() int { diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index c2919f7b97047..e4745d0f7c9f4 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -100,8 +100,8 @@ func DerivePgDatatype(value interface{}) PgDataType { } } -// PgTypeCanContain tells you if one PostgreSQL data type can contain -// the values of another without data loss. +// PgTypeCanContain tells you if one PostgreSQL data type can contain the values of another without +// significant data loss (e.g. a double can store an integer, but you may lose some precision). func PgTypeCanContain(canThis PgDataType, containThis PgDataType) bool { switch canThis { case containThis: From cd6203d6b286122e87f5f08bccc3347ed6350f6a Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 14 Jul 2021 20:53:32 -0400 Subject: [PATCH 092/121] outputs.postgresql: remove panics on dropTagColumn & dropFieldColumn These scenarios should never happen, but might as well just return an error instead. --- plugins/outputs/postgresql/table_source.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index a720337b8f0aa..a8ccdb7a7a80d 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -185,26 +185,25 @@ func (tsrc *TableSource) ColumnNames() []string { func (tsrc *TableSource) DropColumn(col utils.Column) error { switch col.Role { case utils.TagColType: - tsrc.dropTagColumn(col) + return tsrc.dropTagColumn(col) case utils.FieldColType: - tsrc.dropFieldColumn(col) + return tsrc.dropFieldColumn(col) case utils.TimeColType, utils.TagsIDColType: return fmt.Errorf("critical column \"%s\"", col.Name) default: return fmt.Errorf("internal error: unknown column \"%s\"", col.Name) } - return nil } // Drops the tag column from conversion. Any metrics containing this tag will be skipped. -func (tsrc *TableSource) dropTagColumn(col utils.Column) { +func (tsrc *TableSource) dropTagColumn(col utils.Column) error { if col.Role != utils.TagColType || tsrc.postgresql.TagsAsJsonb { - panic(fmt.Sprintf("Tried to perform an invalid tag drop. This should not have happened. measurement=%s tag=%s", tsrc.Name(), col.Name)) + return fmt.Errorf("internal error: Tried to perform an invalid tag drop. measurement=%s tag=%s", tsrc.Name(), col.Name) } tsrc.droppedTagColumns = append(tsrc.droppedTagColumns, col.Name) if !tsrc.tagColumns.Remove(col.Name) { - return + return nil } for setID, set := range tsrc.tagSets { @@ -216,15 +215,17 @@ func (tsrc *TableSource) dropTagColumn(col utils.Column) { } } } + return nil } // Drops the field column from conversion. Any metrics containing this field will have the field omitted. -func (tsrc *TableSource) dropFieldColumn(col utils.Column) { +func (tsrc *TableSource) dropFieldColumn(col utils.Column) error { if col.Role != utils.FieldColType || tsrc.postgresql.FieldsAsJsonb { - panic(fmt.Sprintf("Tried to perform an invalid field drop. This should not have happened. 
measurement=%s field=%s", tsrc.Name(), col.Name)) + return fmt.Errorf("internal error: Tried to perform an invalid field drop. measurement=%s field=%s", tsrc.Name(), col.Name) } tsrc.fieldColumns.Remove(col.Name) + return nil } func (tsrc *TableSource) Next() bool { From 2bd9699e487984fd49cab024e5df6c4f01579110 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 14 Jul 2021 22:20:34 -0400 Subject: [PATCH 093/121] outputs.postgresql: rename template package to sqltemplate golangci-lint is complaining that the package name shadows an import. This statement is incorrect, and no shaowing is happening. So renaming just to make the linter happy. --- plugins/outputs/postgresql/postgresql.go | 10 ++++----- .../{template => sqltemplate}/template.go | 2 +- plugins/outputs/postgresql/table_manager.go | 22 ++++++++++--------- .../outputs/postgresql/table_manager_test.go | 8 +++---- 4 files changed, 22 insertions(+), 20 deletions(-) rename plugins/outputs/postgresql/{template => sqltemplate}/template.go (99%) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 74493e91dd47d..3ca0041308d14 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -16,7 +16,7 @@ import ( "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" "github.com/influxdata/toml" ) @@ -110,10 +110,10 @@ type Postgresql struct { ForeignTagConstraint bool TagsAsJsonb bool FieldsAsJsonb bool - CreateTemplates []*template.Template - AddColumnTemplates []*template.Template - TagTableCreateTemplates []*template.Template - TagTableAddColumnTemplates []*template.Template + CreateTemplates []*sqltemplate.Template + AddColumnTemplates []*sqltemplate.Template + TagTableCreateTemplates []*sqltemplate.Template + TagTableAddColumnTemplates []*sqltemplate.Template RetryMaxBackoff config.Duration LogLevel string diff --git a/plugins/outputs/postgresql/template/template.go b/plugins/outputs/postgresql/sqltemplate/template.go similarity index 99% rename from plugins/outputs/postgresql/template/template.go rename to plugins/outputs/postgresql/sqltemplate/template.go index 0652087473a9b..5b35cfc5c1590 100644 --- a/plugins/outputs/postgresql/template/template.go +++ b/plugins/outputs/postgresql/sqltemplate/template.go @@ -108,7 +108,7 @@ A very complex example for versions of TimescaleDB which don't support adding co WHERE t.tag_id = tt.tag_id''', ] */ -package template +package sqltemplate import ( "bytes" diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index df176e953103c..f02fd73160373 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -7,7 +7,7 @@ import ( "sync" "sync/atomic" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) @@ -148,8 +148,8 @@ func (tm *TableManager) EnsureStructure( db dbh, tbl *tableState, columns []utils.Column, - createTemplates []*template.Template, - addColumnsTemplates []*template.Template, + createTemplates []*sqltemplate.Template, + 
addColumnsTemplates []*sqltemplate.Template, metricsTable *tableState, tagsTable *tableState, ) ([]utils.Column, error) { @@ -227,19 +227,19 @@ func (tm *TableManager) checkColumns(dbColumns map[string]utils.Column, srcColum func (tm *TableManager) executeTemplates( ctx context.Context, db dbh, - tmpls []*template.Template, + tmpls []*sqltemplate.Template, tbl *tableState, newColumns []utils.Column, metricsTable *tableState, tagsTable *tableState, ) error { - tmplTable := template.NewTable(tm.Schema, tbl.name, colMapToSlice(tbl.Columns())) - metricsTmplTable := template.NewTable(tm.Schema, metricsTable.name, colMapToSlice(metricsTable.Columns())) - var tagsTmplTable *template.Table + tmplTable := sqltemplate.NewTable(tm.Schema, tbl.name, colMapToSlice(tbl.Columns())) + metricsTmplTable := sqltemplate.NewTable(tm.Schema, metricsTable.name, colMapToSlice(metricsTable.Columns())) + var tagsTmplTable *sqltemplate.Table if tagsTable != nil { - tagsTmplTable = template.NewTable(tm.Schema, tagsTable.name, colMapToSlice(tagsTable.Columns())) + tagsTmplTable = sqltemplate.NewTable(tm.Schema, tagsTable.name, colMapToSlice(tagsTable.Columns())) } else { - tagsTmplTable = template.NewTable("", "", nil) + tagsTmplTable = sqltemplate.NewTable("", "", nil) } /* https://github.com/jackc/pgx/issues/872 @@ -300,7 +300,9 @@ func (tm *TableManager) executeTemplates( if col.Role != utils.TagColType { continue } - if _, err := tx.Exec(ctx, "COMMENT ON COLUMN "+tmplTable.String()+"."+template.QuoteIdentifier(col.Name)+" IS 'tag'"); err != nil { + stmt := fmt.Sprintf("COMMENT ON COLUMN %s.%s IS 'tag'", + tmplTable.String(), sqltemplate.QuoteIdentifier(col.Name)) + if _, err := tx.Exec(ctx, stmt); err != nil { return fmt.Errorf("setting column role comment: %s", err) } } diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go index d89fe22bedbdb..b1c30139d51a8 100644 --- a/plugins/outputs/postgresql/table_manager_test.go +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -2,7 +2,7 @@ package postgresql import ( "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs/postgresql/template" + "github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -122,7 +122,7 @@ func TestTableManager_cache(t *testing.T) { // Verify that when alter statements are disabled and a metric comes in with a new tag key, that the tag is omitted. func TestTableManager_noAlterMissingTag(t *testing.T) { p := newPostgresqlTest(t) - p.AddColumnTemplates = []*template.Template{} + p.AddColumnTemplates = []*sqltemplate.Template{} require.NoError(t, p.Connect()) metrics := []telegraf.Metric{ @@ -145,7 +145,7 @@ func TestTableManager_noAlterMissingTag(t *testing.T) { func TestTableManager_noAlterMissingTagTableTag(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true - p.TagTableAddColumnTemplates = []*template.Template{} + p.TagTableAddColumnTemplates = []*sqltemplate.Template{} require.NoError(t, p.Connect()) metrics := []telegraf.Metric{ @@ -167,7 +167,7 @@ func TestTableManager_noAlterMissingTagTableTag(t *testing.T) { // verify that when alter statements are disabled and a metric comes in with a new field key, that the field is omitted. 
func TestTableManager_noAlterMissingField(t *testing.T) { p := newPostgresqlTest(t) - p.AddColumnTemplates = []*template.Template{} + p.AddColumnTemplates = []*sqltemplate.Template{} require.NoError(t, p.Connect()) metrics := []telegraf.Metric{ From 21bdd2375f0256250d01960e72880ed98c187d5a Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 15 Jul 2021 18:30:05 -0400 Subject: [PATCH 094/121] outputs.postgresql: use testutil.MustMetric per-code-review, use testutil.MustMetric instead of metric.New --- plugins/outputs/postgresql/postgresql_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index d45a069fcb2aa..3c752525590a4 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -3,6 +3,7 @@ package postgresql import ( "context" "fmt" + "github.com/influxdata/telegraf/testutil" "os" "strings" "sync" @@ -15,7 +16,6 @@ import ( "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) @@ -256,7 +256,7 @@ func newMetric( tags map[string]string, fields map[string]interface{}, ) telegraf.Metric { - return metric.New(t.Name()+suffix, tags, fields, time.Now()) + return testutil.MustMetric(t.Name()+suffix, tags, fields, time.Now()) } type MSS = map[string]string From 9a0ed7c556f2bdb5e1c2fb4471a055f27b2ac3c6 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 15 Jul 2021 18:39:42 -0400 Subject: [PATCH 095/121] outputs.postgresql: remove postgres test server auto-detection --- plugins/outputs/postgresql/postgresql_test.go | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 3c752525590a4..b9c550b3fb250 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -184,23 +184,6 @@ func (la *LogAccumulator) Info(args ...interface{}) { var ctx = context.Background() func TestMain(m *testing.M) { - // Try and find the server. - // Try provided env vars & defaults first. 
- if c, err := pgx.Connect(ctx, ""); err != nil { - os.Setenv("PGHOST", "127.0.0.1") - os.Setenv("PGUSER", "postgres") - // Try the port used in docker-compose.yml first - os.Setenv("PGPORT", "5433") - if c, err := pgx.Connect(ctx, ""); err != nil { - // Fall back to the default port - os.Setenv("PGPORT", "5432") - } else { - c.Close(ctx) - } - } else { - c.Close(ctx) - } - if err := prepareDatabase("telegraf"); err != nil { fmt.Fprintf(os.Stderr, "Error preparing database: %s\n", err) os.Exit(1) @@ -209,7 +192,7 @@ func TestMain(m *testing.M) { } func prepareDatabase(name string) error { - db, err := pgx.Connect(ctx, "") + db, err := pgx.Connect(ctx, os.Getenv("PGURI")) if err != nil { return err } From 2d553dbd9e42337e0994eb8e7d69a7b89322cbd6 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 15 Jul 2021 18:56:49 -0400 Subject: [PATCH 096/121] outputs.postgresql: use struct for args to test batchGenerator addresses linter warning --- .../postgresql/postgresql_bench_test.go | 32 ++++++++++++------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql_bench_test.go b/plugins/outputs/postgresql/postgresql_bench_test.go index 9d52bb3f4dd8f..bac0de93ac820 100644 --- a/plugins/outputs/postgresql/postgresql_bench_test.go +++ b/plugins/outputs/postgresql/postgresql_bench_test.go @@ -12,11 +12,11 @@ import ( ) func BenchmarkPostgresql_sequential(b *testing.B) { - gen := batchGenerator(ctx, b, 1000, 3, 8, 12, 100, 2) + gen := batchGenerator(batchGeneratorArgs{ctx, b, 1000, 3, 8, 12, 100, 2}) benchmarkPostgresql(b, gen, 1, true) } func BenchmarkPostgresql_concurrent(b *testing.B) { - gen := batchGenerator(ctx, b, 1000, 3, 8, 12, 100, 2) + gen := batchGenerator(batchGeneratorArgs{ctx, b, 1000, 3, 8, 12, 100, 2}) benchmarkPostgresql(b, gen, 10, true) } @@ -46,12 +46,22 @@ func benchmarkPostgresql(b *testing.B, gen <-chan []telegraf.Metric, concurrency b.ReportMetric(float64(metricCount)/tStop.Sub(tStart).Seconds(), "metrics/s") } +type batchGeneratorArgs struct { + ctx context.Context + b *testing.B + batchSize int + numTables int + numTags int + numFields int + tagCardinality int + fieldCardinality int +} // tagCardinality counts all the tag keys & values as one element. fieldCardinality counts all the field keys (not values) as one element. 
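One note on this pattern before the reworked generator signature below: because `batchGeneratorArgs` lives in the same package, call sites may also spell the arguments with named fields, which stays self-documenting as parameters accumulate. A sketch using the benchmark's own values:

```go
// Equivalent to batchGenerator(batchGeneratorArgs{ctx, b, 1000, 3, 8, 12, 100, 2}),
// but labeled, so a later reordering of the struct fields cannot silently
// reassign values.
gen := batchGenerator(batchGeneratorArgs{
	ctx:              ctx,
	b:                b,
	batchSize:        1000,
	numTables:        3,
	numTags:          8,
	numFields:        12,
	tagCardinality:   100,
	fieldCardinality: 2,
})
benchmarkPostgresql(b, gen, 1, true)
```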
-func batchGenerator(ctx context.Context, b *testing.B, batchSize int, numTables int, numTags int, numFields int, tagCardinality int, fieldCardinality int) <-chan []telegraf.Metric { - tagSets := make([]MSS, tagCardinality) - for i := 0; i < tagCardinality; i++ { +func batchGenerator(args batchGeneratorArgs) <-chan []telegraf.Metric { + tagSets := make([]MSS, args.tagCardinality) + for i := 0; i < args.tagCardinality; i++ { tags := MSS{} - for j := 0; j < numTags; j++ { + for j := 0; j < args.numTags; j++ { tags[fmt.Sprintf("tag_%d", j)] = fmt.Sprintf("%d", rand.Int()) } tagSets[i] = tags @@ -60,9 +70,9 @@ func batchGenerator(ctx context.Context, b *testing.B, batchSize int, numTables metricChan := make(chan []telegraf.Metric, 32) go func() { for { - batch := make([]telegraf.Metric, batchSize) - for i := 0; i < batchSize; i++ { - tableName := b.Name() + "_" + strconv.Itoa(rand.Intn(numTables)) + batch := make([]telegraf.Metric, args.batchSize) + for i := 0; i < args.batchSize; i++ { + tableName := args.b.Name() + "_" + strconv.Itoa(rand.Intn(args.numTables)) tags := tagSets[rand.Intn(len(tagSets))] @@ -70,10 +80,10 @@ func batchGenerator(ctx context.Context, b *testing.B, batchSize int, numTables m.AddTag("tableName", tableName) // ensure the tag set is unique to this table. Just in case... // We do field cardinality by randomizing the name of the final field to an integer < cardinality. - for j := 0; j < numFields-1; j++ { // use -1 to reserve the last field for cardinality + for j := 0; j < args.numFields-1; j++ { // use -1 to reserve the last field for cardinality m.AddField("f"+strconv.Itoa(j), rand.Int()) } - m.AddField("f"+strconv.Itoa(rand.Intn(fieldCardinality)), rand.Int()) + m.AddField("f"+strconv.Itoa(rand.Intn(args.fieldCardinality)), rand.Int()) batch[i] = m } From 08ed5e7bbd2d49e7353988fc926e8af9f6734fec Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 15 Jul 2021 18:58:49 -0400 Subject: [PATCH 097/121] outputs.postgresql: unexport WriteTagTable --- plugins/outputs/postgresql/postgresql.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 3ca0041308d14..7603d16160a4c 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -365,7 +365,7 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableS } if p.TagsAsForeignKeys { - if err := p.WriteTagTable(ctx, db, tableSource); err != nil { + if err := p.writeTagTable(ctx, db, tableSource); err != nil { if p.ForeignTagConstraint { return fmt.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err) } else { @@ -384,7 +384,7 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableS return nil } -func (p *Postgresql) WriteTagTable(ctx context.Context, db dbh, tableSource *TableSource) error { +func (p *Postgresql) writeTagTable(ctx context.Context, db dbh, tableSource *TableSource) error { ttsrc := NewTagTableSource(tableSource) // Check whether we have any tags to insert From 93aec6f78538ff155f5dd3df04811192d26ff49e Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 13 Aug 2021 11:08:47 -0400 Subject: [PATCH 098/121] outputs/postgresql: address PR comments --- plugins/outputs/postgresql/columns.go | 26 +++++------ plugins/outputs/postgresql/postgresql.go | 31 +++++++------ .../postgresql/postgresql_bench_test.go | 5 +- plugins/outputs/postgresql/table_manager.go | 46 ++++--------------- 
.../outputs/postgresql/table_manager_test.go | 16 ++++--- plugins/outputs/postgresql/table_source.go | 14 +++--- 6 files changed, 58 insertions(+), 80 deletions(-) diff --git a/plugins/outputs/postgresql/columns.go b/plugins/outputs/postgresql/columns.go index ad4c778e0101e..7e403c4d45e6b 100644 --- a/plugins/outputs/postgresql/columns.go +++ b/plugins/outputs/postgresql/columns.go @@ -4,23 +4,23 @@ import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" // Column names and data types for standard fields (time, tag_id, tags, and fields) const ( - TimeColumnName = "time" - TimeColumnDataType = utils.PgTimestampWithTimeZone - TagIDColumnName = "tag_id" - TagIDColumnDataType = utils.PgBigInt - TagsJSONColumnName = "tags" - FieldsJSONColumnName = "fields" - JSONColumnDataType = utils.PgJSONb + timeColumnName = "time" + timeColumnDataType = utils.PgTimestampWithTimeZone + tagIDColumnName = "tag_id" + tagIDColumnDataType = utils.PgBigInt + tagsJSONColumnName = "tags" + fieldsJSONColumnName = "fields" + jsonColumnDataType = utils.PgJSONb ) -var TimeColumn = utils.Column{Name: TimeColumnName, Type: TimeColumnDataType, Role: utils.TimeColType} -var TagIDColumn = utils.Column{Name: TagIDColumnName, Type: TagIDColumnDataType, Role: utils.TagsIDColType} -var FieldsJSONColumn = utils.Column{Name: FieldsJSONColumnName, Type: JSONColumnDataType, Role: utils.FieldColType} -var TagsJSONColumn = utils.Column{Name: TagsJSONColumnName, Type: JSONColumnDataType, Role: utils.TagColType} +var timeColumn = utils.Column{Name: timeColumnName, Type: timeColumnDataType, Role: utils.TimeColType} +var tagIDColumn = utils.Column{Name: tagIDColumnName, Type: tagIDColumnDataType, Role: utils.TagsIDColType} +var fieldsJSONColumn = utils.Column{Name: fieldsJSONColumnName, Type: jsonColumnDataType, Role: utils.FieldColType} +var tagsJSONColumn = utils.Column{Name: tagsJSONColumnName, Type: jsonColumnDataType, Role: utils.TagColType} -func ColumnFromTag(key string, value interface{}) utils.Column { +func columnFromTag(key string, value interface{}) utils.Column { return utils.Column{Name: key, Type: utils.DerivePgDatatype(value), Role: utils.TagColType} } -func ColumnFromField(key string, value interface{}) utils.Column { +func columnFromField(key string, value interface{}) utils.Column { return utils.Column{Name: key, Type: utils.DerivePgDatatype(value), Role: utils.FieldColType} } diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 7603d16160a4c..661729149cd67 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -12,13 +12,14 @@ import ( "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/pgxpool" + "github.com/influxdata/toml" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" - "github.com/influxdata/toml" ) type dbh interface { @@ -103,19 +104,19 @@ var sampleConfig = ` ` type Postgresql struct { - Connection string - Schema string - TagsAsForeignKeys bool - TagTableSuffix string - ForeignTagConstraint bool - TagsAsJsonb bool - FieldsAsJsonb bool - CreateTemplates []*sqltemplate.Template - AddColumnTemplates []*sqltemplate.Template - TagTableCreateTemplates []*sqltemplate.Template - TagTableAddColumnTemplates 
[]*sqltemplate.Template - RetryMaxBackoff config.Duration - LogLevel string + Connection string `toml:"connection"` + Schema string `toml:"schema"` + TagsAsForeignKeys bool `toml:"tags_as_foreign_keys"` + TagTableSuffix string `toml:"tag_table_suffix"` + ForeignTagConstraint bool `toml:"foreign_tag_constraint"` + TagsAsJsonb bool `toml:"tags_as_jsonb"` + FieldsAsJsonb bool `toml:"fields_as_jsonb"` + CreateTemplates []*sqltemplate.Template `toml:"create_templates"` + AddColumnTemplates []*sqltemplate.Template `toml:"add_column_templates"` + TagTableCreateTemplates []*sqltemplate.Template `toml:"tag_table_create_templates"` + TagTableAddColumnTemplates []*sqltemplate.Template `toml:"tag_table_add_column_templates"` + RetryMaxBackoff config.Duration `toml:"retry_max_backoff"` + LogLevel string `toml:"log_level"` dbContext context.Context dbContextCancel func() @@ -126,7 +127,7 @@ type Postgresql struct { writeChan chan *TableSource writeWaitGroup *utils.WaitGroup - Logger telegraf.Logger + Logger telegraf.Logger `toml:"-"` } func init() { diff --git a/plugins/outputs/postgresql/postgresql_bench_test.go b/plugins/outputs/postgresql/postgresql_bench_test.go index bac0de93ac820..cb3bab7523f70 100644 --- a/plugins/outputs/postgresql/postgresql_bench_test.go +++ b/plugins/outputs/postgresql/postgresql_bench_test.go @@ -3,12 +3,13 @@ package postgresql import ( "context" "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" "math/rand" "strconv" "testing" "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" ) func BenchmarkPostgresql_sequential(b *testing.B) { diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index f02fd73160373..b0fdb62f913c6 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -101,13 +101,13 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, db dbh, tbl * role := utils.FieldColType switch colName { - case TimeColumnName: + case timeColumnName: role = utils.TimeColType - case TagIDColumnName: + case tagIDColumnName: role = utils.TagsIDColType - case TagsJSONColumnName: + case tagsJSONColumnName: role = utils.TagColType - case FieldsJSONColumnName: + case fieldsJSONColumnName: role = utils.FieldColType default: // We don't want to monopolize the column comment (preventing user from storing other information there), so just look at the first word @@ -242,36 +242,6 @@ func (tm *TableManager) executeTemplates( tagsTmplTable = sqltemplate.NewTable("", "", nil) } - /* https://github.com/jackc/pgx/issues/872 - stmts := make([]string, len(tmpls)) - batch := &pgx.Batch{} - for i, tmpl := range tmpls { - sql, err := tmpl.Render(tmplTable, newColumns, metricsTmplTable, tagsTmplTable) - if err != nil { - return err - } - stmts[i] = string(sql) - batch.Queue(stmts[i]) - } - - batch.Queue(refreshTableStructureStatement, tm.Schema, tableName) - - batchResult := tm.db.SendBatch(ctx, batch) - defer batchResult.Close() - - for i := 0; i < len(tmpls); i++ { - if x, err := batchResult.Exec(); err != nil { - return fmt.Errorf("executing `%.40s...`: %v %w", stmts[i], x, err) - } - } - - rows, err := batchResult.Query() - if err != nil { - return fmt.Errorf("refreshing table: %w", err) - } - tm.refreshTableStructureResponse(tableName, rows) - */ - // Lock to prevent concurrency issues in postgres (pg_type_typname_nsp_index unique constraint; SQLSTATE 23505) tm.schemaMutex.Lock() defer 
tm.schemaMutex.Unlock() @@ -358,7 +328,9 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl } colDefs[i] = col.Name + " " + string(col.Type) } - tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s", tagTable.name, strings.Join(colDefs, ", ")) + tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s", + tagTable.name, + strings.Join(colDefs, ", ")) } } @@ -384,7 +356,9 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl } colDefs[i] = col.Name + " " + string(col.Type) } - tm.Logger.Errorf("table \"%s\" is missing columns (omitting fields): %s", metricTable.name, strings.Join(colDefs, ", ")) + tm.Logger.Errorf("table \"%s\" is missing columns (omitting fields): %s", + metricTable.name, + strings.Join(colDefs, ", ")) } return nil diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go index b1c30139d51a8..119f66a507586 100644 --- a/plugins/outputs/postgresql/table_manager_test.go +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -1,12 +1,14 @@ package postgresql import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "testing" ) func TestTableManager_EnsureStructure(t *testing.T) { @@ -14,8 +16,8 @@ func TestTableManager_EnsureStructure(t *testing.T) { require.NoError(t, p.Connect()) cols := []utils.Column{ - ColumnFromTag("foo", ""), - ColumnFromField("baz", 0), + columnFromTag("foo", ""), + columnFromField("baz", 0), } missingCols, err := p.tableManager.EnsureStructure( ctx, @@ -39,8 +41,8 @@ func TestTableManager_refreshTableStructure(t *testing.T) { require.NoError(t, p.Connect()) cols := []utils.Column{ - ColumnFromTag("foo", ""), - ColumnFromField("baz", 0), + columnFromTag("foo", ""), + columnFromField("baz", 0), } _, err := p.tableManager.EnsureStructure( ctx, diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index a8ccdb7a7a80d..da297b3e056e4 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -101,13 +101,13 @@ func (tsrc *TableSource) AddMetric(metric telegraf.Metric) { if !tsrc.postgresql.TagsAsJsonb { for _, t := range metric.TagList() { - tsrc.tagColumns.Add(ColumnFromTag(t.Key, t.Value)) + tsrc.tagColumns.Add(columnFromTag(t.Key, t.Value)) } } if !tsrc.postgresql.FieldsAsJsonb { for _, f := range metric.FieldList() { - tsrc.fieldColumns.Add(ColumnFromField(f.Key, f.Value)) + tsrc.fieldColumns.Add(columnFromField(f.Key, f.Value)) } } @@ -126,7 +126,7 @@ func (tsrc *TableSource) TagColumns() []utils.Column { var cols []utils.Column if tsrc.postgresql.TagsAsJsonb { - cols = append(cols, TagsJSONColumn) + cols = append(cols, tagsJSONColumn) } else { cols = append(cols, tsrc.tagColumns.columns...) } @@ -142,17 +142,17 @@ func (tsrc *TableSource) FieldColumns() []utils.Column { // Returns the full column list, including time, tag id or tags, and fields. 
func (tsrc *TableSource) MetricTableColumns() []utils.Column { cols := []utils.Column{ - TimeColumn, + timeColumn, } if tsrc.postgresql.TagsAsForeignKeys { - cols = append(cols, TagIDColumn) + cols = append(cols, tagIDColumn) } else { cols = append(cols, tsrc.TagColumns()...) } if tsrc.postgresql.FieldsAsJsonb { - cols = append(cols, FieldsJSONColumn) + cols = append(cols, fieldsJSONColumn) } else { cols = append(cols, tsrc.FieldColumns()...) } @@ -162,7 +162,7 @@ func (tsrc *TableSource) MetricTableColumns() []utils.Column { func (tsrc *TableSource) TagTableColumns() []utils.Column { cols := []utils.Column{ - TagIDColumn, + tagIDColumn, } cols = append(cols, tsrc.TagColumns()...) From ff644a9bc6dabfbc0d09f3581bc37dad867bc6ac Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 13 Aug 2021 14:26:22 -0400 Subject: [PATCH 099/121] outputs/postgresql: Refactor initialization to use telegraf convention --- plugins/outputs/postgresql/postgresql.go | 99 +++++++++++++------ plugins/outputs/postgresql/postgresql_test.go | 28 +++++- .../postgresql/sqltemplate/template.go | 9 -- 3 files changed, 98 insertions(+), 38 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 661729149cd67..5ea5b94ce97a8 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -12,8 +12,6 @@ import ( "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/pgxpool" - "github.com/influxdata/toml" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/models" @@ -51,53 +49,51 @@ var sampleConfig = ` ## with the same name as the user. This dbname is just for instantiating a ## connection with the server and doesn't restrict the databases we are trying ## to grab metrics for. - ## - #connection = "host=localhost user=postgres sslmode=verify-full" ## Postgres schema to use. - schema = "public" + # schema = "public" ## Store tags as foreign keys in the metrics table. Default is false. - tags_as_foreign_keys = false + # tags_as_foreign_keys = false ## Suffix to append to table name (measurement name) for the foreign tag table. - tag_table_suffix = "_tag" + # tag_table_suffix = "_tag" ## Deny inserting metrics if the foreign tag can't be inserted. - foreign_tag_constraint = false + # foreign_tag_constraint = false ## Store all tags as a JSONB object in a single 'tags' column. - tags_as_jsonb = false + # tags_as_jsonb = false ## Store all fields as a JSONB object in a single 'fields' column. - fields_as_jsonb = false + # fields_as_jsonb = false ## Templated statements to execute when creating a new table. - create_templates = [ - '''CREATE TABLE {{.table}} ({{.columns}})''', - ] + # create_templates = [ + # '''CREATE TABLE {{.table}} ({{.columns}})''', + # ] ## Templated statements to execute when adding columns to a table. ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points ## containing fields for which there is no column will have the field omitted. - add_column_templates = [ - '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''', - ] + # add_column_templates = [ + # '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''', + # ] ## Templated statements to execute when creating a new tag table. 
- tag_table_create_templates = [ - '''CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))''', - ] + # tag_table_create_templates = [ + # '''CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))''', + # ] ## Templated statements to execute when adding columns to a tag table. ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. - tag_table_add_column_templates = [ - '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''', - ] + # tag_table_add_column_templates = [ + # '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''', + # ] ## When using pool_max_conns>1, an a temporary error occurs, the query is retried with an incremental backoff. This ## controls the maximum backoff duration. - retry_max_backoff = "15s" + # retry_max_backoff = "15s" ## Enable & set the log level for the Postgres driver. # log_level = "info" # trace, debug, info, warn, error, none @@ -135,13 +131,60 @@ func init() { } func newPostgresql() *Postgresql { - p := &Postgresql{ - Logger: models.NewLogger("outputs", "postgresql", ""), + return &Postgresql{} +} + +func (p *Postgresql) Init() error { + if p.Schema == "" { + p.Schema = "public" + } + + if p.TagTableSuffix == "" { + p.TagTableSuffix = "_tag" } - if err := toml.Unmarshal([]byte(p.SampleConfig()), p); err != nil { - panic(err.Error()) + + if p.CreateTemplates == nil { + t := &sqltemplate.Template{} + t.UnmarshalText([]byte(`CREATE TABLE {{.table}} ({{.columns}})`)) + p.CreateTemplates = []*sqltemplate.Template{t} + } + + if p.AddColumnTemplates == nil { + t := &sqltemplate.Template{} + t.UnmarshalText([]byte(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`)) + p.AddColumnTemplates = []*sqltemplate.Template{t} } - return p + + if p.TagTableCreateTemplates == nil { + t := &sqltemplate.Template{} + t.UnmarshalText([]byte(`CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))`)) + p.TagTableCreateTemplates = []*sqltemplate.Template{t} + } + + if p.TagTableAddColumnTemplates == nil { + t := &sqltemplate.Template{} + t.UnmarshalText([]byte(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`)) + p.TagTableAddColumnTemplates = []*sqltemplate.Template{t} + } + + if p.RetryMaxBackoff == 0 { + p.RetryMaxBackoff = config.Duration(time.Second * 15) + } + + if p.LogLevel == "" { + p.LogLevel = "info" + } + + if p.TagTableAddColumnTemplates == nil { + t := &sqltemplate.Template{} + t.UnmarshalText([]byte(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`)) + } + + if p.Logger == nil { + p.Logger = models.NewLogger("outputs", "postgresql", "") + } + + return nil } func (p *Postgresql) SampleConfig() string { return sampleConfig } diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index b9c550b3fb250..d7e5078a5c3d6 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -2,14 +2,19 @@ package postgresql import ( "context" + "encoding/json" "fmt" - "github.com/influxdata/telegraf/testutil" "os" + "regexp" "strings" "sync" "testing" "time" + "github.com/influxdata/toml" + + "github.com/influxdata/telegraf/testutil" + "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/pgxpool" "github.com/stretchr/testify/assert" @@ -211,6 +216,7 @@ type 
PostgresqlTest struct { func newPostgresqlTest(tb testing.TB) *PostgresqlTest { p := newPostgresql() + p.Init() logger := NewLogAccumulator(tb) p.Logger = logger pt := &PostgresqlTest{Postgresql: *p} @@ -220,6 +226,26 @@ func newPostgresqlTest(tb testing.TB) *PostgresqlTest { return pt } +// Verify that the documented defaults match the actual defaults. +// +// Sample config must be in the format documented in `docs/developers/SAMPLE_CONFIG.md`. +func TestPostgresqlSampleConfig(t *testing.T) { + p1 := newPostgresql() + require.NoError(t, p1.Init()) + + p2 := newPostgresql() + re := regexp.MustCompile(`(?m)^\s*#`) + conf := re.ReplaceAllLiteralString(p1.SampleConfig(), "") + require.NoError(t, toml.Unmarshal([]byte(conf), p2)) + require.NoError(t, p2.Init()) + + // Can't use assert.Equal() because it dives into unexported fields that contain unequal values. + // Serializing to JSON is effective as any differences will be visible in exported fields. + p1json, _ := json.Marshal(p1) + p2json, _ := json.Marshal(p2) + assert.JSONEq(t, string(p1json), string(p2json), "Sample config does not match default config") +} + func TestPostgresqlConnect(t *testing.T) { p := newPostgresqlTest(t) require.NoError(t, p.Connect()) diff --git a/plugins/outputs/postgresql/sqltemplate/template.go b/plugins/outputs/postgresql/sqltemplate/template.go index 5b35cfc5c1590..d1f1d926ebf07 100644 --- a/plugins/outputs/postgresql/sqltemplate/template.go +++ b/plugins/outputs/postgresql/sqltemplate/template.go @@ -124,15 +124,6 @@ import ( "github.com/Masterminds/sprig" ) -// TableCreateTemplate is the default template used for creating new tables. -var TableCreateTemplate = newTemplate(`CREATE TABLE {{.table}} ({{.columns}})`) - -// TagTableCreateTemplate is the default template used when creating a new tag table. -var TagTableCreateTemplate = newTemplate(`CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))`) - -// TableAddColumnTemplate is the default template used when adding new columns to an existing table. 
-var TableAddColumnTemplate = newTemplate(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`) - var templateFuncs = map[string]interface{}{ "quoteIdentifier": QuoteIdentifier, "quoteLiteral": QuoteLiteral, From 04d2764275851ae9d59b68228c942177bb869d4a Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 13 Aug 2021 21:47:43 -0400 Subject: [PATCH 100/121] outputs/postgresql: address linter complaints --- plugins/outputs/postgresql/postgresql.go | 33 +++++++++---------- .../postgresql/postgresql_bench_test.go | 1 + plugins/outputs/postgresql/postgresql_test.go | 20 ++++------- .../postgresql/sqltemplate/template.go | 12 ++----- plugins/outputs/postgresql/table_manager.go | 4 ++- plugins/outputs/postgresql/table_source.go | 16 ++++----- .../outputs/postgresql/table_source_test.go | 21 ++++-------- plugins/outputs/postgresql/utils/utils.go | 2 -- 8 files changed, 42 insertions(+), 67 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 5ea5b94ce97a8..c06cbd084a07c 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -145,25 +145,25 @@ func (p *Postgresql) Init() error { if p.CreateTemplates == nil { t := &sqltemplate.Template{} - t.UnmarshalText([]byte(`CREATE TABLE {{.table}} ({{.columns}})`)) + _ = t.UnmarshalText([]byte(`CREATE TABLE {{.table}} ({{.columns}})`)) p.CreateTemplates = []*sqltemplate.Template{t} } if p.AddColumnTemplates == nil { t := &sqltemplate.Template{} - t.UnmarshalText([]byte(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`)) + _ = t.UnmarshalText([]byte(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`)) p.AddColumnTemplates = []*sqltemplate.Template{t} } if p.TagTableCreateTemplates == nil { t := &sqltemplate.Template{} - t.UnmarshalText([]byte(`CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))`)) + _ = t.UnmarshalText([]byte(`CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))`)) p.TagTableCreateTemplates = []*sqltemplate.Template{t} } if p.TagTableAddColumnTemplates == nil { t := &sqltemplate.Template{} - t.UnmarshalText([]byte(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`)) + _ = t.UnmarshalText([]byte(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`)) p.TagTableAddColumnTemplates = []*sqltemplate.Template{t} } @@ -177,7 +177,7 @@ func (p *Postgresql) Init() error { if p.TagTableAddColumnTemplates == nil { t := &sqltemplate.Template{} - t.UnmarshalText([]byte(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`)) + _ = t.UnmarshalText([]byte(`ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}`)) } if p.Logger == nil { @@ -259,9 +259,8 @@ func (p *Postgresql) Write(metrics []telegraf.Metric) error { if p.db.Stat().MaxConns() > 1 { return p.writeConcurrent(tableSources) - } else { - return p.writeSequential(tableSources) } + return p.writeSequential(tableSources) } func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error { @@ -269,7 +268,7 @@ func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error if err != nil { return fmt.Errorf("starting transaction: %w", err) } - defer tx.Rollback(p.dbContext) + defer tx.Rollback(p.dbContext) //nolint:errcheck for _, 
tableSource := range tableSources { err := p.writeMetricsFromMeasure(p.dbContext, tx, tableSource) @@ -346,7 +345,7 @@ func isTempError(err error) bool { case "57": // Operator Intervention return true case "23": // Integrity Constraint Violation - switch pgErr.Code { + switch pgErr.Code { //nolint:revive case "23505": // unique_violation if strings.Contains(err.Error(), "pg_type_typname_nsp_index") { // Happens when you try to create 2 tables simultaneously. @@ -378,11 +377,12 @@ func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) e err = p.writeMetricsFromMeasure(ctx, tx, tableSource) if err == nil { - tx.Commit(ctx) - return nil + if err := tx.Commit(ctx); err == nil { + return nil + } } - tx.Rollback(ctx) + _ = tx.Rollback(ctx) if !isTempError(err) { return err } @@ -412,11 +412,10 @@ func (p *Postgresql) writeMetricsFromMeasure(ctx context.Context, db dbh, tableS if err := p.writeTagTable(ctx, db, tableSource); err != nil { if p.ForeignTagConstraint { return fmt.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err) - } else { - // log and continue. As the admin can correct the issue, and tags don't change over time, they can be - // added from future metrics after issue is corrected. - p.Logger.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err) } + // log and continue. As the admin can correct the issue, and tags don't change over time, they can be + // added from future metrics after issue is corrected. + p.Logger.Errorf("writing to tag table '%s': %s", tableSource.Name()+p.TagTableSuffix, err) } } @@ -442,7 +441,7 @@ func (p *Postgresql) writeTagTable(ctx context.Context, db dbh, tableSource *Tab if err != nil { return err } - defer tx.Rollback(ctx) + defer tx.Rollback(ctx) //nolint:errcheck ident := pgx.Identifier{ttsrc.postgresql.Schema, ttsrc.Name()} identTemp := pgx.Identifier{ttsrc.Name() + "_temp"} diff --git a/plugins/outputs/postgresql/postgresql_bench_test.go b/plugins/outputs/postgresql/postgresql_bench_test.go index cb3bab7523f70..3711312a46642 100644 --- a/plugins/outputs/postgresql/postgresql_bench_test.go +++ b/plugins/outputs/postgresql/postgresql_bench_test.go @@ -57,6 +57,7 @@ type batchGeneratorArgs struct { tagCardinality int fieldCardinality int } + // tagCardinality counts all the tag keys & values as one element. fieldCardinality counts all the field keys (not values) as one element. 
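An aside prompted by the `_ = t.UnmarshalText(...)` lines above (the benchmark generator continues below): a hedged alternative that satisfies errcheck without discarding errors is a tiny constructor in the spirit of the `newTemplate` helper this series removes. `mustTemplate` is hypothetical, not part of the patch:

```go
// Hypothetical helper: build a template from a built-in default literal,
// panicking on a malformed literal (a programmer error, not a runtime one).
func mustTemplate(text string) *sqltemplate.Template {
	t := &sqltemplate.Template{}
	if err := t.UnmarshalText([]byte(text)); err != nil {
		panic(err)
	}
	return t
}
```

Init could then assign, for example, `p.CreateTemplates = []*sqltemplate.Template{mustTemplate("CREATE TABLE {{.table}} ({{.columns}})")}`.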
func batchGenerator(args batchGeneratorArgs) <-chan []telegraf.Metric { tagSets := make([]MSS, args.tagCardinality) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index d7e5078a5c3d6..00a0dbc255b4e 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -24,14 +24,6 @@ import ( "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" ) -func timeout(t *testing.T, dur time.Duration) { - timer := time.AfterFunc(dur, func() { - t.Errorf("Test timed out after %s", dur) - t.FailNow() - }) - t.Cleanup(func() { timer.Stop() }) -} - type Log struct { level pgx.LogLevel format string @@ -190,7 +182,7 @@ var ctx = context.Background() func TestMain(m *testing.M) { if err := prepareDatabase("telegraf"); err != nil { - fmt.Fprintf(os.Stderr, "Error preparing database: %s\n", err) + _, _ = fmt.Fprintf(os.Stderr, "Error preparing database: %s\n", err) os.Exit(1) } os.Exit(m.Run()) @@ -216,7 +208,7 @@ type PostgresqlTest struct { func newPostgresqlTest(tb testing.TB) *PostgresqlTest { p := newPostgresql() - p.Init() + _ = p.Init() logger := NewLogAccumulator(tb) p.Logger = logger pt := &PostgresqlTest{Postgresql: *p} @@ -318,7 +310,7 @@ func TestWrite_sequential(t *testing.T) { stmtCount := 0 for _, log := range p.Logger.Logs() { if strings.Contains(log.String(), "info: PG ") { - stmtCount += 1 + stmtCount++ } } assert.Equal(t, 4, stmtCount) // BEGIN, COPY table _a, COPY table _b, COMMIT @@ -341,7 +333,7 @@ func TestWrite_concurrent(t *testing.T) { // Lock the table so that we ensure the writes hangs and the plugin has to open another connection. tx, err := p.db.Begin(ctx) require.NoError(t, err) - defer tx.Rollback(ctx) + defer tx.Rollback(ctx) //nolint:errcheck _, err = tx.Exec(ctx, "LOCK TABLE "+utils.QuoteIdent(t.Name()+"_a")) require.NoError(t, err) @@ -361,7 +353,7 @@ func TestWrite_concurrent(t *testing.T) { p.Logger.WaitForCopy(t.Name()+"_b", true) // release the lock on table _a - tx.Rollback(ctx) + _ = tx.Rollback(ctx) p.Logger.WaitForCopy(t.Name()+"_a", true) dumpA := dbTableDump(t, p.db, "_a") @@ -565,7 +557,7 @@ func TestWriteTagTable(t *testing.T) { stmtCount := 0 for _, log := range p.Logger.Logs() { if strings.Contains(log.String(), "info: PG ") { - stmtCount += 1 + stmtCount++ } } assert.Equal(t, 3, stmtCount) // BEGIN, COPY metrics table, COMMIT diff --git a/plugins/outputs/postgresql/sqltemplate/template.go b/plugins/outputs/postgresql/sqltemplate/template.go index d1f1d926ebf07..9897ddc3ae5d3 100644 --- a/plugins/outputs/postgresql/sqltemplate/template.go +++ b/plugins/outputs/postgresql/sqltemplate/template.go @@ -379,22 +379,14 @@ func (cols Columns) Fields() Columns { func (cols Columns) Hash() string { hash := fnv.New32a() for _, tc := range cols.Sorted() { - hash.Write([]byte(tc.Name)) - hash.Write([]byte{0}) + _, _ = hash.Write([]byte(tc.Name)) + _, _ = hash.Write([]byte{0}) } return strings.ToLower(base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(hash.Sum(nil))) } type Template template.Template -func newTemplate(templateString string) *Template { - t := &Template{} - if err := t.UnmarshalText([]byte(templateString)); err != nil { - panic(err) - } - return t -} - func (t *Template) UnmarshalText(text []byte) error { tmpl := template.New("") tmpl.Option("missingkey=error") diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index b0fdb62f913c6..20ccabf6fc551 100644 --- 
a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -143,6 +143,7 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, db dbh, tbl * // metricsTableName and tagsTableName are passed to the templates. // // If the table cannot be modified, the returned column list is the columns which are missing from the table. +//nolint:revive func (tm *TableManager) EnsureStructure( ctx context.Context, db dbh, @@ -224,6 +225,7 @@ func (tm *TableManager) checkColumns(dbColumns map[string]utils.Column, srcColum return missingColumns, nil } +//nolint:revive func (tm *TableManager) executeTemplates( ctx context.Context, db dbh, @@ -250,7 +252,7 @@ func (tm *TableManager) executeTemplates( if err != nil { return err } - defer tx.Rollback(ctx) + defer tx.Rollback(ctx) //nolint:errcheck for _, tmpl := range tmpls { sql, err := tmpl.Render(tmplTable, newColumns, metricsTmplTable, tagsTmplTable) diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index da297b3e056e4..279cab02e5122 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -235,9 +235,9 @@ func (tsrc *TableSource) Next() bool { tsrc.cursorError = nil return false } - tsrc.cursor += 1 + tsrc.cursor++ - tsrc.cursorValues, tsrc.cursorError = tsrc.values() + tsrc.cursorValues, tsrc.cursorError = tsrc.getValues() if tsrc.cursorValues != nil || tsrc.cursorError != nil { return true } @@ -248,9 +248,9 @@ func (tsrc *TableSource) Reset() { tsrc.cursor = -1 } -// values calculates the values for the metric at the cursor position. +// getValues calculates the values for the metric at the cursor position. // If the metric cannot be emitted, such as due to dropped tags, or all fields dropped, the return value is nil. 
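For orientation before the renamed `getValues` below: `Next`, `Values`, and `Err` exist because `TableSource` and `TagTableSource` are streamed into PostgreSQL via COPY, which in pgx v4 accepts any implementation of the `CopyFromSource` interface, reproduced here for reference:

```go
// From pgx v4 (github.com/jackc/pgx/v4): rows for CopyFrom come from any
// type satisfying this interface; getValues feeds Values() in this plugin.
type CopyFromSource interface {
	// Next returns true when another row is available, advancing the source.
	Next() bool
	// Values returns the values for the current row.
	Values() ([]interface{}, error)
	// Err returns any error encountered while iterating.
	Err() error
}
```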
-func (tsrc *TableSource) values() ([]interface{}, error) { +func (tsrc *TableSource) getValues() ([]interface{}, error) { metric := tsrc.metrics[tsrc.cursor] values := []interface{}{ @@ -364,14 +364,14 @@ func (ttsrc *TagTableSource) Next() bool { ttsrc.cursorValues = nil return false } - ttsrc.cursor += 1 + ttsrc.cursor++ if _, err := ttsrc.postgresql.tagsCache.GetInt(ttsrc.tagIDs[ttsrc.cursor]); err == nil { // tag ID already inserted continue } - ttsrc.cursorValues = ttsrc.values() + ttsrc.cursorValues = ttsrc.getValues() if ttsrc.cursorValues != nil { return true } @@ -382,7 +382,7 @@ func (ttsrc *TagTableSource) Reset() { ttsrc.cursor = -1 } -func (ttsrc *TagTableSource) values() []interface{} { +func (ttsrc *TagTableSource) getValues() []interface{} { tagID := ttsrc.tagIDs[ttsrc.cursor] tagSet := ttsrc.tagSets[tagID] @@ -407,7 +407,7 @@ func (ttsrc *TagTableSource) Values() ([]interface{}, error) { func (ttsrc *TagTableSource) UpdateCache() { for _, tagID := range ttsrc.tagIDs { - ttsrc.postgresql.tagsCache.SetInt(tagID, nil, 0) + _ = ttsrc.postgresql.tagsCache.SetInt(tagID, nil, 0) } } diff --git a/plugins/outputs/postgresql/table_source_test.go b/plugins/outputs/postgresql/table_source_test.go index e37f347d281f1..670e090da2c92 100644 --- a/plugins/outputs/postgresql/table_source_test.go +++ b/plugins/outputs/postgresql/table_source_test.go @@ -17,15 +17,6 @@ import ( func TestTableSource(t *testing.T) { } -func indexOfStr(list []string, target string) int { - for i, v := range list { - if v == target { - return i - } - } - return -1 -} - type source interface { pgx.CopyFromSource ColumnNames() []string @@ -137,7 +128,7 @@ func TestTableSource_DropColumn_tag(t *testing.T) { break } } - tsrc.DropColumn(col) + _ = tsrc.DropColumn(col) row := nextSrcRow(tsrc) assert.EqualValues(t, "one", row["a"]) @@ -167,7 +158,7 @@ func TestTableSource_DropColumn_tag_fkTrue_fcTrue(t *testing.T) { break } } - tsrc.DropColumn(col) + _ = tsrc.DropColumn(col) ttsrc := NewTagTableSource(tsrc) row := nextSrcRow(ttsrc) @@ -201,7 +192,7 @@ func TestTableSource_DropColumn_tag_fkTrue_fcFalse(t *testing.T) { break } } - tsrc.DropColumn(col) + _ = tsrc.DropColumn(col) ttsrc := NewTagTableSource(tsrc) row := nextSrcRow(ttsrc) @@ -232,7 +223,7 @@ func TestTableSource_DropColumn_field(t *testing.T) { break } } - tsrc.DropColumn(col) + _ = tsrc.DropColumn(col) row := nextSrcRow(tsrc) assert.EqualValues(t, "foo", row["tag"]) @@ -272,8 +263,8 @@ func TestTagTableSource_InconsistentTags(t *testing.T) { // ttsrc is in non-deterministic order expected := []MSI{ - MSI{"a": "1", "c": nil}, - MSI{"a": nil, "c": "3"}, + {"a": "1", "c": nil}, + {"a": nil, "c": "3"}, } var actual []MSI diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index e4745d0f7c9f4..8a790d2125844 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "hash/fnv" - "log" "strings" "sync/atomic" "time" @@ -95,7 +94,6 @@ func DerivePgDatatype(value interface{}) PgDataType { case time.Time: return PgTimestampWithTimeZone default: - log.Printf("E! 
Unknown datatype %T(%v)", value, value) return PgText } } From a1316aeed006148af8c081a8e290500fae721fd4 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sun, 15 Aug 2021 21:59:36 -0400 Subject: [PATCH 101/121] outputs/postgresql: update licenses --- docs/LICENSE_OF_DEPENDENCIES.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 1ec09fe87f486..6e9cec4c5ae4c 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -15,6 +15,9 @@ following works: - github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE) - github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE) - github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) +- github.com/Masterminds/goutils [Apache License 2.0](https://github.com/Masterminds/goutils/blob/master/LICENSE.txt) +- github.com/Masterminds/semver [MIT License](https://github.com/Masterminds/semver/blob/master/LICENSE.txt) +- github.com/Masterminds/sprig [MIT License](https://github.com/Masterminds/sprig/blob/master/LICENSE.txt) - github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE) - github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE) - github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) @@ -54,6 +57,7 @@ following works: - github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) - github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE) - github.com/containerd/containerd [Apache License 2.0](https://github.com/containerd/containerd/blob/master/LICENSE) +- github.com/coocood/freecache [MIT License](https://github.com/coocood/freecache/blob/master/LICENSE) - github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) - github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE) - github.com/couchbase/goutils [Apache License 2.0](https://github.com/couchbase/goutils/blob/master/LICENSE.md) @@ -116,6 +120,8 @@ following works: - github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/blob/master/LICENSE) - github.com/hashicorp/golang-lru [Mozilla Public License 2.0](https://github.com/hashicorp/golang-lru/blob/master/LICENSE) - github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE) +- github.com/huandu/xstrings [MIT License](https://github.com/huandu/xstrings/blob/master/LICENSE) +- github.com/imdario/mergo [BSD 3-Clause "New" or "Revised" License](https://github.com/imdario/mergo/blob/master/LICENSE) - github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) - github.com/influxdata/influxdb-observability/common [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) - github.com/influxdata/influxdb-observability/influx2otel [MIT License](https://github.com/influxdata/influxdb-observability/blob/main/LICENSE) @@ -131,6 +137,7 @@ following 
works: - github.com/jackc/pgservicefile [MIT License](https://github.com/jackc/pgservicefile/blob/master/LICENSE) - github.com/jackc/pgtype [MIT License](https://github.com/jackc/pgtype/blob/master/LICENSE) - github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) +- github.com/jackc/puddle [MIT License](https://github.com/jackc/puddle/blob/master/LICENSE) - github.com/jaegertracing/jaeger [Apache License 2.0](https://github.com/jaegertracing/jaeger/blob/master/LICENSE) - github.com/james4k/rcon [MIT License](https://github.com/james4k/rcon/blob/master/LICENSE) - github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) @@ -155,8 +162,10 @@ following works: - github.com/microsoft/ApplicationInsights-Go [MIT License](https://github.com/microsoft/ApplicationInsights-Go/blob/master/LICENSE) - github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) - github.com/minio/highwayhash [Apache License 2.0](https://github.com/minio/highwayhash/blob/master/LICENSE) +- github.com/mitchellh/copystructure [MIT License](https://github.com/mitchellh/copystructure/blob/master/LICENSE) - github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) - github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) +- github.com/mitchellh/reflectwalk [MIT License](https://github.com/mitchellh/reflectwalk/blob/master/LICENSE) - github.com/moby/ipvs [Apache License 2.0](https://github.com/moby/ipvs/blob/master/LICENSE) - github.com/modern-go/concurrent [Apache License 2.0](https://github.com/modern-go/concurrent/blob/master/LICENSE) - github.com/modern-go/reflect2 [Apache License 2.0](https://github.com/modern-go/reflect2/blob/master/LICENSE) From 55f071bdb7b522fb79f532df73d600aca5320b68 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sun, 15 Aug 2021 23:02:18 -0400 Subject: [PATCH 102/121] outputs/postgresql: remove maybeTempError --- plugins/outputs/postgresql/postgresql.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index c06cbd084a07c..cecf667d0f4a9 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -314,12 +314,6 @@ func (p *Postgresql) writeWorker(ctx context.Context) { } } -// This is a subset of net.Error -type maybeTempError interface { - error - Temporary() bool -} - // isTempError reports whether the error received during a metric write operation is temporary or permanent. // A temporary error is one that if the write were retried at a later time, that it might succeed. // Note however that this applies to the transaction as a whole, not the individual operation. Meaning for example a @@ -357,8 +351,8 @@ func isTempError(err error) bool { return false } - if mtErr := maybeTempError(nil); errors.As(err, &mtErr) { - return mtErr.Temporary() + if err, ok := err.(interface{ Temporary() bool }); ok { + return err.Temporary() } // Assume that any other error is permanent. 
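One subtlety in the replacement above: `errors.As` walks the `%w` wrap chain, while a plain type assertion only inspects the outermost error, so a wrapped temporary error may now be classified as permanent. A self-contained sketch of the difference (the `dialError` type is invented for illustration):

```go
package main

import (
	"errors"
	"fmt"
)

// dialError is a stand-in for a driver error implementing Temporary().
type dialError struct{}

func (dialError) Error() string   { return "dial timeout" }
func (dialError) Temporary() bool { return true }

func main() {
	wrapped := fmt.Errorf("writing metrics: %w", dialError{})

	// Plain assertion: only sees the fmt.Errorf wrapper, so it fails.
	_, ok := wrapped.(interface{ Temporary() bool })
	fmt.Println("assertion finds Temporary():", ok) // false

	// errors.As: unwraps until it finds a Temporary() implementation.
	var t interface{ Temporary() bool }
	fmt.Println("errors.As finds Temporary():", errors.As(wrapped, &t)) // true
}
```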
From ed55f2ff40e5afebc6723786b49d66cb1ea02ae3 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Mon, 16 Aug 2021 12:22:06 -0400 Subject: [PATCH 103/121] outputs/postgresql: configure integration testing --- docker-compose.yml | 6 ----- docs/INTEGRATION_TESTS.md | 1 + plugins/outputs/postgresql/postgresql_test.go | 23 +++++++++++++++++++ 3 files changed, 24 insertions(+), 6 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 4e7b44144933f..bd092d0718388 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -60,12 +60,6 @@ services: - POSTGRES_HOST_AUTH_METHOD=trust ports: - "5432:5432" - timescaledb: - image: timescale/timescaledb:2.1.1-pg12 - environment: - - POSTGRES_HOST_AUTH_METHOD=trust - ports: - - "5433:5432" rabbitmq: image: rabbitmq:3-management ports: diff --git a/docs/INTEGRATION_TESTS.md b/docs/INTEGRATION_TESTS.md index b7af829588c8b..cfa80a1493757 100644 --- a/docs/INTEGRATION_TESTS.md +++ b/docs/INTEGRATION_TESTS.md @@ -51,6 +51,7 @@ Current areas we have integration tests: | Outputs: MQTT | | | Outputs: Nats | | | Outputs: NSQ | | +| Outputs: Postgresql | | Areas we would benefit most from new integration tests: diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 00a0dbc255b4e..b48aa61b51860 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -3,6 +3,7 @@ package postgresql import ( "context" "encoding/json" + "flag" "fmt" "os" "regexp" @@ -181,6 +182,23 @@ func (la *LogAccumulator) Info(args ...interface{}) { var ctx = context.Background() func TestMain(m *testing.M) { + flag.Parse() + if testing.Short() { + return + } + + // Use the integration server if no other PG addr env vars specified. + pguri := "postgresql://localhost:5432" + for _, varname := range []string{"PGURI", "PGHOST", "PGHOSTADDR", "PGPORT"} { + if os.Getenv(varname) != "" { + pguri = "" + break + } + } + if pguri != "" { + _ = os.Setenv("PGURI", pguri) + } + if err := prepareDatabase("telegraf"); err != nil { _, _ = fmt.Fprintf(os.Stderr, "Error preparing database: %s\n", err) os.Exit(1) @@ -207,6 +225,11 @@ type PostgresqlTest struct { } func newPostgresqlTest(tb testing.TB) *PostgresqlTest { + if testing.Short() { + tb.Skipf("skipping integration test in short mode") + tb.SkipNow() + } + p := newPostgresql() _ = p.Init() logger := NewLogAccumulator(tb) From 50c8b39192e89fc9aabb2c594c196dae13b70143 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 27 Aug 2021 12:03:24 -0400 Subject: [PATCH 104/121] outputs.postgresql: change default log level to warn --- plugins/outputs/postgresql/postgresql.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index cecf667d0f4a9..fa75a38c3cbb4 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -91,12 +91,12 @@ var sampleConfig = ` # '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''', # ] - ## When using pool_max_conns>1, an a temporary error occurs, the query is retried with an incremental backoff. This + ## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This ## controls the maximum backoff duration. # retry_max_backoff = "15s" ## Enable & set the log level for the Postgres driver. 
- # log_level = "info" # trace, debug, info, warn, error, none + # log_level = "warn" # trace, debug, info, warn, error, none ` type Postgresql struct { @@ -172,7 +172,7 @@ func (p *Postgresql) Init() error { } if p.LogLevel == "" { - p.LogLevel = "info" + p.LogLevel = "warn" } if p.TagTableAddColumnTemplates == nil { From 3f946a27c292069b70228c435e25e6fd90edfcea Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 27 Aug 2021 12:08:15 -0400 Subject: [PATCH 105/121] outputs.postgresql: fix error handling on sequential writes permanent errors on sub-batches were resulting in retries --- plugins/outputs/postgresql/postgresql.go | 68 ++++++++++++------- plugins/outputs/postgresql/postgresql_test.go | 16 +++-- 2 files changed, 54 insertions(+), 30 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index fa75a38c3cbb4..e40869310e784 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -116,6 +116,7 @@ type Postgresql struct { dbContext context.Context dbContextCancel func() + dbConfig *pgxpool.Config db *pgxpool.Pool tableManager *TableManager tagsCache *freecache.Cache @@ -184,35 +185,36 @@ func (p *Postgresql) Init() error { p.Logger = models.NewLogger("outputs", "postgresql", "") } - return nil -} - -func (p *Postgresql) SampleConfig() string { return sampleConfig } -func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } - -// Connect establishes a connection to the target database and prepares the cache -func (p *Postgresql) Connect() error { - poolConfig, err := pgxpool.ParseConfig(p.Connection) - if err != nil { + var err error + if p.dbConfig, err = pgxpool.ParseConfig(p.Connection); err != nil { return err } parsedConfig, _ := pgx.ParseConfig(p.Connection) if _, ok := parsedConfig.Config.RuntimeParams["pool_max_conns"]; !ok { // The pgx default for pool_max_conns is 4. However we want to default to 1. - poolConfig.MaxConns = 1 + p.dbConfig.MaxConns = 1 } if p.LogLevel != "" { - poolConfig.ConnConfig.Logger = utils.PGXLogger{Logger: p.Logger} - poolConfig.ConnConfig.LogLevel, err = pgx.LogLevelFromString(p.LogLevel) + p.dbConfig.ConnConfig.Logger = utils.PGXLogger{Logger: p.Logger} + p.dbConfig.ConnConfig.LogLevel, err = pgx.LogLevelFromString(p.LogLevel) if err != nil { return fmt.Errorf("invalid log level") } } + return nil +} + +func (p *Postgresql) SampleConfig() string { return sampleConfig } +func (p *Postgresql) Description() string { return "Send metrics to PostgreSQL" } + +// Connect establishes a connection to the target database and prepares the cache +func (p *Postgresql) Connect() error { // Yes, we're not supposed to store the context. However since we don't receive a context, we have to. p.dbContext, p.dbContextCancel = context.WithCancel(context.Background()) - p.db, err = pgxpool.ConnectConfig(p.dbContext, poolConfig) + var err error + p.db, err = pgxpool.ConnectConfig(p.dbContext, p.dbConfig) if err != nil { p.Logger.Errorf("Couldn't connect to server\n%v", err) return err @@ -271,13 +273,30 @@ func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error defer tx.Rollback(p.dbContext) //nolint:errcheck for _, tableSource := range tableSources { - err := p.writeMetricsFromMeasure(p.dbContext, tx, tableSource) + sp := tx + if len(tableSources) > 1 { + // wrap each sub-batch in a savepoint so that if a permanent error is received, we can drop just that one sub-batch, and insert everything else. 
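A footnote on the mechanics before the loop body continues: in pgx v4, calling `Begin` on an already-open `pgx.Tx` starts a pseudo-nested transaction implemented with a savepoint, and `Rollback` on the inner handle issues `ROLLBACK TO SAVEPOINT`, leaving the outer transaction usable. A standalone sketch, not plugin code; `ctx`, `conn`, and the table name are assumed:

```go
// Sketch of the savepoint pattern (illustrative names and values).
tx, err := conn.Begin(ctx) // BEGIN
if err != nil {
	return err
}
defer tx.Rollback(ctx) //nolint:errcheck

sp, err := tx.Begin(ctx) // SAVEPOINT under the covers
if err != nil {
	return err
}
if _, err := sp.Exec(ctx, `INSERT INTO metrics_a VALUES (1)`); err != nil {
	// ROLLBACK TO SAVEPOINT: only this sub-batch is discarded.
	_ = sp.Rollback(ctx)
}
// Anything not rolled back is kept by the outer COMMIT; released savepoints
// need no explicit commit.
return tx.Commit(ctx)
```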
+ sp, err = tx.Begin(p.dbContext) + if err != nil { + return fmt.Errorf("starting savepoint: %w", err) + } + } + + err := p.writeMetricsFromMeasure(p.dbContext, sp, tableSource) if err != nil { if isTempError(err) { + // return so that telegraf will retry the whole batch return err } p.Logger.Errorf("write error (permanent, dropping sub-batch): %v", err) + if len(tableSources) > 1 { + if err := sp.Rollback(p.dbContext); err != nil { + return err + } + } } + + // savepoints do not need to be committed (released), so save the round trip and skip it } if err := tx.Commit(p.dbContext); err != nil { @@ -327,6 +346,17 @@ func isTempError(err error) bool { // https://www.postgresql.org/docs/12/errcodes-appendix.html errClass := pgErr.Code[:2] switch errClass { + case "23": // Integrity Constraint Violation + switch pgErr.Code { //nolint:revive + case "23505": // unique_violation + if strings.Contains(err.Error(), "pg_type_typname_nsp_index") { + // Happens when you try to create 2 tables simultaneously. + return true + } + } + case "25": // Invalid Transaction State + // If we're here, this is a bug, but recoverable + return true case "42": // Syntax Error or Access Rule Violation switch pgErr.Code { case "42701": // duplicate_column @@ -338,14 +368,6 @@ func isTempError(err error) bool { return true case "57": // Operator Intervention return true - case "23": // Integrity Constraint Violation - switch pgErr.Code { //nolint:revive - case "23505": // unique_violation - if strings.Contains(err.Error(), "pg_type_typname_nsp_index") { - // Happens when you try to create 2 tables simultaneously. - return true - } - } } // Assume that any other error that comes from postgres is a permanent error return false diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index b48aa61b51860..d607331573f94 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -231,13 +231,14 @@ func newPostgresqlTest(tb testing.TB) *PostgresqlTest { } p := newPostgresql() - _ = p.Init() + p.Connection = "database=telegraf" logger := NewLogAccumulator(tb) p.Logger = logger + p.LogLevel = "debug" + require.NoError(tb, p.Init()) pt := &PostgresqlTest{Postgresql: *p} pt.Logger = logger - pt.Connection = "database=telegraf" - pt.LogLevel = "debug" + return pt } @@ -269,6 +270,7 @@ func TestPostgresqlConnect(t *testing.T) { p = newPostgresqlTest(t) p.Connection += " pool_max_conns=2" + _ = p.Init() require.NoError(t, p.Connect()) assert.EqualValues(t, 2, p.db.Stat().MaxConns()) p.Close() @@ -336,12 +338,12 @@ func TestWrite_sequential(t *testing.T) { stmtCount++ } } - assert.Equal(t, 4, stmtCount) // BEGIN, COPY table _a, COPY table _b, COMMIT + assert.Equal(t, 6, stmtCount) // BEGIN, SAVEPOINT, COPY table _a, SAVEPOINT, COPY table _b, COMMIT } func TestWrite_concurrent(t *testing.T) { p := newPostgresqlTest(t) - p.Connection += " pool_max_conns=3" + p.dbConfig.MaxConns = 3 require.NoError(t, p.Connect()) // Write a metric so it creates a table we can lock. @@ -429,7 +431,7 @@ func TestWrite_sequentialPermError(t *testing.T) { // Test that the bad metric is dropped, and the rest of the batch succeeds. 
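Related to the SQLSTATE class checks above, and ahead of the test the comment above introduces: `pgconn.PgError` exposes the five-character SQLSTATE, and its first two characters are the class. A minimal sketch of extracting it (illustration only):

```go
// Sketch: classify a server-side error by its SQLSTATE class.
var pgErr *pgconn.PgError
if errors.As(err, &pgErr) {
	class := pgErr.Code[:2] // e.g. "53" Insufficient Resources, "57" Operator Intervention
	fmt.Printf("sqlstate=%s class=%s message=%s\n", pgErr.Code, class, pgErr.Message)
}
```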
func TestWrite_concurrentPermError(t *testing.T) {
 	p := newPostgresqlTest(t)
-	p.Connection += " pool_max_conns=2"
+	p.dbConfig.MaxConns = 2
 	require.NoError(t, p.Connect())
 
 	metrics := []telegraf.Metric{
@@ -501,7 +503,7 @@ func TestWrite_sequentialTempError(t *testing.T) {
 
 // Verify that when using concurrency, errors are not returned, but instead logged and automatically retried
 func TestWrite_concurrentTempError(t *testing.T) {
 	p := newPostgresqlTest(t)
-	p.Connection += " pool_max_conns=2"
+	p.dbConfig.MaxConns = 2
 	require.NoError(t, p.Connect())
 
 	// To avoid a race condition, we need to know when our goroutine has started listening to the log.

From 48315bbfb32db4ee5b0ebe32d69312bb8edf73ae Mon Sep 17 00:00:00 2001
From: Patrick Hemmer
Date: Fri, 27 Aug 2021 12:10:00 -0400
Subject: [PATCH 106/121] outputs.postgresql: don't try to detect mismatched
 data types

The admin may deliberately re-type a column because they know a value will
never exceed a more optimized type. The error response is the same in both
cases (permanent, with the sub-batch dropped), so let the DB handle it
instead.

In the future, we may want to detect values that won't fit inside a column
type on a per-row basis (in TableSource), but this means more code, and right
now handling the admin's broken database schema isn't worth it.
---
 plugins/outputs/postgresql/table_manager.go | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go
index 20ccabf6fc551..cdf7c0ff5434a 100644
--- a/plugins/outputs/postgresql/table_manager.go
+++ b/plugins/outputs/postgresql/table_manager.go
@@ -213,14 +213,11 @@ func (tm *TableManager) EnsureStructure(
 func (tm *TableManager) checkColumns(dbColumns map[string]utils.Column, srcColumns []utils.Column) ([]utils.Column, error) {
 	var missingColumns []utils.Column
 	for _, srcCol := range srcColumns {
-		dbCol, ok := dbColumns[srcCol.Name]
+		_, ok := dbColumns[srcCol.Name]
 		if !ok {
 			missingColumns = append(missingColumns, srcCol)
 			continue
 		}
-		if !utils.PgTypeCanContain(dbCol.Type, srcCol.Type) {
-			return nil, fmt.Errorf("column type '%s' cannot store '%s'", dbCol.Type, srcCol.Type)
-		}
 	}
 	return missingColumns, nil
 }

From 1936779efaf5336581030be54b32bca5a621d255 Mon Sep 17 00:00:00 2001
From: Patrick Hemmer
Date: Fri, 27 Aug 2021 12:28:48 -0400
Subject: [PATCH 107/121] outputs.postgresql: add support for pguint

---
 go.mod                                        |   2 +
 plugins/outputs/postgresql/columns.go         |  14 +-
 plugins/outputs/postgresql/datatype_uint8.go  | 356 ++++++++++++++++++
 plugins/outputs/postgresql/datatypes.go       |  60 +++
 plugins/outputs/postgresql/postgresql.go      |  40 ++
 plugins/outputs/postgresql/postgresql_test.go |  43 ++-
 plugins/outputs/postgresql/table_manager.go   |  11 +-
 .../outputs/postgresql/table_manager_test.go  |  51 ++-
 plugins/outputs/postgresql/table_source.go    |   4 +-
 .../outputs/postgresql/table_source_test.go   |  20 +-
 plugins/outputs/postgresql/utils/column.go    |   5 +-
 plugins/outputs/postgresql/utils/utils.go     |  66 ----
 12 files changed, 563 insertions(+), 109 deletions(-)
 create mode 100644 plugins/outputs/postgresql/datatype_uint8.go
 create mode 100644 plugins/outputs/postgresql/datatypes.go

diff --git a/go.mod b/go.mod
index 07221b980cfb6..d2be51ff6f033 100644
--- a/go.mod
+++ b/go.mod
@@ -88,7 +88,9 @@ require (
 	github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65
 	github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8
 	github.com/jackc/pgconn v1.8.1
+	github.com/jackc/pgio v1.0.0
github.com/jackc/pgproto3/v2 v2.0.7 // indirect + github.com/jackc/pgtype v1.7.0 github.com/jackc/pgx/v4 v4.11.0 github.com/james4k/rcon v0.0.0-20120923215419-8fbb8268b60a github.com/jhump/protoreflect v1.8.3-0.20210616212123-6cc1efa697ca diff --git a/plugins/outputs/postgresql/columns.go b/plugins/outputs/postgresql/columns.go index 7e403c4d45e6b..816539be65300 100644 --- a/plugins/outputs/postgresql/columns.go +++ b/plugins/outputs/postgresql/columns.go @@ -5,12 +5,12 @@ import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" // Column names and data types for standard fields (time, tag_id, tags, and fields) const ( timeColumnName = "time" - timeColumnDataType = utils.PgTimestampWithTimeZone + timeColumnDataType = PgTimestampWithTimeZone tagIDColumnName = "tag_id" - tagIDColumnDataType = utils.PgBigInt + tagIDColumnDataType = PgBigInt tagsJSONColumnName = "tags" fieldsJSONColumnName = "fields" - jsonColumnDataType = utils.PgJSONb + jsonColumnDataType = PgJSONb ) var timeColumn = utils.Column{Name: timeColumnName, Type: timeColumnDataType, Role: utils.TimeColType} @@ -18,9 +18,9 @@ var tagIDColumn = utils.Column{Name: tagIDColumnName, Type: tagIDColumnDataType, var fieldsJSONColumn = utils.Column{Name: fieldsJSONColumnName, Type: jsonColumnDataType, Role: utils.FieldColType} var tagsJSONColumn = utils.Column{Name: tagsJSONColumnName, Type: jsonColumnDataType, Role: utils.TagColType} -func columnFromTag(key string, value interface{}) utils.Column { - return utils.Column{Name: key, Type: utils.DerivePgDatatype(value), Role: utils.TagColType} +func (p *Postgresql) columnFromTag(key string, value interface{}) utils.Column { + return utils.Column{Name: key, Type: p.derivePgDatatype(value), Role: utils.TagColType} } -func columnFromField(key string, value interface{}) utils.Column { - return utils.Column{Name: key, Type: utils.DerivePgDatatype(value), Role: utils.FieldColType} +func (p *Postgresql) columnFromField(key string, value interface{}) utils.Column { + return utils.Column{Name: key, Type: p.derivePgDatatype(value), Role: utils.FieldColType} } diff --git a/plugins/outputs/postgresql/datatype_uint8.go b/plugins/outputs/postgresql/datatype_uint8.go new file mode 100644 index 0000000000000..3e771f1ec951d --- /dev/null +++ b/plugins/outputs/postgresql/datatype_uint8.go @@ -0,0 +1,356 @@ +//nolint +package postgresql + +// Copied from github.com/jackc/pgtype/int8.go and tweaked for uint64 +/* +Copyright (c) 2013-2021 Jack Christensen + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+	"fmt"
+	. "github.com/jackc/pgtype"
+	"math"
+	"strconv"
+
+	"github.com/jackc/pgio"
+)
+
+var errUndefined = errors.New("cannot encode status undefined")
+var errBadStatus = errors.New("invalid status")
+
+type Uint8 struct {
+	Int    uint64
+	Status Status
+}
+
+func (dst *Uint8) Set(src interface{}) error {
+	if src == nil {
+		*dst = Uint8{Status: Null}
+		return nil
+	}
+
+	if value, ok := src.(interface{ Get() interface{} }); ok {
+		value2 := value.Get()
+		if value2 != value {
+			return dst.Set(value2)
+		}
+	}
+
+	switch value := src.(type) {
+	case int8:
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case uint8:
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case int16:
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case uint16:
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case int32:
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case uint32:
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case int64:
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case uint64:
+		*dst = Uint8{Int: value, Status: Present}
+	case int:
+		if value < 0 {
+			return fmt.Errorf("%d is less than minimum value for Uint8", value)
+		}
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case uint:
+		if uint64(value) > math.MaxInt64 {
+			return fmt.Errorf("%d is greater than maximum value for Uint8", value)
+		}
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case string:
+		num, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return err
+		}
+		*dst = Uint8{Int: num, Status: Present}
+	case float32:
+		if value > math.MaxInt64 {
+			return fmt.Errorf("%f is greater than maximum value for Uint8", value)
+		}
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case float64:
+		if value > math.MaxInt64 {
+			return fmt.Errorf("%f is greater than maximum value for Uint8", value)
+		}
+		*dst = Uint8{Int: uint64(value), Status: Present}
+	case *int8:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *uint8:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *int16:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *uint16:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *int32:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *uint32:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *int64:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *uint64:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *int:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *uint:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *string:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *float32:
+		if value == nil {
+			*dst = Uint8{Status: Null}
+		} else {
+			return dst.Set(*value)
+		}
+	case *float64:
+		if value == nil {
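+			// a nil pointer maps to SQL NULL, matching the other pointer cases above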
+ *dst = Uint8{Status: Null} + } else { + return dst.Set(*value) + } + default: + return fmt.Errorf("cannot convert %v to Uint8", value) + } + + return nil +} + +func (dst Uint8) Get() interface{} { + switch dst.Status { + case Present: + return dst.Int + case Null: + return nil + default: + return dst.Status + } +} + +func (src *Uint8) AssignTo(dst interface{}) error { + switch v := dst.(type) { + case *int: + *v = int(src.Int) + case *int8: + *v = int8(src.Int) + case *int16: + *v = int16(src.Int) + case *int32: + *v = int32(src.Int) + case *int64: + *v = int64(src.Int) + case *uint: + *v = uint(src.Int) + case *uint8: + *v = uint8(src.Int) + case *uint16: + *v = uint16(src.Int) + case *uint32: + *v = uint32(src.Int) + case *uint64: + *v = src.Int + case *float32: + *v = float32(src.Int) + case *float64: + *v = float64(src.Int) + case *string: + *v = strconv.FormatUint(src.Int, 10) + case sql.Scanner: + return v.Scan(src.Int) + case interface{ Set(interface{}) error }: + return v.Set(src.Int) + default: + return fmt.Errorf("cannot assign %v into %T", src.Int, dst) + } + return nil +} + +func (dst *Uint8) DecodeText(ci *ConnInfo, src []byte) error { + if src == nil { + *dst = Uint8{Status: Null} + return nil + } + + n, err := strconv.ParseUint(string(src), 10, 64) + if err != nil { + return err + } + + *dst = Uint8{Int: n, Status: Present} + return nil +} + +func (dst *Uint8) DecodeBinary(ci *ConnInfo, src []byte) error { + if src == nil { + *dst = Uint8{Status: Null} + return nil + } + + if len(src) != 8 { + return fmt.Errorf("invalid length for int8: %v", len(src)) + } + + n := binary.BigEndian.Uint64(src) + + *dst = Uint8{Int: n, Status: Present} + return nil +} + +func (src Uint8) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) { + switch src.Status { + case Null: + return nil, nil + case Undefined: + return nil, errUndefined + } + + return append(buf, strconv.FormatUint(src.Int, 10)...), nil +} + +func (src Uint8) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) { + switch src.Status { + case Null: + return nil, nil + case Undefined: + return nil, errUndefined + } + + return pgio.AppendUint64(buf, src.Int), nil +} + +// Scan implements the database/sql Scanner interface. +func (dst *Uint8) Scan(src interface{}) error { + if src == nil { + *dst = Uint8{Status: Null} + return nil + } + + switch src := src.(type) { + case uint64: + *dst = Uint8{Int: src, Status: Present} + return nil + case string: + return dst.DecodeText(nil, []byte(src)) + case []byte: + srcCopy := make([]byte, len(src)) + copy(srcCopy, src) + return dst.DecodeText(nil, srcCopy) + } + + return fmt.Errorf("cannot scan %T", src) +} + +// Value implements the database/sql/driver Valuer interface. 
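+// Since driver.Value has no unsigned integer kind, values above math.MaxInt64 are
+// returned as their two's-complement int64 and will appear negative to database/sql.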
+func (src Uint8) Value() (driver.Value, error) { + switch src.Status { + case Present: + return int64(src.Int), nil + case Null: + return nil, nil + default: + return nil, errUndefined + } +} + +func (src Uint8) MarshalJSON() ([]byte, error) { + switch src.Status { + case Present: + return []byte(strconv.FormatUint(src.Int, 10)), nil + case Null: + return []byte("null"), nil + case Undefined: + return nil, errUndefined + } + + return nil, errBadStatus +} + +func (dst *Uint8) UnmarshalJSON(b []byte) error { + var n *uint64 + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + + if n == nil { + *dst = Uint8{Status: Null} + } else { + *dst = Uint8{Int: *n, Status: Present} + } + + return nil +} diff --git a/plugins/outputs/postgresql/datatypes.go b/plugins/outputs/postgresql/datatypes.go new file mode 100644 index 0000000000000..c16f6de791a49 --- /dev/null +++ b/plugins/outputs/postgresql/datatypes.go @@ -0,0 +1,60 @@ +package postgresql + +import ( + "time" +) + +// Constants for naming PostgreSQL data types both in +// their short and long versions. +const ( + PgBool = "boolean" + PgSmallInt = "smallint" + PgInteger = "integer" + PgBigInt = "bigint" + PgReal = "real" + PgDoublePrecision = "double precision" + PgNumeric = "numeric" + PgText = "text" + PgTimestampWithTimeZone = "timestamp with time zone" + PgTimestampWithoutTimeZone = "timestamp without time zone" + PgSerial = "serial" + PgJSONb = "jsonb" +) + +// Types from pguint +const ( + PgUint8 = "uint8" +) + +// DerivePgDatatype returns the appropriate PostgreSQL data type +// that could hold the value. +func (p *Postgresql) derivePgDatatype(value interface{}) string { + if p.UseUint8 { + if _, ok := value.(uint64); ok { + return PgUint8 + } + } + + switch value.(type) { + case bool: + return PgBool + case uint64: + return PgNumeric + case int64, int, uint, uint32: + return PgBigInt + case int32: + return PgInteger + case int16, int8: + return PgSmallInt + case float64: + return PgDoublePrecision + case float32: + return PgReal + case string: + return PgText + case time.Time: + return PgTimestampWithTimeZone + default: + return PgText + } +} diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index e40869310e784..4ba887545e404 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -9,6 +9,7 @@ import ( "github.com/coocood/freecache" "github.com/jackc/pgconn" + "github.com/jackc/pgtype" "github.com/jackc/pgx/v4" "github.com/jackc/pgx/v4/pgxpool" @@ -91,6 +92,9 @@ var sampleConfig = ` # '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''', # ] + ## Controls whether to use the uint8 data type provided by the pguint extension. + # use_uint8 = false + ## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This ## controls the maximum backoff duration. 
  # retry_max_backoff = "15s"
@@ -111,6 +115,7 @@ type Postgresql struct {
 	AddColumnTemplates         []*sqltemplate.Template `toml:"add_column_templates"`
 	TagTableCreateTemplates    []*sqltemplate.Template `toml:"tag_table_create_templates"`
 	TagTableAddColumnTemplates []*sqltemplate.Template `toml:"tag_table_add_column_templates"`
+	UseUint8                   bool                    `toml:"use_uint8"`
 	RetryMaxBackoff            config.Duration         `toml:"retry_max_backoff"`
 	LogLevel                   string                  `toml:"log_level"`
 
@@ -121,6 +126,8 @@ type Postgresql struct {
 	tableManager *TableManager
 	tagsCache    *freecache.Cache
 
+	pguint8 *pgtype.DataType
+
 	writeChan      chan *TableSource
 	writeWaitGroup *utils.WaitGroup
 
@@ -203,6 +210,10 @@ func (p *Postgresql) Init() error {
 		}
 	}
 
+	if p.UseUint8 {
+		p.dbConfig.AfterConnect = p.registerUint8
+	}
+
 	return nil
 }
 
@@ -238,6 +249,26 @@ func (p *Postgresql) Connect() error {
 	return nil
 }
 
+func (p *Postgresql) registerUint8(ctx context.Context, conn *pgx.Conn) error {
+	if p.pguint8 == nil {
+		dt := pgtype.DataType{
+			// Use 'numeric' type for encoding/decoding across the wire
+			// It might be more efficient to create a native pgtype.Type, but would involve a lot of code. So this is
+			// probably good enough.
+			Value: &Uint8{},
+			Name:  "uint8",
+		}
+		row := conn.QueryRow(p.dbContext, "SELECT oid FROM pg_type WHERE typname=$1", dt.Name)
+		if err := row.Scan(&dt.OID); err != nil {
+			return fmt.Errorf("retrieving OID for uint8 data type: %w", err)
+		}
+		p.pguint8 = &dt
+	}
+
+	conn.ConnInfo().RegisterDataType(*p.pguint8)
+	return nil
+}
+
 // Close closes the connection(s) to the database.
 func (p *Postgresql) Close() error {
 	if p.writeChan != nil {
@@ -367,6 +398,15 @@ func isTempError(err error) bool {
 	case "53": // Insufficient Resources
 		return true
 	case "57": // Operator Intervention
+		switch pgErr.Code { //nolint:revive
+		case "57014": // query_cancelled
+			// This one is a bit of a mess. This code comes back when PGX cancels the query, such as when PGX can't
+			// convert a value to the column's type. So even though the error was originally generated by PGX, we get
+			// it back from Postgres.
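+			// Treating it as permanent avoids endlessly retrying a value that will never convert;
+			// the sub-batch is dropped instead.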
+ return false + case "57P04": // database_dropped + return false + } return true } // Assume that any other error that comes from postgres is a permanent error diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index d607331573f94..af335820a3260 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "flag" "fmt" + "math" "os" "regexp" "strings" @@ -220,7 +221,7 @@ func prepareDatabase(name string) error { } type PostgresqlTest struct { - Postgresql + *Postgresql Logger *LogAccumulator } @@ -236,9 +237,21 @@ func newPostgresqlTest(tb testing.TB) *PostgresqlTest { p.Logger = logger p.LogLevel = "debug" require.NoError(tb, p.Init()) - pt := &PostgresqlTest{Postgresql: *p} + pt := &PostgresqlTest{Postgresql: p} pt.Logger = logger + //p.dbConfig.AfterRelease = func(conn *pgx.Conn) bool { + // loglen := len(logger.logs) + // resp := conn.QueryRow(ctx, "select statement_timestamp() != transaction_timestamp(), statement_timestamp(), transaction_timestamp()") + // logger.logs = logger.logs[:loglen] // drop any logs we just created + // var inTx bool + // var ts1, ts2 time.Time + // if assert.NoError(tb, resp.Scan(&inTx, &ts1, &ts2)) { + // return assert.False(tb, inTx, "connection was left in a transaction %s != %s", ts1, ts2) + // } + // return true + //} + return pt } @@ -647,3 +660,29 @@ func TestWrite_tagError_foreignConstraint(t *testing.T) { require.Len(t, dump, 1) assert.EqualValues(t, 1, dump[0]["v"]) } + +func TestWrite_UnsignedIntegers(t *testing.T) { + p := newPostgresqlTest(t) + p.UseUint8 = true + _ = p.Init() + require.NoError(t, p.Connect()) + + row := p.db.QueryRow(ctx, "SELECT count(*) FROM pg_extension WHERE extname='uint'") + var n int + require.NoError(t, row.Scan(&n)) + if n == 0 { + t.Skipf("pguint extension is not installed") + t.SkipNow() + } + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{}, MSI{"v": uint64(math.MaxUint64)}), + } + require.NoError(t, p.Write(metrics)) + + dump := dbTableDump(t, p.db, "") + + if assert.Len(t, dump, 1) { + assert.EqualValues(t, uint64(math.MaxUint64), dump[0]["v"]) + } +} diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index cdf7c0ff5434a..40442d504fee9 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -13,7 +13,10 @@ import ( const ( refreshTableStructureStatement = ` - SELECT column_name, data_type, col_description(format('%I.%I', table_schema, table_name)::regclass::oid, ordinal_position) + SELECT + column_name, + CASE WHEN data_type='USER-DEFINED' THEN udt_name ELSE data_type END, + col_description(format('%I.%I', table_schema, table_name)::regclass::oid, ordinal_position) FROM information_schema.columns WHERE table_schema = $1 and table_name = $2 ` @@ -92,9 +95,9 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, db dbh, tbl * cols := make(map[string]utils.Column) for rows.Next() { - var colName, colTypeStr string + var colName, colType string desc := new(string) - err := rows.Scan(&colName, &colTypeStr, &desc) + err := rows.Scan(&colName, &colType, &desc) if err != nil { return err } @@ -121,7 +124,7 @@ func (tm *TableManager) refreshTableStructure(ctx context.Context, db dbh, tbl * cols[colName] = utils.Column{ Name: colName, - Type: utils.PgDataType(colTypeStr), + Type: colType, Role: role, } } diff --git 
a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go index 119f66a507586..83e5b4a3da49b 100644 --- a/plugins/outputs/postgresql/table_manager_test.go +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -16,8 +16,8 @@ func TestTableManager_EnsureStructure(t *testing.T) { require.NoError(t, p.Connect()) cols := []utils.Column{ - columnFromTag("foo", ""), - columnFromField("baz", 0), + p.columnFromTag("foo", ""), + p.columnFromField("baz", 0), } missingCols, err := p.tableManager.EnsureStructure( ctx, @@ -41,8 +41,8 @@ func TestTableManager_refreshTableStructure(t *testing.T) { require.NoError(t, p.Connect()) cols := []utils.Column{ - columnFromTag("foo", ""), - columnFromField("baz", 0), + p.columnFromTag("foo", ""), + p.columnFromField("baz", 0), } _, err := p.tableManager.EnsureStructure( ctx, @@ -73,13 +73,36 @@ func TestTableManager_MatchSource(t *testing.T) { metrics := []telegraf.Metric{ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) assert.Contains(t, p.tableManager.table(t.Name()+p.TagTableSuffix).Columns(), "tag") assert.Contains(t, p.tableManager.table(t.Name()).Columns(), "a") } +func TestTableManager_MatchSource_UnsignedIntegers(t *testing.T) { + p := newPostgresqlTest(t) + p.UseUint8 = true + _ = p.Init() + require.NoError(t, p.Connect()) + + row := p.db.QueryRow(ctx, "SELECT count(*) FROM pg_extension WHERE extname='uint'") + var n int + require.NoError(t, row.Scan(&n)) + if n == 0 { + t.Skipf("pguint extension is not installed") + t.SkipNow() + } + + metrics := []telegraf.Metric{ + newMetric(t, "", nil, MSI{"a": uint64(1)}), + } + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] + + require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) + assert.Equal(t, PgUint8, p.tableManager.table(t.Name()).Columns()["a"].Type) +} + func TestTableManager_noCreateTable(t *testing.T) { p := newPostgresqlTest(t) p.CreateTemplates = nil @@ -88,7 +111,7 @@ func TestTableManager_noCreateTable(t *testing.T) { metrics := []telegraf.Metric{ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] require.Error(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) } @@ -102,7 +125,7 @@ func TestTableManager_noCreateTagTable(t *testing.T) { metrics := []telegraf.Metric{ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] require.Error(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) } @@ -116,7 +139,7 @@ func TestTableManager_cache(t *testing.T) { metrics := []telegraf.Metric{ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) } @@ -130,14 +153,14 @@ func TestTableManager_noAlterMissingTag(t *testing.T) { metrics := []telegraf.Metric{ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) metrics = []telegraf.Metric{ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}), 
newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}), } - tsrc = NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) assert.NotContains(t, tsrc.ColumnNames(), "bar") } @@ -153,14 +176,14 @@ func TestTableManager_noAlterMissingTagTableTag(t *testing.T) { metrics := []telegraf.Metric{ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) metrics = []telegraf.Metric{ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}), newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}), } - tsrc = NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()] ttsrc := NewTagTableSource(tsrc) require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) assert.NotContains(t, ttsrc.ColumnNames(), "bar") @@ -175,14 +198,14 @@ func TestTableManager_noAlterMissingField(t *testing.T) { metrics := []telegraf.Metric{ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) metrics = []telegraf.Metric{ newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}), newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 3, "b": 3}), } - tsrc = NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) assert.NotContains(t, tsrc.ColumnNames(), "b") } diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index 279cab02e5122..d81e3fe925f7f 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -101,13 +101,13 @@ func (tsrc *TableSource) AddMetric(metric telegraf.Metric) { if !tsrc.postgresql.TagsAsJsonb { for _, t := range metric.TagList() { - tsrc.tagColumns.Add(columnFromTag(t.Key, t.Value)) + tsrc.tagColumns.Add(tsrc.postgresql.columnFromTag(t.Key, t.Value)) } } if !tsrc.postgresql.FieldsAsJsonb { for _, f := range metric.FieldList() { - tsrc.fieldColumns.Add(columnFromField(f.Key, f.Value)) + tsrc.fieldColumns.Add(tsrc.postgresql.columnFromField(f.Key, f.Value)) } } diff --git a/plugins/outputs/postgresql/table_source_test.go b/plugins/outputs/postgresql/table_source_test.go index 670e090da2c92..03f2712ce0075 100644 --- a/plugins/outputs/postgresql/table_source_test.go +++ b/plugins/outputs/postgresql/table_source_test.go @@ -45,7 +45,7 @@ func TestTableSource_tagJSONB(t *testing.T) { newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] row := nextSrcRow(tsrc) require.NoError(t, tsrc.Err()) @@ -65,7 +65,7 @@ func TestTableSource_tagTable(t *testing.T) { newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] ttsrc := NewTagTableSource(tsrc) ttrow := nextSrcRow(ttsrc) assert.EqualValues(t, "one", ttrow["a"]) @@ -85,7 +85,7 @@ func TestTableSource_tagTableJSONB(t *testing.T) { newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), } - tsrc := 
NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] ttsrc := NewTagTableSource(tsrc) ttrow := nextSrcRow(ttsrc) var tags MSI @@ -101,7 +101,7 @@ func TestTableSource_fieldsJSONB(t *testing.T) { newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1, "b": 2}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] row := nextSrcRow(tsrc) var fields MSI require.NoError(t, json.Unmarshal(row["fields"].([]byte), &fields)) @@ -118,7 +118,7 @@ func TestTableSource_DropColumn_tag(t *testing.T) { newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] // Drop column "b" var col utils.Column @@ -148,7 +148,7 @@ func TestTableSource_DropColumn_tag_fkTrue_fcTrue(t *testing.T) { newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] // Drop column "b" var col utils.Column @@ -182,7 +182,7 @@ func TestTableSource_DropColumn_tag_fkTrue_fcFalse(t *testing.T) { newMetric(t, "", MSS{"a": "one", "b": "two"}, MSI{"v": 1}), newMetric(t, "", MSS{"a": "one"}, MSI{"v": 2}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] // Drop column "b" var col utils.Column @@ -213,7 +213,7 @@ func TestTableSource_DropColumn_field(t *testing.T) { newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2, "b": 3}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] // Drop column "a" var col utils.Column @@ -238,7 +238,7 @@ func TestTableSource_InconsistentTags(t *testing.T) { newMetric(t, "", MSS{"a": "1"}, MSI{"b": 2}), newMetric(t, "", MSS{"c": "3"}, MSI{"d": 4}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] trow := nextSrcRow(tsrc) assert.EqualValues(t, "1", trow["a"]) @@ -258,7 +258,7 @@ func TestTagTableSource_InconsistentTags(t *testing.T) { newMetric(t, "", MSS{"a": "1"}, MSI{"b": 2}), newMetric(t, "", MSS{"c": "3"}, MSI{"d": 4}), } - tsrc := NewTableSources(&p.Postgresql, metrics)[t.Name()] + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] ttsrc := NewTagTableSource(tsrc) // ttsrc is in non-deterministic order diff --git a/plugins/outputs/postgresql/utils/column.go b/plugins/outputs/postgresql/utils/column.go index e1428d2600a87..5363a7d2d8a33 100644 --- a/plugins/outputs/postgresql/utils/column.go +++ b/plugins/outputs/postgresql/utils/column.go @@ -19,14 +19,11 @@ const ( FieldColType ) -// PgDataType defines a string that represents a PostgreSQL data type. -type PgDataType string - type Column struct { Name string // the data type of each column should have in the db. 
used when checking // if the schema matches or it needs updates - Type PgDataType + Type string // the role each column has, helps properly map the metric to the db Role ColumnRole } diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index 8a790d2125844..18696299ce1aa 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -7,7 +7,6 @@ import ( "hash/fnv" "strings" "sync/atomic" - "time" "github.com/jackc/pgx/v4" @@ -54,71 +53,6 @@ func FullTableName(schema, name string) pgx.Identifier { return pgx.Identifier{name} } -// Constants for naming PostgreSQL data types both in -// their short and long versions. -const ( - PgBool = "boolean" - PgSmallInt = "smallint" - PgInteger = "integer" - PgBigInt = "bigint" - PgReal = "real" - PgDoublePrecision = "double precision" - PgNumeric = "numeric" - PgText = "text" - PgTimestampWithTimeZone = "timestamp with time zone" - PgTimestampWithoutTimeZone = "timestamp without time zone" - PgSerial = "serial" - PgJSONb = "jsonb" -) - -// DerivePgDatatype returns the appropriate PostgreSQL data type -// that could hold the value. -func DerivePgDatatype(value interface{}) PgDataType { - switch value.(type) { - case bool: - return PgBool - case uint64: - return PgNumeric - case int64, int, uint, uint32: - return PgBigInt - case int32: - return PgInteger - case int16, int8: - return PgSmallInt - case float64: - return PgDoublePrecision - case float32: - return PgReal - case string: - return PgText - case time.Time: - return PgTimestampWithTimeZone - default: - return PgText - } -} - -// PgTypeCanContain tells you if one PostgreSQL data type can contain the values of another without -// significant data loss (e.g. a double can store an integer, but you may lose some precision). -func PgTypeCanContain(canThis PgDataType, containThis PgDataType) bool { - switch canThis { - case containThis: - return true - case PgBigInt: - return containThis == PgInteger || containThis == PgSmallInt - case PgInteger: - return containThis == PgSmallInt - case PgDoublePrecision, PgReal: // You can store a real in a double, you just lose precision - return containThis == PgReal || containThis == PgBigInt || containThis == PgInteger || containThis == PgSmallInt - case PgNumeric: - return containThis == PgBigInt || containThis == PgSmallInt || containThis == PgInteger || containThis == PgReal || containThis == PgDoublePrecision - case PgTimestampWithTimeZone: - return containThis == PgTimestampWithoutTimeZone - default: - return false - } -} - // pgxLogger makes telegraf.Logger compatible with pgx.Logger type PGXLogger struct { telegraf.Logger From 0038e579ddca0a916e9d7ee55d3cc004f0ee9158 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 31 Aug 2021 12:41:42 -0400 Subject: [PATCH 108/121] outputs.postgresql: rewrite schema updates to address concurrency issues Total rewrite of TableManager.EnsureStructure. This addresses concurrency issues with multiple telegraf processes attempting to create tables at the same time. The code is longer, but also simpler. 
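
The new flow is a double-checked-locking shape: check the cached columns under
a read lock, re-check under a write lock, then re-check the live schema inside
a transaction that holds an advisory lock shared by every telegraf process.
Roughly (an illustrative sketch, not the literal patch code; the function name
is invented, but dbh, tableState, diffMissingColumns, getColumns, and
schemaAdvisoryLockID are introduced below):

```go
func (tm *TableManager) ensureColumnsSketch(ctx context.Context, db dbh, tbl *tableState, want []utils.Column) error {
	tbl.RLock()
	missing := diffMissingColumns(tbl.columns, want) // fast path: cache already satisfies the request
	tbl.RUnlock()
	if len(missing) == 0 {
		return nil
	}

	tbl.Lock() // slow path: serialize schema changes within this process
	defer tbl.Unlock()
	if missing = diffMissingColumns(tbl.columns, want); len(missing) == 0 {
		return nil
	}

	tx, err := db.Begin(ctx)
	if err != nil {
		return err
	}
	defer tx.Rollback(ctx) //nolint:errcheck

	// Serialize schema changes across processes; released automatically at commit/rollback.
	if _, err := tx.Exec(ctx, "SELECT pg_advisory_xact_lock($1)", schemaAdvisoryLockID); err != nil {
		return err
	}

	// With the lock held, trust only the live schema, and CREATE/ALTER just what is still missing.
	curr, err := tm.getColumns(ctx, tx, tbl.name)
	if err != nil {
		return err
	}
	if missing = diffMissingColumns(curr, want); len(missing) > 0 {
		// ... render and execute the create/add-column templates here ...
	}

	if curr, err = tm.getColumns(ctx, tx, tbl.name); err != nil {
		return err
	}
	if err := tx.Commit(ctx); err != nil {
		return err
	}
	tbl.columns = curr
	return nil
}
```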
--- plugins/outputs/postgresql/postgresql.go | 36 +- .../postgresql/postgresql_bench_test.go | 1 + plugins/outputs/postgresql/postgresql_test.go | 124 +++-- .../postgresql/sqltemplate/template.go | 8 +- plugins/outputs/postgresql/table_manager.go | 430 +++++++++--------- .../outputs/postgresql/table_manager_test.go | 64 ++- plugins/outputs/postgresql/utils/utils.go | 26 +- 7 files changed, 408 insertions(+), 281 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index 4ba887545e404..b94f685f27e23 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -277,6 +277,7 @@ func (p *Postgresql) Close() error { select { case <-p.writeWaitGroup.C(): case <-time.NewTimer(time.Second * 5).C: + p.Logger.Warnf("Shutdown timeout expired while waiting for metrics to flush. Some metrics may not be written to database.") } } @@ -290,10 +291,23 @@ func (p *Postgresql) Close() error { func (p *Postgresql) Write(metrics []telegraf.Metric) error { tableSources := NewTableSources(p, metrics) + var err error if p.db.Stat().MaxConns() > 1 { - return p.writeConcurrent(tableSources) + err = p.writeConcurrent(tableSources) + } else { + err = p.writeSequential(tableSources) + } + if err != nil { + var pgErr *pgconn.PgError + if errors.As(err, &pgErr) { + // PgError doesn't include .Detail in Error(), so we concat it onto .Message. + if pgErr.Detail != "" { + pgErr.Message += "; " + pgErr.Detail + } + } } - return p.writeSequential(tableSources) + + return err } func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error { @@ -326,7 +340,6 @@ func (p *Postgresql) writeSequential(tableSources map[string]*TableSource) error } } } - // savepoints do not need to be committed (released), so save the round trip and skip it } @@ -388,6 +401,11 @@ func isTempError(err error) bool { case "25": // Invalid Transaction State // If we're here, this is a bug, but recoverable return true + case "40": // Transaction Rollback + switch pgErr.Code { //nolint:revive + case "40P01": // deadlock_detected + return true + } case "42": // Syntax Error or Access Rule Violation switch pgErr.Code { case "42701": // duplicate_column @@ -426,19 +444,11 @@ func isTempError(err error) bool { func (p *Postgresql) writeRetry(ctx context.Context, tableSource *TableSource) error { backoff := time.Duration(0) for { - tx, err := p.db.Begin(ctx) - if err != nil { - return err - } - - err = p.writeMetricsFromMeasure(ctx, tx, tableSource) + err := p.writeMetricsFromMeasure(ctx, p.db, tableSource) if err == nil { - if err := tx.Commit(ctx); err == nil { - return nil - } + return nil } - _ = tx.Rollback(ctx) if !isTempError(err) { return err } diff --git a/plugins/outputs/postgresql/postgresql_bench_test.go b/plugins/outputs/postgresql/postgresql_bench_test.go index 3711312a46642..8780385feffa4 100644 --- a/plugins/outputs/postgresql/postgresql_bench_test.go +++ b/plugins/outputs/postgresql/postgresql_bench_test.go @@ -26,6 +26,7 @@ func benchmarkPostgresql(b *testing.B, gen <-chan []telegraf.Metric, concurrency p.Connection += fmt.Sprintf(" pool_max_conns=%d", concurrency) p.TagsAsForeignKeys = foreignTags p.LogLevel = "" + _ = p.Init() if err := p.Connect(); err != nil { b.Fatalf("Error: %s", err) } diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index af335820a3260..b8180cdc66b20 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ 
b/plugins/outputs/postgresql/postgresql_test.go
@@ -6,6 +6,7 @@ import (
 	"flag"
 	"fmt"
 	"math"
+	"math/rand"
 	"os"
 	"regexp"
 	"strings"
@@ -38,9 +39,10 @@ func (l Log) String() string {
 
 // LogAccumulator is a log collector that satisfies telegraf.Logger.
 type LogAccumulator struct {
-	logs []Log
-	cond *sync.Cond
-	tb   testing.TB
+	logs      []Log
+	cond      *sync.Cond
+	tb        testing.TB
+	emitLevel pgx.LogLevel
 }
 
 func NewLogAccumulator(tb testing.TB) *LogAccumulator {
@@ -51,16 +53,31 @@ func NewLogAccumulator(tb testing.TB) *LogAccumulator {
 }
 
 func (la *LogAccumulator) append(level pgx.LogLevel, format string, args []interface{}) {
+	la.tb.Helper()
+
 	la.cond.L.Lock()
 	log := Log{level, format, args}
 	la.logs = append(la.logs, log)
-	s := log.String()
-	la.tb.Helper()
-	la.tb.Log(s)
+
+	if la.emitLevel == 0 || log.level <= la.emitLevel {
+		la.tb.Log(log.String())
+	}
+
 	la.cond.Broadcast()
 	la.cond.L.Unlock()
 }
 
+func (la *LogAccumulator) HasLevel(level pgx.LogLevel) bool {
+	la.cond.L.Lock()
+	defer la.cond.L.Unlock()
+	for _, log := range la.logs {
+		if log.level > 0 && log.level <= level {
+			return true
+		}
+	}
+	return false
+}
+
 func (la *LogAccumulator) WaitLen(n int) []Log {
 	la.cond.L.Lock()
 	defer la.cond.L.Unlock()
@@ -185,7 +202,7 @@ var ctx = context.Background()
 func TestMain(m *testing.M) {
 	flag.Parse()
 	if testing.Short() {
-		return
+		os.Exit(m.Run())
 	}
 
 	// Use the integration server if no other PG addr env vars specified.
@@ -240,17 +257,13 @@ func newPostgresqlTest(tb testing.TB) *PostgresqlTest {
 	pt := &PostgresqlTest{Postgresql: p}
 	pt.Logger = logger
 
-	//p.dbConfig.AfterRelease = func(conn *pgx.Conn) bool {
-	//	loglen := len(logger.logs)
-	//	resp := conn.QueryRow(ctx, "select statement_timestamp() != transaction_timestamp(), statement_timestamp(), transaction_timestamp()")
-	//	logger.logs = logger.logs[:loglen] // drop any logs we just created
-	//	var inTx bool
-	//	var ts1, ts2 time.Time
-	//	if assert.NoError(tb, resp.Scan(&inTx, &ts1, &ts2)) {
-	//		return assert.False(tb, inTx, "connection was left in a transaction %s != %s", ts1, ts2)
-	//	}
-	//	return true
-	//}
+	tb.Cleanup(func() {
+		if pt.db != nil {
+			// This will block forever (timeout the test) if not all connections were released (committed/rolled back).
+			// We can't use pt.db.Stats() because in some cases, pgx releases the connection asynchronously.
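+			// (pgxpool's Close blocks until every acquired connection has been released back to the pool.)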
+ pt.db.Close() + } + }) return pt } @@ -279,14 +292,12 @@ func TestPostgresqlConnect(t *testing.T) { p := newPostgresqlTest(t) require.NoError(t, p.Connect()) assert.EqualValues(t, 1, p.db.Stat().MaxConns()) - p.Close() p = newPostgresqlTest(t) p.Connection += " pool_max_conns=2" _ = p.Init() require.NoError(t, p.Connect()) assert.EqualValues(t, 2, p.db.Stat().MaxConns()) - p.Close() } func newMetric( @@ -364,7 +375,7 @@ func TestWrite_concurrent(t *testing.T) { newMetric(t, "_a", MSS{}, MSI{"v": 1}), } require.NoError(t, p.Write(metrics)) - p.Logger.WaitForCopy(t.Name()+"_a", true) + p.Logger.WaitForCopy(t.Name()+"_a", false) // clear so that the WaitForCopy calls below don't pick up this one p.Logger.Clear() @@ -372,7 +383,7 @@ func TestWrite_concurrent(t *testing.T) { tx, err := p.db.Begin(ctx) require.NoError(t, err) defer tx.Rollback(ctx) //nolint:errcheck - _, err = tx.Exec(ctx, "LOCK TABLE "+utils.QuoteIdent(t.Name()+"_a")) + _, err = tx.Exec(ctx, "LOCK TABLE "+utils.QuoteIdentifier(t.Name()+"_a")) require.NoError(t, err) metrics = []telegraf.Metric{ @@ -389,10 +400,10 @@ func TestWrite_concurrent(t *testing.T) { } require.NoError(t, p.Write(metrics)) - p.Logger.WaitForCopy(t.Name()+"_b", true) + p.Logger.WaitForCopy(t.Name()+"_b", false) // release the lock on table _a _ = tx.Rollback(ctx) - p.Logger.WaitForCopy(t.Name()+"_a", true) + p.Logger.WaitForCopy(t.Name()+"_a", false) dumpA := dbTableDump(t, p.db, "_a") dumpB := dbTableDump(t, p.db, "_b") @@ -451,7 +462,7 @@ func TestWrite_concurrentPermError(t *testing.T) { newMetric(t, "_a", MSS{}, MSI{"v": 1}), } require.NoError(t, p.Write(metrics)) - p.Logger.WaitForCopy(t.Name()+"_a", true) + p.Logger.WaitForCopy(t.Name()+"_a", false) metrics = []telegraf.Metric{ newMetric(t, "_a", MSS{}, MSI{"v": "a"}), @@ -461,7 +472,7 @@ func TestWrite_concurrentPermError(t *testing.T) { p.Logger.WaitFor(func(l Log) bool { return strings.Contains(l.String(), "write error") }, false) - p.Logger.WaitForCopy(t.Name()+"_b", true) + p.Logger.WaitForCopy(t.Name()+"_b", false) dumpA := dbTableDump(t, p.db, "_a") dumpB := dbTableDump(t, p.db, "_b") @@ -556,7 +567,7 @@ func TestWrite_concurrentTempError(t *testing.T) { } require.NoError(t, p.Write(metrics)) - p.Logger.WaitForCopy(t.Name()+"_a", true) + p.Logger.WaitForCopy(t.Name()+"_a", false) dumpA := dbTableDump(t, p.db, "_a") assert.Len(t, dumpA, 1) @@ -686,3 +697,62 @@ func TestWrite_UnsignedIntegers(t *testing.T) { assert.EqualValues(t, uint64(math.MaxUint64), dump[0]["v"]) } } + +// Last ditch effort to find any concurrency issues. 
+func TestStressConcurrency(t *testing.T) { + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"foo": "bar"}, MSI{"a": 1}), + newMetric(t, "", MSS{"pop": "tart"}, MSI{"b": 1}), + newMetric(t, "", MSS{"foo": "bar", "pop": "tart"}, MSI{"a": 2, "b": 2}), + newMetric(t, "_b", MSS{"foo": "bar"}, MSI{"a": 1}), + } + + concurrency := 4 + loops := 100 + + pctl := newPostgresqlTest(t) + pctl.Logger.emitLevel = pgx.LogLevelWarn + require.NoError(t, pctl.Connect()) + + for i := 0; i < loops; i++ { + var wgStart, wgDone sync.WaitGroup + wgStart.Add(concurrency) + wgDone.Add(concurrency) + for j := 0; j < concurrency; j++ { + go func() { + mShuf := make([]telegraf.Metric, len(metrics)) + copy(mShuf, metrics) + rand.Shuffle(len(mShuf), func(a, b int) { mShuf[a], mShuf[b] = mShuf[b], mShuf[a] }) + + p := newPostgresqlTest(t) + p.TagsAsForeignKeys = true + p.Logger.emitLevel = pgx.LogLevelWarn + p.dbConfig.MaxConns = int32(rand.Intn(3) + 1) + require.NoError(t, p.Connect()) + wgStart.Done() + wgStart.Wait() + + err := p.Write(mShuf) + assert.NoError(t, err) + assert.NoError(t, p.Close()) + assert.False(t, p.Logger.HasLevel(pgx.LogLevelWarn)) + wgDone.Done() + }() + } + wgDone.Wait() + + if t.Failed() { + break + } + + for _, stmt := range []string{ + "DROP TABLE \"" + t.Name() + "_tag\"", + "DROP TABLE \"" + t.Name() + "\"", + "DROP TABLE \"" + t.Name() + "_b_tag\"", + "DROP TABLE \"" + t.Name() + "_b\"", + } { + _, err := pctl.db.Exec(ctx, stmt) + require.NoError(t, err) + } + } +} diff --git a/plugins/outputs/postgresql/sqltemplate/template.go b/plugins/outputs/postgresql/sqltemplate/template.go index 9897ddc3ae5d3..b4a8a699b72c4 100644 --- a/plugins/outputs/postgresql/sqltemplate/template.go +++ b/plugins/outputs/postgresql/sqltemplate/template.go @@ -27,7 +27,7 @@ The following variables are available within all template executions: * tagTable - A Table object referring to the table containing the tags. In the case of TagsAsForeignKeys and `table` is the metrics table, - then `tagTable` is the table containing the tags for this one. + then `tagTable` is the table containing the tags for it. Each object has helper methods that may be used within the template. See the documentation for the appropriate type. @@ -146,14 +146,14 @@ func asString(obj interface{}) string { // // QuoteIdentifier is accessible within templates as 'quoteIdentifier'. func QuoteIdentifier(name interface{}) string { - return `"` + strings.ReplaceAll(asString(name), `"`, `""`) + `"` + return utils.QuoteIdentifier(asString(name)) } // QuoteLiteral quotes the given string as a Postgres literal (single-quotes the value). // // QuoteLiteral is accessible within templates as 'quoteLiteral'. func QuoteLiteral(str interface{}) string { - return "'" + strings.ReplaceAll(asString(str), "'", "''") + "'" + return utils.QuoteLiteral(asString(str)) } // Table is an object which represents a Postgres table. @@ -225,7 +225,7 @@ func (tc Column) String() string { // Definition returns the column's definition (as used in a CREATE TABLE statement). E.G: // "my_column" bigint func (tc Column) Definition() string { - return tc.Identifier() + " " + string(tc.Type) + return tc.Identifier() + " " + tc.Type } // Identifier returns the column's quoted identifier. 
diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go
index 40442d504fee9..65c66fd568870 100644
--- a/plugins/outputs/postgresql/table_manager.go
+++ b/plugins/outputs/postgresql/table_manager.go
@@ -5,42 +5,20 @@ import (
 	"fmt"
 	"strings"
 	"sync"
-	"sync/atomic"
+
+	"github.com/jackc/pgx/v4"
 
 	"github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate"
 	"github.com/influxdata/telegraf/plugins/outputs/postgresql/utils"
 )
 
-const (
-	refreshTableStructureStatement = `
-	SELECT
-		column_name,
-		CASE WHEN data_type='USER-DEFINED' THEN udt_name ELSE data_type END,
-		col_description(format('%I.%I', table_schema, table_name)::regclass::oid, ordinal_position)
-	FROM information_schema.columns
-	WHERE table_schema = $1 and table_name = $2
-	`
-)
+// This is an arbitrary constant value shared between multiple telegraf processes used for locking schema updates.
+const schemaAdvisoryLockID int64 = 5705450890675909945
 
 type tableState struct {
-	name string
-	// The atomic.Value protects columns from simple data race corruption as columns can be read while the mutex is
-	// locked.
-	columns atomic.Value
-	// The mutex protects columns when doing a check-and-set operation. It prevents 2 goroutines from independently
-	// checking the table's schema, and both trying to modify it, whether inconsistently, or to the same result.
-	sync.Mutex
-}
-
-func (ts *tableState) Columns() map[string]utils.Column {
-	cols := ts.columns.Load()
-	if cols == nil {
-		return nil
-	}
-	return cols.(map[string]utils.Column)
-}
-func (ts *tableState) SetColumns(cols map[string]utils.Column) {
-	ts.columns.Store(cols)
+	name    string
+	columns map[string]utils.Column
+	sync.RWMutex
 }
 
 type TableManager struct {
@@ -49,8 +27,6 @@ type TableManager struct {
 	// map[tableName]map[columnName]utils.Column
 	tables      map[string]*tableState
 	tablesMutex sync.Mutex
-	// schemaMutex is used to prevent parallel table creations/alters in Postgres.
-	schemaMutex sync.Mutex
 }
 
 // NewTableManager returns an instance of the tables.Manager interface
@@ -66,7 +42,9 @@ func NewTableManager(postgresql *Postgresql) *TableManager {
 func (tm *TableManager) ClearTableCache() {
 	tm.tablesMutex.Lock()
 	for _, tbl := range tm.tables {
-		tbl.SetColumns(nil)
+		tbl.Lock()
+		tbl.columns = nil
+		tbl.Unlock()
 	}
 	tm.tablesMutex.Unlock()
 
@@ -86,54 +64,70 @@ func (tm *TableManager) table(name string) *tableState {
 	return tbl
 }
 
-func (tm *TableManager) refreshTableStructure(ctx context.Context, db dbh, tbl *tableState) error {
-	rows, err := db.Query(ctx, refreshTableStructureStatement, tm.Schema, tbl.name)
-	if err != nil {
-		return err
-	}
-	defer rows.Close()
+// MatchSource scans through the metrics, determining what columns are needed for inserting, and ensuring the DB schema matches.
+//
+// If the schema does not match, and schema updates are disabled:
+// If a field is missing from the DB, the field is omitted.
+// If a tag is missing from the DB, the metric is dropped.
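+//
+// In both cases the offending columns are removed from rowSource (via DropColumn) before anything is written.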
+func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *TableSource) error { + metricTable := tm.table(rowSource.Name()) + var tagTable *tableState + if tm.TagsAsForeignKeys { + tagTable = tm.table(metricTable.name + tm.TagTableSuffix) - cols := make(map[string]utils.Column) - for rows.Next() { - var colName, colType string - desc := new(string) - err := rows.Scan(&colName, &colType, &desc) + missingCols, err := tm.EnsureStructure( + ctx, + db, + tagTable, + rowSource.TagTableColumns(), + tm.TagTableCreateTemplates, + tm.TagTableAddColumnTemplates, + metricTable, + tagTable, + ) if err != nil { return err } - role := utils.FieldColType - switch colName { - case timeColumnName: - role = utils.TimeColType - case tagIDColumnName: - role = utils.TagsIDColType - case tagsJSONColumnName: - role = utils.TagColType - case fieldsJSONColumnName: - role = utils.FieldColType - default: - // We don't want to monopolize the column comment (preventing user from storing other information there), so just look at the first word - if desc != nil { - descWords := strings.Split(*desc, " ") - if descWords[0] == "tag" { - role = utils.TagColType + if len(missingCols) > 0 { + colDefs := make([]string, len(missingCols)) + for i, col := range missingCols { + if err := rowSource.DropColumn(col); err != nil { + return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", tagTable.name, err) } + colDefs[i] = col.Name + " " + col.Type } - } - - cols[colName] = utils.Column{ - Name: colName, - Type: colType, - Role: role, + tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s", + tagTable.name, + strings.Join(colDefs, ", ")) } } - if err := rows.Err(); err != nil { + + missingCols, err := tm.EnsureStructure( + ctx, + db, + metricTable, + rowSource.MetricTableColumns(), + tm.CreateTemplates, + tm.AddColumnTemplates, + metricTable, + tagTable, + ) + if err != nil { return err } - if len(cols) > 0 { - tbl.SetColumns(cols) + if len(missingCols) > 0 { + colDefs := make([]string, len(missingCols)) + for i, col := range missingCols { + if err := rowSource.DropColumn(col); err != nil { + return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", metricTable.name, err) + } + colDefs[i] = col.Name + " " + col.Type + } + tm.Logger.Errorf("table \"%s\" is missing columns (omitting fields): %s", + metricTable.name, + strings.Join(colDefs, ", ")) } return nil @@ -162,100 +156,187 @@ func (tm *TableManager) EnsureStructure( // * When we display errors about missing columns, the order is also sane, and consistent utils.ColumnList(columns).Sort() - tbl.Lock() - tblColumns := tbl.Columns() - if tblColumns == nil { - // We don't know about the table. First try to query it. - if err := tm.refreshTableStructure(ctx, db, tbl); err != nil { - tbl.Unlock() - return nil, fmt.Errorf("querying table structure: %w", err) - } - tblColumns = tbl.Columns() + // rlock, read, runlock, wlock, read, read_db, wlock_db, read_db, write_db, wunlock_db, wunlock - if tblColumns == nil { - // Ok, table doesn't exist, now we can create it. 
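+		// The metrics table is only read-locked in this branch: templates may reference it, but its cached columns aren't modified.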
- if err := tm.executeTemplates(ctx, db, createTemplates, tbl, columns, metricsTable, tagsTable); err != nil { - tbl.Unlock() - return nil, fmt.Errorf("creating table: %w", err) - } + // rlock + tbl.RLock() + // read + currCols := tbl.columns + // runlock + tbl.RUnlock() + missingCols := diffMissingColumns(currCols, columns) + if len(missingCols) == 0 { + return nil, nil + } + + // wlock + // We also need to lock the other table as it may be referenced by a template. + // To prevent deadlock, the metric & tag table must always be locked in the same order: 1) Tag, 2) Metric + if tbl == tagsTable { + tagsTable.Lock() + defer tagsTable.Unlock() - tblColumns = tbl.Columns() + metricsTable.RLock() + defer metricsTable.RUnlock() + } else { + if tagsTable != nil { + tagsTable.RLock() + defer tagsTable.RUnlock() } + + metricsTable.Lock() + defer metricsTable.Unlock() } - tbl.Unlock() - missingColumns, err := tm.checkColumns(tblColumns, columns) - if err != nil { - return nil, fmt.Errorf("column validation: %w", err) + // read + currCols = tbl.columns + missingCols = diffMissingColumns(currCols, columns) + if len(missingCols) == 0 { + return nil, nil } - if len(missingColumns) == 0 { + + // read_db + var err error + if currCols, err = tm.getColumns(ctx, db, tbl.name); err != nil { + return nil, err + } + missingCols = diffMissingColumns(currCols, columns) + if len(missingCols) == 0 { + tbl.columns = currCols return nil, nil } - if len(addColumnsTemplates) == 0 { - return missingColumns, nil + if len(currCols) == 0 && len(createTemplates) == 0 { + // can't create + return missingCols, nil + } + if len(currCols) != 0 && len(addColumnsTemplates) == 0 { + // can't add + return missingCols, nil } - tbl.Lock() - // Check again in case someone else got it while table was unlocked. - tblColumns = tbl.Columns() - missingColumns, _ = tm.checkColumns(tblColumns, columns) - if len(missingColumns) == 0 { - tbl.Unlock() - return nil, nil + // wlock_db + tx, err := db.Begin(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback(ctx) //nolint:errcheck + // It's possible to have multiple telegraf processes, in which we can't ensure they all lock tables in the same + // order. So to prevent possible deadlocks, we have to have a single lock for all schema modifications. 
+ if _, err := tx.Exec(ctx, "SELECT pg_advisory_xact_lock($1)", schemaAdvisoryLockID); err != nil { + return nil, err } - if err := tm.executeTemplates(ctx, db, addColumnsTemplates, tbl, missingColumns, metricsTable, tagsTable); err != nil { - tbl.Unlock() - return nil, fmt.Errorf("adding columns: %w", err) + // read_db + if currCols, err = tm.getColumns(ctx, tx, tbl.name); err != nil { + return nil, err + } + if currCols != nil { + missingCols = diffMissingColumns(currCols, columns) + if len(missingCols) == 0 { + return nil, nil + } } - tbl.Unlock() - return tm.checkColumns(tbl.Columns(), columns) + + // write_db + var tmpls []*sqltemplate.Template + if len(currCols) == 0 { + tmpls = createTemplates + } else { + tmpls = addColumnsTemplates + } + if err := tm.update(ctx, tx, tbl, tmpls, missingCols, metricsTable, tagsTable); err != nil { + return nil, err + } + + if currCols, err = tm.getColumns(ctx, tx, tbl.name); err != nil { + return nil, err + } + + if err := tx.Commit(ctx); err != nil { + return nil, err + } + + tbl.columns = currCols + + // wunlock_db (deferred) + // wunlock (deferred) + + return nil, nil } -func (tm *TableManager) checkColumns(dbColumns map[string]utils.Column, srcColumns []utils.Column) ([]utils.Column, error) { - var missingColumns []utils.Column - for _, srcCol := range srcColumns { - _, ok := dbColumns[srcCol.Name] - if !ok { - missingColumns = append(missingColumns, srcCol) - continue +func (tm *TableManager) getColumns(ctx context.Context, db dbh, name string) (map[string]utils.Column, error) { + rows, err := db.Query(ctx, ` + SELECT + column_name, + CASE WHEN data_type='USER-DEFINED' THEN udt_name ELSE data_type END, + col_description(format('%I.%I', table_schema, table_name)::regclass::oid, ordinal_position) + FROM information_schema.columns + WHERE table_schema = $1 and table_name = $2`, tm.Schema, name) + if err != nil { + return nil, err + } + defer rows.Close() + + cols := make(map[string]utils.Column) + for rows.Next() { + var colName, colType string + desc := new(string) + err := rows.Scan(&colName, &colType, &desc) + if err != nil { + return nil, err + } + + role := utils.FieldColType + switch colName { + case timeColumnName: + role = utils.TimeColType + case tagIDColumnName: + role = utils.TagsIDColType + case tagsJSONColumnName: + role = utils.TagColType + case fieldsJSONColumnName: + role = utils.FieldColType + default: + // We don't want to monopolize the column comment (preventing user from storing other information there), so just look at the first word + if desc != nil { + descWords := strings.Split(*desc, " ") + if descWords[0] == "tag" { + role = utils.TagColType + } + } + } + + cols[colName] = utils.Column{ + Name: colName, + Type: colType, + Role: role, } } - return missingColumns, nil + + return cols, rows.Err() } //nolint:revive -func (tm *TableManager) executeTemplates( - ctx context.Context, - db dbh, +func (tm *TableManager) update(ctx context.Context, + tx pgx.Tx, + state *tableState, tmpls []*sqltemplate.Template, - tbl *tableState, - newColumns []utils.Column, + missingCols []utils.Column, metricsTable *tableState, tagsTable *tableState, ) error { - tmplTable := sqltemplate.NewTable(tm.Schema, tbl.name, colMapToSlice(tbl.Columns())) - metricsTmplTable := sqltemplate.NewTable(tm.Schema, metricsTable.name, colMapToSlice(metricsTable.Columns())) + tmplTable := sqltemplate.NewTable(tm.Schema, state.name, colMapToSlice(state.columns)) + metricsTmplTable := sqltemplate.NewTable(tm.Schema, metricsTable.name, 
colMapToSlice(metricsTable.columns)) var tagsTmplTable *sqltemplate.Table if tagsTable != nil { - tagsTmplTable = sqltemplate.NewTable(tm.Schema, tagsTable.name, colMapToSlice(tagsTable.Columns())) + tagsTmplTable = sqltemplate.NewTable(tm.Schema, tagsTable.name, colMapToSlice(tagsTable.columns)) } else { tagsTmplTable = sqltemplate.NewTable("", "", nil) } - // Lock to prevent concurrency issues in postgres (pg_type_typname_nsp_index unique constraint; SQLSTATE 23505) - tm.schemaMutex.Lock() - defer tm.schemaMutex.Unlock() - - tx, err := db.Begin(ctx) - if err != nil { - return err - } - defer tx.Rollback(ctx) //nolint:errcheck - for _, tmpl := range tmpls { - sql, err := tmpl.Render(tmplTable, newColumns, metricsTmplTable, tagsTmplTable) + sql, err := tmpl.Render(tmplTable, missingCols, metricsTmplTable, tagsTmplTable) if err != nil { return err } @@ -268,7 +349,7 @@ func (tm *TableManager) executeTemplates( // For some columns we can determine this by the column name (time, tag_id, etc). However tags and fields can have any // name, and look the same. So we add a comment to tag columns, and through process of elimination what remains are // field columns. - for _, col := range newColumns { + for _, col := range missingCols { if col.Role != utils.TagColType { continue } @@ -279,11 +360,23 @@ func (tm *TableManager) executeTemplates( } } - if err := tx.Commit(ctx); err != nil { - return err + return nil +} + +// diffMissingColumns filters srcColumns to the ones not present in dbColumns. +func diffMissingColumns(dbColumns map[string]utils.Column, srcColumns []utils.Column) []utils.Column { + if len(dbColumns) == 0 { + return srcColumns } - return tm.refreshTableStructure(ctx, db, tbl) + var missingColumns []utils.Column + for _, srcCol := range srcColumns { + if _, ok := dbColumns[srcCol.Name]; !ok { + missingColumns = append(missingColumns, srcCol) + continue + } + } + return missingColumns } func colMapToSlice(colMap map[string]utils.Column) []utils.Column { @@ -296,72 +389,3 @@ func colMapToSlice(colMap map[string]utils.Column) []utils.Column { } return cols } - -// MatchSource scans through the metrics, determining what columns are needed for inserting, and ensuring the DB schema matches. -// -// If the schema does not match, and schema updates are disabled: -// If a field missing from the DB, the field is omitted. -// If a tag is missing from the DB, the metric is dropped. 
-func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *TableSource) error { - metricTable := tm.table(rowSource.Name()) - var tagTable *tableState - if tm.TagsAsForeignKeys { - tagTable = tm.table(metricTable.name + tm.TagTableSuffix) - - missingCols, err := tm.EnsureStructure( - ctx, - db, - tagTable, - rowSource.TagTableColumns(), - tm.TagTableCreateTemplates, - tm.TagTableAddColumnTemplates, - metricTable, - tagTable, - ) - if err != nil { - return err - } - - if len(missingCols) > 0 { - colDefs := make([]string, len(missingCols)) - for i, col := range missingCols { - if err := rowSource.DropColumn(col); err != nil { - return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", tagTable.name, err) - } - colDefs[i] = col.Name + " " + string(col.Type) - } - tm.Logger.Errorf("table '%s' is missing tag columns (dropping metrics): %s", - tagTable.name, - strings.Join(colDefs, ", ")) - } - } - - missingCols, err := tm.EnsureStructure( - ctx, - db, - metricTable, - rowSource.MetricTableColumns(), - tm.CreateTemplates, - tm.AddColumnTemplates, - metricTable, - tagTable, - ) - if err != nil { - return err - } - - if len(missingCols) > 0 { - colDefs := make([]string, len(missingCols)) - for i, col := range missingCols { - if err := rowSource.DropColumn(col); err != nil { - return fmt.Errorf("metric/table mismatch: Unable to omit field/column from \"%s\": %w", metricTable.name, err) - } - colDefs[i] = col.Name + " " + string(col.Type) - } - tm.Logger.Errorf("table \"%s\" is missing columns (omitting fields): %s", - metricTable.name, - strings.Join(colDefs, ", ")) - } - - return nil -} diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go index 83e5b4a3da49b..aafa2ca0c0ff7 100644 --- a/plugins/outputs/postgresql/table_manager_test.go +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -30,13 +30,54 @@ func TestTableManager_EnsureStructure(t *testing.T) { nil, ) require.NoError(t, err) - require.Empty(t, missingCols) + assert.Empty(t, missingCols) - assert.EqualValues(t, cols[0], p.tableManager.table(t.Name()).Columns()["foo"]) - assert.EqualValues(t, cols[1], p.tableManager.table(t.Name()).Columns()["baz"]) + tblCols := p.tableManager.table(t.Name()).columns + assert.EqualValues(t, cols[0], tblCols["foo"]) + assert.EqualValues(t, cols[1], tblCols["baz"]) } -func TestTableManager_refreshTableStructure(t *testing.T) { +func TestTableManager_EnsureStructure_alter(t *testing.T) { + p := newPostgresqlTest(t) + require.NoError(t, p.Connect()) + + cols := []utils.Column{ + p.columnFromTag("foo", ""), + p.columnFromField("bar", 0), + } + _, err := p.tableManager.EnsureStructure( + ctx, + p.db, + p.tableManager.table(t.Name()), + cols, + p.CreateTemplates, + p.AddColumnTemplates, + p.tableManager.table(t.Name()), + nil, + ) + require.NoError(t, err) + + cols = append(cols, p.columnFromField("baz", 0)) + missingCols, err := p.tableManager.EnsureStructure( + ctx, + p.db, + p.tableManager.table(t.Name()), + cols, + p.CreateTemplates, + p.AddColumnTemplates, + p.tableManager.table(t.Name()), + nil, + ) + require.NoError(t, err) + assert.Empty(t, missingCols) + + tblCols := p.tableManager.table(t.Name()).columns + assert.EqualValues(t, cols[0], tblCols["foo"]) + assert.EqualValues(t, cols[1], tblCols["bar"]) + assert.EqualValues(t, cols[2], tblCols["baz"]) +} + +func TestTableManager_getColumns(t *testing.T) { p := newPostgresqlTest(t) require.NoError(t, p.Connect()) @@ -57,12 +98,13 @@ func 
TestTableManager_refreshTableStructure(t *testing.T) { require.NoError(t, err) p.tableManager.ClearTableCache() - require.Empty(t, p.tableManager.table(t.Name()).Columns()) + require.Empty(t, p.tableManager.table(t.Name()).columns) - require.NoError(t, p.tableManager.refreshTableStructure(ctx, p.db, p.tableManager.table(t.Name()))) + curCols, err := p.tableManager.getColumns(ctx, p.db, t.Name()) + require.NoError(t, err) - assert.EqualValues(t, cols[0], p.tableManager.table(t.Name()).Columns()["foo"]) - assert.EqualValues(t, cols[1], p.tableManager.table(t.Name()).Columns()["baz"]) + assert.EqualValues(t, cols[0], curCols["foo"]) + assert.EqualValues(t, cols[1], curCols["baz"]) } func TestTableManager_MatchSource(t *testing.T) { @@ -76,8 +118,8 @@ func TestTableManager_MatchSource(t *testing.T) { tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) - assert.Contains(t, p.tableManager.table(t.Name()+p.TagTableSuffix).Columns(), "tag") - assert.Contains(t, p.tableManager.table(t.Name()).Columns(), "a") + assert.Contains(t, p.tableManager.table(t.Name()+p.TagTableSuffix).columns, "tag") + assert.Contains(t, p.tableManager.table(t.Name()).columns, "a") } func TestTableManager_MatchSource_UnsignedIntegers(t *testing.T) { @@ -100,7 +142,7 @@ func TestTableManager_MatchSource_UnsignedIntegers(t *testing.T) { tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) - assert.Equal(t, PgUint8, p.tableManager.table(t.Name()).Columns()["a"].Type) + assert.Equal(t, PgUint8, p.tableManager.table(t.Name()).columns["a"].Type) } func TestTableManager_noCreateTable(t *testing.T) { diff --git a/plugins/outputs/postgresql/utils/utils.go b/plugins/outputs/postgresql/utils/utils.go index 18696299ce1aa..2d8d1a6007460 100644 --- a/plugins/outputs/postgresql/utils/utils.go +++ b/plugins/outputs/postgresql/utils/utils.go @@ -3,7 +3,6 @@ package utils import ( "context" "encoding/json" - "fmt" "hash/fnv" "strings" "sync/atomic" @@ -13,10 +12,6 @@ import ( "github.com/influxdata/telegraf" ) -const ( - insertIntoSQLTemplate = "INSERT INTO %s(%s) VALUES(%s)" -) - func TagListToJSON(tagList []*telegraf.Tag) []byte { tags := make(map[string]string, len(tagList)) for _, tag := range tagList { @@ -34,8 +29,8 @@ func FieldListToJSON(fieldList []*telegraf.Field) ([]byte, error) { return json.Marshal(fields) } -// QuoteIdent returns a sanitized string safe to use in SQL as an identifier -func QuoteIdent(name string) string { +// QuoteIdentifier returns a sanitized string safe to use in SQL as an identifier +func QuoteIdentifier(name string) string { return pgx.Identifier{name}.Sanitize() } @@ -44,7 +39,7 @@ func QuoteLiteral(name string) string { return "'" + strings.Replace(name, "'", "''", -1) + "'" } -// FullTableName returns a sanitized table name with it's schema (if supplied) +// FullTableName returns a sanitized table name with its schema (if supplied) func FullTableName(schema, name string) pgx.Identifier { if schema != "" { return pgx.Identifier{schema, name} @@ -73,21 +68,6 @@ func (l PGXLogger) Log(_ context.Context, level pgx.LogLevel, msg string, data m } } -// GenerateInsert returns a SQL statement to insert values in a table -// with $X placeholders for the values -func GenerateInsert(fullSanitizedTableName string, columns []string) string { - valuePlaceholders := make([]string, len(columns)) - quotedColumns := make([]string, len(columns)) - for i, column := range columns { - 
valuePlaceholders[i] = fmt.Sprintf("$%d", i+1) - quotedColumns[i] = QuoteIdent(column) - } - - columnNames := strings.Join(quotedColumns, ",") - values := strings.Join(valuePlaceholders, ",") - return fmt.Sprintf(insertIntoSQLTemplate, fullSanitizedTableName, columnNames, values) -} - func GetTagID(metric telegraf.Metric) int64 { hash := fnv.New64a() for _, tag := range metric.TagList() { From 1bcc5c26769528436eedb58b3ecaac27fc9a522e Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 31 Aug 2021 20:51:46 -0400 Subject: [PATCH 109/121] outputs.postgresql: update README & add test to keep updated --- plugins/outputs/postgresql/README.md | 228 ++++++++++++++---- plugins/outputs/postgresql/postgresql_test.go | 11 + 2 files changed, 194 insertions(+), 45 deletions(-) diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md index 3acc1dd743585..f1b2ce0cc7539 100644 --- a/plugins/outputs/postgresql/README.md +++ b/plugins/outputs/postgresql/README.md @@ -1,54 +1,192 @@ # PostgreSQL Output Plugin -This output plugin writes all metrics to PostgreSQL. -The plugin manages the schema automatically updating missing columns, and checking if existing ones are of the proper type. +This output plugin writes metrics to PostgreSQL (or compatible database). +The plugin manages the schema, automatically updating missing columns. -**_WARNING_**: In order to enable automatic schema update, the connection to the database must -be established with a user that has sufficient permissions. Either be a admin, or an owner of the -target schema. +# Configuration: +```toml +[[outputs.postgresql]] + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. Also supported are PG environment vars + ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE + ## all supported vars here: https://www.postgresql.org/docs/current/libpq-envars.html + ## + ## Non-standard parameters: + ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. + ## pool_min_conns (default: 0) - Minimum size of connection pool. + ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. + ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. + ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. + ## + ## Without the dbname parameter, the driver will default to a database + ## with the same name as the user. This dbname is just for instantiating a + ## connection with the server and doesn't restrict the databases we are trying + ## to grab metrics for. + + ## Postgres schema to use. + # schema = "public" + + ## Store tags as foreign keys in the metrics table. Default is false. + # tags_as_foreign_keys = false + + ## Suffix to append to table name (measurement name) for the foreign tag table. + # tag_table_suffix = "_tag" + + ## Deny inserting metrics if the foreign tag can't be inserted. + # foreign_tag_constraint = false + + ## Store all tags as a JSONB object in a single 'tags' column. + # tags_as_jsonb = false + + ## Store all fields as a JSONB object in a single 'fields' column. + # fields_as_jsonb = false + + ## Templated statements to execute when creating a new table. 
+  # create_templates = [
+  #   '''CREATE TABLE {{.table}} ({{.columns}})''',
+  # ]
+
+  ## Templated statements to execute when adding columns to a table.
+  ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped. Points
+  ## containing fields for which there is no column will have the field omitted.
+  # add_column_templates = [
+  #   '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
+  # ]
+
+  ## Templated statements to execute when creating a new tag table.
+  # tag_table_create_templates = [
+  #   '''CREATE TABLE {{.table}} ({{.columns}}, PRIMARY KEY (tag_id))''',
+  # ]
+
+  ## Templated statements to execute when adding columns to a tag table.
+  ## Set to an empty list to disable. Points containing tags for which there is no column will be skipped.
+  # tag_table_add_column_templates = [
+  #   '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
+  # ]
+
+  ## Controls whether to use the uint8 data type provided by the pguint extension.
+  # use_uint8 = false
+
+  ## When using pool_max_conns>1, and a temporary error occurs, the query is retried with an incremental backoff. This
+  ## controls the maximum backoff duration.
+  # retry_max_backoff = "15s"
+
+  ## Enable & set the log level for the Postgres driver.
+  # log_level = "warn" # trace, debug, info, warn, error, none
+```
+
+### Concurrency
+By default the postgresql plugin does not utilize any concurrency. However, it can use concurrency for increased throughput. When concurrency is off, telegraf core handles things like retrying on failure, buffering, etc. When concurrency is used, these aspects have to be handled by the plugin.
+
+To enable concurrent writes to the database, set the `pool_max_conns` connection parameter to a value >1. When enabled, incoming batches will be split by measurement/table name. In addition, if a batch comes in and the previous batch has not completed, concurrency will be used for the new batch as well.
+
+If all connections are utilized and the pool is exhausted, further incoming batches will be buffered within telegraf core.
+
+### Foreign tags
 
-### Configuration:
+When using `tags_as_foreign_keys`, tags will be written to a separate table with a `tag_id` column used for joins. Each series (unique combination of tag values) gets its own entry in the tags table, and a unique `tag_id`.
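+
+To illustrate how a series maps to a stable `tag_id`, the sketch below mirrors the FNV-1a hashing used by the plugin's `GetTagID` helper; the exact byte layout fed to the hash is an assumption made for illustration, not the plugin's canonical encoding:
+
+```go
+package main
+
+import (
+	"fmt"
+	"hash/fnv"
+)
+
+// tagID hashes an ordered list of tag key/value pairs into a signed 64-bit ID.
+// The plugin derives tag_id similarly from a metric's sorted tag list.
+func tagID(tags [][2]string) int64 {
+	h := fnv.New64a()
+	for _, kv := range tags {
+		_, _ = h.Write([]byte(kv[0])) // tag key
+		_, _ = h.Write([]byte(kv[1])) // tag value
+	}
+	return int64(h.Sum64())
+}
+
+func main() {
+	// The same tag set always yields the same ID, across processes and restarts,
+	// which is what makes tag_id usable as a stable foreign key.
+	fmt.Println(tagID([][2]string{{"host", "db01"}, {"region", "us-east"}}))
+}
+```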
+
+# Data types
+By default the postgresql plugin maps Influx data types to the following PostgreSQL types:
+
+| Influx | PostgreSQL |
+|--------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|
+| [float](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#float) | [double precision](https://www.postgresql.org/docs/current/datatype-numeric.html#DATATYPE-FLOAT) |
+| [integer](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#integer) | [bigint](https://www.postgresql.org/docs/current/datatype-numeric.html#DATATYPE-INT) |
+| [uinteger](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#uinteger) | [numeric](https://www.postgresql.org/docs/current/datatype-numeric.html#DATATYPE-NUMERIC-DECIMAL)* |
+| [string](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#string) | [text](https://www.postgresql.org/docs/current/datatype-character.html) |
+| [boolean](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#boolean) | [boolean](https://www.postgresql.org/docs/current/datatype-boolean.html) |
+| [unix timestamp](https://docs.influxdata.com/influxdb/latest/reference/syntax/line-protocol/#unix-timestamp) | [timestamp](https://www.postgresql.org/docs/current/datatype-datetime.html) |
+
+It is important to note that `uinteger` (unsigned 64-bit integer) is mapped to the `numeric` PostgreSQL data type. The `numeric` data type is an arbitrary precision decimal data type that is less efficient than `bigint`. This is necessary as the range of values for the Influx `uinteger` data type can exceed `bigint`, and thus cause errors when inserting data.
+
+### pguint
+As a solution to the `uinteger`/`numeric` data type problem, there is a PostgreSQL extension that offers unsigned 64-bit integer support: https://github.com/petere/pguint.
+
+If this extension is installed, you can enable the `use_uint8` config parameter which will cause the plugin to use the `uint8` datatype instead of `numeric`.
+
+
+# Templating
+The postgresql plugin uses templates for the schema modification SQL statements. This allows for complete control of the schema by the user.
+
+Documentation on how to write templates can be found here:
+https://pkg.go.dev/github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate
+
+## Samples
+### TimescaleDB
 ```toml
-# Send metrics to postgres
-[[outputs.postgresql]]
-  ## specify address via a url:
-  ##   postgres://[pqgotest[:password]]@localhost[/dbname]\
-  ##       ?sslmode=[disable|verify-ca|verify-full]
-  ## or a simple string:
-  ##   host=localhost user=pqotest password=... sslmode=... dbname=app_production
-  ##
-  ## All connection parameters are optional. Also supported are PG environment vars
-  ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE
-  ## all supported vars here: https://www.postgresql.org/docs/current/libpq-envars.html
-  connection = "host=localhost user=postgres sslmode=verify-full"
-
-  ## Update existing tables to match the incoming metrics. Default is true
-  # do_schema_updates = true
-
-  ## Store tags as foreign keys in the metrics table. Default is false.
-  # tags_as_foreignkeys = false
-
-  ## Template to use for generating tables
-  ## Available Variables:
-  ##   {TABLE} - tablename as identifier
-  ##   {TABLELITERAL} - tablename as string literal
-  ##   {COLUMNS} - column definitions
-  ##   {KEY_COLUMNS} - comma-separated list of key columns (time + tags)
-
-  ## Default template
-  # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS})"
-  ## Example for timescaledb
-  # table_template = "CREATE TABLE IF NOT EXISTS {TABLE}({COLUMNS}); SELECT create_hypertable({TABLELITERAL},'time',chunk_time_interval := '1 week'::interval, if_not_exists := true);"
-
-  ## Schema to create the tables into
-  # schema = "public"
-
-  ## Use jsonb datatype for tags. Default is false.
-  # tags_as_jsonb = false
-
-  ## Use jsonb datatype for fields. Default is false.
-  # fields_as_jsonb = false
+tags_as_foreign_keys = true
+create_templates = [
+    '''CREATE TABLE {{.table}} ({{.columns}})''',
+    '''SELECT create_distributed_hypertable({{ .table|quoteLiteral }}, 'time', partitioning_column => 'tag_id', number_partitions => (SELECT count(*) FROM timescaledb_information.data_nodes)::integer, replication_factor => 2, chunk_time_interval => INTERVAL '1h')''',
+    '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''',
+]
+```
+
+### Tag table with view
+This example enables `tags_as_foreign_keys`, but creates a postgres view to automatically join the metric & tag tables. The metric & tag tables are stored in a "telegraf" schema, with the view in the "public" schema.
+```toml
+tags_as_foreign_keys = true
+schema = "telegraf"
+create_templates = [
+    '''CREATE TABLE {{ .table }} ({{ .columns }})''',
+    '''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
+]
+add_column_templates = [
+    '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
+    '''DROP VIEW IF EXISTS {{ .table.WithSchema "public" }}''',
+    '''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
+]
+tag_table_add_column_templates = [
+    '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''',
+    '''DROP VIEW IF EXISTS {{ .metricTable.WithSchema "public" }}''',
+    '''CREATE VIEW {{ .metricTable.WithSchema "public" }} AS SELECT time, {{ (.allColumns.Tags.Concat .metricTable.Columns.Fields).Identifiers | join "," }} FROM {{ .metricTable }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''',
+]
+```
+
+### Immutable data table
+Some PostgreSQL-compatible databases don't allow modification of table schema after initial creation. This example works around the limitation by creating a new table and then using a view to join them together.
+ +```toml +tags_as_foreignkeys = true +schema = 'telegraf' +create_templates = [ + '''CREATE TABLE {{ .table }} ({{ .allColumns }})''', + '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1h')''', + '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''', + '''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2h')''', + '''CREATE VIEW {{ .table.WithSuffix "_data" }} AS SELECT {{ .allColumns.Selectors | join "," }} FROM {{ .table }}''', + '''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''', +] +add_column_templates = [ + '''ALTER TABLE {{ .table }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash).WithSchema "" }}''', + '''ALTER VIEW {{ .table.WithSuffix "_data" }} RENAME TO {{ (.table.WithSuffix "_" .table.Columns.Hash "_data").WithSchema "" }}''', + '''DROP VIEW {{ .table.WithSchema "public" }}''', + + '''CREATE TABLE {{ .table }} ({{ .allColumns }})''', + '''SELECT create_hypertable({{ .table|quoteLiteral }}, 'time', chunk_time_interval => INTERVAL '1h')''', + '''ALTER TABLE {{ .table }} SET (timescaledb.compress, timescaledb.compress_segmentby = 'tag_id')''', + '''SELECT add_compression_policy({{ .table|quoteLiteral }}, INTERVAL '2h')''', + '''CREATE VIEW {{ .table.WithSuffix "_data" }} AS SELECT {{ .allColumns.Selectors | join "," }} FROM {{ .table }} UNION ALL SELECT {{ (.allColumns.Union .table.Columns).Selectors | join "," }} FROM {{ .table.WithSuffix "_" .table.Columns.Hash "_data" }}''', + '''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table.WithSuffix "_data" }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''', +] +tag_table_add_column_templates = [ + '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''', + '''DROP VIEW {{ .metricTable.WithSchema "public" }}''', + '''CREATE VIEW {{ .metricTable.WithSchema "public" }} AS SELECT time, {{ (.allColumns.Tags.Concat .metricTable.Columns.Fields).Identifiers | join "," }} FROM {{ .metricTable.WithSuffix "_data" }} t, {{ .table }} tt WHERE t.tag_id = tt.tag_id''', +] +``` + +# Error handling +When the plugin encounters an error writing to the database, it attempts to determine whether the error is temporary or permanent. An error is considered temporary if it's possible that retrying the write will succeed. Some examples of temporary errors are things like connection interruption, deadlocks, etc. Permanent errors are things like invalid data type, insufficient permissions, etc. + +When an error is determined to be temporary, the plugin will retry the write with an incremental backoff. +When an error is determined to be permanent, the plugin will discard the sub-batch. The "sub-batch" is the portion of the input batch that is being written to the same table. 
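+
+As a rough sketch of what this classification can look like (the plugin's actual `isTempError` logic may differ, and the SQLSTATE codes and the `github.com/jackc/pgconn` import below are illustrative assumptions):
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/jackc/pgconn"
+)
+
+// isTempError reports whether a write error is likely to succeed on retry.
+func isTempError(err error) bool {
+	var pgErr *pgconn.PgError
+	if errors.As(err, &pgErr) {
+		switch {
+		case strings.HasPrefix(pgErr.Code, "08"): // class 08: connection exceptions
+			return true
+		case pgErr.Code == "40001", pgErr.Code == "40P01": // serialization failure, deadlock
+			return true
+		}
+		// e.g. 42804 (datatype mismatch) or 42501 (insufficient privilege) are permanent
+		return false
+	}
+	var netErr net.Error
+	return errors.As(err, &netErr) // treat network-level failures as transient
+}
+
+func main() {
+	fmt.Println(isTempError(&pgconn.PgError{Code: "40P01"})) // true: deadlock, retry the sub-batch
+	fmt.Println(isTempError(&pgconn.PgError{Code: "42501"})) // false: permission denied, discard the sub-batch
+}
+```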
diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index b8180cdc66b20..cb517005ea276 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -1,6 +1,7 @@ package postgresql import ( + "bytes" "context" "encoding/json" "flag" @@ -756,3 +757,13 @@ func TestStressConcurrency(t *testing.T) { } } } + +func TestReadme(t *testing.T) { + f, err := os.Open("README.md") + require.NoError(t, err) + buf := bytes.NewBuffer(nil) + _, _ = buf.ReadFrom(f) + _ = f.Close() + txt := strings.ReplaceAll(buf.String(), "\r", "") // windows files contain CR + assert.Contains(t, txt, (&Postgresql{}).SampleConfig(), "Readme is out of date with sample config") +} From 94f0b74e230aa05dad232b950e8742b9b558cc21 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 8 Sep 2021 14:50:26 -0400 Subject: [PATCH 110/121] outputs.postgresql: fix missing .allColumns template values In the event that the local table cache was missing columns, but the DB had them, when performing a table update the missing columns wouldn't be passed to the template. --- plugins/outputs/postgresql/table_manager.go | 2 + .../outputs/postgresql/table_manager_test.go | 42 ++++++++++++++++++- 2 files changed, 42 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index 65c66fd568870..b06aed8b391aa 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -200,6 +200,7 @@ func (tm *TableManager) EnsureStructure( if currCols, err = tm.getColumns(ctx, db, tbl.name); err != nil { return nil, err } + tbl.columns = currCols missingCols = diffMissingColumns(currCols, columns) if len(missingCols) == 0 { tbl.columns = currCols @@ -231,6 +232,7 @@ func (tm *TableManager) EnsureStructure( if currCols, err = tm.getColumns(ctx, tx, tbl.name); err != nil { return nil, err } + tbl.columns = currCols if currCols != nil { missingCols = diffMissingColumns(currCols, columns) if len(missingCols) == 0 { diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go index aafa2ca0c0ff7..bb9a329bc9c2e 100644 --- a/plugins/outputs/postgresql/table_manager_test.go +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -1,10 +1,10 @@ package postgresql import ( - "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "strings" + "testing" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate" @@ -251,3 +251,41 @@ func TestTableManager_noAlterMissingField(t *testing.T) { require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) assert.NotContains(t, tsrc.ColumnNames(), "b") } + +func TestTableManager_addColumnTemplates(t *testing.T) { + p := newPostgresqlTest(t) + p.TagsAsForeignKeys = true + require.NoError(t, p.Connect()) + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"foo": "bar"}, MSI{"a": 1}), + } + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] + require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) + + p = newPostgresqlTest(t) + p.TagsAsForeignKeys = true + tmpl := &sqltemplate.Template{} + require.NoError(t, tmpl.UnmarshalText([]byte(`-- addColumnTemplate: {{ . 
}}`))) + p.AddColumnTemplates = append(p.AddColumnTemplates, tmpl) + require.NoError(t, p.Connect()) + + metrics = []telegraf.Metric{ + newMetric(t, "", MSS{"pop": "tart"}, MSI{"a": 1, "b": 2}), + } + tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()] + require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) + p.Logger.Info("ok") + var log string + for _, l := range p.Logger.Logs() { + if strings.Contains(l.String(), "-- addColumnTemplate") { + log = l.String() + break + } + } + assert.Contains(t, log, `table:"public"."TestTableManager_addColumnTemplates"`) + assert.Contains(t, log, `columns:"b" bigint`) + assert.Contains(t, log, `allColumns:"time" timestamp with time zone, "tag_id" bigint, "a" bigint, "b" bigint`) + assert.Contains(t, log, `metricTable:"public"."TestTableManager_addColumnTemplates"`) + assert.Contains(t, log, `tagTable:"public"."TestTableManager_addColumnTemplates_tag"`) +} From 40ef1f90551ad567432993bdd97b88484ef02c51 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Fri, 17 Sep 2021 12:09:04 -0400 Subject: [PATCH 111/121] outputs.postgresql: use timestamp without time zone major performance hit in timescaledb for some queries when timestamp with time zone is used: https://github.com/timescale/timescaledb/issues/118#issuecomment-313776080 --- plugins/outputs/postgresql/columns.go | 2 +- plugins/outputs/postgresql/datatypes.go | 2 +- plugins/outputs/postgresql/table_manager_test.go | 7 ++++--- plugins/outputs/postgresql/table_source.go | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/plugins/outputs/postgresql/columns.go b/plugins/outputs/postgresql/columns.go index 816539be65300..ca08fdfd5c615 100644 --- a/plugins/outputs/postgresql/columns.go +++ b/plugins/outputs/postgresql/columns.go @@ -5,7 +5,7 @@ import "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" // Column names and data types for standard fields (time, tag_id, tags, and fields) const ( timeColumnName = "time" - timeColumnDataType = PgTimestampWithTimeZone + timeColumnDataType = PgTimestampWithoutTimeZone tagIDColumnName = "tag_id" tagIDColumnDataType = PgBigInt tagsJSONColumnName = "tags" diff --git a/plugins/outputs/postgresql/datatypes.go b/plugins/outputs/postgresql/datatypes.go index c16f6de791a49..eed36c1c9dea1 100644 --- a/plugins/outputs/postgresql/datatypes.go +++ b/plugins/outputs/postgresql/datatypes.go @@ -53,7 +53,7 @@ func (p *Postgresql) derivePgDatatype(value interface{}) string { case string: return PgText case time.Time: - return PgTimestampWithTimeZone + return PgTimestampWithoutTimeZone default: return PgText } diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go index bb9a329bc9c2e..fe7f915b72129 100644 --- a/plugins/outputs/postgresql/table_manager_test.go +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -1,11 +1,12 @@ package postgresql import ( - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "strings" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" @@ -285,7 +286,7 @@ func TestTableManager_addColumnTemplates(t *testing.T) { } assert.Contains(t, log, `table:"public"."TestTableManager_addColumnTemplates"`) assert.Contains(t, log, `columns:"b" bigint`) - assert.Contains(t, log, `allColumns:"time" 
timestamp with time zone, "tag_id" bigint, "a" bigint, "b" bigint`) + assert.Contains(t, log, `allColumns:"time" timestamp without time zone, "tag_id" bigint, "a" bigint, "b" bigint`) assert.Contains(t, log, `metricTable:"public"."TestTableManager_addColumnTemplates"`) assert.Contains(t, log, `tagTable:"public"."TestTableManager_addColumnTemplates_tag"`) } diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index d81e3fe925f7f..5d621dc6e2c13 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -254,7 +254,7 @@ func (tsrc *TableSource) getValues() ([]interface{}, error) { metric := tsrc.metrics[tsrc.cursor] values := []interface{}{ - metric.Time(), + metric.Time().UTC(), } if !tsrc.postgresql.TagsAsForeignKeys { From 2d84174616e87a8a995c5e5a70df13e9abc2fb87 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 22 Sep 2021 11:48:30 -0400 Subject: [PATCH 112/121] outputs.postgresql: set pg connection application name --- plugins/outputs/postgresql/postgresql.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index b94f685f27e23..cffeff8bb70e7 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -202,6 +202,10 @@ func (p *Postgresql) Init() error { p.dbConfig.MaxConns = 1 } + if _, ok := p.dbConfig.ConnConfig.RuntimeParams["application_name"]; !ok { + p.dbConfig.ConnConfig.RuntimeParams["application_name"] = "telegraf" + } + if p.LogLevel != "" { p.dbConfig.ConnConfig.Logger = utils.PGXLogger{Logger: p.Logger} p.dbConfig.ConnConfig.LogLevel, err = pgx.LogLevelFromString(p.LogLevel) From a00e2921227e6050fe444884ddcc793b06d35e5c Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Wed, 22 Sep 2021 11:48:47 -0400 Subject: [PATCH 113/121] outputs.postgresql: fixes for cache Addresses 2 problems with the tag cache: 1. The same tag ID in use by multiple tables would collide, causing one not to be inserted. 2. The cache sizing was grossly underestimated. Also added some debug logging for cache statistics. --- plugins/outputs/postgresql/README.md | 5 ++++ plugins/outputs/postgresql/postgresql.go | 25 +++++++++++++++- plugins/outputs/postgresql/table_source.go | 34 +++++++++++++++++----- 3 files changed, 56 insertions(+), 8 deletions(-) diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md index f1b2ce0cc7539..aa8f26c1dc58c 100644 --- a/plugins/outputs/postgresql/README.md +++ b/plugins/outputs/postgresql/README.md @@ -77,6 +77,11 @@ The plugin manages the schema, automatically updating missing columns. ## controls the maximum backoff duration. # retry_max_backoff = "15s" + ## Approximate number of tag IDs to store in in-memory cache (when using tags_as_foreign_keys). + ## This is an optimization to skip inserting known tag IDs. + ## Each entry consumes approximately 34 bytes of memory. + # tag_cache_size = 100000 + ## Enable & set the log level for the Postgres driver. # log_level = "warn" # trace, debug, info, warn, error, none ``` diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index cffeff8bb70e7..dfd74b157cefe 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -99,6 +99,11 @@ var sampleConfig = ` ## controls the maximum backoff duration. 
# retry_max_backoff = "15s" + ## Approximate number of tag IDs to store in in-memory cache (when using tags_as_foreign_keys). + ## This is an optimization to skip inserting known tag IDs. + ## Each entry consumes approximately 34 bytes of memory. + # tag_cache_size = 100000 + ## Enable & set the log level for the Postgres driver. # log_level = "warn" # trace, debug, info, warn, error, none ` @@ -117,6 +122,7 @@ type Postgresql struct { TagTableAddColumnTemplates []*sqltemplate.Template `toml:"tag_table_add_column_templates"` UseUint8 bool `toml:"use_uint8"` RetryMaxBackoff config.Duration `toml:"retry_max_backoff"` + TagCacheSize int `toml:"tag_cache_size"` LogLevel string `toml:"log_level"` dbContext context.Context @@ -179,6 +185,12 @@ func (p *Postgresql) Init() error { p.RetryMaxBackoff = config.Duration(time.Second * 15) } + if p.TagCacheSize == 0 { + p.TagCacheSize = 100000 + } else if p.TagCacheSize < 0 { + return fmt.Errorf("invalid tag_cache_size") + } + if p.LogLevel == "" { p.LogLevel = "warn" } @@ -237,7 +249,7 @@ func (p *Postgresql) Connect() error { p.tableManager = NewTableManager(p) if p.TagsAsForeignKeys { - p.tagsCache = freecache.NewCache(5 * 1024 * 1024) // 5MB + p.tagsCache = freecache.NewCache(p.TagCacheSize * 34) // from testing, each entry consumes approx 34 bytes } maxConns := int(p.db.Stat().MaxConns()) @@ -293,6 +305,17 @@ func (p *Postgresql) Close() error { } func (p *Postgresql) Write(metrics []telegraf.Metric) error { + if p.tagsCache != nil { + // gather at the start of write so there's less chance of any async operations ongoing + p.Logger.Debugf("cache: size=%d hit=%d miss=%d full=%d\n", + p.tagsCache.EntryCount(), + p.tagsCache.HitCount(), + p.tagsCache.MissCount(), + p.tagsCache.EvacuateCount(), + ) + p.tagsCache.ResetStatistics() + } + tableSources := NewTableSources(p, metrics) var err error diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index 5d621dc6e2c13..072f466839ea5 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -2,6 +2,7 @@ package postgresql import ( "fmt" + "hash/fnv" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs/postgresql/utils" @@ -49,6 +50,9 @@ type TableSource struct { cursor int cursorValues []interface{} cursorError error + // tagHashSalt is so that we can use a global tag cache for all tables. The salt is unique per table, and combined + // with the tag ID when looked up in the cache. + tagHashSalt int64 tagColumns *columnList // tagSets is the list of tag IDs to tag values in use within the TableSource. 
The position of each value in the list @@ -67,7 +71,7 @@ func NewTableSources(p *Postgresql, metrics []telegraf.Metric) map[string]*Table for _, m := range metrics { tsrc := tableSources[m.Name()] if tsrc == nil { - tsrc = NewTableSource(p) + tsrc = NewTableSource(p, m.Name()) tableSources[m.Name()] = tsrc } tsrc.AddMetric(m) @@ -76,11 +80,15 @@ func NewTableSources(p *Postgresql, metrics []telegraf.Metric) map[string]*Table return tableSources } -func NewTableSource(postgresql *Postgresql) *TableSource { +func NewTableSource(postgresql *Postgresql, name string) *TableSource { + h := fnv.New64a() + _, _ = h.Write([]byte(name)) + tsrc := &TableSource{ - postgresql: postgresql, - cursor: -1, - tagSets: make(map[int64][]*telegraf.Tag), + postgresql: postgresql, + cursor: -1, + tagSets: make(map[int64][]*telegraf.Tag), + tagHashSalt: int64(h.Sum64()), } if !postgresql.TagsAsJsonb { tsrc.tagColumns = newColumnList() @@ -349,6 +357,18 @@ func (ttsrc *TagTableSource) Name() string { return ttsrc.TableSource.Name() + ttsrc.postgresql.TagTableSuffix } +func (ttsrc *TagTableSource) cacheCheck(tagID int64) bool { + // Adding the 2 hashes is good enough. It's not a perfect solution, but given that we're operating in an int64 + // space, the risk of collision is extremely small. + key := ttsrc.tagHashSalt + tagID + _, err := ttsrc.postgresql.tagsCache.GetInt(key) + return err == nil +} +func (ttsrc *TagTableSource) cacheTouch(tagID int64) { + key := ttsrc.tagHashSalt + tagID + _ = ttsrc.postgresql.tagsCache.SetInt(key, nil, 0) +} + func (ttsrc *TagTableSource) ColumnNames() []string { cols := ttsrc.TagTableColumns() names := make([]string, len(cols)) @@ -366,7 +386,7 @@ func (ttsrc *TagTableSource) Next() bool { } ttsrc.cursor++ - if _, err := ttsrc.postgresql.tagsCache.GetInt(ttsrc.tagIDs[ttsrc.cursor]); err == nil { + if ttsrc.cacheCheck(ttsrc.tagIDs[ttsrc.cursor]) { // tag ID already inserted continue } @@ -407,7 +427,7 @@ func (ttsrc *TagTableSource) Values() ([]interface{}, error) { func (ttsrc *TagTableSource) UpdateCache() { for _, tagID := range ttsrc.tagIDs { - _ = ttsrc.postgresql.tagsCache.SetInt(tagID, nil, 0) + ttsrc.cacheTouch(tagID) } } From 10c081089c8caa8e67f5f0feaad464726121f1f4 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Sun, 10 Oct 2021 21:10:50 -0400 Subject: [PATCH 114/121] outputs.postgresql: add "connection" parameter to sample config --- plugins/outputs/postgresql/README.md | 19 ++++++++----------- plugins/outputs/postgresql/postgresql.go | 19 ++++++++----------- 2 files changed, 16 insertions(+), 22 deletions(-) diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md index aa8f26c1dc58c..8a0c72d9fd8c3 100644 --- a/plugins/outputs/postgresql/README.md +++ b/plugins/outputs/postgresql/README.md @@ -7,15 +7,16 @@ The plugin manages the schema, automatically updating missing columns. ```toml [[outputs.postgresql]] - ## specify address via a url matching: - ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## Specify connection address via the standard libpq connection string: + ## host=... user=... password=... sslmode=... dbname=... + ## Or a URL: + ## postgres://[user[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] - ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production ## - ## All connection parameters are optional. Also supported are PG environment vars + ## All connection parameters are optional. Environment vars are also supported. ## e.g. 
PGPASSWORD, PGHOST, PGUSER, PGDATABASE - ## all supported vars here: https://www.postgresql.org/docs/current/libpq-envars.html + ## All supported vars can be found here: + ## https://www.postgresql.org/docs/current/libpq-envars.html ## ## Non-standard parameters: ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. @@ -23,11 +24,7 @@ The plugin manages the schema, automatically updating missing columns. ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. - ## - ## Without the dbname parameter, the driver will default to a database - ## with the same name as the user. This dbname is just for instantiating a - ## connection with the server and doesn't restrict the databases we are trying - ## to grab metrics for. + # connection = "" ## Postgres schema to use. # schema = "public" diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index dfd74b157cefe..ada10ee09da2f 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -29,15 +29,16 @@ type dbh interface { } var sampleConfig = ` - ## specify address via a url matching: - ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## Specify connection address via the standard libpq connection string: + ## host=... user=... password=... sslmode=... dbname=... + ## Or a URL: + ## postgres://[user[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] - ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production ## - ## All connection parameters are optional. Also supported are PG environment vars + ## All connection parameters are optional. Environment vars are also supported. ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE - ## all supported vars here: https://www.postgresql.org/docs/current/libpq-envars.html + ## All supported vars can be found here: + ## https://www.postgresql.org/docs/current/libpq-envars.html ## ## Non-standard parameters: ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. @@ -45,11 +46,7 @@ var sampleConfig = ` ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. - ## - ## Without the dbname parameter, the driver will default to a database - ## with the same name as the user. This dbname is just for instantiating a - ## connection with the server and doesn't restrict the databases we are trying - ## to grab metrics for. + # connection = "" ## Postgres schema to use. # schema = "public" From 96a1b414a198b4f6ce4f57d3868ea6a655e39bdf Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 12 Oct 2021 21:36:54 -0400 Subject: [PATCH 115/121] outputs.postgresql: better handling of DDL errors When a DDL statement (adding columns) errors, instead of failing the whole write operation, treat permanent errors as if DDL statements were disabled. Meaning on fields, just drop the field, and on tags, drop the metric. Temporary errors still fail so that retry won't result in data loss. 
--- plugins/outputs/postgresql/table_manager.go | 25 +++++---- .../outputs/postgresql/table_manager_test.go | 53 ++++++++++++++++++- 2 files changed, 67 insertions(+), 11 deletions(-) diff --git a/plugins/outputs/postgresql/table_manager.go b/plugins/outputs/postgresql/table_manager.go index b06aed8b391aa..8cf7e0704d375 100644 --- a/plugins/outputs/postgresql/table_manager.go +++ b/plugins/outputs/postgresql/table_manager.go @@ -86,7 +86,10 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl tagTable, ) if err != nil { - return err + if isTempError(err) { + return err + } + tm.Postgresql.Logger.Errorf("permanent error updating schema for %s: %w", tagTable.name, err) } if len(missingCols) > 0 { @@ -114,7 +117,10 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl tagTable, ) if err != nil { - return err + if isTempError(err) { + return err + } + tm.Postgresql.Logger.Errorf("permanent error updating schema for %s: %w", metricTable.name, err) } if len(missingCols) > 0 { @@ -125,7 +131,7 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl } colDefs[i] = col.Name + " " + col.Type } - tm.Logger.Errorf("table \"%s\" is missing columns (omitting fields): %s", + tm.Logger.Errorf("table '%s' is missing columns (omitting fields): %s", metricTable.name, strings.Join(colDefs, ", ")) } @@ -139,7 +145,8 @@ func (tm *TableManager) MatchSource(ctx context.Context, db dbh, rowSource *Tabl // (respectively). // metricsTableName and tagsTableName are passed to the templates. // -// If the table cannot be modified, the returned column list is the columns which are missing from the table. +// If the table cannot be modified, the returned column list is the columns which are missing from the table. This +// includes when an error is returned. //nolint:revive func (tm *TableManager) EnsureStructure( ctx context.Context, @@ -219,13 +226,13 @@ func (tm *TableManager) EnsureStructure( // wlock_db tx, err := db.Begin(ctx) if err != nil { - return nil, err + return missingCols, err } defer tx.Rollback(ctx) //nolint:errcheck // It's possible to have multiple telegraf processes, in which we can't ensure they all lock tables in the same // order. So to prevent possible deadlocks, we have to have a single lock for all schema modifications. if _, err := tx.Exec(ctx, "SELECT pg_advisory_xact_lock($1)", schemaAdvisoryLockID); err != nil { - return nil, err + return missingCols, err } // read_db @@ -248,15 +255,15 @@ func (tm *TableManager) EnsureStructure( tmpls = addColumnsTemplates } if err := tm.update(ctx, tx, tbl, tmpls, missingCols, metricsTable, tagsTable); err != nil { - return nil, err + return missingCols, err } if currCols, err = tm.getColumns(ctx, tx, tbl.name); err != nil { - return nil, err + return missingCols, err } if err := tx.Commit(ctx); err != nil { - return nil, err + return missingCols, err } tbl.columns = currCols diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go index fe7f915b72129..5242758eea34a 100644 --- a/plugins/outputs/postgresql/table_manager_test.go +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -208,8 +208,8 @@ func TestTableManager_noAlterMissingTag(t *testing.T) { assert.NotContains(t, tsrc.ColumnNames(), "bar") } -// Verify that when alter statements are disabled with foreign tags and a metric comes in with a new tag key, that the -// field is omitted. 
+// Verify that when using foreign tags and alter statements are disabled and a metric comes in with a new tag key, that +// the tag is omitted. func TestTableManager_noAlterMissingTagTableTag(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true @@ -232,6 +232,32 @@ func TestTableManager_noAlterMissingTagTableTag(t *testing.T) { assert.NotContains(t, ttsrc.ColumnNames(), "bar") } +// Verify that when using foreign tags and alter statements generate a permanent error and a metric comes in with a new +// tag key, that the tag is omitted. +func TestTableManager_badAlterTagTable(t *testing.T) { + p := newPostgresqlTest(t) + p.TagsAsForeignKeys = true + tmpl := &sqltemplate.Template{} + _ = tmpl.UnmarshalText([]byte("bad")) + p.TagTableAddColumnTemplates = []*sqltemplate.Template{tmpl} + require.NoError(t, p.Connect()) + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), + } + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] + require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) + + metrics = []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}), + newMetric(t, "", MSS{"tag": "foo", "bar": "baz"}, MSI{"a": 3}), + } + tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()] + ttsrc := NewTagTableSource(tsrc) + require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) + assert.NotContains(t, ttsrc.ColumnNames(), "bar") +} + // verify that when alter statements are disabled and a metric comes in with a new field key, that the field is omitted. func TestTableManager_noAlterMissingField(t *testing.T) { p := newPostgresqlTest(t) @@ -253,6 +279,29 @@ func TestTableManager_noAlterMissingField(t *testing.T) { assert.NotContains(t, tsrc.ColumnNames(), "b") } +// verify that when alter statements generate a permanent error and a metric comes in with a new field key, that the field is omitted. +func TestTableManager_badAlterField(t *testing.T) { + p := newPostgresqlTest(t) + tmpl := &sqltemplate.Template{} + _ = tmpl.UnmarshalText([]byte("bad")) + p.AddColumnTemplates = []*sqltemplate.Template{tmpl} + require.NoError(t, p.Connect()) + + metrics := []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 1}), + } + tsrc := NewTableSources(p.Postgresql, metrics)[t.Name()] + require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) + + metrics = []telegraf.Metric{ + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 2}), + newMetric(t, "", MSS{"tag": "foo"}, MSI{"a": 3, "b": 3}), + } + tsrc = NewTableSources(p.Postgresql, metrics)[t.Name()] + require.NoError(t, p.tableManager.MatchSource(ctx, p.db, tsrc)) + assert.NotContains(t, tsrc.ColumnNames(), "b") +} + func TestTableManager_addColumnTemplates(t *testing.T) { p := newPostgresqlTest(t) p.TagsAsForeignKeys = true From 04cfa33c6765e4a4332340f93e555eb07ef33e50 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 12 Oct 2021 22:04:50 -0400 Subject: [PATCH 116/121] outputs.postgresql: fix tests error logging & handling of %w --- plugins/outputs/postgresql/postgresql_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index cb517005ea276..632e3c4933ddd 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -35,7 +35,8 @@ type Log struct { } func (l Log) String() string { - return fmt.Sprintf("%s: "+l.format, append([]interface{}{l.level}, l.args...)...) 
+ // We have to use Errorf() as Sprintf() doesn't allow usage of %w. + return fmt.Errorf("%s: "+l.format, append([]interface{}{l.level}, l.args...)...).Error() } // LogAccumulator is a log collector that satisfies telegraf.Logger. From 402b4ecb8262ba5c2bd520aec9baad2f613df566 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 26 Oct 2021 11:30:02 -0400 Subject: [PATCH 117/121] outputs.postgresql: fix column removal fixes incorrect column re-indexing when a column was removed from TableSource --- plugins/outputs/postgresql/table_source.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/postgresql/table_source.go b/plugins/outputs/postgresql/table_source.go index 072f466839ea5..351cdaf2fe446 100644 --- a/plugins/outputs/postgresql/table_source.go +++ b/plugins/outputs/postgresql/table_source.go @@ -36,8 +36,8 @@ func (cl *columnList) Remove(name string) bool { cl.columns = append(cl.columns[:idx], cl.columns[idx+1:]...) delete(cl.indices, name) - for idx, col := range cl.columns[idx:] { - cl.indices[col.Name] = idx + for i, col := range cl.columns[idx:] { + cl.indices[col.Name] = idx + i } return true From 0665544949c632f4182df546be36cf21733ca78f Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Tue, 1 Feb 2022 12:55:53 -0500 Subject: [PATCH 118/121] outputs.postgresql: fix tags_as_foreign_keys typo in README --- plugins/outputs/postgresql/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md index 8a0c72d9fd8c3..b58a32db34d5d 100644 --- a/plugins/outputs/postgresql/README.md +++ b/plugins/outputs/postgresql/README.md @@ -158,7 +158,7 @@ tag_table_add_column_templates = [ Some PostgreSQL-compatible databases don't allow modification of table schema after initial creation. This example works around the limitation by creating a new table and then using a view to join them together. 
```toml -tags_as_foreignkeys = true +tags_as_foreign_keys = true schema = 'telegraf' create_templates = [ '''CREATE TABLE {{ .table }} ({{ .allColumns }})''', From 0e511d6ee075862e6d319b3768825f21fd417aac Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 17 Feb 2022 22:42:42 -0500 Subject: [PATCH 119/121] outputs/postgresql: fix go test detection for pguint extension --- plugins/outputs/postgresql/postgresql_test.go | 14 ++++++-------- plugins/outputs/postgresql/table_manager_test.go | 14 ++++++-------- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/plugins/outputs/postgresql/postgresql_test.go b/plugins/outputs/postgresql/postgresql_test.go index 632e3c4933ddd..af5bfd388bbb3 100644 --- a/plugins/outputs/postgresql/postgresql_test.go +++ b/plugins/outputs/postgresql/postgresql_test.go @@ -678,14 +678,12 @@ func TestWrite_UnsignedIntegers(t *testing.T) { p := newPostgresqlTest(t) p.UseUint8 = true _ = p.Init() - require.NoError(t, p.Connect()) - - row := p.db.QueryRow(ctx, "SELECT count(*) FROM pg_extension WHERE extname='uint'") - var n int - require.NoError(t, row.Scan(&n)) - if n == 0 { - t.Skipf("pguint extension is not installed") - t.SkipNow() + if err := p.Connect(); err != nil { + if strings.Contains(err.Error(), "retreiving OID for uint8 data type") { + t.Skipf("pguint extension is not installed") + t.SkipNow() + } + require.NoError(t, err) } metrics := []telegraf.Metric{ diff --git a/plugins/outputs/postgresql/table_manager_test.go b/plugins/outputs/postgresql/table_manager_test.go index 5242758eea34a..82fd830338891 100644 --- a/plugins/outputs/postgresql/table_manager_test.go +++ b/plugins/outputs/postgresql/table_manager_test.go @@ -127,14 +127,12 @@ func TestTableManager_MatchSource_UnsignedIntegers(t *testing.T) { p := newPostgresqlTest(t) p.UseUint8 = true _ = p.Init() - require.NoError(t, p.Connect()) - - row := p.db.QueryRow(ctx, "SELECT count(*) FROM pg_extension WHERE extname='uint'") - var n int - require.NoError(t, row.Scan(&n)) - if n == 0 { - t.Skipf("pguint extension is not installed") - t.SkipNow() + if err := p.Connect(); err != nil { + if strings.Contains(err.Error(), "retreiving OID for uint8 data type") { + t.Skipf("pguint extension is not installed") + t.SkipNow() + } + require.NoError(t, err) } metrics := []telegraf.Metric{ From e27281f335a428e6888405b74ad11db63140a240 Mon Sep 17 00:00:00 2001 From: Patrick Hemmer Date: Thu, 17 Feb 2022 23:13:49 -0500 Subject: [PATCH 120/121] outputs/postgresql: update README for linter --- docs/INTEGRATION_TESTS.md | 2 +- docs/LICENSE_OF_DEPENDENCIES.md | 4 +-- plugins/outputs/postgresql/README.md | 37 ++++++++++++++---------- plugins/outputs/postgresql/postgresql.go | 8 ++--- 4 files changed, 29 insertions(+), 22 deletions(-) diff --git a/docs/INTEGRATION_TESTS.md b/docs/INTEGRATION_TESTS.md index 667cfc617ae4c..51ae42ee887d7 100644 --- a/docs/INTEGRATION_TESTS.md +++ b/docs/INTEGRATION_TESTS.md @@ -52,7 +52,7 @@ Current areas we have integration tests: | Outputs: MQTT | | | Outputs: Nats | | | Outputs: NSQ | | -| Outputs: Postgresql | | +| Outputs: Postgresql | | Areas we would benefit most from new integration tests: diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index ca56334e32297..8e0cc396ff96e 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -15,11 +15,11 @@ following works: - github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE) - 
github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE)
- github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE)
+- github.com/Azure/go-ntlmssp [MIT License](https://github.com/Azure/go-ntlmssp/blob/master/LICENSE)
+- github.com/ClickHouse/clickhouse-go [MIT License](https://github.com/ClickHouse/clickhouse-go/blob/master/LICENSE)
- github.com/Masterminds/goutils [Apache License 2.0](https://github.com/Masterminds/goutils/blob/master/LICENSE.txt)
- github.com/Masterminds/semver [MIT License](https://github.com/Masterminds/semver/blob/master/LICENSE.txt)
- github.com/Masterminds/sprig [MIT License](https://github.com/Masterminds/sprig/blob/master/LICENSE.txt)
-- github.com/Azure/go-ntlmssp [MIT License](https://github.com/Azure/go-ntlmssp/blob/master/LICENSE)
-- github.com/ClickHouse/clickhouse-go [MIT License](https://github.com/ClickHouse/clickhouse-go/blob/master/LICENSE)
- github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE)
- github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE)
- github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE)
diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md
index b58a32db34d5d..64cd5f0b0402d 100644
--- a/plugins/outputs/postgresql/README.md
+++ b/plugins/outputs/postgresql/README.md
@@ -1,22 +1,22 @@
 # PostgreSQL Output Plugin
 
-This output plugin writes metrics to PostgreSQL (or compatible database). 
+This output plugin writes metrics to PostgreSQL (or a compatible database).
 The plugin manages the schema, automatically updating missing columns.
 
-# Configuration:
+## Configuration
 
 ```toml
 [[outputs.postgresql]]
-  ## Specify connection address via the standard libpq connection string: 
+  ## Specify connection address via the standard libpq connection string:
   ##   host=... user=... password=... sslmode=... dbname=...
-  ## Or a URL: 
+  ## Or a URL:
   ##   postgres://[user[:password]]@localhost[/dbname]\
   ##       ?sslmode=[disable|verify-ca|verify-full]
   ##
   ## All connection parameters are optional. Environment vars are also supported.
   ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE
   ## All supported vars can be found here:
-  ##  https://www.postgresql.org/docs/current/libpq-envars.html 
+  ##  https://www.postgresql.org/docs/current/libpq-envars.html
   ##
   ## Non-standard parameters:
   ##   pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts.
@@ -24,7 +24,7 @@
   ##   pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing.
   ##   pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing.
   ##   pool_health_check_period (default: 0s) - Duration between health checks on idle connections.
-  # connection = "" 
+  # connection = ""
 
   ## Postgres schema to use.
   # schema = "public"
@@ -84,6 +84,7 @@
 ```
 
 ### Concurrency
+
 By default the postgresql plugin does not utilize any concurrency. However, it can use concurrency for increased throughput. When concurrency is off, telegraf core handles things like retrying on failure, buffering, etc. When concurrency is used, these aspects have to be handled by the plugin.
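As a concrete illustration, here is a minimal sketch of a concurrency-enabled configuration; the host, user, and dbname values are placeholders, and `pool_max_conns` is the non-standard pool parameter from the sample config, discussed in the next paragraph:

```toml
[[outputs.postgresql]]
  ## Placeholder connection values; setting pool_max_conns > 1 opts into
  ## parallel (per-batch, per-table) inserts.
  connection = "host=localhost user=telegraf dbname=metrics pool_max_conns=4"
```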
To enable concurrent writes to the database, set the `pool_max_conns` connection parameter to a value >1. When enabled, incoming batches will be split by measurement/table name. In addition, if a batch comes in and the previous batch has not completed, concurrency will be used for the new batch as well.

@@ -94,7 +95,8 @@ If all connections are utilized and the pool is exhausted, further incoming batc

 When using `tags_as_foreign_keys`, tags will be written to a separate table with a `tag_id` column used for joins. Each series (unique combination of tag values) gets its own entry in the tags table, and a unique `tag_id`.
 
-# Data types
+## Data types
+
 By default the postgresql plugin maps Influx data types to the following PostgreSQL types:
 
 | Influx | PostgreSQL |
@@ -109,19 +111,21 @@ By default the postgresql plugin maps Influx data types to the following Postgre
 It is important to note that `uinteger` (unsigned 64-bit integer) is mapped to the `numeric` PostgreSQL data type. The `numeric` data type is an arbitrary precision decimal data type that is less efficient than `bigint`. This is necessary as the range of values for the Influx `uinteger` data type can exceed `bigint`, and thus cause errors when inserting data.
 
 ### pguint
+
-As a solution to the `uinteger`/`numeric` data type problem, there is a PostgreSQL extension that offers unsigned 64-bit integer support: https://github.com/petere/pguint.
+As a solution to the `uinteger`/`numeric` data type problem, there is a PostgreSQL extension that offers unsigned 64-bit integer support: [https://github.com/petere/pguint](https://github.com/petere/pguint).
 
 If this extension is installed, you can enable the `unsigned_integers` config parameter, which will cause the plugin to use the `uint8` data type instead of `numeric`.
 
+## Templating
 
-# Templating
 The postgresql plugin uses templates for the schema modification SQL statements. This allows for complete control of the schema by the user.
 
 Documentation on how to write templates can be found here:
-https://pkg.go.dev/github.com/influxdb/telegraf/plugins/outputs/postgresql/sqltemplate
+[https://pkg.go.dev/github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate](https://pkg.go.dev/github.com/influxdata/telegraf/plugins/outputs/postgresql/sqltemplate)
+
+### Samples
 
-## Samples
-### TimescaleDB
+#### TimescaleDB
 
 ```toml
 tags_as_foreign_keys = true
@@ -132,7 +136,8 @@ create_templates = [
 ]
 ```
 
-### Tag table with view
+#### Tag table with view
+
 This example enables `tags_as_foreign_keys`, but creates a postgres view to automatically join the metric & tag tables. The metric & tag tables are stored in a "telegraf" schema, with the view in the "public" schema.
 
 ```toml
@@ -154,7 +159,8 @@ tag_table_add_column_templates = [
 ]
 ```
 
-### Immutable data table
+#### Immutable data table
+
 Some PostgreSQL-compatible databases don't allow modification of table schema after initial creation. This example works around the limitation by creating a new table and then using a view to join them together.
 
 ```toml
@@ -187,7 +193,8 @@ tag_table_add_column_templates = [
 ]
 ```
 
-# Error handling
+## Error handling
+
 When the plugin encounters an error writing to the database, it attempts to determine whether the error is temporary or permanent. An error is considered temporary if it's possible that retrying the write will succeed. Some examples of temporary errors are things like connection interruption, deadlocks, etc.
Permanent errors are things like invalid data type, insufficient permissions, etc. When an error is determined to be temporary, the plugin will retry the write with an incremental backoff. diff --git a/plugins/outputs/postgresql/postgresql.go b/plugins/outputs/postgresql/postgresql.go index ada10ee09da2f..743e3e80b0ab7 100644 --- a/plugins/outputs/postgresql/postgresql.go +++ b/plugins/outputs/postgresql/postgresql.go @@ -29,16 +29,16 @@ type dbh interface { } var sampleConfig = ` - ## Specify connection address via the standard libpq connection string: + ## Specify connection address via the standard libpq connection string: ## host=... user=... password=... sslmode=... dbname=... - ## Or a URL: + ## Or a URL: ## postgres://[user[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## ## All connection parameters are optional. Environment vars are also supported. ## e.g. PGPASSWORD, PGHOST, PGUSER, PGDATABASE ## All supported vars can be found here: - ## https://www.postgresql.org/docs/current/libpq-envars.html + ## https://www.postgresql.org/docs/current/libpq-envars.html ## ## Non-standard parameters: ## pool_max_conns (default: 1) - Maximum size of connection pool for parallel (per-batch per-table) inserts. @@ -46,7 +46,7 @@ var sampleConfig = ` ## pool_max_conn_lifetime (default: 0s) - Maximum age of a connection before closing. ## pool_max_conn_idle_time (default: 0s) - Maximum idle time of a connection before closing. ## pool_health_check_period (default: 0s) - Duration between health checks on idle connections. - # connection = "" + # connection = "" ## Postgres schema to use. # schema = "public" From efaecaeb98a80c2b95f583c11d4105240961495f Mon Sep 17 00:00:00 2001 From: Manuel Maute Date: Fri, 25 Feb 2022 14:36:09 +0100 Subject: [PATCH 121/121] fix: example in readme --- plugins/outputs/postgresql/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/outputs/postgresql/README.md b/plugins/outputs/postgresql/README.md index 64cd5f0b0402d..4a7fbb672f02e 100644 --- a/plugins/outputs/postgresql/README.md +++ b/plugins/outputs/postgresql/README.md @@ -149,12 +149,12 @@ create_templates = [ ] add_column_templates = [ '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''', - '''DROP VIEW {{ .table.WithSchema "public" }} IF EXISTS''', + '''DROP VIEW IF EXISTS {{ .table.WithSchema "public" }}''', '''CREATE VIEW {{ .table.WithSchema "public" }} AS SELECT time, {{ (.tagTable.Columns.Tags.Concat .allColumns.Fields).Identifiers | join "," }} FROM {{ .table }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''', ] tag_table_add_column_templates = [ '''ALTER TABLE {{.table}} ADD COLUMN IF NOT EXISTS {{.columns|join ", ADD COLUMN IF NOT EXISTS "}}''', - '''DROP VIEW {{ .metricTable.WithSchema "public" }} IF EXISTS''', + '''DROP VIEW IF EXISTS {{ .metricTable.WithSchema "public" }}''', '''CREATE VIEW {{ .metricTable.WithSchema "public" }} AS SELECT time, {{ (.allColumns.Tags.Concat .metricTable.Columns.Fields).Identifiers | join "," }} FROM {{ .metricTable }} t, {{ .tagTable }} tt WHERE t.tag_id = tt.tag_id''', ] ```
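As context for this final fix: PostgreSQL's grammar is `DROP VIEW [ IF EXISTS ] name`, so the `IF EXISTS` clause must precede the view name, and the earlier template ordering would fail with a syntax error the first time a column was added. A minimal sketch of the corrected pattern in isolation (the schema name and template variables mirror the examples above):

```toml
# Corrected ordering: IF EXISTS sits between DROP VIEW and the view name,
# matching PostgreSQL's DROP VIEW [ IF EXISTS ] name grammar.
add_column_templates = [
  '''DROP VIEW IF EXISTS {{ .table.WithSchema "public" }}''',
]
```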