diff --git a/CHANGELOG.md b/CHANGELOG.md
index fb56ab287..ba2f5f7f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,8 +4,34 @@
### Release Notes
+The new TICKscript syntax uses a different operator for chaining methods than for property methods.
+
+* A chaining method is a method that creates a new node in the pipeline. Uses the `|` operator.
+* A property method is a method that changes a property on a node. Uses the `.` operator.
+
+For example, in the script below the `from`, `mean`, and `alert` methods create new nodes while the other methods modify the node.
+
+```javascript
+stream
+ |from()
+ .measurement('cpu')
+ .where(lambda: "cpu" == 'cpu-total')
+ |mean('usage_idle')
+ .as('value')
+ |alert()
+ .crit(lambda: "value" < 30)
+ .log('/tmp/cpu.log')
+```
+
+With this change a new binary, `tickfmt`, is provided with Kapacitor, which will
+format a TICKscript file according to a common standard.
+
+
### Features
+- [#299](https://github.com/influxdata/kapacitor/issues/299): Changes TICKscript chaining method operator and adds `tickfmt` binary.
+
+
### Bugfixes
diff --git a/README.md b/README.md
index 6e7e28d38..207e3bd28 100644
--- a/README.md
+++ b/README.md
@@ -38,15 +38,16 @@ A simple TICKscript that alerts on high cpu usage looks like this:
```javascript
stream
- .from().measurement('cpu_usage_idle')
- .groupBy('host')
- .window()
+ |from()
+ .measurement('cpu_usage_idle')
+ .groupBy('host')
+ |window()
.period(1m)
.every(1m)
- .mean('value')
- .eval(lambda: 100.0 - "mean")
+ |mean('value')
+ |eval(lambda: 100.0 - "mean")
.as('used')
- .alert()
+ |alert()
.message('{{ .Level}}: {{ .Name }}/{{ index .Tags "host" }} has high cpu usage: {{ index .Fields "used" }}')
.warn(lambda: "used" > 70.0)
.crit(lambda: "used" > 85.0)
@@ -63,7 +64,6 @@ stream
// PagerDuty
.pagerDuty()
-
```
Place the above script into a file `cpu_alert.tick` then run these commands to start the task:
diff --git a/cmd/kapacitord/run/command.go b/cmd/kapacitord/run/command.go
index b25faaaed..359c59d29 100644
--- a/cmd/kapacitord/run/command.go
+++ b/cmd/kapacitord/run/command.go
@@ -13,6 +13,7 @@ import (
"github.com/BurntSushi/toml"
"github.com/influxdata/kapacitor/services/logging"
+ "github.com/influxdata/kapacitor/tick"
)
const logo = `
@@ -99,6 +100,10 @@ func (cmd *Command) Run(args ...string) error {
if err != nil {
return fmt.Errorf("init logging: %s", err)
}
+ // Initialize packages loggers
+ tick.SetLogger(cmd.logService.NewLogger("[tick] ", log.LstdFlags))
+
+ // Initialize cmd logger
cmd.Logger = cmd.logService.NewLogger("[run] ", log.LstdFlags)
// Mark start-up in log.,
diff --git a/cmd/kapacitord/run/server_test.go b/cmd/kapacitord/run/server_test.go
index 4c56d937e..74acb11f9 100644
--- a/cmd/kapacitord/run/server_test.go
+++ b/cmd/kapacitord/run/server_test.go
@@ -69,7 +69,10 @@ func TestServer_DefineTask(t *testing.T) {
RetentionPolicy: "default",
},
}
- tick := "stream.from().measurement('test')"
+ tick := `stream
+ |from()
+ .measurement('test')
+`
r, err := s.DefineTask(name, ttype, tick, dbrps)
if err != nil {
t.Fatal(err)
@@ -123,7 +126,10 @@ func TestServer_EnableTask(t *testing.T) {
RetentionPolicy: "default",
},
}
- tick := "stream.from().measurement('test')"
+ tick := `stream
+ |from()
+ .measurement('test')
+`
r, err := s.DefineTask(name, ttype, tick, dbrps)
if err != nil {
t.Fatal(err)
@@ -192,7 +198,10 @@ func TestServer_DisableTask(t *testing.T) {
RetentionPolicy: "default",
},
}
- tick := "stream.from().measurement('test')"
+ tick := `stream
+ |from()
+ .measurement('test')
+`
r, err := s.DefineTask(name, ttype, tick, dbrps)
if err != nil {
t.Fatal(err)
@@ -262,7 +271,10 @@ func TestServer_DeleteTask(t *testing.T) {
RetentionPolicy: "default",
},
}
- tick := "stream.from().measurement('test')"
+ tick := `stream
+ |from()
+ .measurement('test')
+`
r, err := s.DefineTask(name, ttype, tick, dbrps)
if err != nil {
t.Fatal(err)
@@ -288,7 +300,10 @@ func TestServer_ListTasks(t *testing.T) {
count := 10
ttype := "stream"
- tick := "stream.from().measurement('test')"
+ tick := `stream
+ |from()
+ .measurement('test')
+`
dbrps := []kapacitor.DBRP{
{
Database: "mydb",
@@ -356,14 +371,14 @@ func TestServer_StreamTask(t *testing.T) {
Database: "mydb",
RetentionPolicy: "myrp",
}}
- tick := `
-stream
- .from().measurement('test')
- .window()
- .period(10s)
- .every(10s)
- .count('value')
- .httpOut('count')
+ tick := `stream
+ |from()
+ .measurement('test')
+ |window()
+ .period(10s)
+ .every(10s)
+ |count('value')
+ |httpOut('count')
`
r, err := s.DefineTask(name, ttype, tick, dbrps)
@@ -458,13 +473,12 @@ func TestServer_BatchTask(t *testing.T) {
Database: "mydb",
RetentionPolicy: "myrp",
}}
- tick := `
-batch
- .query(' SELECT value from mydb.myrp.cpu ')
- .period(5ms)
- .every(5ms)
- .count('value')
- .httpOut('count')
+ tick := `batch
+ |query(' SELECT value from mydb.myrp.cpu ')
+ .period(5ms)
+ .every(5ms)
+ |count('value')
+ |httpOut('count')
`
r, err := s.DefineTask(name, ttype, tick, dbrps)
@@ -519,13 +533,12 @@ func TestServer_InvalidBatchTask(t *testing.T) {
Database: "mydb",
RetentionPolicy: "myrp",
}}
- tick := `
-batch
- .query(' SELECT value from unknowndb.unknownrp.cpu ')
- .period(5ms)
- .every(5ms)
- .count('value')
- .httpOut('count')
+ tick := `batch
+ |query(' SELECT value from unknowndb.unknownrp.cpu ')
+ .period(5ms)
+ .every(5ms)
+ |count('value')
+ |httpOut('count')
`
r, err := s.DefineTask(name, ttype, tick, dbrps)
@@ -564,18 +577,18 @@ func TestServer_RecordReplayStream(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
- tick := `
-stream
- .from().measurement('test')
- .window()
- .period(10s)
- .every(10s)
- .count('value')
- .alert()
- .id('test-count')
- .message('{{ .ID }} got: {{ index .Fields "count" }}')
- .crit(lambda: TRUE)
- .log('` + tmpDir + `/alert.log')
+ tick := `stream
+ |from()
+ .measurement('test')
+ |window()
+ .period(10s)
+ .every(10s)
+ |count('value')
+ |alert()
+ .id('test-count')
+ .message('{{ .ID }} got: {{ index .Fields "count" }}')
+ .crit(lambda: TRUE)
+ .log('` + tmpDir + `/alert.log')
`
r, err := s.DefineTask(name, ttype, tick, dbrps)
@@ -710,16 +723,15 @@ func TestServer_RecordReplayBatch(t *testing.T) {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
- tick := `
-batch
- .query('SELECT value from mydb.myrp.cpu')
- .period(2s)
- .every(2s)
- .alert()
- .id('test-batch')
- .message('{{ .ID }} got: {{ index .Fields "value" }}')
- .crit(lambda: "value" > 2.0)
- .log('` + tmpDir + `/alert.log')
+ tick := `batch
+ |query('SELECT value from mydb.myrp.cpu')
+ .period(2s)
+ .every(2s)
+ |alert()
+ .id('test-batch')
+ .message('{{ .ID }} got: {{ index .Fields "value" }}')
+ .crit(lambda: "value" > 2.0)
+ .log('` + tmpDir + `/alert.log')
`
r, err := s.DefineTask(name, ttype, tick, dbrps)
@@ -902,19 +914,19 @@ func testStreamAgent(t *testing.T, c *run.Config) {
Database: "mydb",
RetentionPolicy: "myrp",
}}
- tick := `
-stream
- .from().measurement('test')
- .groupBy('group')
- .movingAvg()
- .field('value')
- .size(10)
- .as('mean')
- .window()
- .period(11s)
- .every(11s)
- .last('mean').as('mean')
- .httpOut('moving_avg')
+ tick := `stream
+ |from()
+ .measurement('test')
+ .groupBy('group')
+ |movingAvg()
+ .field('value')
+ .size(10)
+ .as('mean')
+ |window()
+ .period(11s)
+ .every(11s)
+ |last('mean').as('mean')
+ |httpOut('moving_avg')
`
r, err := s.DefineTask(name, ttype, tick, dbrps)
@@ -1124,17 +1136,16 @@ func testBatchAgent(t *testing.T, c *run.Config) {
Database: "mydb",
RetentionPolicy: "myrp",
}}
- tick := `
-batch
- .query(' SELECT value from mydb.myrp.cpu ')
- .period(5ms)
- .every(5ms)
- .groupBy('count')
- .outliers()
- .field('value')
- .scale(1.5)
- .count('value')
- .httpOut('count')
+ tick := `batch
+ |query(' SELECT value from mydb.myrp.cpu ')
+ .period(5ms)
+ .every(5ms)
+ .groupBy('count')
+ |outliers()
+ .field('value')
+ .scale(1.5)
+ |count('value')
+ |httpOut('count')
`
r, err := s.DefineTask(name, ttype, tick, dbrps)
diff --git a/integrations/batcher_test.go b/integrations/batcher_test.go
index a92429be1..40710750e 100644
--- a/integrations/batcher_test.go
+++ b/integrations/batcher_test.go
@@ -19,15 +19,15 @@ func TestBatch_Derivative(t *testing.T) {
var script = `
batch
- .query('''
+ |query('''
SELECT sum("value") as "value"
FROM "telegraf"."default".packets
''')
.period(10s)
.every(10s)
.groupBy(time(2s))
- .derivative('value')
- .httpOut('TestBatch_Derivative')
+ |derivative('value')
+ |httpOut('TestBatch_Derivative')
`
er := kapacitor.Result{
@@ -65,16 +65,16 @@ func TestBatch_DerivativeUnit(t *testing.T) {
var script = `
batch
- .query('''
+ |query('''
SELECT sum("value") as "value"
FROM "telegraf"."default".packets
''')
.period(10s)
.every(10s)
.groupBy(time(2s))
- .derivative('value')
+ |derivative('value')
.unit(2s)
- .httpOut('TestBatch_Derivative')
+ |httpOut('TestBatch_Derivative')
`
er := kapacitor.Result{
@@ -112,15 +112,15 @@ func TestBatch_DerivativeN(t *testing.T) {
var script = `
batch
- .query('''
+ |query('''
SELECT sum("value") as "value"
FROM "telegraf"."default".packets
''')
.period(10s)
.every(10s)
.groupBy(time(2s))
- .derivative('value')
- .httpOut('TestBatch_DerivativeNN')
+ |derivative('value')
+ |httpOut('TestBatch_DerivativeNN')
`
er := kapacitor.Result{
@@ -158,16 +158,16 @@ func TestBatch_DerivativeNN(t *testing.T) {
var script = `
batch
- .query('''
+ |query('''
SELECT sum("value") as "value"
FROM "telegraf"."default".packets
''')
.period(10s)
.every(10s)
.groupBy(time(2s))
- .derivative('value')
+ |derivative('value')
.nonNegative()
- .httpOut('TestBatch_DerivativeNN')
+ |httpOut('TestBatch_DerivativeNN')
`
er := kapacitor.Result{
@@ -201,7 +201,7 @@ func TestBatch_SimpleMR(t *testing.T) {
var script = `
batch
- .query('''
+ |query('''
SELECT mean("value")
FROM "telegraf"."default".cpu_usage_idle
WHERE "host" = 'serverA'
@@ -209,12 +209,12 @@ batch
.period(10s)
.every(10s)
.groupBy(time(2s), 'cpu')
- .count('mean')
- .window()
+ |count('mean')
+ |window()
.period(20s)
.every(20s)
- .sum('count')
- .httpOut('TestBatch_SimpleMR')
+ |sum('count')
+ |httpOut('TestBatch_SimpleMR')
`
er := kapacitor.Result{
@@ -256,7 +256,7 @@ func TestBatch_Join(t *testing.T) {
var script = `
var cpu0 = batch
- .query('''
+ |query('''
SELECT mean("value")
FROM "telegraf"."default".cpu_usage_idle
WHERE "cpu" = 'cpu0'
@@ -266,7 +266,7 @@ var cpu0 = batch
.groupBy(time(2s))
var cpu1 = batch
- .query('''
+ |query('''
SELECT mean("value")
FROM "telegraf"."default".cpu_usage_idle
WHERE "cpu" = 'cpu1'
@@ -275,14 +275,15 @@ var cpu1 = batch
.every(10s)
.groupBy(time(2s))
-cpu0.join(cpu1)
- .as('cpu0', 'cpu1')
- .count('cpu0.mean')
- .window()
+cpu0
+ |join(cpu1)
+ .as('cpu0', 'cpu1')
+ |count('cpu0.mean')
+ |window()
.period(20s)
.every(20s)
- .sum('count')
- .httpOut('TestBatch_Join')
+ |sum('count')
+ |httpOut('TestBatch_Join')
`
er := kapacitor.Result{
@@ -305,7 +306,7 @@ func TestBatch_JoinTolerance(t *testing.T) {
var script = `
var cpu0 = batch
- .query('''
+ |query('''
SELECT mean("value")
FROM "telegraf"."default".cpu_usage_idle
WHERE "cpu" = 'cpu0'
@@ -315,7 +316,7 @@ var cpu0 = batch
.groupBy(time(2s))
var cpu1 = batch
- .query('''
+ |query('''
SELECT mean("value")
FROM "telegraf"."default".cpu_usage_idle
WHERE "cpu" = 'cpu1'
@@ -324,15 +325,16 @@ var cpu1 = batch
.every(10s)
.groupBy(time(2s))
-cpu0.join(cpu1)
- .as('cpu0', 'cpu1')
- .tolerance(1s)
- .count('cpu0.mean')
- .window()
+cpu0
+ |join(cpu1)
+ .as('cpu0', 'cpu1')
+ .tolerance(1s)
+ |count('cpu0.mean')
+ |window()
.period(20s)
.every(20s)
- .sum('count')
- .httpOut('TestBatch_JoinTolerance')
+ |sum('count')
+ |httpOut('TestBatch_JoinTolerance')
`
er := kapacitor.Result{
diff --git a/integrations/streamer_test.go b/integrations/streamer_test.go
index d23fbf7c6..971d3a10a 100644
--- a/integrations/streamer_test.go
+++ b/integrations/streamer_test.go
@@ -62,13 +62,13 @@ func TestStream_Derivative(t *testing.T) {
var script = `
stream
- .from().measurement('packets')
- .derivative('value')
- .window()
+ |from().measurement('packets')
+ |derivative('value')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.mean('value'))
- .httpOut('TestStream_Derivative')
+ |mean('value')
+ |httpOut('TestStream_Derivative')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -91,14 +91,14 @@ func TestStream_DerivativeUnit(t *testing.T) {
var script = `
stream
- .from().measurement('packets')
- .derivative('value')
+ |from().measurement('packets')
+ |derivative('value')
.unit(10s)
- .window()
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.mean('value'))
- .httpOut('TestStream_Derivative')
+ |mean('value')
+ |httpOut('TestStream_Derivative')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -121,14 +121,14 @@ func TestStream_DerivativeNN(t *testing.T) {
var script = `
stream
- .from().measurement('packets')
- .derivative('value')
+ |from().measurement('packets')
+ |derivative('value')
.nonNegative()
- .window()
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.mean('value'))
- .httpOut('TestStream_DerivativeNN')
+ |mean('value')
+ |httpOut('TestStream_DerivativeNN')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -151,13 +151,13 @@ func TestStream_DerivativeN(t *testing.T) {
var script = `
stream
- .from().measurement('packets')
- .derivative('value')
- .window()
+ |from().measurement('packets')
+ |derivative('value')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.mean('value'))
- .httpOut('TestStream_DerivativeNN')
+ |mean('value')
+ |httpOut('TestStream_DerivativeNN')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -182,16 +182,16 @@ func TestStream_WindowMissing(t *testing.T) {
var period = 3s
var every = 2s
stream
- .from()
+ |from()
.database('dbname')
.retentionPolicy('rpname')
.measurement('cpu')
.where(lambda: "host" == 'serverA')
- .window()
+ |window()
.period(period)
.every(every)
- .mapReduce(influxql.count('value'))
- .httpOut('TestStream_WindowMissing')
+ |count('value')
+ |httpOut('TestStream_WindowMissing')
`
er := kapacitor.Result{
@@ -217,17 +217,17 @@ func TestStream_WindowMissingAligned(t *testing.T) {
var period = 3s
var every = 2s
stream
- .from()
+ |from()
.database('dbname')
.retentionPolicy('rpname')
.measurement('cpu')
.where(lambda: "host" == 'serverA')
- .window()
+ |window()
.period(period)
.every(every)
.align()
- .mapReduce(influxql.count('value'))
- .httpOut('TestStream_WindowMissing')
+ |count('value')
+ |httpOut('TestStream_WindowMissing')
`
er := kapacitor.Result{
@@ -253,15 +253,15 @@ func TestStream_Window(t *testing.T) {
var period = 10s
var every = 10s
stream
- .from()
+ |from()
.database('dbname')
.retentionPolicy('rpname')
.measurement('cpu')
.where(lambda: "host" == 'serverA')
- .window()
+ |window()
.period(period)
.every(every)
- .httpOut('TestStream_Window')
+ |httpOut('TestStream_Window')
`
nums := []float64{
@@ -307,31 +307,32 @@ func TestStream_Shift(t *testing.T) {
var period = 5s
var data = stream
- .from()
+ |from()
.measurement('cpu')
.where(lambda: "host" == 'serverA')
var past = data
- .window()
+ |window()
.period(period)
.every(period)
.align()
- .mapReduce(influxql.count('value'))
- .shift(period)
+ |count('value')
+ |shift(period)
var current = data
- .window()
+ |window()
.period(period)
.every(period)
.align()
- .mapReduce(influxql.count('value'))
+ |count('value')
-past.join(current)
- .as('past', 'current')
- .eval(lambda: "current.count" - "past.count")
+past
+ |join(current)
+ .as('past', 'current')
+ |eval(lambda: "current.count" - "past.count")
.keep()
.as('diff')
- .httpOut('TestStream_Shift')
+ |httpOut('TestStream_Shift')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -358,31 +359,32 @@ func TestStream_ShiftBatch(t *testing.T) {
var period = 5s
var data = stream
- .from()
+ |from()
.measurement('cpu')
.where(lambda: "host" == 'serverA')
var past = data
- .window()
+ |window()
.period(period)
.every(period)
.align()
- .shift(period)
- .mapReduce(influxql.count('value'))
+ |shift(period)
+ |count('value')
var current = data
- .window()
+ |window()
.period(period)
.every(period)
.align()
- .mapReduce(influxql.count('value'))
+ |count('value')
-past.join(current)
- .as('past', 'current')
- .eval(lambda: "current.count" - "past.count")
+past
+ |join(current)
+ .as('past', 'current')
+ |eval(lambda: "current.count" - "past.count")
.keep()
.as('diff')
- .httpOut('TestStream_Shift')
+ |httpOut('TestStream_Shift')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -409,31 +411,32 @@ func TestStream_ShiftNegative(t *testing.T) {
var period = 5s
var data = stream
- .from()
+ |from()
.measurement('cpu')
.where(lambda: "host" == 'serverA')
var past = data
- .window()
+ |window()
.period(period)
.every(period)
.align()
- .mapReduce(influxql.count('value'))
+ |count('value')
var current = data
- .window()
+ |window()
.period(period)
.every(period)
.align()
- .mapReduce(influxql.count('value'))
- .shift(-period)
+ |count('value')
+ |shift(-period)
-past.join(current)
- .as('past', 'current')
- .eval(lambda: "current.count" - "past.count")
+past
+ |join(current)
+ .as('past', 'current')
+ |eval(lambda: "current.count" - "past.count")
.keep()
.as('diff')
- .httpOut('TestStream_Shift')
+ |httpOut('TestStream_Shift')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -460,31 +463,32 @@ func TestStream_ShiftBatchNegative(t *testing.T) {
var period = 5s
var data = stream
- .from()
+ |from()
.measurement('cpu')
.where(lambda: "host" == 'serverA')
var past = data
- .window()
+ |window()
.period(period)
.every(period)
.align()
- .mapReduce(influxql.count('value'))
+ |count('value')
var current = data
- .window()
+ |window()
.period(period)
.every(period)
.align()
- .shift(-period)
- .mapReduce(influxql.count('value'))
+ |shift(-period)
+ |count('value')
-past.join(current)
- .as('past', 'current')
- .eval(lambda: "current.count" - "past.count")
+past
+ |join(current)
+ .as('past', 'current')
+ |eval(lambda: "current.count" - "past.count")
.keep()
.as('diff')
- .httpOut('TestStream_Shift')
+ |httpOut('TestStream_Shift')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -509,13 +513,14 @@ func TestStream_SimpleMR(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .httpOut('TestStream_SimpleMR')
+ |count('value')
+ |httpOut('TestStream_SimpleMR')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -538,14 +543,15 @@ func TestStream_HttpOutPassThrough(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .httpOut('unused')
- .httpOut('TestStream_SimpleMR')
+ |count('value')
+ |httpOut('unused')
+ |httpOut('TestStream_SimpleMR')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -568,13 +574,14 @@ func TestStream_BatchGroupBy(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .window()
+ |from()
+ .measurement('cpu')
+ |window()
.period(5s)
.every(5s)
- .groupBy('host')
- .mapReduce(influxql.count('value'))
- .httpOut('TestStream_BatchGroupBy')
+ |groupBy('host')
+ |count('value')
+ |httpOut('TestStream_BatchGroupBy')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -615,13 +622,14 @@ func TestStream_BatchGroupByAll(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .window()
+ |from()
+ .measurement('cpu')
+ |window()
.period(5s)
.every(5s)
- .groupBy(*)
- .mapReduce(influxql.count('value'))
- .httpOut('TestStream_BatchGroupBy')
+ |groupBy(*)
+ |count('value')
+ |httpOut('TestStream_BatchGroupBy')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -662,16 +670,17 @@ func TestStream_SimpleWhere(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
+ |from()
+ .measurement('cpu')
.where(lambda: "host" == 'serverA')
.where(lambda: "host" != 'serverB')
- .window()
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .where(lambda: "count" > 0)
- .where(lambda: "count" < 12)
- .httpOut('TestStream_SimpleMR')
+ |count('value')
+ |where(lambda: "count" > 0)
+ |where(lambda: "count" < 12)
+ |httpOut('TestStream_SimpleMR')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -695,13 +704,14 @@ func TestStream_VarWhereString(t *testing.T) {
var script = `
var serverStr = 'serverA'
stream
- .from().measurement('cpu')
- .where(lambda: "host" == serverStr )
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == serverStr )
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .httpOut('TestStream_SimpleMR')
+ |count('value')
+ |httpOut('TestStream_SimpleMR')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -725,13 +735,14 @@ func TestStream_VarWhereRegex(t *testing.T) {
var script = `
var serverPattern = /^serverA$/
stream
- .from().measurement('cpu')
- .where(lambda: "host" =~ serverPattern )
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" =~ serverPattern )
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .httpOut('TestStream_SimpleMR')
+ |count('value')
+ |httpOut('TestStream_SimpleMR')
`
er := kapacitor.Result{
Series: imodels.Rows{
@@ -754,13 +765,14 @@ func TestStream_GroupBy(t *testing.T) {
var script = `
stream
- .from().measurement('errors')
- .groupBy('service')
- .window()
+ |from()
+ .measurement('errors')
+ .groupBy('service')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.sum('value'))
- .httpOut('TestStream_GroupBy')
+ |sum('value')
+ |httpOut('TestStream_GroupBy')
`
er := kapacitor.Result{
@@ -802,30 +814,33 @@ func TestStream_Join(t *testing.T) {
var script = `
var errorCounts = stream
- .from().measurement('errors')
- .groupBy('service')
- .window()
- .period(10s)
- .every(10s)
- .align()
- .mapReduce(influxql.sum('value'))
+ |from()
+ .measurement('errors')
+ .groupBy('service')
+ |window()
+ .period(10s)
+ .every(10s)
+ .align()
+ |sum('value')
var viewCounts = stream
- .from().measurement('views')
- .groupBy('service')
- .window()
- .period(10s)
- .every(10s)
- .align()
- .mapReduce(influxql.sum('value'))
-
-errorCounts.join(viewCounts)
+ |from()
+ .measurement('views')
+ .groupBy('service')
+ |window()
+ .period(10s)
+ .every(10s)
+ .align()
+ |sum('value')
+
+errorCounts
+ |join(viewCounts)
.as('errors', 'views')
.streamName('error_view')
- .eval(lambda: "errors.sum" / "views.sum")
+ |eval(lambda: "errors.sum" / "views.sum")
.as('error_percent')
.keep()
- .httpOut('TestStream_Join')
+ |httpOut('TestStream_Join')
`
er := kapacitor.Result{
@@ -873,25 +888,28 @@ func TestStream_JoinTolerance(t *testing.T) {
var script = `
var errorCounts = stream
- .from().measurement('errors')
- .groupBy('service')
+ |from()
+ .measurement('errors')
+ .groupBy('service')
var viewCounts = stream
- .from().measurement('views')
- .groupBy('service')
+ |from()
+ .measurement('views')
+ .groupBy('service')
-errorCounts.join(viewCounts)
+errorCounts
+ |join(viewCounts)
.as('errors', 'views')
.tolerance(2s)
.streamName('error_view')
- .eval(lambda: "errors.value" / "views.value")
+ |eval(lambda: "errors.value" / "views.value")
.as('error_percent')
- .window()
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.mean('error_percent'))
+ |mean('error_percent')
.as('error_percent')
- .httpOut('TestStream_JoinTolerance')
+ |httpOut('TestStream_JoinTolerance')
`
er := kapacitor.Result{
@@ -932,24 +950,27 @@ errorCounts.join(viewCounts)
func TestStream_JoinFill(t *testing.T) {
var script = `
var errorCounts = stream
- .from().measurement('errors')
- .groupBy('service')
+ |from()
+ .measurement('errors')
+ .groupBy('service')
var viewCounts = stream
- .from().measurement('views')
- .groupBy('service')
+ |from()
+ .measurement('views')
+ .groupBy('service')
-errorCounts.join(viewCounts)
+errorCounts
+ |join(viewCounts)
.as('errors', 'views')
.fill(0.0)
.streamName('error_view')
- .eval(lambda: "errors.value" + "views.value")
+ |eval(lambda: "errors.value" + "views.value")
.as('error_percent')
- .window()
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('error_percent'))
- .httpOut('TestStream_JoinFill')
+ |count('error_percent')
+ |httpOut('TestStream_JoinFill')
`
er := kapacitor.Result{
@@ -991,24 +1012,28 @@ func TestStream_JoinN(t *testing.T) {
var script = `
var cpu = stream
- .from().measurement('cpu')
- .where(lambda: "cpu" == 'total')
+ |from()
+ .measurement('cpu')
+ .where(lambda: "cpu" == 'total')
var mem = stream
- .from().measurement('memory')
- .where(lambda: "type" == 'free')
+ |from()
+ .measurement('memory')
+ .where(lambda: "type" == 'free')
var disk = stream
- .from().measurement('disk')
- .where(lambda: "device" == 'sda')
+ |from()
+ .measurement('disk')
+ .where(lambda: "device" == 'sda')
-cpu.join(mem, disk)
+cpu
+ |join(mem, disk)
.as('cpu', 'mem', 'disk')
.streamName('magic')
.fill(0.0)
- .window()
- .period(10s)
- .every(10s)
- .mapReduce(influxql.count('cpu.value'))
- .httpOut('TestStream_JoinN')
+ |window()
+ .period(10s)
+ .every(10s)
+ |count('cpu.value')
+ |httpOut('TestStream_JoinN')
`
er := kapacitor.Result{
@@ -1031,31 +1056,34 @@ cpu.join(mem, disk)
func TestStream_JoinOn(t *testing.T) {
var script = `
var errorsByServiceDC = stream
- .from().measurement('errors')
- .groupBy('dc', 'service')
- .window()
- .period(10s)
- .every(10s)
- .align()
- .mapReduce(influxql.sum('value'))
+ |from()
+ .measurement('errors')
+ .groupBy('dc', 'service')
+ |window()
+ .period(10s)
+ .every(10s)
+ .align()
+ |sum('value')
var errorsByServiceGlobal = stream
- .from().measurement('errors')
- .groupBy('service')
- .window()
- .period(10s)
- .every(10s)
- .align()
- .mapReduce(influxql.sum('value'))
-
-errorsByServiceGlobal.join(errorsByServiceDC)
+ |from()
+ .measurement('errors')
+ .groupBy('service')
+ |window()
+ .period(10s)
+ .every(10s)
+ .align()
+ |sum('value')
+
+errorsByServiceGlobal
+ |join(errorsByServiceDC)
.as('service', 'dc')
.on('service')
.streamName('dc_error_percent')
- .eval(lambda: "dc.sum" / "service.sum")
+ |eval(lambda: "dc.sum" / "service.sum")
.keep()
.as('value')
- .httpOut('TestStream_JoinOn')
+ |httpOut('TestStream_JoinOn')
`
er := kapacitor.Result{
@@ -1146,31 +1174,34 @@ errorsByServiceGlobal.join(errorsByServiceDC)
func TestStream_JoinOnGap(t *testing.T) {
var script = `
var errorsByServiceDCRack = stream
- .from().measurement('errors')
- .groupBy('dc', 'service', 'rack')
- .window()
- .period(10s)
- .every(10s)
- .align()
- .mapReduce(influxql.sum('value'))
+ |from()
+ .measurement('errors')
+ .groupBy('dc', 'service', 'rack')
+ |window()
+ .period(10s)
+ .every(10s)
+ .align()
+ |sum('value')
var errorsByServiceGlobal = stream
- .from().measurement('errors')
- .groupBy('service')
- .window()
- .period(10s)
- .every(10s)
- .align()
- .mapReduce(influxql.sum('value'))
-
-errorsByServiceGlobal.join(errorsByServiceDCRack)
+ |from()
+ .measurement('errors')
+ .groupBy('service')
+ |window()
+ .period(10s)
+ .every(10s)
+ .align()
+ |sum('value')
+
+errorsByServiceGlobal
+ |join(errorsByServiceDCRack)
.as('service', 'loc')
.on('service')
.streamName('loc_error_percent')
- .eval(lambda: "loc.sum" / "service.sum")
+ |eval(lambda: "loc.sum" / "service.sum")
.keep()
.as('value')
- .httpOut('TestStream_JoinOn')
+ |httpOut('TestStream_JoinOn')
`
er := kapacitor.Result{
@@ -1328,22 +1359,26 @@ func TestStream_Union(t *testing.T) {
var script = `
var cpu = stream
- .from().measurement('cpu')
- .where(lambda: "cpu" == 'total')
+ |from()
+ .measurement('cpu')
+ .where(lambda: "cpu" == 'total')
var mem = stream
- .from().measurement('memory')
- .where(lambda: "type" == 'free')
+ |from()
+ .measurement('memory')
+ .where(lambda: "type" == 'free')
var disk = stream
- .from().measurement('disk')
- .where(lambda: "device" == 'sda')
+ |from()
+ .measurement('disk')
+ .where(lambda: "device" == 'sda')
-cpu.union(mem, disk)
+cpu
+ |union(mem, disk)
.rename('cpu_mem_disk')
- .window()
- .period(10s)
- .every(10s)
- .mapReduce(influxql.count('value'))
- .httpOut('TestStream_Union')
+ |window()
+ .period(10s)
+ .every(10s)
+ |count('value')
+ |httpOut('TestStream_Union')
`
er := kapacitor.Result{
@@ -1374,26 +1409,28 @@ func TestStream_InfluxQL(t *testing.T) {
var scriptTmpl = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.{{ .Method }}({{ .Args }}))
+ |mapReduce(influxql.{{ .Method }}({{ .Args }}))
{{ if .UsePointTimes }}.usePointTimes(){{ end }}
- .httpOut('TestStream_InfluxQL')
+ |httpOut('TestStream_InfluxQL')
`
var newScriptTmpl = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ |window()
.period(10s)
.every(10s)
- .{{ .Method }}({{ .Args }})
+ |{{ .Method }}({{ .Args }})
{{ if .UsePointTimes }}.usePointTimes(){{ end }}
- .httpOut('TestStream_InfluxQL')
+ |httpOut('TestStream_InfluxQL')
`
endTime := time.Date(1971, 1, 1, 0, 0, 10, 0, time.UTC)
testCases := []testCase{
@@ -1831,7 +1868,7 @@ stream
testStreamerWithOutput(
t,
"TestStream_InfluxQL",
- string(script.Bytes()),
+ script.String(),
13*time.Second,
tc.ER,
nil,
@@ -1844,16 +1881,17 @@ stream
func TestStream_CustomFunctions(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .customFunc()
+ |count('value')
+ |customFunc()
.opt1('count')
.opt2(FALSE, 1, 1.0, '1.0', 1s)
- .httpOut('TestStream_CustomFunctions')
+ |httpOut('TestStream_CustomFunctions')
`
cmd := cmd_test.NewCommandHelper()
@@ -2036,14 +2074,15 @@ var warnThreshold = 7.0
var critThreshold = 8.0
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .groupBy('host')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ .groupBy('host')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .alert()
+ |count('value')
+ |alert()
.id('kapacitor/{{ .Name }}/{{ index .Tags "host" }}')
.details('details')
.info(lambda: "count" > infoThreshold)
@@ -2111,14 +2150,15 @@ func TestStream_AlertSensu(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .groupBy('host')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ .groupBy('host')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .alert()
+ |count('value')
+ |alert()
.id('kapacitor.{{ .Name }}.{{ index .Tags "host" }}')
.info(lambda: "count" > 6.0)
.warn(lambda: "count" > 7.0)
@@ -2199,22 +2239,23 @@ func TestStream_AlertSlack(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .groupBy('host')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ .groupBy('host')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .alert()
+ |count('value')
+ |alert()
.id('kapacitor/{{ .Name }}/{{ index .Tags "host" }}')
.info(lambda: "count" > 6.0)
.warn(lambda: "count" > 7.0)
.crit(lambda: "count" > 8.0)
.slack()
- .channel('#alerts')
+ .channel('#alerts')
.slack()
- .channel('@jim')
+ .channel('@jim')
`
clock, et, replayErr, tm := testStreamer(t, "TestStream_Alert", script, nil)
@@ -2276,14 +2317,15 @@ func TestStream_AlertHipChat(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .groupBy('host')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ .groupBy('host')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .alert()
+ |count('value')
+ |alert()
.id('kapacitor/{{ .Name }}/{{ index .Tags "host" }}')
.info(lambda: "count" > 6.0)
.warn(lambda: "count" > 7.0)
@@ -2390,14 +2432,15 @@ func TestStream_AlertAlerta(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .groupBy('host')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ .groupBy('host')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .alert()
+ |count('value')
+ |alert()
.id('{{ index .Tags "host" }}')
.message('kapacitor/{{ .Name }}/{{ index .Tags "host" }} is {{ .Level }} @{{.Time}}')
.info(lambda: "count" > 6.0)
@@ -2412,7 +2455,7 @@ stream
.environment('{{ index .Tags "host" }}')
.origin('override')
.group('{{ .ID }}')
- .value('{{ index .Fields "count" | printf "%0.0f" }}')
+ .value('{{ index .Fields "count" }}')
.services('serviceA', 'serviceB')
`
@@ -2515,14 +2558,15 @@ func TestStream_AlertOpsGenie(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .groupBy('host')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ .groupBy('host')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .alert()
+ |count('value')
+ |alert()
.id('kapacitor/{{ .Name }}/{{ index .Tags "host" }}')
.info(lambda: "count" > 6.0)
.warn(lambda: "count" > 7.0)
@@ -2591,14 +2635,15 @@ func TestStream_AlertPagerDuty(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .groupBy('host')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ .groupBy('host')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .alert()
+ |count('value')
+ |alert()
.id('kapacitor/{{ .Name }}/{{ index .Tags "host" }}')
.message('{{ .Level }} alert for {{ .ID }}')
.info(lambda: "count" > 6.0)
@@ -2674,14 +2719,15 @@ func TestStream_AlertVictorOps(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .groupBy('host')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ .groupBy('host')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .alert()
+ |count('value')
+ |alert()
.id('kapacitor/{{ .Name }}/{{ index .Tags "host" }}')
.info(lambda: "count" > 6.0)
.warn(lambda: "count" > 7.0)
@@ -2741,14 +2787,15 @@ func TestStream_AlertTalk(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .groupBy('host')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ .groupBy('host')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .alert()
+ |count('value')
+ |alert()
.id('kapacitor/{{ .Name }}/{{ index .Tags "host" }}')
.info(lambda: "count" > 6.0)
.warn(lambda: "count" > 7.0)
@@ -2841,12 +2888,13 @@ func TestStream_AlertSigma(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .eval(lambda: sigma("value"))
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ |eval(lambda: sigma("value"))
.as('sigma')
.keep()
- .alert()
+ |alert()
.details('{{ .Message }}')
.info(lambda: "sigma" > 2.0)
.warn(lambda: "sigma" > 3.0)
@@ -2900,9 +2948,10 @@ func TestStream_AlertComplexWhere(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA' AND sigma("value") > 2)
- .alert()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA' AND sigma("value") > 2)
+ |alert()
.details('')
.crit(lambda: TRUE)
.post('` + ts.URL + `')
@@ -2924,8 +2973,9 @@ func TestStream_AlertStateChangesOnly(t *testing.T) {
defer ts.Close()
var script = `
stream
- .from().measurement('cpu')
- .alert()
+ |from()
+ .measurement('cpu')
+ |alert()
.crit(lambda: "value" < 93)
.stateChangesOnly()
.post('` + ts.URL + `')
@@ -2948,9 +2998,10 @@ func TestStream_AlertFlapping(t *testing.T) {
defer ts.Close()
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .alert()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ |alert()
.info(lambda: "value" < 95)
.warn(lambda: "value" < 94)
.crit(lambda: "value" < 93)
@@ -2971,13 +3022,14 @@ func TestStream_InfluxDBOut(t *testing.T) {
var script = `
stream
- .from().measurement('cpu')
- .where(lambda: "host" == 'serverA')
- .window()
+ |from()
+ .measurement('cpu')
+ .where(lambda: "host" == 'serverA')
+ |window()
.period(10s)
.every(10s)
- .mapReduce(influxql.count('value'))
- .influxDBOut()
+ |count('value')
+ |influxDBOut()
.database('db')
.retentionPolicy('rp')
.measurement('m')
@@ -3044,7 +3096,7 @@ stream
if p.Name() != "m" {
t.Errorf("got %v exp %v", p.Name(), "m")
}
- if p.Fields()["count"] != 10.0 {
+ if p.Fields()["count"] != int64(10) {
t.Errorf("got %v exp %v", p.Fields()["count"], 10.0)
}
if len(p.Tags()) != 1 {
@@ -3064,24 +3116,26 @@ func TestStream_TopSelector(t *testing.T) {
var script = `
var topScores = stream
- .from().measurement('scores')
- // Get the most recent score for each player
- .groupBy('game', 'player')
- .window()
+ |from()
+ .measurement('scores')
+ // Get the most recent score for each player
+ .groupBy('game', 'player')
+ |window()
.period(2s)
.every(2s)
.align()
- .mapReduce(influxql.last('value'))
+ |last('value')
// Calculate the top 5 scores per game
- .groupBy('game')
- .top(5, 'last', 'player')
+ |groupBy('game')
+ |top(5, 'last', 'player')
topScores
- .httpOut('top_scores')
+ |httpOut('top_scores')
-topScores.sample(4s)
- .mapReduce(influxql.count('top'))
- .httpOut('top_scores_sampled')
+topScores
+ |sample(4s)
+ |count('top')
+ |httpOut('top_scores_sampled')
`
tw := time.Date(1971, 1, 1, 0, 0, 4, 0, time.UTC)
diff --git a/pipeline/alert.go b/pipeline/alert.go
index 922a51d50..4976a73d7 100644
--- a/pipeline/alert.go
+++ b/pipeline/alert.go
@@ -60,16 +60,16 @@ const defaultDetailsTmpl = "{{ json . }}"
//
// Example:
// stream
-// .groupBy('service')
-// .alert()
-// .id('kapacitor/{{ index .Tags "service" }}')
-// .message('{{ .ID }} is {{ .Level }} value:{{ index .Fields "value" }}')
-// .info(lambda: "value" > 10)
-// .warn(lambda: "value" > 20)
-// .crit(lambda: "value" > 30)
-// .post("http://example.com/api/alert")
-// .post("http://another.example.com/api/alert")
-// .email().to('oncall@example.com')
+// .groupBy('service')
+// |alert()
+// .id('kapacitor/{{ index .Tags "service" }}')
+// .message('{{ .ID }} is {{ .Level }} value:{{ index .Fields "value" }}')
+// .info(lambda: "value" > 10)
+// .warn(lambda: "value" > 20)
+// .crit(lambda: "value" > 30)
+// .post("http://example.com/api/alert")
+// .post("http://another.example.com/api/alert")
+// .email().to('oncall@example.com')
//
//
// It is assumed that each successive level filters a subset
@@ -93,26 +93,32 @@ type AlertNode struct {
// * Tags -- Map of tags. Use '{{ index .Tags "key" }}' to get a specific tag value.
//
// Example:
- // stream.from().measurement('cpu')
- // .groupBy('cpu')
- // .alert()
- // .id('kapacitor/{{ .Name }}/{{ .Group }}')
+ // stream
+ // |from()
+ // .measurement('cpu')
+ // .groupBy('cpu')
+ // |alert()
+ // .id('kapacitor/{{ .Name }}/{{ .Group }}')
//
// ID: kapacitor/cpu/cpu=cpu0,
//
// Example:
- // stream...
- // .groupBy('service')
- // .alert()
- // .id('kapacitor/{{ index .Tags "service" }}')
+ // stream
+ // |from()
+ // .measurement('cpu')
+ // .groupBy('service')
+ // |alert()
+ // .id('kapacitor/{{ index .Tags "service" }}')
//
// ID: kapacitor/authentication
//
// Example:
- // stream...
- // .groupBy('service', 'host')
- // .alert()
- // .id('kapacitor/{{ index .Tags "service" }}/{{ index .Tags "host" }}')
+ // stream
+ // |from()
+ // .measurement('cpu')
+ // .groupBy('service', 'host')
+ // |alert()
+ // .id('kapacitor/{{ index .Tags "service" }}/{{ index .Tags "host" }}')
//
// ID: kapacitor/authentication/auth001.example.com
//
@@ -134,11 +140,13 @@ type AlertNode struct {
// * Time -- The time of the point that triggered the event.
//
// Example:
- // stream...
- // .groupBy('service', 'host')
- // .alert()
- // .id('{{ index .Tags "service" }}/{{ index .Tags "host" }}')
- // .message('{{ .ID }} is {{ .Level}} value: {{ index .Fields "value" }}')
+ // stream
+ // |from()
+ // .measurement('cpu')
+ // .groupBy('service', 'host')
+ // |alert()
+ // .id('{{ index .Tags "service" }}/{{ index .Tags "host" }}')
+ // .message('{{ .ID }} is {{ .Level}} value: {{ index .Fields "value" }}')
//
// Message: authentication/auth001.example.com is CRITICAL value:42
//
@@ -160,7 +168,7 @@ type AlertNode struct {
// JSON string.
//
// Example:
- // .alert()
+ // |alert()
// .id('{{ .Name }}')
// .details('''
//
{{ .ID }}
@@ -183,7 +191,7 @@ type AlertNode struct {
Crit tick.Node
//tick:ignore
- UseFlapping bool
+ UseFlapping bool `tick:"Flapping"`
//tick:ignore
FlapLow float64
//tick:ignore
@@ -198,55 +206,55 @@ type AlertNode struct {
// Send alerts only on state changes.
// tick:ignore
- IsStateChangesOnly bool
+ IsStateChangesOnly bool `tick:"StateChangesOnly"`
// Post the JSON alert data to the specified URL.
// tick:ignore
- PostHandlers []*PostHandler
+ PostHandlers []*PostHandler `tick:"Post"`
// Email handlers
// tick:ignore
- EmailHandlers []*EmailHandler
+ EmailHandlers []*EmailHandler `tick:"Email"`
// A commands to run when an alert triggers
// tick:ignore
- ExecHandlers []*ExecHandler
+ ExecHandlers []*ExecHandler `tick:"Exec"`
// Log JSON alert data to file. One event per line.
// tick:ignore
- LogHandlers []*LogHandler
+ LogHandlers []*LogHandler `tick:"Log"`
// Send alert to VictorOps.
// tick:ignore
- VictorOpsHandlers []*VictorOpsHandler
+ VictorOpsHandlers []*VictorOpsHandler `tick:"VictorOps"`
// Send alert to PagerDuty.
// tick:ignore
- PagerDutyHandlers []*PagerDutyHandler
+ PagerDutyHandlers []*PagerDutyHandler `tick:"PagerDuty"`
// Send alert to Sensu.
// tick:ignore
- SensuHandlers []*SensuHandler
+ SensuHandlers []*SensuHandler `tick:"Sensu"`
// Send alert to Slack.
// tick:ignore
- SlackHandlers []*SlackHandler
+ SlackHandlers []*SlackHandler `tick:"Slack"`
// Send alert to HipChat.
// tick:ignore
- HipChatHandlers []*HipChatHandler
+ HipChatHandlers []*HipChatHandler `tick:"HipChat"`
// Send alert to Alerta.
// tick:ignore
- AlertaHandlers []*AlertaHandler
+ AlertaHandlers []*AlertaHandler `tick:"Alerta"`
// Send alert to OpsGenie
// tick:ignore
- OpsGenieHandlers []*OpsGenieHandler
+ OpsGenieHandlers []*OpsGenieHandler `tick:"OpsGenie"`
// Send alert to Talk.
// tick:ignore
- TalkHandlers []*TalkHandler
+ TalkHandlers []*TalkHandler `tick:"Talk"`
}
func newAlertNode(wants EdgeType) *AlertNode {
@@ -268,14 +276,16 @@ func newAlertNode(wants EdgeType) *AlertNode {
// are considered different states.
//
// Example:
-// stream...
-// .window()
-// .period(10s)
-// .every(10s)
-// .alert()
-// .crit(lambda: "value" > 10)
-// .stateChangesOnly()
-// .slack()
+// stream
+// |from()
+// .measurement('cpu')
+// |window()
+// .period(10s)
+// .every(10s)
+// |alert()
+// .crit(lambda: "value" > 10)
+// .stateChangesOnly()
+// .slack()
//
// If the "value" is greater than 10 for a total of 60s, then
// only two events will be sent. First, when the value crosses
@@ -342,7 +352,7 @@ type PostHandler struct {
// in the TICKscript.
//
// Example:
-// .alert()
+// |alert()
// .id('{{ .Name }}')
// // Email subject
// .meassage('{{ .ID }}:{{ .Level }}')
@@ -370,8 +380,8 @@ type PostHandler struct {
// state-changes-only = true
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
//
// Send email to 'oncall@example.com' from 'kapacitor@example.com'
//
@@ -452,15 +462,15 @@ type LogHandler struct {
// With the correct configuration you can now use VictorOps in TICKscripts.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .victorOps()
//
// Send alerts to VictorOps using the routing key in the configuration file.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .victorOps()
// .routingKey('team_rocket')
//
@@ -478,8 +488,8 @@ type LogHandler struct {
// global = true
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
//
// Send alert to VictorOps using the default routing key, found in the configuration.
// tick:property
@@ -520,8 +530,8 @@ type VictorOpsHandler struct {
// With the correct configuration you can now use PagerDuty in TICKscripts.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .pagerDuty()
//
// If the 'pagerduty' section in the configuration has the option: global = true
@@ -535,8 +545,8 @@ type VictorOpsHandler struct {
// global = true
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
//
// Send alert to PagerDuty.
// tick:property
@@ -570,15 +580,15 @@ type PagerDutyHandler struct {
// where the alert changed state are posted to the room.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .hipChat()
//
// Send alerts to HipChat room in the configuration file.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .hipChat()
// .room('Kapacitor')
//
@@ -599,8 +609,8 @@ type PagerDutyHandler struct {
// state-changes-only = true
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
//
// Send alert to HipChat using default room 'Test Room'.
// tick:property
@@ -642,8 +652,8 @@ type HipChatHandler struct {
// Send alerts to Alerta. The resource and event properties are required.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .alerta()
// .resource('Hostname or service')
// .event('Something went wrong')
@@ -651,8 +661,8 @@ type HipChatHandler struct {
// Alerta also accepts optional alert information.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .alerta()
// .resource('Hostname or service')
// .event('Something went wrong')
@@ -708,7 +718,7 @@ type AlertaHandler struct {
// List of effected Services
// tick:ignore
- Service []string
+ Service []string `tick:"Services"`
}
// List of effected services.
@@ -727,8 +737,8 @@ func (a *AlertaHandler) Services(service ...string) *AlertaHandler {
// source = "Kapacitor"
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .sensu()
//
// Send alerts to Sensu client.
@@ -764,23 +774,23 @@ type SensuHandler struct {
// where the alert changed state are posted to the channel.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .slack()
//
// Send alerts to Slack channel in the configuration file.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .slack()
// .channel('#alerts')
//
// Send alerts to Slack channel '#alerts'
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .slack()
// .channel('@jsmith')
//
@@ -799,8 +809,8 @@ type SensuHandler struct {
// state-changes-only = true
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
//
// Send alert to Slack using default channel '#general'.
// tick:property
@@ -836,15 +846,15 @@ type SlackHandler struct {
// With the correct configuration you can now use OpsGenie in TICKscripts.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .opsGenie()
//
// Send alerts to OpsGenie using the teams and recipients in the configuration file.
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .opsGenie()
// .teams('team_rocket','team_test')
//
@@ -862,8 +872,8 @@ type SlackHandler struct {
// global = true
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
//
// Send alert to OpsGenie using the default recipients, found in the configuration.
// tick:property
@@ -881,11 +891,11 @@ type OpsGenieHandler struct {
// OpsGenie Teams.
// tick:ignore
- TeamsList []string
+ TeamsList []string `tick:"Teams"`
// OpsGenie Recipients.
// tick:ignore
- RecipientsList []string
+ RecipientsList []string `tick:"Recipients"`
}
// The list of teams to be alerted. If empty defaults to the teams from the configuration.
@@ -920,8 +930,8 @@ func (og *OpsGenieHandler) Recipients(recipients ...string) *OpsGenieHandler {
// author_name = "Kapacitor"
//
// Example:
-// stream...
-// .alert()
+// stream
+// |alert()
// .talk()
//
// Send alerts to Talk client.
diff --git a/pipeline/batch.go b/pipeline/batch.go
index e85ef9ca4..e16f1cf08 100644
--- a/pipeline/batch.go
+++ b/pipeline/batch.go
@@ -13,10 +13,10 @@ import (
//
// Example:
// var errors = batch
-// .query('SELECT value from errors')
+// |query('SELECT value from errors')
// ...
// var views = batch
-// .query('SELECT value from views')
+// |query('SELECT value from views')
// ...
//
type SourceBatchNode struct {
@@ -57,14 +57,14 @@ func (b *SourceBatchNode) dot(buf *bytes.Buffer) {
//
// Example:
// batch
-// .query('''
+// |query('''
// SELECT mean("value")
// FROM "telegraf"."default".cpu_usage_idle
// WHERE "host" = 'serverA'
// ''')
-// .period(1m)
-// .every(20s)
-// .groupBy(time(10s), 'cpu')
+// .period(1m)
+// .every(20s)
+// .groupBy(time(10s), 'cpu')
// ...
//
// In the above example InfluxDB is queried every 20 seconds; the window of time returned
@@ -103,7 +103,7 @@ type BatchNode struct {
// The list of dimensions for the group-by clause.
//tick:ignore
- Dimensions []interface{}
+ Dimensions []interface{} `tick:"GroupBy"`
// Fill the data.
// Options are:
@@ -134,7 +134,8 @@ func newBatchNode() *BatchNode {
//
// Example:
// batch
-// .groupBy(time(10s), 'tag1', 'tag2'))
+// |query(...)
+// .groupBy(time(10s), 'tag1', 'tag2'))
//
// tick:property
func (b *BatchNode) GroupBy(d ...interface{}) *BatchNode {
diff --git a/pipeline/derivative.go b/pipeline/derivative.go
index 6cb23ca40..a7e157bf8 100644
--- a/pipeline/derivative.go
+++ b/pipeline/derivative.go
@@ -12,8 +12,9 @@ import (
//
// Example:
// stream
-// .from().measurement('net_rx_packets')
-// .derivative('value')
+// |from()
+// .measurement('net_rx_packets')
+// |derivative('value')
// .unit(1s) // default
// .nonNegative()
// ...
@@ -42,7 +43,7 @@ type DerivativeNode struct {
// Where negative values are acceptable.
// tick:ignore
- NonNegativeFlag bool
+ NonNegativeFlag bool `tick:"NonNegative"`
}
func newDerivativeNode(wants EdgeType, field string) *DerivativeNode {
diff --git a/pipeline/eval.go b/pipeline/eval.go
index 22aa8440e..48365d97c 100644
--- a/pipeline/eval.go
+++ b/pipeline/eval.go
@@ -11,7 +11,7 @@ import (
//
// Example:
// stream
-// .eval(lambda: "error_count" / "total_count")
+// |eval(lambda: "error_count" / "total_count")
// .as('error_percent')
//
// The above example will add a new field `error_percent` to each
@@ -23,13 +23,13 @@ type EvalNode struct {
// The name of the field that results from applying the expression.
// tick:ignore
- AsList []string
+ AsList []string `tick:"As"`
// tick:ignore
Expressions []tick.Node
// tick:ignore
- KeepFlag bool
+ KeepFlag bool `tick:"Keep"`
// List of fields to keep
// if empty and KeepFlag is true
// keep all fields.
@@ -52,7 +52,7 @@ func newEvalNode(e EdgeType, exprs []tick.Node) *EvalNode {
//
// Example:
// stream
-// .eval(lambda: "value" * "value", lambda: 1.0 / "value2")
+// |eval(lambda: "value" * "value", lambda: 1.0 / "value2")
// .as('value2', 'inv_value2')
//
// The above example calculates two fields from the value and names them
@@ -75,9 +75,9 @@ func (e *EvalNode) As(names ...string) *EvalNode {
//
// Example:
// stream
-// .eval(lambda: "value" * "value", lambda: 1.0 / "value2")
+// |eval(lambda: "value" * "value", lambda: 1.0 / "value2")
// .as('value2', 'inv_value2')
-// .keep('value', 'inv_value2')
+// .keep('value', 'inv_value2')
//
// In the above example the original field `value` is preserved.
// In addition the new field `value2` is calculated and used in evaluating
diff --git a/pipeline/group_by.go b/pipeline/group_by.go
index c5df85ab0..b9ef0257d 100644
--- a/pipeline/group_by.go
+++ b/pipeline/group_by.go
@@ -7,7 +7,7 @@ package pipeline
//
// Example:
// stream
-// .groupBy('service', 'datacenter')
+// |groupBy('service', 'datacenter')
// ...
//
// The above example groups the data along two dimensions `service` and `datacenter`.
diff --git a/pipeline/http_out.go b/pipeline/http_out.go
index c43521fde..ed57b268b 100644
--- a/pipeline/http_out.go
+++ b/pipeline/http_out.go
@@ -9,12 +9,12 @@ package pipeline
//
// Example:
// stream
-// .window()
+// |window()
// .period(10s)
// .every(5s)
-// .top('value', 10)
+// |top('value', 10)
// //Publish the top 10 results over the last 10s updated every 5s.
-// .httpOut('top10')
+// |httpOut('top10')
//
type HTTPOutNode struct {
chainnode
diff --git a/pipeline/influxdb_out.go b/pipeline/influxdb_out.go
index 6db688e0f..8bee8da81 100644
--- a/pipeline/influxdb_out.go
+++ b/pipeline/influxdb_out.go
@@ -9,10 +9,10 @@ const DefaultFlushInterval = time.Second * 10
//
// Example:
// stream
-// .eval(lambda: "errors" / "total")
+// |eval(lambda: "errors" / "total")
// .as('error_percent')
// // Write the transformed data to InfluxDB
-// .influxDBOut()
+// |influxDBOut()
// .database('mydb')
// .retentionPolicy('myrp')
// .measurement('errors')
@@ -43,7 +43,7 @@ type InfluxDBOutNode struct {
FlushInterval time.Duration
// Static set of tags to add to all data points before writing them.
//tick:ignore
- Tags map[string]string
+ Tags map[string]string `tick:"Tag"`
}
func newInfluxDBOutNode(wants EdgeType) *InfluxDBOutNode {
diff --git a/pipeline/influxql.go b/pipeline/influxql.go
index a607ee4ba..507a3aa89 100644
--- a/pipeline/influxql.go
+++ b/pipeline/influxql.go
@@ -14,11 +14,11 @@ import "github.com/influxdata/influxdb/influxql"
//
// Example:
// stream
-// .window()
+// |window()
// .period(10s)
// .every(10s)
// // Sum the values for each 10s window of data.
-// .sum('value')
+// |sum('value')
//
//
// Note: Derivative has its own implementation as a DerivativeNode instead of as part of the
@@ -39,7 +39,7 @@ type InfluxQLNode struct {
ReduceCreater ReduceCreater
// tick:ignore
- PointTimes bool
+ PointTimes bool `tick:"UsePointTimes"`
}
func newInfluxQLNode(method, field string, wants, provides EdgeType, reducer ReduceCreater) *InfluxQLNode {
diff --git a/pipeline/join.go b/pipeline/join.go
index 7ec9a8060..25ec1b7bc 100644
--- a/pipeline/join.go
+++ b/pipeline/join.go
@@ -20,11 +20,14 @@ import (
//
// Example:
// var errors = stream
-// .from().measurement('errors')
+// |from()
+// .measurement('errors')
// var requests = stream
-// .from().measurement('requests')
+// |from()
+// .measurement('requests')
// // Join the errors and requests streams
-// errors.join(requests)
+// errors
+// |join(requests)
// // Provide prefix names for the fields of the data points.
// .as('errors', 'requests')
// // points that are within 1 second are considered the same time.
@@ -35,7 +38,7 @@ import (
// .streamName('error_rate')
// // Both the "value" fields from each parent have been prefixed
// // with the respective names 'errors' and 'requests'.
-// .eval(lambda: "errors.value" / "requests.value"))
+// |eval(lambda: "errors.value" / "requests.value"))
// .as('rate')
// ...
//
@@ -48,11 +51,11 @@ type JoinNode struct {
// Names[1] corresponds to the left parent
// Names[0] corresponds to the right parent
// tick:ignore
- Names []string
+ Names []string `tick:"As"`
// The dimensions on which to join
// tick:ignore
- Dimensions []string
+ Dimensions []string `tick:"On"`
// The name of this new joined data stream.
// If empty the name of the left parent is used.
@@ -105,14 +108,19 @@ func (j *JoinNode) As(names ...string) *JoinNode {
// You want to calculate the percentage of the total building power consumed by each floor.
//
// Example:
-// var buidling = stream.from().measurement('building_power')
-// .groupBy('building')
-// var floor = stream.from().measurement('floor_power')
-// .groupBy('building', 'floor')
-// building.join(floor)
-// .as('building', 'floor')
-// .on('building')
-// .eval(lambda: "floor.value" / "building.value")
// var building = stream
+// |from()
+// .measurement('building_power')
+// .groupBy('building')
+// var floor = stream
+// |from()
+// .measurement('floor_power')
+// .groupBy('building', 'floor')
+// building
+// |join(floor)
+// .as('building', 'floor')
+// .on('building')
+// |eval(lambda: "floor.value" / "building.value")
// ... // Values here are grouped by 'building' and 'floor'
//
// tick:property
diff --git a/pipeline/log.go b/pipeline/log.go
index 34b893215..8fce99f3a 100644
--- a/pipeline/log.go
+++ b/pipeline/log.go
@@ -4,9 +4,11 @@ package pipeline
//
// Example:
// stream.from()...
-// .window().period(10s).every(10s)
-// .log()
-// .count('value')
+// |window()
+// .period(10s)
+// .every(10s)
+// |log()
+// |count('value')
//
type LogNode struct {
chainnode
diff --git a/pipeline/map_reduce.go b/pipeline/map_reduce.go
index d22ec93ac..15e8f4e41 100644
--- a/pipeline/map_reduce.go
+++ b/pipeline/map_reduce.go
@@ -18,11 +18,11 @@ type MapReduceInfo struct {
//
// Example:
// stream
-// .window()
+// |window()
// .period(10s)
// .every(10s)
// // Sum the values for each 10s window of data.
-// .sum('value')
+// |sum('value')
// ...
type MapNode struct {
chainnode
@@ -49,11 +49,11 @@ func newMapNode(wants EdgeType, i interface{}) *MapNode {
//
// Example:
// stream
-// .window()
+// |window()
// .period(10s)
// .every(10s)
// // Sum the values for each 10s window of data.
-// .sum('value')
+// |sum('value')
// ...
type ReduceNode struct {
chainnode
@@ -64,7 +64,7 @@ type ReduceNode struct {
// Whether to use the max time or the
// time of the selected point
// tick:ignore
- PointTimes bool
+ PointTimes bool `tick:"UsePointTimes"`
// The name of the field, defaults to the name of
// MR function used (i.e. influxql.mean -> 'mean')
diff --git a/pipeline/node.go b/pipeline/node.go
index 33ed966ab..cc455ec19 100644
--- a/pipeline/node.go
+++ b/pipeline/node.go
@@ -203,45 +203,55 @@ const intervalMarker = "INTERVAL"
// - Expressions -- optional list of expressions to also evaluate. Useful for time of day alerting.
//
// Example:
-// var data = stream.from()...
+// var data = stream
+// |from()...
// // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s.
-// data.deadman(100.0, 10s)
+// data
+// |deadman(100.0, 10s)
// //Do normal processing of data
-// data....
+// data...
//
// The above is equivalent to this
// Example:
-// var data = stream.from()...
+// var data = stream
+// |from()...
// // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s.
-// data.stats(10s)
-// .derivative('collected')
-// .unit(10s)
-// .nonNegative()
-// .alert()
-// .id('node \'stream0\' in task \'{{ .TaskName }}\'')
-// .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "collected" | printf "%0.3f" }} points/10s.')
-// .crit(lamdba: "collected" <= 100.0)
+// data
+// |stats(10s)
+// |derivative('collected')
+// .unit(10s)
+// .nonNegative()
+// |alert()
+// .id('node \'stream0\' in task \'{{ .TaskName }}\'')
+// .message('{{ .ID }} is {{ if eq .Level "OK" }}alive{{ else }}dead{{ end }}: {{ index .Fields "collected" | printf "%0.3f" }} points/10s.')
// .crit(lambda: "collected" <= 100.0)
// //Do normal processing of data
-// data....
+// data...
//
// The `id` and `message` alert properties can be configured globally via the 'deadman' configuration section.
//
// Since the AlertNode is the last piece it can be further modified as normal.
// Example:
-// var data = stream.from()...
+// var data = stream
+// |from()...
// // Trigger critical alert if the throughput drops below 100 points per 1s and checked every 10s.
-// data.deadman(100.0, 10s).slack().channel('#dead_tasks')
+// data
+// |deadman(100.0, 10s)
+// .slack()
+// .channel('#dead_tasks')
// //Do normal processing of data
-// data....
+// data...
//
// You can specify additional lambda expressions to further constrain when the deadman's switch is triggered.
// Example:
-// var data = stream.from()...
+// var data = stream
+// |from()...
// // Trigger critical alert if the throughput drops below 100 points per 10s and checked every 10s.
// // Only trigger the alert if the time of day is between 8am-5pm.
-// data.deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17)
+// data
+// |deadman(100.0, 10s, lambda: hour("time") >= 8 AND hour("time") <= 17)
// //Do normal processing of data
-// data....
+// data...
//
func (n *node) Deadman(threshold float64, interval time.Duration, expr ...tick.Node) *AlertNode {
dn := n.Stats(interval).
@@ -351,7 +361,7 @@ func (n *chainnode) Eval(expressions ...tick.Node) *EvalNode {
//
// Can pass literal * to group by all dimensions.
// Example:
-// .groupBy(*)
+// |groupBy(*)
//
func (n *chainnode) GroupBy(tag ...interface{}) *GroupByNode {
g := newGroupByNode(n.provides, tag)
diff --git a/pipeline/sample.go b/pipeline/sample.go
index f616f5ebc..450e4f1d4 100644
--- a/pipeline/sample.go
+++ b/pipeline/sample.go
@@ -8,14 +8,14 @@ import (
// One point will be emitted every count or duration specified.
//
// Example:
-// stream.
-// .sample(3)
+// stream
+// |sample(3)
//
// Keep every third data point or batch.
//
// Example:
-// stream.
-// .sample(10s)
+// stream
+// |sample(10s)
//
// Keep only samples that land on the 10s boundary.
// See StreamNode.Truncate, BatchNode.GroupBy time or WindowNode.Align
@@ -23,9 +23,9 @@ import (
type SampleNode struct {
chainnode
- // Keep every Count point or batch
+ // Keep every N point or batch
// tick:ignore
- Count int64
+ N int64
// Keep one point or batch every Duration
// tick:ignore
@@ -33,11 +33,11 @@ type SampleNode struct {
}
func newSampleNode(wants EdgeType, rate interface{}) *SampleNode {
- var c int64
+ var n int64
var d time.Duration
switch r := rate.(type) {
case int64:
- c = r
+ n = r
case time.Duration:
d = r
default:
@@ -46,7 +46,7 @@ func newSampleNode(wants EdgeType, rate interface{}) *SampleNode {
return &SampleNode{
chainnode: newBasicChainNode("sample", wants, wants),
- Count: c,
+ N: n,
Duration: d,
}
}
diff --git a/pipeline/shift.go b/pipeline/shift.go
index 670cfc39e..761914ab2 100644
--- a/pipeline/shift.go
+++ b/pipeline/shift.go
@@ -9,13 +9,13 @@ import (
//
// Example:
// stream
-// .shift(5m)
+// |shift(5m)
//
// Shift all data points 5m forward in time.
//
// Example:
// stream
-// .shift(-10s)
+// |shift(-10s)
//
// Shift all data points 10s backward in time.
type ShiftNode struct {
diff --git a/pipeline/stats.go b/pipeline/stats.go
index c6825d354..741a573f6 100644
--- a/pipeline/stats.go
+++ b/pipeline/stats.go
@@ -22,11 +22,14 @@ import "time"
// are considered.
//
// Example:
-// var data = stream.from()...
+// var data = stream
+// |from()...
// // Emit statistics every 1 minute and cache them via the HTTP API.
-// data.stats(1m).httpOut('stats')
+// data
+// |stats(1m)
+// |httpOut('stats')
// // Continue normal processing of the data stream
-// data....
+// data...
//
// WARNING: It is not recommended to join the stats stream with the original data stream.
// Since they operate on different clocks you could potentially create a deadlock.
diff --git a/pipeline/stream.go b/pipeline/stream.go
index 58b11df45..d7e33108d 100644
--- a/pipeline/stream.go
+++ b/pipeline/stream.go
@@ -33,15 +33,18 @@ func newSourceStreamNode() *SourceStreamNode {
// Example:
// // Select the 'cpu' measurement from just the database 'mydb'
// // and retention policy 'myrp'.
-// var cpu = stream.from()
-// .database('mydb')
-// .retentionPolicy('myrp')
-// .measurement('cpu')
+// var cpu = stream
+// |from()
+// .database('mydb')
+// .retentionPolicy('myrp')
+// .measurement('cpu')
// // Select the 'load' measurement from any database and retention policy.
-// var load = stream.from()
-// .measurement('load')
+// var load = stream
+// |from()
+// .measurement('load')
// // Join cpu and load streams and do further processing.
-// cpu.join(load)
+// cpu
+// |join(load)
// .as('cpu', 'load')
// ...
//
@@ -56,12 +59,12 @@ func (s *SourceStreamNode) From() *StreamNode {
//
// Example:
// stream
-// .from()
+// |from()
// .database('mydb')
// .retentionPolicy('myrp')
// .measurement('mymeasurement')
// .where(lambda: "host" =~ /logger\d+/)
-// .window()
+// |window()
// ...
//
// The above example selects only data points from the database `mydb`
@@ -71,11 +74,11 @@ type StreamNode struct {
chainnode
// An expression to filter the data stream.
// tick:ignore
- Expression tick.Node
+ Expression tick.Node `tick:"Where"`
// The dimensions by which to group to the data.
// tick:ignore
- Dimensions []interface{}
+ Dimensions []interface{} `tick:"GroupBy"`
// The database name.
// If empty any database will be used.
@@ -93,7 +96,8 @@ type StreamNode struct {
// Helpful to ensure data points land on specfic boundaries
// Example:
// stream
- // .from().measurement('mydata')
+ // |from()
+ // .measurement('mydata')
// .truncate(1s)
//
// All incoming data will be truncated to 1 second resolution.
@@ -114,15 +118,18 @@ func newStreamNode() *StreamNode {
// Example:
// // Select the 'cpu' measurement from just the database 'mydb'
// // and retention policy 'myrp'.
-// var cpu = stream.from()
-// .database('mydb')
-// .retentionPolicy('myrp')
-// .measurement('cpu')
+// var cpu = stream
+// |from()
+// .database('mydb')
+// .retentionPolicy('myrp')
+// .measurement('cpu')
// // Select the 'load' measurement from any database and retention policy.
-// var load = stream.from()
-// .measurement('load')
+// var load = stream
+// |from()
+// .measurement('load')
// // Join cpu and load streams and do further processing.
-// cpu.join(load)
+// cpu
+// |join(load)
// .as('cpu', 'load')
// ...
//
@@ -141,32 +148,36 @@ func (s *StreamNode) From() *StreamNode {
//
// Example:
// stream
-// .from()
+// |from()
// .where(lambda: condition1)
// .where(lambda: condition2)
//
// The above is equivalent to this
// Example:
// stream
-// .from()
+// |from()
// .where(lambda: condition1 AND condition2)
//
//
-// NOTE: Becareful to always use `.from` if you want multiple different streams.
+// NOTE: Be careful to always use `|from` if you want multiple different streams.
//
// Example:
-// var data = stream.from().measurement('cpu')
-// var total = data.where(lambda: "cpu" == 'cpu-total')
-// var others = data.where(lambda: "cpu" != 'cpu-total')
+// var data = stream
+// |from()
+// .measurement('cpu')
+// var total = data
+// .where(lambda: "cpu" == 'cpu-total')
+// var others = data
+// .where(lambda: "cpu" != 'cpu-total')
//
// The example above is equivalent to the example below,
// which is obviously not what was intended.
//
// Example:
// var data = stream
-// .from()
-// .measurement('cpu')
-// .where(lambda: "cpu" == 'cpu-total' AND "cpu" != 'cpu-total')
+// |from()
+// .measurement('cpu')
+// .where(lambda: "cpu" == 'cpu-total' AND "cpu" != 'cpu-total')
// var total = data
// var others = total
//
@@ -174,9 +185,17 @@ func (s *StreamNode) From() *StreamNode {
// a different subset of the original stream.
//
// Example:
-// var data = stream.from().measurement('cpu')
-// var total = stream.from().measurement('cpu').where(lambda: "cpu" == 'cpu-total')
-// var others = stream.from().measurement('cpu').where(lambda: "cpu" != 'cpu-total')
+// var data = stream
+// |from()
+// .measurement('cpu')
+// var total = stream
+// |from()
+// .measurement('cpu')
+// .where(lambda: "cpu" == 'cpu-total')
+// var others = stream
+// |from()
+// .measurement('cpu')
+// .where(lambda: "cpu" != 'cpu-total')
//
//
// If empty then all data points are considered to match.
@@ -198,7 +217,9 @@ func (s *StreamNode) Where(expression tick.Node) *StreamNode {
//
// Can pass literal * to group by all dimensions.
// Example:
-// .groupBy(*)
+// stream
+// |from()
+// .groupBy(*)
//
func (s *StreamNode) GroupBy(tag ...interface{}) *StreamNode {
s.Dimensions = tag
diff --git a/pipeline/udf.go b/pipeline/udf.go
index 75f4eb2b6..59cff6cd4 100644
--- a/pipeline/udf.go
+++ b/pipeline/udf.go
@@ -36,12 +36,12 @@ import (
// // The UDF can define what its options are and then can be
// // invoked via a TICKscript like so:
// stream
-// .from()...
-// .movingAverage()
+// |from()...
+// |movingAverage()
// .field('value')
// .size(100)
// .as('mavg')
-// .httpOut('movingaverage')
+// |httpOut('movingaverage')
//
// NOTE: The UDF process runs as the same user as the Kapacitor daemon.
// As a result make the user is properly secured as well as the configuration file.
@@ -79,7 +79,7 @@ func NewUDF(
Timeout: timeout,
options: options,
}
- udf.describer = tick.NewReflectionDescriber(udf)
+ udf.describer, _ = tick.NewReflectionDescriber(udf)
parent.linkChild(udf)
return udf
}
@@ -90,16 +90,31 @@ func (u *UDFNode) Desc() string {
}
// tick:ignore
-func (u *UDFNode) HasMethod(name string) bool {
+func (u *UDFNode) HasChainMethod(name string) bool {
+ return u.describer.HasChainMethod(name)
+}
+
+// tick:ignore
+func (u *UDFNode) CallChainMethod(name string, args ...interface{}) (interface{}, error) {
+ return u.describer.CallChainMethod(name, args...)
+}
+
+// tick:ignore
+func (u *UDFNode) HasProperty(name string) bool {
_, ok := u.options[name]
if ok {
return ok
}
- return u.describer.HasMethod(name)
+ return u.describer.HasProperty(name)
+}
+
+// tick:ignore
+func (u *UDFNode) Property(name string) interface{} {
+ return u.describer.Property(name)
}
// tick:ignore
-func (u *UDFNode) CallMethod(name string, args ...interface{}) (interface{}, error) {
+func (u *UDFNode) SetProperty(name string, args ...interface{}) (interface{}, error) {
opt, ok := u.options[name]
if ok {
if got, exp := len(args), len(opt.ValueTypes); got != exp {
@@ -135,20 +150,5 @@ func (u *UDFNode) CallMethod(name string, args ...interface{}) (interface{}, err
})
return u, nil
}
- return u.describer.CallMethod(name, args...)
-}
-
-// tick:ignore
-func (u *UDFNode) HasProperty(name string) bool {
- return u.describer.HasProperty(name)
-}
-
-// tick:ignore
-func (u *UDFNode) Property(name string) interface{} {
- return u.describer.Property(name)
-}
-
-// tick:ignore
-func (u *UDFNode) SetProperty(name string, value interface{}) error {
- return u.describer.SetProperty(name, value)
+ return u.describer.SetProperty(name, args...)
}
diff --git a/pipeline/union.go b/pipeline/union.go
index fb9618afc..ecc3539e5 100644
--- a/pipeline/union.go
+++ b/pipeline/union.go
@@ -6,11 +6,18 @@ package pipeline
// without modification.
//
// Example:
-// var logins = stream.from().measurement('logins')
-// var logouts = stream.from().measurement('logouts')
-// var frontpage = stream.from().measurement('frontpage')
+// var logins = stream
+// |from()
+// .measurement('logins')
+// var logouts = stream
+// |from()
+// .measurement('logouts')
+// var frontpage = stream
+// |from()
+// .measurement('frontpage')
// // Union all user actions into a single stream
-// logins.union(logouts, frontpage)
+// logins
+// |union(logouts, frontpage)
// .rename('user_actions')
// ...
//
diff --git a/pipeline/where.go b/pipeline/where.go
index c2ddf5ad4..99e3b134f 100644
--- a/pipeline/where.go
+++ b/pipeline/where.go
@@ -8,12 +8,13 @@ import (
//
// Example:
// var sums = stream
-// .groupBy('service', 'host')
-// .sum('value')
+// |from()
+// .groupBy('service', 'host')
+// |sum('value')
// //Watch particular host for issues.
// sums
-// .where(lambda: "host" == 'h001.example.com')
-// .alert()
+// |where(lambda: "host" == 'h001.example.com')
+// |alert()
// .crit(lambda: TRUE)
// .email().to('user@example.com')
//
diff --git a/pipeline/window.go b/pipeline/window.go
index 77f15f70d..f723bcffd 100644
--- a/pipeline/window.go
+++ b/pipeline/window.go
@@ -10,10 +10,10 @@ import (
//
// Example:
// stream
-// .window()
+// |window()
// .period(10m)
// .every(5m)
-// .httpOut('recent')
+// |httpOut('recent')
//
// The above windowing example emits a window to the pipeline every `5 minutes`
// and the window contains the last `10 minutes` worth of data.
@@ -30,7 +30,7 @@ type WindowNode struct {
Every time.Duration
// Wether to align the window edges with the zero time
// tick:ignore
- AlignFlag bool
+ AlignFlag bool `tick:"Align"`
}
func newWindowNode() *WindowNode {
diff --git a/sample.go b/sample.go
index 37e7294b4..d8bf63beb 100644
--- a/sample.go
+++ b/sample.go
@@ -26,7 +26,7 @@ func newSampleNode(et *ExecutingTask, n *pipeline.SampleNode, l *log.Logger) (*S
duration: n.Duration,
}
sn.node.runF = sn.runSample
- if n.Duration == 0 && n.Count == 0 {
+ if n.Duration == 0 && n.N == 0 {
return nil, errors.New("invalid sample rate: must be positive integer or duration")
}
return sn, nil
@@ -74,7 +74,7 @@ func (s *SampleNode) shouldKeep(group models.GroupID, t time.Time) bool {
return t.Equal(keepTime)
} else {
count := s.counts[group]
- keep := count%s.s.Count == 0
+ keep := count%s.s.N == 0
count++
s.counts[group] = count
return keep
diff --git a/services/task_store/service.go b/services/task_store/service.go
index 5bbed6957..65ca5a2c0 100644
--- a/services/task_store/service.go
+++ b/services/task_store/service.go
@@ -19,6 +19,7 @@ import (
"github.com/influxdata/influxdb/influxql"
"github.com/influxdata/kapacitor"
"github.com/influxdata/kapacitor/services/httpd"
+ "github.com/influxdata/kapacitor/tick"
)
const taskDB = "task.db"
@@ -472,8 +473,15 @@ func (ts *Service) handleDisable(w http.ResponseWriter, r *http.Request) {
func (ts *Service) Save(task *rawTask) error {
+ // Format TICKscript
+ formatted, err := tick.Format(task.TICKscript)
+ if err != nil {
+ return err
+ }
+ task.TICKscript = formatted
+
// Validate task
- _, err := ts.TaskMaster.NewTask(task.Name,
+ _, err = ts.TaskMaster.NewTask(task.Name,
task.TICKscript,
task.Type,
task.DBRPs,
diff --git a/tick/TICKscript.md b/tick/TICKscript.md
index c9c28be76..d0ebab464 100644
--- a/tick/TICKscript.md
+++ b/tick/TICKscript.md
@@ -11,7 +11,8 @@ The TICKscript language is an invocation chaining language used to define data p
Notation
-------
-The syntax is specified using Extended Backus-Naur Form (“EBNF”). EBNF is the same notation used in the [Go](http://golang.org/) programming language specification, which can be found [here](https://golang.org/ref/spec).
+The syntax is specified using Extended Backus-Naur Form (“EBNF”).
+EBNF is the same notation used in the [Go](http://golang.org/) programming language specification, which can be found [here](https://golang.org/ref/spec).
```
Production = production_name "=" [ Expression ] "." .
@@ -60,9 +61,9 @@ operator_lit = "+" | "-" | "*" | "/" | "==" | "!=" |
Program = Statement { Statement } .
Statement = Declaration | Expression .
-Declaration = "var" identifier "=" Expression .
+Declaration = "var" identifier "=" Expression .
Expression = identifier { Chain } | Function { Chain } | Primary .
-Chain = "." Function { Chain} | "." identifier { Chain } .
+Chain = "|" Function { Chain } | "." Function { Chain} | "." identifier { Chain } .
Function = identifier "(" Parameters ")" .
Parameters = { Parameter "," } [ Parameter ] .
Parameter = Expression | "lambda:" LambdaExpr | Primary .
@@ -70,10 +71,10 @@ Primary = "(" LambdaExpr ")" | number_lit | string_lit |
boolean_lit | duration_lit | regex_lit | star_lit |
LFunc | identifier | Reference | "-" Primary | "!" Primary .
Reference = `"` { unicode_char } `"` .
-LambdaExpr = Primary operator_lit Primary .
+LambdaExpr = Primary operator_lit Primary .
LFunc = identifier "(" LParameters ")"
-LParameters = { LParameter "," } [ LParameter ] .
-LParameter = LambdaExpr | Primary .
+LParameters = { LParameter "," } [ LParameter ] .
+LParameter = LambdaExpr | Primary .
```
diff --git a/tick/cmd/tickdoc/config.go b/tick/cmd/tickdoc/config.go
new file mode 100644
index 000000000..8032973a1
--- /dev/null
+++ b/tick/cmd/tickdoc/config.go
@@ -0,0 +1,14 @@
+package main
+
+import "text/template"
+
+type Config struct {
+ Root string `toml:"root"`
+ PageHeader string `toml:"page-header"`
+ IndexWidth int `toml:"index-width"`
+ Weights map[string]int `toml:"weights"`
+ ChainMethodDesc string `toml:"chain-method-desc"`
+ PropertyMethodDesc string `toml:"property-method-desc"`
+
+ headerTemplate *template.Template
+}
diff --git a/tick/cmd/tickdoc/main.go b/tick/cmd/tickdoc/main.go
index 76dd1d554..e66b6de54 100644
--- a/tick/cmd/tickdoc/main.go
+++ b/tick/cmd/tickdoc/main.go
@@ -27,6 +27,7 @@ package main
import (
"bufio"
"bytes"
+ "flag"
"fmt"
"go/ast"
"go/parser"
@@ -38,19 +39,15 @@ import (
"regexp"
"sort"
"strings"
+ "text/template"
"unicode"
+ "github.com/naoina/toml"
"github.com/serenize/snaker"
"github.com/shurcooL/markdownfmt/markdown"
)
// The weight difference between two pages.
-const indexWidth = 10
-
-var specialWeights = map[string]int{
- "BatchNode": 4,
- "StreamNode": 5,
-}
const tickIgnore = "tick:ignore"
const tickProperty = "tick:property"
@@ -59,19 +56,37 @@ const tickLang = "javascript"
var tickEmbedded = regexp.MustCompile(`^tick:embedded:(\w+Node).(\w+)$`)
-var absPath string
+var configPath = flag.String("config", "tickdoc.conf", "path to tickdoc configuration file.")
+
+var config Config
+
+var usageStr = `Usage: %s [options] [package dir] [output dir]
+
+Options:
+`
+
+func usage() {
+ fmt.Fprintf(os.Stderr, usageStr, os.Args[0])
+ flag.PrintDefaults()
+}
func main() {
- if len(os.Args) != 4 {
- fmt.Println("Usage: tickdoc absPath path/to/golang/package output/dir")
- fmt.Println()
- fmt.Println("absPath - the absolute path of rendered documentation, used to generate links.")
+ flag.Usage = usage
+ flag.Parse()
+ args := flag.Args()
+
+ if len(args) != 2 {
+ flag.Usage()
os.Exit(1)
}
- absPath = os.Args[1]
- dir := os.Args[2]
- out := os.Args[3]
+ dir := args[0]
+ out := args[1]
+ // Decode config
+ err := decodeConfig(*configPath)
+ if err != nil {
+ log.Fatal(err)
+ }
fset := token.NewFileSet() // positions are relative to fset
@@ -119,8 +134,8 @@ func main() {
for i, name := range ordered {
var buf bytes.Buffer
n := nodes[name]
- weight := (i + 1) * indexWidth
- if w, ok := specialWeights[name]; ok {
+ weight := (i + 1) * config.IndexWidth
+ if w, ok := config.Weights[name]; ok {
weight = w
}
n.Render(&buf, r, nodes, weight)
@@ -135,6 +150,23 @@ func main() {
}
}
+func decodeConfig(path string) error {
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ dec := toml.NewDecoder(f)
+ err = dec.Decode(&config)
+ if err != nil {
+ return err
+ }
+ config.headerTemplate, err = template.New("header").Parse(config.PageHeader)
+ if err != nil {
+ return fmt.Errorf("invalid page header template: %s", err)
+ }
+ return nil
+}
+
func handleGenDecl(nodes map[string]*Node, decl *ast.GenDecl) {
if shouldIgnore(decl.Doc) {
return
@@ -312,7 +344,7 @@ func nameToTickName(name string) string {
}
func nodeNameToLink(name string) string {
- return fmt.Sprintf("%s/%s/", absPath, snaker.CamelToSnake(name))
+ return fmt.Sprintf("%s/%s/", config.Root, snaker.CamelToSnake(name))
}
func methodNameToLink(node, name string) string {
@@ -442,26 +474,21 @@ func (n *Node) Embed(nodes map[string]*Node) error {
return nil
}
+type headerInfo struct {
+ Title string
+ Name string
+ Identifier string
+ Weight int
+}
+
func (n *Node) Render(buf *bytes.Buffer, r Renderer, nodes map[string]*Node, weight int) error {
- header := fmt.Sprintf(`---
-title: %s
-note: Auto generated by tickdoc
-
-menu:
- kapacitor_011:
- name: %s
- identifier: %s
- weight: %d
- parent: nodes
----
-`,
- n.Name,
- strings.Replace(n.Name, "Node", "", 1),
- snaker.CamelToSnake(n.Name),
- weight,
- )
-
- buf.Write([]byte(header))
+ info := headerInfo{
+ Title: n.Name,
+ Name: strings.Replace(n.Name, "Node", "", 1),
+ Identifier: snaker.CamelToSnake(n.Name),
+ Weight: weight,
+ }
+ config.headerTemplate.Execute(buf, info)
renderDoc(buf, nodes, r, n.Doc)
@@ -502,7 +529,7 @@ menu:
if len(n.Properties) > 0 {
r.Header(buf, func() bool { buf.Write([]byte("Properties")); return true }, 2, "")
r.Paragraph(buf, func() bool {
- buf.Write([]byte("Property methods modify state on the calling node. They do not add another node to the pipeline, and always return a reference to the calling node."))
+ buf.Write([]byte(config.PropertyMethodDesc))
return true
})
renderProperties(buf, r, n.Properties, nodes, 3, "node", "")
@@ -512,7 +539,7 @@ menu:
if len(methods) > 0 {
r.Header(buf, func() bool { buf.Write([]byte("Chaining Methods")); return true }, 2, "")
r.Paragraph(buf, func() bool {
- buf.Write([]byte("Chaining methods create a new node in the pipeline as a child of the calling node. They do not modify the calling node."))
+ buf.Write([]byte(config.ChainMethodDesc))
return true
})
for _, name := range methods {
@@ -585,7 +612,7 @@ func (m *Method) Render(buf *bytes.Buffer, r Renderer, nodes map[string]*Node) e
renderDoc(buf, nodes, r, m.Doc)
var code bytes.Buffer
- code.Write([]byte("node."))
+ code.Write([]byte("node|"))
code.Write([]byte(nameToTickName(m.Name)))
code.Write([]byte("("))
for i, param := range m.Params {
diff --git a/tick/cmd/tickfmt/main.go b/tick/cmd/tickfmt/main.go
new file mode 100644
index 000000000..bd3cefdba
--- /dev/null
+++ b/tick/cmd/tickfmt/main.go
@@ -0,0 +1,113 @@
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+
+ "github.com/influxdata/kapacitor/tick"
+)
+
+const backupExt = ".orig"
+
+var writeFlag = flag.Bool("w", false, "write formatted contents to source file instead of STDOUT.")
+var backupFlag = flag.Bool("b", false, fmt.Sprintf("create backup files with extension '%s'.", backupExt))
+
+func usage() {
+ message := `Usage: %s [options] [path...]
+
+ If no source files are provided reads from STDIN.
+
+Options:
+`
+ fmt.Fprintf(os.Stderr, message, os.Args[0])
+ flag.PrintDefaults()
+}
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+ args := flag.Args()
+ if len(args) == 0 {
+ if *writeFlag {
+ fmt.Fprintln(os.Stderr, "Cannot write source files, none given.")
+ flag.Usage()
+ os.Exit(2)
+ }
+ args = []string{"-"}
+ }
+ for _, path := range args {
+ path = filepath.Clean(path)
+ err := formatFile(path, *writeFlag, *backupFlag)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+func formatFile(filename string, write, backup bool) error {
+ data, err := readFile(filename)
+ if err != nil {
+ return err
+ }
+ formatted, err := tick.Format(data)
+ if err != nil {
+ return err
+ }
+ if write {
+ dir := filepath.Dir(filename)
+ tmp, err := writeTmpFile(dir, formatted)
+ if err != nil {
+ return err
+ }
+ defer os.Remove(tmp)
+ if backup {
+ err := os.Rename(filename, filename+backupExt)
+ if err != nil {
+ return err
+ }
+ }
+ err = os.Rename(tmp, filename)
+ if err != nil {
+ return err
+ }
+ } else {
+ _, err := os.Stdout.Write([]byte(formatted))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func readFile(filename string) (string, error) {
+ var f *os.File
+ if filename == "-" {
+ f = os.Stdin
+ } else {
+ var err error
+ f, err = os.Open(filename)
+ if err != nil {
+			return "", err
+ }
+ }
+ defer f.Close()
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+		return "", err
+ }
+ return string(data), nil
+}
+
+func writeTmpFile(dir, contents string) (string, error) {
+ f, err := ioutil.TempFile(dir, "tickfmt")
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ _, err = f.Write([]byte(contents))
+ return f.Name(), err
+}
diff --git a/tick/eval.go b/tick/eval.go
index b51d0875e..be89ce930 100644
--- a/tick/eval.go
+++ b/tick/eval.go
@@ -1,17 +1,33 @@
-// A reflection based evaluation of an AST.
package tick
import (
"fmt"
+ "log"
+ "os"
"reflect"
"regexp"
"runtime"
"strings"
+ "sync"
"time"
"unicode"
"unicode/utf8"
)
+var mu sync.Mutex
+var logger = log.New(os.Stderr, "[tick] ", log.LstdFlags)
+
+func getLogger() *log.Logger {
+ mu.Lock()
+ defer mu.Unlock()
+ return logger
+}
+func SetLogger(l *log.Logger) {
+ mu.Lock()
+ defer mu.Unlock()
+ logger = l
+}
+
// Interface for interacting with objects.
// If an object does not self describe via this interface
// than a reflection based implemenation will be used.
@@ -19,12 +35,12 @@ type SelfDescriber interface {
//A description the object
Desc() string
- HasMethod(name string) bool
- CallMethod(name string, args ...interface{}) (interface{}, error)
+ HasChainMethod(name string) bool
+ CallChainMethod(name string, args ...interface{}) (interface{}, error)
HasProperty(name string) bool
Property(name string) interface{}
- SetProperty(name string, arg interface{}) error
+ SetProperty(name string, args ...interface{}) (interface{}, error)
}
// Parse and evaluate a given script for the scope.
@@ -53,6 +69,18 @@ func Evaluate(script string, scope *Scope) (err error) {
return eval(root, scope, stck)
}
+func errorf(p Position, fmtStr string, args ...interface{}) error {
+ lineStr := fmt.Sprintf("line %d char %d: %s", p.Line(), p.Char(), fmtStr)
+ return fmt.Errorf(lineStr, args...)
+}
+
+func wrapError(p Position, err error) error {
+ if err == nil {
+ return nil
+ }
+ return fmt.Errorf("line %d char %d: %s", p.Line(), p.Char(), err.Error())
+}
+
// Evaluate a node using a stack machine in a given scope
func eval(n Node, scope *Scope, stck *stack) (err error) {
switch node := n.(type) {
@@ -75,7 +103,7 @@ func eval(n Node, scope *Scope, stck *stack) (err error) {
if err != nil {
return
}
- err := evalUnary(node.Operator, scope, stck)
+ err := evalUnary(node, node.Operator, scope, stck)
if err != nil {
return err
}
@@ -95,7 +123,20 @@ func eval(n Node, scope *Scope, stck *stack) (err error) {
return
}
stck.Push(node.Node)
- case *BinaryNode:
+ case *DeclarationNode:
+ err = eval(node.Left, scope, stck)
+ if err != nil {
+ return
+ }
+ err = eval(node.Right, scope, stck)
+ if err != nil {
+ return
+ }
+ err = evalDeclaration(scope, stck)
+ if err != nil {
+ return
+ }
+ case *ChainNode:
err = eval(node.Left, scope, stck)
if err != nil {
return
@@ -104,7 +145,7 @@ func eval(n Node, scope *Scope, stck *stack) (err error) {
if err != nil {
return
}
- err = evalBinary(node.Operator, scope, stck)
+ err = evalChain(node, scope, stck)
if err != nil {
return
}
@@ -154,7 +195,7 @@ func eval(n Node, scope *Scope, stck *stack) (err error) {
return nil
}
-func evalUnary(op tokenType, scope *Scope, stck *stack) error {
+func evalUnary(p Position, op tokenType, scope *Scope, stck *stack) error {
v := stck.Pop()
switch op {
case TokenMinus:
@@ -165,15 +206,15 @@ func evalUnary(op tokenType, scope *Scope, stck *stack) error {
}
v = value
}
- switch n := v.(type) {
+ switch num := v.(type) {
case float64:
- stck.Push(-1 * n)
+ stck.Push(-1 * num)
case int64:
- stck.Push(-1 * n)
+ stck.Push(-1 * num)
case time.Duration:
- stck.Push(-1 * n)
+ stck.Push(-1 * num)
default:
- return fmt.Errorf("invalid arugument to '-' %v", v)
+			return errorf(p, "invalid argument to '-' %v", v)
}
case TokenNot:
if ident, ok := v.(*IdentifierNode); ok {
@@ -186,53 +227,59 @@ func evalUnary(op tokenType, scope *Scope, stck *stack) error {
if b, ok := v.(bool); ok {
stck.Push(!b)
} else {
- return fmt.Errorf("invalid arugument to '!' %v", v)
+			return errorf(p, "invalid argument to '!' %v", v)
}
}
return nil
}
-func evalBinary(op tokenType, scope *Scope, stck *stack) error {
+func evalDeclaration(scope *Scope, stck *stack) error {
r := stck.Pop()
l := stck.Pop()
- switch op {
- case TokenAsgn:
- i := l.(*IdentifierNode)
- scope.Set(i.Ident, r)
- case TokenDot:
- // Resolve identifier
- if left, ok := l.(*IdentifierNode); ok {
+ i := l.(*IdentifierNode)
+ scope.Set(i.Ident, r)
+ return nil
+}
+
+func evalChain(p Position, scope *Scope, stck *stack) error {
+ r := stck.Pop()
+ l := stck.Pop()
+ // Resolve identifier
+ if left, ok := l.(*IdentifierNode); ok {
+ var err error
+ l, err = scope.Get(left.Ident)
+ if err != nil {
+ return err
+ }
+ }
+ switch right := r.(type) {
+ case unboundFunc:
+ ret, err := right(l)
+ if err != nil {
+ return err
+ }
+ stck.Push(ret)
+ case *IdentifierNode:
+ name := right.Ident
+
+ //Lookup field by name of left object
+ var describer SelfDescriber
+ if d, ok := l.(SelfDescriber); ok {
+ describer = d
+ } else {
var err error
- l, err = scope.Get(left.Ident)
+ describer, err = NewReflectionDescriber(l)
if err != nil {
- return err
+ return wrapError(p, err)
}
}
- switch right := r.(type) {
- case unboundFunc:
- ret, err := right(l)
- if err != nil {
- return err
- }
- stck.Push(ret)
- case *IdentifierNode:
- name := right.Ident
-
- //Lookup field by name of left object
- var describer SelfDescriber
- if d, ok := l.(SelfDescriber); ok {
- describer = d
- } else {
- describer = NewReflectionDescriber(l)
- }
- if describer.HasProperty(name) {
- stck.Push(describer.Property(name))
- } else {
- return fmt.Errorf("object %T has no property %s", l, name)
- }
- default:
- return fmt.Errorf("invalid right operand of type %T to '.' operator", r)
+ if describer.HasProperty(name) {
+ stck.Push(describer.Property(name))
+ } else {
+ return errorf(p, "object %T has no property %s", l, name)
}
+ default:
+ return errorf(p, "invalid right operand of type %T to '.' operator", r)
}
return nil
}
@@ -241,9 +288,9 @@ func evalFunc(f *FunctionNode, scope *Scope, stck *stack, args []interface{}) er
rec := func(obj interface{}, errp *error) {
e := recover()
if e != nil {
- *errp = fmt.Errorf("error calling func %q on obj %T: %v", f.Func, obj, e)
+			*errp = fmt.Errorf("line %d char %d: error calling func %q on obj %T: %v", f.Line(), f.Char(), f.Func, obj, e)
if strings.Contains((*errp).Error(), "*tick.ReferenceNode") && strings.Contains((*errp).Error(), "type string") {
- *errp = fmt.Errorf("cannot assign *tick.ReferenceNode to type string, did you use double quotes instead of single quotes?")
+				*errp = fmt.Errorf("line %d char %d: cannot assign *tick.ReferenceNode to type string, did you use double quotes instead of single quotes?", f.Line(), f.Char())
}
}
@@ -252,14 +299,18 @@ func evalFunc(f *FunctionNode, scope *Scope, stck *stack, args []interface{}) er
//Setup recover method if there is a panic during the method call
defer rec(obj, &err)
- if obj == nil {
+ if f.Type == globalFunc {
+ if obj != nil {
+				return nil, fmt.Errorf("line %d char %d: calling global function on object %T", f.Line(), f.Char(), obj)
+ }
// Object is nil, check for func in scope
fnc, _ := scope.Get(f.Func)
if fnc == nil {
- return nil, fmt.Errorf("no global function %q defined", f.Func)
+				return nil, fmt.Errorf("line %d char %d: no global function %q defined", f.Line(), f.Char(), f.Func)
}
method := reflect.ValueOf(fnc)
- return callMethodReflection(method, args)
+ o, err := callMethodReflection(method, args)
+ return o, wrapError(f, err)
}
// Get SelfDescriber
@@ -268,17 +319,47 @@ func evalFunc(f *FunctionNode, scope *Scope, stck *stack, args []interface{}) er
if d, ok := obj.(SelfDescriber); ok {
describer = d
} else {
- describer = NewReflectionDescriber(obj)
+ var err error
+ describer, err = NewReflectionDescriber(obj)
+ if err != nil {
+ return nil, wrapError(f, err)
+ }
}
- // Check for Method
- if describer.HasMethod(name) {
- return describer.CallMethod(name, args...)
+ // Call correct type of function
+ switch f.Type {
+ case chainFunc:
+ if describer.HasChainMethod(name) {
+ o, err := describer.CallChainMethod(name, args...)
+ return o, wrapError(f, err)
+ }
+ if describer.HasProperty(name) {
+ return nil, errorf(f, "no chaining method %q on %T, but property does exist. Use '.' operator instead: 'node.%s(..)'.", name, obj, name)
+ }
+ case propertyFunc:
+ if describer.HasProperty(name) {
+ o, err := describer.SetProperty(name, args...)
+ return o, wrapError(f, err)
+ }
+ if describer.HasChainMethod(name) {
+ getLogger().Printf("W! DEPRECATED Syntax line %d char %d: found use of '.' as chaining method. Please adopt new syntax 'node|%s(..)'.", f.Line(), f.Char(), name)
+ o, err := describer.CallChainMethod(name, args...)
+ return o, wrapError(f, err)
+ }
+ // Uncomment for 0.13 release, to finish deprecation of old syntax
+ //if describer.HasChainMethod(name) {
+ // return nil, errorf(f, "no property method %q on %T, but chaining method does exist. Use '|' operator instead: 'node|%s(..)'.", name, obj, name)
+ //}
+ default:
+ return nil, errorf(f, "unexpected function type %v on function %T.%s", f.Type, obj, name)
}
// Check for dynamic method.
dm := scope.DynamicMethod(name)
if dm != nil {
+ if f.Type != chainFunc {
+ getLogger().Printf("W! DEPRECATED Syntax line %d char %d: found use of '.' as chaining method. Please adopt new syntax 'node|%s(...)'.", f.Line(), f.Char(), name)
+ }
ret, err := dm(obj, args...)
if err != nil {
return nil, err
@@ -287,7 +368,7 @@ func evalFunc(f *FunctionNode, scope *Scope, stck *stack, args []interface{}) er
}
// Ran out of options...
- return nil, fmt.Errorf("No method or property %q on %s", name, describer.Desc())
+ return nil, errorf(f, "no method or property %q on %s", name, describer.Desc())
})
stck.Push(fnc)
return nil
@@ -296,10 +377,98 @@ func evalFunc(f *FunctionNode, scope *Scope, stck *stack, args []interface{}) er
// Wraps any object as a SelfDescriber using reflection.
type ReflectionDescriber struct {
obj interface{}
+ // Set of chain methods
+ chainMethods map[string]reflect.Value
+ // Set of methods that modify properties
+ propertyMethods map[string]reflect.Value
+ // Set of fields on obj that can be set
+ properties map[string]reflect.Value
}
-func NewReflectionDescriber(obj interface{}) *ReflectionDescriber {
- return &ReflectionDescriber{obj: obj}
+var structTagPattern = regexp.MustCompile(`tick:"(\w+)"`)
+
+func NewReflectionDescriber(obj interface{}) (*ReflectionDescriber, error) {
+ r := &ReflectionDescriber{
+ obj: obj,
+ }
+ rv := reflect.ValueOf(r.obj)
+ if !rv.IsValid() && rv.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("object is invalid %v of type %T", obj, obj)
+ }
+ rStructType := reflect.Indirect(rv).Type()
+ rRecvType := reflect.TypeOf(r.obj)
+ // Get all methods
+ r.chainMethods = make(map[string]reflect.Value, rRecvType.NumMethod())
+ for i := 0; i < rRecvType.NumMethod(); i++ {
+ method := rRecvType.Method(i)
+ if !rv.MethodByName(method.Name).IsValid() {
+ return nil, fmt.Errorf("invalid method %s on type %T", method.Name, r.obj)
+ }
+ r.chainMethods[method.Name] = rv.MethodByName(method.Name)
+ }
+
+ // Get all properties
+ var err error
+ r.properties, r.propertyMethods, err = getProperties(r.Desc(), rv, rStructType, r.chainMethods)
+ if err != nil {
+ return nil, err
+ }
+ return r, nil
+}
+
+// Get properties from a struct and populate properties and propertyMethods maps, while removing
+// and property methods from chainMethods.
+// Recurses up anonymous fields.
+func getProperties(desc string, rv reflect.Value, rStructType reflect.Type, chainMethods map[string]reflect.Value) (
+ map[string]reflect.Value,
+ map[string]reflect.Value,
+ error) {
+ properties := make(map[string]reflect.Value, rStructType.NumField())
+ propertyMethods := make(map[string]reflect.Value)
+ for i := 0; i < rStructType.NumField(); i++ {
+ property := rStructType.Field(i)
+ if property.Anonymous {
+ // Recursively get properties from anon fields
+ anonValue := reflect.Indirect(rv).Field(i)
+ anonType := reflect.Indirect(anonValue).Type()
+ props, propMethods, err := getProperties(desc, anonValue, anonType, chainMethods)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Update local maps
+ for k, v := range props {
+ if _, ok := properties[k]; !ok {
+ properties[k] = v
+ }
+ }
+ for k, v := range propMethods {
+ if _, ok := propertyMethods[k]; !ok {
+ propertyMethods[k] = v
+ }
+ }
+ continue
+ }
+ matches := structTagPattern.FindStringSubmatch(string(property.Tag))
+ if matches != nil && matches[1] != "" {
+ // Property is set via a property method.
+ methodName := matches[1]
+ method := rv.MethodByName(methodName)
+ if method.IsValid() {
+ propertyMethods[methodName] = method
+ // Remove property method from chainMethods.
+ delete(chainMethods, methodName)
+ } else {
+ return nil, nil, fmt.Errorf("referenced method %s for type %s is invalid", methodName, desc)
+ }
+ } else {
+ // Property is set directly via reflection.
+ field := reflect.Indirect(rv).FieldByName(property.Name)
+ if field.IsValid() && field.CanSet() {
+ properties[property.Name] = field
+ }
+ }
+ }
+ return properties, propertyMethods, nil
}
func (r *ReflectionDescriber) Desc() string {
@@ -308,76 +477,57 @@ func (r *ReflectionDescriber) Desc() string {
// Using reflection check if the object has the method or field.
// A field is a valid method because we can set it via reflection too.
-func (r *ReflectionDescriber) HasMethod(name string) bool {
+func (r *ReflectionDescriber) HasChainMethod(name string) bool {
name = capilatizeFirst(name)
- v := reflect.ValueOf(r.obj)
- if !v.IsValid() {
- return false
- }
- if v.MethodByName(name).IsValid() {
- return true
- }
- // Check for a field of the same name,
- // we can wrap setting it in a method.
- return r.HasProperty(name)
+ _, ok := r.chainMethods[name]
+ return ok
}
-func (r *ReflectionDescriber) CallMethod(name string, args ...interface{}) (interface{}, error) {
- name = capilatizeFirst(name)
- v := reflect.ValueOf(r.obj)
- if !v.IsValid() {
- return nil, fmt.Errorf("cannot get reflect.ValueOf %T", r.obj)
- }
-
+func (r *ReflectionDescriber) CallChainMethod(name string, args ...interface{}) (interface{}, error) {
// Check for a method and call it
- if method := v.MethodByName(name); method.IsValid() {
+ name = capilatizeFirst(name)
+ if method, ok := r.chainMethods[name]; ok {
return callMethodReflection(method, args)
}
-
- // Check for a field and set it
- if len(args) == 1 && r.HasProperty(name) {
- err := r.SetProperty(name, args[0])
- if err != nil {
- return nil, err
- }
- return r.obj, nil
- }
- return nil, fmt.Errorf("unknown method or field %s on %T", name, r.obj)
+ return nil, fmt.Errorf("unknown method %s on %T", name, r.obj)
}
// Using reflection check if the object has a field with the property name.
func (r *ReflectionDescriber) HasProperty(name string) bool {
name = capilatizeFirst(name)
- v := reflect.Indirect(reflect.ValueOf(r.obj))
- if v.Kind() == reflect.Struct {
- field := v.FieldByName(name)
- return field.IsValid() && field.CanSet()
+ _, ok := r.propertyMethods[name]
+ if ok {
+ return ok
}
- return false
+ _, ok = r.properties[name]
+ return ok
}
func (r *ReflectionDescriber) Property(name string) interface{} {
+ // Properties set by property methods cannot be read
name = capilatizeFirst(name)
- v := reflect.Indirect(reflect.ValueOf(r.obj))
- if v.Kind() == reflect.Struct {
- field := v.FieldByName(name)
- if field.IsValid() {
- return field.Interface()
- }
- }
- return nil
+ property := r.properties[name]
+ return property.Interface()
}
-func (r *ReflectionDescriber) SetProperty(name string, value interface{}) error {
- v := reflect.Indirect(reflect.ValueOf(r.obj))
- if v.Kind() == reflect.Struct {
- field := v.FieldByName(name)
- if field.IsValid() && field.CanSet() {
- field.Set(reflect.ValueOf(value))
- return nil
+func (r *ReflectionDescriber) SetProperty(name string, values ...interface{}) (interface{}, error) {
+ name = capilatizeFirst(name)
+ propertyMethod, ok := r.propertyMethods[name]
+ if ok {
+ return callMethodReflection(propertyMethod, values)
+ } else {
+ if len(values) == 1 {
+ property, ok := r.properties[name]
+ if ok {
+ v := reflect.ValueOf(values[0])
+ property.Set(v)
+ return r.obj, nil
+ }
+ } else {
+ return nil, fmt.Errorf("too many arguments to set property %s on %T", name, r.obj)
}
}
- return fmt.Errorf("no field %s on %T", name, r.obj)
+ return nil, fmt.Errorf("no property %s on %T", name, r.obj)
}
func callMethodReflection(method reflect.Value, args []interface{}) (interface{}, error) {
@@ -398,10 +548,8 @@ func callMethodReflection(method reflect.Value, args []interface{}) (interface{}
} else {
return ret[0].Interface(), nil
}
- } else {
- return nil, fmt.Errorf("functions must return a single value or (interface{}, error)")
}
-
+ return nil, fmt.Errorf("function must return a single value or (interface{}, error)")
}
// Capilatizes the first rune in the string
@@ -421,7 +569,7 @@ func resolveIdents(n Node, scope *Scope) Node {
if err != nil {
panic(err)
}
- return valueToLiteralNode(node.pos, v)
+ return valueToLiteralNode(node.position, v)
case *UnaryNode:
node.Node = resolveIdents(node.Node, scope)
case *BinaryNode:
@@ -440,41 +588,41 @@ func resolveIdents(n Node, scope *Scope) Node {
}
// Convert raw value to literal node, for all supported basic types.
-func valueToLiteralNode(pos pos, v interface{}) Node {
+func valueToLiteralNode(p position, v interface{}) Node {
switch value := v.(type) {
case bool:
return &BoolNode{
- pos: pos,
- Bool: value,
+ position: p,
+ Bool: value,
}
case int64:
return &NumberNode{
- pos: pos,
- IsInt: true,
- Int64: value,
+ position: p,
+ IsInt: true,
+ Int64: value,
}
case float64:
return &NumberNode{
- pos: pos,
- IsFloat: true,
- Float64: value,
+ position: p,
+ IsFloat: true,
+ Float64: value,
}
case time.Duration:
return &DurationNode{
- pos: pos,
- Dur: value,
+ position: p,
+ Dur: value,
}
case string:
return &StringNode{
- pos: pos,
- Literal: value,
+ position: p,
+ Literal: value,
}
case *regexp.Regexp:
return &RegexNode{
- pos: pos,
- Regex: value,
+ position: p,
+ Regex: value,
}
default:
- panic(fmt.Errorf("unsupported literal type %T", v))
+ panic(errorf(p, "unsupported literal type %T", v))
}
}
diff --git a/tick/eval_test.go b/tick/eval_test.go
index 06ed9d772..0eaa584fe 100644
--- a/tick/eval_test.go
+++ b/tick/eval_test.go
@@ -2,11 +2,11 @@ package tick_test
import (
"fmt"
+ "reflect"
"testing"
"time"
"github.com/influxdata/kapacitor/tick"
- "github.com/stretchr/testify/assert"
)
//Test structure for evaluating a DSL
@@ -23,7 +23,7 @@ type structB struct {
}
type structC struct {
- field1 string
+ field1 string `tick:"Options"`
field2 float64
field3 time.Duration
AggFunc aggFunc
@@ -70,17 +70,15 @@ func aggSum(values []float64) []float64 {
}
func TestEvaluate(t *testing.T) {
- assert := assert.New(t)
-
//Run a test that evaluates the DSL against the above structures.
script := `
-var s2 = a.structB()
+var s2 = a|structB()
.field1('f1')
.field2(42)
s2.field3(15m)
-s2.structC()
+s2|structC()
.options('c', 21.5, 7h)
.aggFunc(influxql.agg.sum)
`
@@ -106,24 +104,34 @@ s2.structC()
t.Fatal(err)
}
s2 := s2I.(*structB)
- assert.NotNil(s2)
- assert.Equal("f1", s2.Field1)
- assert.Equal(int64(42), s2.Field2)
- assert.Equal(time.Minute*15, s2.Field3)
-
- s3 := s2.c
- if assert.NotNil(s3) {
- assert.Equal("c", s3.field1)
- assert.Equal(21.5, s3.field2)
- assert.Equal(time.Hour*7, s3.field3)
- if assert.NotNil(s3.AggFunc) {
- assert.Equal([]float64{10.0}, s3.AggFunc([]float64{5, 5}))
- }
+ exp := structB{
+ Field1: "f1",
+ Field2: 42,
+ Field3: time.Minute * 15,
+ }
+
+ s3 := *s2.c
+ s2.c = nil
+ if !reflect.DeepEqual(*s2, exp) {
+ t.Errorf("unexpected s2 exp:%v got%v", exp, *s2)
+ }
+ c := structC{
+ field1: "c",
+ field2: 21.5,
+ field3: time.Hour * 7,
+ }
+ aggFunc := s3.AggFunc
+ s3.AggFunc = nil
+ if !reflect.DeepEqual(s3, c) {
+ t.Errorf("unexpected s3 exp:%v got%v", c, s3)
+ }
+ if exp, got := []float64{10.0}, aggFunc([]float64{5, 5}); !reflect.DeepEqual(exp, got) {
+ t.Errorf("unexpected s3.AggFunc exp:%v got%v", exp, got)
}
}
func TestEvaluate_DynamicMethod(t *testing.T) {
- script := `var x = a.dynamicMethod(1,'str', 10s).sad(FALSE)`
+ script := `var x = a|dynamicMethod(1,'str', 10s).sad(FALSE)`
scope := tick.NewScope()
a := &structA{}
@@ -238,3 +246,23 @@ var m = !n
}
}
+
+// Test that using the wrong chain operator fails
+func TestStrictEvaluate(t *testing.T) {
+ // Skip test until DEPRECATED syntax is removed
+ t.Skip()
+ script := `
+var s2 = a.structB()
+ .field1('f1')
+ .field2(42)
+`
+
+ scope := tick.NewScope()
+ a := &structA{}
+ scope.Set("a", a)
+
+ err := tick.Evaluate(script, scope)
+ if err == nil {
+ t.Fatal("expected error from Evaluate")
+ }
+}
diff --git a/tick/example_scope_test.go b/tick/example_scope_test.go
index e175cf372..e90a1467d 100644
--- a/tick/example_scope_test.go
+++ b/tick/example_scope_test.go
@@ -27,16 +27,16 @@ func ExampleEvaluate() {
parent.name('parent')
// Spawn a first child
-var child1 = parent.spawn()
+var child1 = parent|spawn()
// Name the first child
child1.name('child1')
//Spawn a grandchild and name it
-child1.spawn().name('grandchild')
+child1|spawn().name('grandchild')
//Spawn a second child and name it
-parent.spawn().name('child2')
+parent|spawn().name('child2')
`
scope := NewScope()
diff --git a/tick/fmt.go b/tick/fmt.go
new file mode 100644
index 000000000..2be76b240
--- /dev/null
+++ b/tick/fmt.go
@@ -0,0 +1,18 @@
+package tick
+
+import "bytes"
+
+// Indent string for formatted TICKscripts
+const indentStep = " "
+
+// Formats a TICKscript according to the standard.
+func Format(script string) (string, error) {
+ root, err := parse(script)
+ if err != nil {
+ return "", err
+ }
+ var buf bytes.Buffer
+ buf.Grow(len(script))
+ root.Format(&buf, "", false)
+ return buf.String(), nil
+}
diff --git a/tick/fmt_test.go b/tick/fmt_test.go
new file mode 100644
index 000000000..ab3e6f541
--- /dev/null
+++ b/tick/fmt_test.go
@@ -0,0 +1,307 @@
+package tick
+
+import "testing"
+
+func TestFormat(t *testing.T) {
+ testCases := []struct {
+ script string
+ exp string
+ }{
+ {
+ script: `var x = 1`,
+ exp: "var x = 1\n",
+ },
+ {
+ script: `var x=1`,
+ exp: "var x = 1\n",
+ },
+ {
+ script: `var x=stream()|window().period(10s).every(10s)`,
+ exp: `var x = stream()
+ |window()
+ .period(10s)
+ .every(10s)
+`,
+ },
+ {
+ script: `var x = stream()
+//Window data
+|window()
+// Period / Every 10s
+.period(10s).every(10s)`,
+ exp: `var x = stream()
+ // Window data
+ |window()
+ // Period / Every 10s
+ .period(10s)
+ .every(10s)
+`,
+ },
+ {
+ script: `var x = stream()
+|udf()
+ .option(
+ // Param 1
+ 1,
+ // Param 2
+ 2,
+ // Param 3
+ 3,
+ // Param 4
+ 4,
+ )
+`,
+ exp: `var x = stream()
+ |udf()
+ .option(
+ // Param 1
+ 1,
+ // Param 2
+ 2,
+ // Param 3
+ 3,
+ // Param 4
+ 4
+ )
+`,
+ },
+ {
+ script: `global(lambda: ("a" + (1)) / (( 4 +"b") * ("c")))`,
+ exp: "global(lambda: (\"a\" + 1) / ((4 + \"b\") * \"c\"))\n",
+ },
+ {
+ script: `global(lambda: (1 + 2 - 3 * 4 / 5) < (sin(6)) AND (TRUE OR FALSE))`,
+ exp: "global(lambda: (1 + 2 - 3 * 4 / 5) < sin(6) AND (TRUE OR FALSE))\n",
+ },
+ {
+ script: `global(lambda:
+(1 + 2 - 3 * 4 / 5)
+<
+(sin(6))
+AND
+(TRUE
+OR (FALSE
+AND TRUE)))`,
+ exp: `global(lambda: (1 + 2 - 3 * 4 / 5) <
+ sin(6) AND
+ (TRUE OR
+ (FALSE AND
+ TRUE)))
+`,
+ },
+ {
+ script: `global(lambda:
+// If this
+// is less than that
+(1 + 2 - 3 * 4 / 5)
+< (sin(6))
+AND
+// more comments.
+(TRUE OR FALSE), 'arg',)`,
+ exp: `global(
+ lambda:
+ // If this
+ // is less than that
+ (1 + 2 - 3 * 4 / 5) <
+ sin(6) AND
+ // more comments.
+ (TRUE OR FALSE),
+ 'arg'
+)
+`,
+ },
+ {
+ script: `// Comment all the things
+var
+x =
+stream()
+// 1
+|
+udf()
+// 2
+ .option(
+ // 3
+ 1,
+ // 4
+ 2.0,
+ // 5
+ 3h,
+ // 6
+ 'a',
+ )
+// 7
+|
+eval(
+// 8
+lambda:
+a * b + c
+,
+)
+// 9
+|
+groupBy(
+//10
+*
+)
+// 11
+`,
+ exp: `// Comment all the things
+var x = stream()
+ // 1
+ |udf()
+ // 2
+ .option(
+ // 3
+ 1,
+ // 4
+ 2.0,
+ // 5
+ 3h,
+ // 6
+ 'a'
+ )
+ // 7
+ |eval(
+ // 8
+ lambda: a * b + c
+ )
+ // 9
+ |groupBy(
+ // 10
+ *
+ )
+
+// 11
+
+`,
+ },
+ {
+ script: `
+ // Define a result that contains the most recent score per player.
+var topPlayerScores = stream
+ |from().measurement('scores')
+ // Get the most recent score for each player per game.
+// Not likely that a player is playing two games but just in case.
+.groupBy('game', 'player')
+ |window()
+ // keep a buffer of the last 11s of scores
+ // just in case a player score hasn't updated in a while
+ .period(11s)
+ // Emit the current score per player every second.
+.every(1s)
+ // Align the window boundaries to be on the second.
+.align()
+ |last('value')
+
+// Calculate the top 15 scores per game
+var topScores = topPlayerScores
+ |groupBy('game')
+ |top(15, 'last', 'player')
+
+// Expose top scores over the HTTP API at the 'top_scores' endpoint.
+// Now your app can just request the top scores from Kapacitor
+// and always get the most recent result.
+//
+// http://localhost:9092/api/v1/top_scores/top_scores
+topScores
+ |httpOut('top_scores')
+
+// Sample the top scores and keep a score once every 10s
+var topScoresSampled = topScores
+ |sample(10s)
+
+// Store top fifteen player scores in InfluxDB.
+topScoresSampled
+ |influxDBOut()
+ .database('game')
+ .measurement('top_scores')
+
+// Calculate the max and min of the top scores.
+var max = topScoresSampled
+ |max('top')
+var min = topScoresSampled
+ |min('top')
+
+// Join the max and min streams back together and calculate the gap.
+max|join(min)
+ .as('max', 'min')
+ // calculate the difference between the max and min scores.
+|eval(lambda: "max.max" - "min.min", lambda: "max.max", lambda: "min.min")
+ .as('gap', 'topFirst', 'topLast')
+ // store the fields: gap, topFirst, and topLast in InfluxDB.
+|influxDBOut()
+ .database('game')
+ .measurement('top_scores_gap')
+`,
+ exp: `// Define a result that contains the most recent score per player.
+var topPlayerScores = stream
+ |from()
+ .measurement('scores')
+ // Get the most recent score for each player per game.
+ // Not likely that a player is playing two games but just in case.
+ .groupBy('game', 'player')
+ |window()
+ // keep a buffer of the last 11s of scores
+ // just in case a player score hasn't updated in a while
+ .period(11s)
+ // Emit the current score per player every second.
+ .every(1s)
+ // Align the window boundaries to be on the second.
+ .align()
+ |last('value')
+
+// Calculate the top 15 scores per game
+var topScores = topPlayerScores
+ |groupBy('game')
+ |top(15, 'last', 'player')
+
+// Expose top scores over the HTTP API at the 'top_scores' endpoint.
+// Now your app can just request the top scores from Kapacitor
+// and always get the most recent result.
+//
+// http://localhost:9092/api/v1/top_scores/top_scores
+topScores
+ |httpOut('top_scores')
+
+// Sample the top scores and keep a score once every 10s
+var topScoresSampled = topScores
+ |sample(10s)
+
+// Store top fifteen player scores in InfluxDB.
+topScoresSampled
+ |influxDBOut()
+ .database('game')
+ .measurement('top_scores')
+
+// Calculate the max and min of the top scores.
+var max = topScoresSampled
+ |max('top')
+
+var min = topScoresSampled
+ |min('top')
+
+// Join the max and min streams back together and calculate the gap.
+max
+ |join(min)
+ .as('max', 'min')
+ // calculate the difference between the max and min scores.
+ |eval(lambda: "max.max" - "min.min", lambda: "max.max", lambda: "min.min")
+ .as('gap', 'topFirst', 'topLast')
+ // store the fields: gap, topFirst, and topLast in InfluxDB.
+ |influxDBOut()
+ .database('game')
+ .measurement('top_scores_gap')
+`,
+ },
+ }
+
+ for _, tc := range testCases {
+ got, err := Format(tc.script)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got != tc.exp {
+ t.Fatalf("unexpected format:\nexp:\n%s\ngot:\n%s\nlength exp:%d got:%d", tc.exp, got, len(tc.exp), len(got))
+ }
+ }
+}
diff --git a/tick/lex.go b/tick/lex.go
index 9bfc1b8a2..a06f69cb1 100644
--- a/tick/lex.go
+++ b/tick/lex.go
@@ -19,6 +19,7 @@ const (
TokenVar
TokenAsgn
TokenDot
+ TokenPipe
TokenIdent
TokenReference
TokenLambda
@@ -32,6 +33,7 @@ const (
TokenTrue
TokenFalse
TokenRegex
+ TokenComment
// begin operator tokens
begin_tok_operator
@@ -90,13 +92,22 @@ var operatorStr = [...]string{
var strToOperator map[string]tokenType
+const (
+ KW_And = "AND"
+ KW_Or = "OR"
+ KW_True = "TRUE"
+ KW_False = "FALSE"
+ KW_Var = "var"
+ KW_Lambda = "lambda"
+)
+
var keywords = map[string]tokenType{
- "AND": TokenAnd,
- "OR": TokenOr,
- "TRUE": TokenTrue,
- "FALSE": TokenFalse,
- "var": TokenVar,
- "lambda": TokenLambda,
+ KW_And: TokenAnd,
+ KW_Or: TokenOr,
+ KW_True: TokenTrue,
+ KW_False: TokenFalse,
+ KW_Var: TokenVar,
+ KW_Lambda: TokenLambda,
}
func init() {
@@ -127,8 +138,12 @@ func (t tokenType) String() string {
return "string"
case t == TokenRegex:
return "regex"
+ case t == TokenComment:
+ return "//"
case t == TokenDot:
return "."
+ case t == TokenPipe:
+ return "|"
case t == TokenAsgn:
return "="
case t == TokenLParen:
@@ -143,20 +158,23 @@ func (t tokenType) String() string {
return "TRUE"
case t == TokenFalse:
return "FALSE"
- case isOperator(t):
+ case isExprOperator(t):
return operatorStr[t]
}
return fmt.Sprintf("%d", t)
}
-func isOperator(typ tokenType) bool {
+// True if token type is an operator used in mathematical or boolean expressions.
+func isExprOperator(typ tokenType) bool {
return typ > begin_tok_operator && typ < end_tok_operator
}
+// True if token type is an operator used in mathematical expressions.
func isMathOperator(typ tokenType) bool {
return typ > begin_tok_operator_math && typ < end_tok_operator_math
}
+// True if token type is an operator used in comparisons.
func isCompOperator(typ tokenType) bool {
return typ > begin_tok_operator_comp && typ < end_tok_operator_comp
}
@@ -305,6 +323,9 @@ func lexToken(l *lexer) stateFn {
case r == '.':
l.emit(TokenDot)
return lexToken
+ case r == '|':
+ l.emit(TokenPipe)
+ return lexToken
case r == ',':
l.emit(TokenComma)
return lexToken
@@ -533,7 +554,7 @@ func lexComment(l *lexer) stateFn {
for {
switch r := l.next(); {
case r == '\n' || r == eof:
- l.ignore()
+ l.emit(TokenComment)
return lexToken
}
}
diff --git a/tick/lex_test.go b/tick/lex_test.go
index 25b592291..837e3c3c5 100644
--- a/tick/lex_test.go
+++ b/tick/lex_test.go
@@ -152,6 +152,13 @@ func TestLexer(t *testing.T) {
token{TokenEOF, 1, ""},
},
},
+ {
+ in: "|",
+ tokens: []token{
+ token{TokenPipe, 0, "|"},
+ token{TokenEOF, 1, ""},
+ },
+ },
// Keywords
{
in: "AND",
@@ -438,7 +445,7 @@ func TestLexer(t *testing.T) {
},
},
{
- in: "var x = avg().parallel(4)x.groupby('cpu').window().period(10s)",
+ in: "var x = avg()|parallel(4)x.groupby('cpu')|window().period(10s)",
tokens: []token{
token{TokenVar, 0, "var"},
token{TokenIdent, 4, "x"},
@@ -446,7 +453,7 @@ func TestLexer(t *testing.T) {
token{TokenIdent, 8, "avg"},
token{TokenLParen, 11, "("},
token{TokenRParen, 12, ")"},
- token{TokenDot, 13, "."},
+ token{TokenPipe, 13, "|"},
token{TokenIdent, 14, "parallel"},
token{TokenLParen, 22, "("},
token{TokenNumber, 23, "4"},
@@ -457,7 +464,7 @@ func TestLexer(t *testing.T) {
token{TokenLParen, 34, "("},
token{TokenString, 35, "'cpu'"},
token{TokenRParen, 40, ")"},
- token{TokenDot, 41, "."},
+ token{TokenPipe, 41, "|"},
token{TokenIdent, 42, "window"},
token{TokenLParen, 48, "("},
token{TokenRParen, 49, ")"},
@@ -479,6 +486,7 @@ func TestLexer(t *testing.T) {
token{TokenIdent, 8, "avg"},
token{TokenLParen, 11, "("},
token{TokenRParen, 12, ")"},
+ token{TokenComment, 14, "// Comment all of this is ignored\n"},
token{TokenIdent, 48, "x"},
token{TokenDot, 49, "."},
token{TokenIdent, 50, "groupby"},
@@ -497,6 +505,7 @@ func TestLexer(t *testing.T) {
token{TokenIdent, 8, "avg"},
token{TokenLParen, 11, "("},
token{TokenRParen, 12, ")"},
+ token{TokenComment, 14, "// Comment all of this is ignored"},
token{TokenEOF, 47, ""},
},
},
diff --git a/tick/node.go b/tick/node.go
index 3d1e3384c..60ccd7650 100644
--- a/tick/node.go
+++ b/tick/node.go
@@ -5,39 +5,69 @@ import (
"fmt"
"regexp"
"strconv"
+ "strings"
"time"
+ "unicode"
"github.com/influxdata/influxdb/influxql"
)
type unboundFunc func(obj interface{}) (interface{}, error)
+type Position interface {
+ Position() int // byte position of start of node in full original input string
+ Line() int
+ Char() int
+}
+
type Node interface {
+ Position
String() string
- Position() int // byte position of start of node in full original input string
+ Format(buf *bytes.Buffer, indent string, onNewLine bool)
}
-type pos int
+func writeIndent(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if onNewLine {
+ buf.WriteString(indent)
+ }
+}
-func (p pos) Position() int {
- return int(p)
+type position struct {
+ pos int
+ line int
+ char int
+}
+
+func (p position) Position() int {
+ return p.pos
+}
+func (p position) Line() int {
+ return p.line
+}
+func (p position) Char() int {
+ return p.char
+}
+func (p position) String() string {
+ return fmt.Sprintf("%dl%dc%d", p.pos, p.line, p.char)
}
// numberNode holds a number: signed or unsigned integer or float.
// The value is parsed and stored under all the types that can represent the value.
// This simulates in a small amount of code the behavior of Go's ideal constants.
type NumberNode struct {
- pos
+ position
IsInt bool // Number has an integral value.
IsFloat bool // Number has a floating-point value.
Int64 int64 // The integer value.
Float64 float64 // The floating-point value.
+ Comment Node
}
// create a new number from a text string
-func newNumber(p int, text string) (*NumberNode, error) {
+func newNumber(p position, text string, c Node) (*NumberNode, error) {
n := &NumberNode{
- pos: pos(p),
+ position: p,
+ Comment: c,
}
i, err := strconv.ParseInt(text, 10, 64)
if err == nil {
@@ -64,23 +94,42 @@ func newNumber(p int, text string) (*NumberNode, error) {
func (n *NumberNode) String() string {
if n.IsInt {
- return fmt.Sprintf("NumberNode@%d{%di}", n.pos, n.Int64)
+ return fmt.Sprintf("NumberNode@%v{%di}%v", n.position, n.Int64, n.Comment)
+ }
+ return fmt.Sprintf("NumberNode@%v{%f}%v", n.position, n.Float64, n.Comment)
+}
+
+func (n *NumberNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ if n.IsInt {
+ buf.WriteString(strconv.FormatInt(n.Int64, 10))
+ } else {
+ s := strconv.FormatFloat(n.Float64, 'f', -1, 64)
+ if strings.IndexRune(s, '.') == -1 {
+ s += ".0"
+ }
+ buf.WriteString(s)
}
- return fmt.Sprintf("NumberNode@%d{%f}", n.pos, n.Float64)
}
// durationNode holds a number: signed or unsigned integer or float.
// The value is parsed and stored under all the types that can represent the value.
// This simulates in a small amount of code the behavior of Go's ideal constants.
type DurationNode struct {
- pos
- Dur time.Duration //the duration
+ position
+ Dur time.Duration //the duration
+ Comment Node
}
// create a new number from a text string
-func newDur(p int, text string) (*DurationNode, error) {
+func newDur(p position, text string, c Node) (*DurationNode, error) {
n := &DurationNode{
- pos: pos(p),
+ position: p,
+ Comment: c,
}
d, err := influxql.ParseDuration(text)
if err != nil {
@@ -90,95 +139,241 @@ func newDur(p int, text string) (*DurationNode, error) {
return n, nil
}
-func (d *DurationNode) String() string {
- return fmt.Sprintf("DurationNode@%d{%v}", d.pos, d.Dur)
+func (n *DurationNode) String() string {
+ return fmt.Sprintf("DurationNode@%v{%v}%v", n.position, n.Dur, n.Comment)
+}
+
+func (n *DurationNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ buf.WriteString(influxql.FormatDuration(n.Dur))
}
// boolNode holds one argument and an operator.
type BoolNode struct {
- pos
- Bool bool
+ position
+ Bool bool
+ Comment Node
}
-func newBool(p int, text string) (*BoolNode, error) {
+func newBool(p position, text string, c Node) (*BoolNode, error) {
b, err := strconv.ParseBool(text)
if err != nil {
return nil, err
}
return &BoolNode{
- pos: pos(p),
- Bool: b,
+ position: p,
+ Bool: b,
+ Comment: c,
}, nil
}
-func (b *BoolNode) String() string {
- return fmt.Sprintf("BoolNode@%d{%v}", b.pos, b.Bool)
+func (n *BoolNode) String() string {
+ return fmt.Sprintf("BoolNode@%v{%v}%v", n.position, n.Bool, n.Comment)
+}
+func (n *BoolNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ if n.Bool {
+ buf.WriteString(KW_True)
+ } else {
+ buf.WriteString(KW_False)
+ }
}
// unaryNode holds one argument and an operator.
type UnaryNode struct {
- pos
+ position
Node Node
Operator tokenType
+ Comment Node
}
-func newUnary(operator token, n Node) *UnaryNode {
+func newUnary(p position, op tokenType, n Node, c Node) *UnaryNode {
return &UnaryNode{
- pos: pos(operator.pos),
+ position: p,
Node: n,
- Operator: operator.typ,
+ Operator: op,
+ Comment: c,
}
}
-func (u *UnaryNode) String() string {
- return fmt.Sprintf("UnaryNode@%d{%s %s}", u.pos, u.Operator, u.Node)
+func (n *UnaryNode) String() string {
+ return fmt.Sprintf("UnaryNode@%v{%s %s}%v", n.position, n.Operator, n.Node, n.Comment)
+}
+
+func (n *UnaryNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ buf.WriteString(n.Operator.String())
+ n.Node.Format(buf, indent, false)
}
// binaryNode holds two arguments and an operator.
type BinaryNode struct {
- pos
+ position
+ Left Node
+ Right Node
+ Operator tokenType
+ Comment Node
+ Parens bool
+ MultiLine bool
+}
+
+func newBinary(p position, op tokenType, left, right Node, multiLine bool, comment Node) *BinaryNode {
+ return &BinaryNode{
+ position: p,
+ Left: left,
+ Right: right,
+ Operator: op,
+ MultiLine: multiLine,
+ Comment: comment,
+ }
+}
+
+func (n *BinaryNode) String() string {
+ return fmt.Sprintf("BinaryNode@%v{p:%v m:%v %v %v %v}%v", n.position, n.Parens, n.MultiLine, n.Left, n.Operator, n.Right, n.Comment)
+}
+
+func (n *BinaryNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ if n.Parens {
+ buf.WriteByte('(')
+ indent += indentStep
+ }
+ n.Left.Format(buf, indent, false)
+ buf.WriteByte(' ')
+ buf.WriteString(n.Operator.String())
+ if n.MultiLine {
+ buf.WriteByte('\n')
+ } else {
+ buf.WriteByte(' ')
+ }
+ n.Right.Format(buf, indent, n.MultiLine)
+ if n.Parens {
+ buf.WriteByte(')')
+ }
+}
+
+type DeclarationNode struct {
+ position
+ Left *IdentifierNode
+ Right Node
+ Comment Node
+}
+
+func newDecl(p position, left *IdentifierNode, right Node, comment Node) *DeclarationNode {
+ return &DeclarationNode{
+ position: p,
+ Left: left,
+ Right: right,
+ Comment: comment,
+ }
+}
+
+func (n *DeclarationNode) String() string {
+ return fmt.Sprintf("DeclarationNode@%v{%v %v}%v", n.position, n.Left, n.Right, n.Comment)
+}
+
+func (n *DeclarationNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ }
+ buf.WriteString(KW_Var)
+ buf.WriteByte(' ')
+ n.Left.Format(buf, indent, false)
+ buf.WriteByte(' ')
+ buf.WriteString(TokenAsgn.String())
+ buf.WriteByte(' ')
+ n.Right.Format(buf, indent, false)
+}
+
+type ChainNode struct {
+ position
Left Node
Right Node
Operator tokenType
+ Comment Node
}
-func newBinary(operator token, left, right Node) *BinaryNode {
- return &BinaryNode{
- pos: pos(operator.pos),
+func newChain(p position, op tokenType, left, right Node, comment Node) *ChainNode {
+ return &ChainNode{
+ position: p,
Left: left,
Right: right,
- Operator: operator.typ,
+ Operator: op,
+ Comment: comment,
}
}
-func (b *BinaryNode) String() string {
- return fmt.Sprintf("BinaryNode@%d{%v %v %v}", b.pos, b.Left, b.Operator, b.Right)
+func (n *ChainNode) String() string {
+ return fmt.Sprintf("ChainNode@%v{%v %v %v}%v", n.position, n.Left, n.Operator, n.Right, n.Comment)
+}
+
+func (n *ChainNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ n.Left.Format(buf, indent, onNewLine)
+ buf.WriteByte('\n')
+ indent = indent + indentStep
+ if n.Operator == TokenDot {
+ indent = indent + indentStep
+ }
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, true)
+ }
+ buf.WriteString(indent)
+ buf.WriteString(n.Operator.String())
+ n.Right.Format(buf, indent, false)
}
//Holds the textual representation of an identifier
type IdentifierNode struct {
- pos
- Ident string // The identifier
+ position
+ Ident string // The identifier
+ Comment Node
}
-func newIdent(p int, ident string) *IdentifierNode {
+func newIdent(p position, ident string, c Node) *IdentifierNode {
return &IdentifierNode{
- pos: pos(p),
- Ident: ident,
+ position: p,
+ Ident: ident,
+ Comment: c,
}
}
-func (i *IdentifierNode) String() string {
- return fmt.Sprintf("IdentifierNode@%d{%s}", i.pos, i.Ident)
+func (n *IdentifierNode) String() string {
+ return fmt.Sprintf("IdentifierNode@%v{%s}%v", n.position, n.Ident, n.Comment)
+}
+
+func (n *IdentifierNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ buf.WriteString(n.Ident)
}
//Holds the textual representation of an identifier
type ReferenceNode struct {
- pos
+ position
Reference string // The field reference
+ Comment Node
}
-func newReference(p int, txt string) *ReferenceNode {
+func newReference(p position, txt string, c Node) *ReferenceNode {
// Remove leading and trailing quotes
literal := txt[1 : len(txt)-1]
// Unescape quotes
@@ -196,27 +391,48 @@ func newReference(p int, txt string) *ReferenceNode {
literal = buf.String()
return &ReferenceNode{
- pos: pos(p),
+ position: p,
Reference: literal,
+ Comment: c,
}
}
-func (r *ReferenceNode) String() string {
- return fmt.Sprintf("ReferenceNode@%d{%s}", r.pos, r.Reference)
+func (n *ReferenceNode) String() string {
+ return fmt.Sprintf("ReferenceNode@%v{%s}%v", n.position, n.Reference, n.Comment)
+}
+
+func (n *ReferenceNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ buf.WriteByte('"')
+ for _, c := range n.Reference {
+ if c == '"' {
+ buf.WriteByte('\\')
+ }
+ buf.WriteRune(c)
+ }
+ buf.WriteByte('"')
}
//Holds the textual representation of a string literal
type StringNode struct {
- pos
- Literal string // The string literal
+ position
+ Literal string // The string literal
+ TripleQuotes bool
+ Comment Node
}
-func newString(p int, txt string) *StringNode {
+func newString(p position, txt string, c Node) *StringNode {
+ tripleQuotes := false
// Remove leading and trailing quotes
var literal string
if len(txt) >= 6 && txt[0:3] == "'''" {
literal = txt[3 : len(txt)-3]
+ tripleQuotes = true
} else {
literal = txt[1 : len(txt)-1]
quote := txt[0]
@@ -236,22 +452,53 @@ func newString(p int, txt string) *StringNode {
}
return &StringNode{
- pos: pos(p),
- Literal: literal,
+ position: p,
+ Literal: literal,
+ TripleQuotes: tripleQuotes,
+ Comment: c,
}
}
-func (s *StringNode) String() string {
- return fmt.Sprintf("StringNode@%d{%s}", s.pos, s.Literal)
+func (n *StringNode) String() string {
+ return fmt.Sprintf("StringNode@%v{%s}%v", n.position, n.Literal, n.Comment)
+}
+
+func (n *StringNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ if n.TripleQuotes {
+ buf.WriteString("'''")
+ } else {
+ buf.WriteByte('\'')
+ }
+ if n.TripleQuotes {
+ buf.WriteString(n.Literal)
+ } else {
+ for _, c := range n.Literal {
+ if c == '\'' {
+ buf.WriteByte('\\')
+ }
+ buf.WriteRune(c)
+ }
+ }
+ if n.TripleQuotes {
+ buf.WriteString("'''")
+ } else {
+ buf.WriteByte('\'')
+ }
}
//Holds the textual representation of a regex literal
type RegexNode struct {
- pos
- Regex *regexp.Regexp
+ position
+ Regex *regexp.Regexp
+ Comment Node
}
-func newRegex(p int, txt string) (*RegexNode, error) {
+func newRegex(p position, txt string, c Node) (*RegexNode, error) {
// Remove leading and trailing quotes
literal := txt[1 : len(txt)-1]
@@ -275,82 +522,219 @@ func newRegex(p int, txt string) (*RegexNode, error) {
}
return &RegexNode{
- pos: pos(p),
- Regex: r,
+ position: p,
+ Regex: r,
+ Comment: c,
}, nil
}
-func (s *RegexNode) String() string {
- return fmt.Sprintf("RegexNode@%d{%v}", s.pos, s.Regex)
+func (n *RegexNode) String() string {
+ return fmt.Sprintf("RegexNode@%v{%v}%v", n.position, n.Regex, n.Comment)
+}
+
+func (n *RegexNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ buf.WriteByte('/')
+ buf.WriteString(n.Regex.String())
+ buf.WriteByte('/')
}
// Represents a standalone '*' token.
type StarNode struct {
- pos
+ position
+ Comment Node
}
-func newStar(p int) *StarNode {
+func newStar(p position, c Node) *StarNode {
return &StarNode{
- pos: pos(p),
+ position: p,
+ Comment: c,
}
}
-func (s *StarNode) String() string {
- return fmt.Sprintf("StarNode@%d{*}", s.pos)
+func (n *StarNode) String() string {
+ return fmt.Sprintf("StarNode@%v{*}%v", n.position, n.Comment)
+}
+
+func (n *StarNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ buf.WriteByte('*')
+}
+
+type funcType int
+
+const (
+ globalFunc funcType = iota
+ chainFunc
+ propertyFunc
+)
+
+func (ft funcType) String() string {
+ switch ft {
+ case globalFunc:
+ return "global"
+ case chainFunc:
+ return "chain"
+ case propertyFunc:
+ return "property"
+ default:
+ return "unknown"
+ }
}
//Holds the a function call with its args
type FunctionNode struct {
- pos
- Func string // The identifier
- Args []Node
+ position
+ Type funcType
+ Func string // The identifier
+ Args []Node
+ Comment Node
+ MultiLine bool
}
-func newFunc(p int, ident string, args []Node) *FunctionNode {
+func newFunc(p position, ft funcType, ident string, args []Node, multi bool, c Node) *FunctionNode {
return &FunctionNode{
- pos: pos(p),
- Func: ident,
- Args: args,
+ position: p,
+ Type: ft,
+ Func: ident,
+ Args: args,
+ Comment: c,
+ MultiLine: multi,
}
}
-func (f *FunctionNode) String() string {
- return fmt.Sprintf("FunctionNode@%d{%s %v}", f.pos, f.Func, f.Args)
+func (n *FunctionNode) String() string {
+ return fmt.Sprintf("FunctionNode@%v{%v %s %v}%v", n.position, n.Type, n.Func, n.Args, n.Comment)
+}
+
+func (n *FunctionNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ buf.WriteString(n.Func)
+ buf.WriteByte('(')
+ argIndent := indent + indentStep
+ for i, arg := range n.Args {
+ if i != 0 {
+ buf.WriteByte(',')
+ if !n.MultiLine {
+ buf.WriteByte(' ')
+ }
+ }
+ if n.MultiLine {
+ buf.WriteByte('\n')
+ }
+ arg.Format(buf, argIndent, n.MultiLine)
+ }
+
+ if n.MultiLine && len(n.Args) > 0 {
+ buf.WriteByte('\n')
+ buf.WriteString(indent)
+ }
+ buf.WriteByte(')')
}
// Represents the begining of a lambda expression
type LambdaNode struct {
- pos
- Node Node
+ position
+ Node Node
+ Comment Node
}
-func newLambda(p int, node Node) *LambdaNode {
+func newLambda(p position, node Node, c Node) *LambdaNode {
return &LambdaNode{
- pos: pos(p),
- Node: node,
+ position: p,
+ Node: node,
+ Comment: c,
}
}
-func (l *LambdaNode) String() string {
- return fmt.Sprintf("LambdaNode@%d{%v}", l.pos, l.Node)
+func (n *LambdaNode) String() string {
+ return fmt.Sprintf("LambdaNode@%v{%v}%v", n.position, n.Node, n.Comment)
+}
+
+func (n *LambdaNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if n.Comment != nil {
+ n.Comment.Format(buf, indent, onNewLine)
+ onNewLine = true
+ }
+ writeIndent(buf, indent, onNewLine)
+ buf.WriteString("lambda: ")
+ n.Node.Format(buf, indent, false)
}
//Holds a function call with its args
type ListNode struct {
- pos
+ position
Nodes []Node
}
-func newList(p int) *ListNode {
+func newList(p position) *ListNode {
return &ListNode{
- pos: pos(p),
+ position: p,
+ }
+}
+
+func (n *ListNode) Add(node Node) {
+ n.Nodes = append(n.Nodes, node)
+}
+
+func (n *ListNode) String() string {
+ return fmt.Sprintf("ListNode@%v{%v}", n.position, n.Nodes)
+}
+
+func (n *ListNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ for i, node := range n.Nodes {
+ if i != 0 {
+ buf.WriteByte('\n')
+ }
+ node.Format(buf, indent, true)
+ buf.WriteByte('\n')
}
}
-func (l *ListNode) Add(n Node) {
- l.Nodes = append(l.Nodes, n)
+// Holds the contents of a comment
+type CommentNode struct {
+ position
+ Comments []string
}
-func (l *ListNode) String() string {
- return fmt.Sprintf("ListNode@%d{%v}", l.pos, l.Nodes)
+func newComment(p position, comments []string) *CommentNode {
+ for i := range comments {
+ comments[i] = strings.TrimSpace(comments[i])
+ comments[i] = strings.TrimLeftFunc(comments[i][2:], unicode.IsSpace)
+ }
+ return &CommentNode{
+ position: p,
+ Comments: comments,
+ }
+}
+
+func (n *CommentNode) String() string {
+ return fmt.Sprintf("CommentNode@%v{%v}", n.position, n.Comments)
+}
+
+func (n *CommentNode) Format(buf *bytes.Buffer, indent string, onNewLine bool) {
+ if !onNewLine {
+ buf.WriteByte('\n')
+ }
+ for _, comment := range n.Comments {
+ buf.WriteString(indent)
+ buf.WriteString("//")
+ if len(comment) > 0 {
+ buf.WriteByte(' ')
+ buf.WriteString(comment)
+ }
+ buf.WriteByte('\n')
+ }
}
diff --git a/tick/node_test.go b/tick/node_test.go
index 4ff6ad160..adf51f9e6 100644
--- a/tick/node_test.go
+++ b/tick/node_test.go
@@ -7,12 +7,12 @@ import (
"github.com/stretchr/testify/assert"
)
-func TestnumberNode(t *testing.T) {
+func TestNumberNode(t *testing.T) {
assert := assert.New(t)
type testCase struct {
Text string
- Pos int
+ Pos position
IsInt bool
IsFloat bool
Int64 int64
@@ -21,14 +21,14 @@ func TestnumberNode(t *testing.T) {
}
test := func(tc testCase) {
- n, err := newNumber(tc.Pos, tc.Text)
+ n, err := newNumber(tc.Pos, tc.Text, nil)
if tc.Err != nil {
assert.Equal(tc.Err, err)
} else {
if !assert.NotNil(n) {
t.FailNow()
}
- assert.Equal(tc.Pos, int(n.pos))
+ assert.Equal(tc.Pos.pos, n.position.pos)
assert.Equal(tc.IsInt, n.IsInt)
assert.Equal(tc.IsFloat, n.IsFloat)
assert.Equal(tc.Int64, n.Int64)
@@ -39,31 +39,31 @@ func TestnumberNode(t *testing.T) {
cases := []testCase{
testCase{
Text: "04",
- Pos: 6,
+ Pos: position{pos: 6},
IsInt: true,
Int64: 4,
},
testCase{
Text: "42",
- Pos: 5,
+ Pos: position{pos: 5},
IsInt: true,
Int64: 42,
},
testCase{
Text: "42.21",
- Pos: 4,
+ Pos: position{pos: 4},
IsFloat: true,
Float64: 42.21,
},
testCase{
Text: "42.",
- Pos: 3,
+ Pos: position{pos: 3},
IsFloat: true,
Float64: 42.0,
},
testCase{
Text: "0.42",
- Pos: 2,
+ Pos: position{pos: 2},
IsFloat: true,
Float64: 0.42,
},
@@ -92,11 +92,11 @@ func TestNewBinaryNode(t *testing.T) {
}
test := func(tc testCase) {
- n := newBinary(tc.Operator, tc.Left, tc.Right)
+ n := newBinary(position{pos: tc.Operator.pos}, tc.Operator.typ, tc.Left, tc.Right, false, nil)
if !assert.NotNil(n) {
t.FailNow()
}
- assert.Equal(tc.Operator.pos, int(n.pos))
+ assert.Equal(tc.Operator.pos, n.position.pos)
assert.Equal(tc.Left, n.Right)
assert.Equal(tc.Right, n.Left)
assert.Equal(tc.Operator.typ, n.Operator)
diff --git a/tick/parser.go b/tick/parser.go
index 5ea37947a..77fff318c 100644
--- a/tick/parser.go
+++ b/tick/parser.go
@@ -29,6 +29,10 @@ func parse(text string) (Node, error) {
return p.Root, nil
}
+func (p *parser) hasNewLine(start, end int) bool {
+ return strings.IndexRune(p.Text[start:end], '\n') != -1
+}
+
// --------------------
// Parsing methods
//
@@ -112,6 +116,15 @@ func (p *parser) unexpected(tok token, expected ...tokenType) {
p.errorf("unexpected %s line %d char %d in \"%s\". expected: %s", tokStr, line, char, p.Text[start:stop], expectedStr)
}
+func (p *parser) position(pos int) position {
+ line, char := p.lex.lineNumber(pos)
+ return position{
+ pos: pos,
+ line: line,
+ char: char,
+ }
+}
+
// recover is the handler that turns panics into returns from the top level of Parse.
func (p *parser) recover(errp *error) {
e := recover()
@@ -155,93 +168,114 @@ func (p *parser) parse() {
//parse a complete program
func (p *parser) program() Node {
- l := newList(p.peek().pos)
+ l := newList(p.position(p.peek().pos))
+ var s, prevc, nextc Node
for {
switch p.peek().typ {
case TokenEOF:
+ if prevc != nil {
+ l.Add(prevc)
+ }
return l
default:
- s := p.statement()
+ s, nextc = p.statement(prevc)
l.Add(s)
}
+ prevc = nextc
}
}
//parse a statement
-func (p *parser) statement() Node {
- var n Node
- if p.peek().typ == TokenVar {
- n = p.declaration()
- } else {
- n = p.expression()
+func (p *parser) statement(c Node) (Node, Node) {
+ if c == nil {
+ c = p.comment()
+ }
+ switch t := p.peek().typ; t {
+ case TokenVar:
+ return p.declaration(c)
+ default:
+ return p.expression(c)
}
- return n
}
//parse a declaration statement
-func (p *parser) declaration() Node {
+func (p *parser) declaration(c Node) (Node, Node) {
+ if c == nil {
+ c = p.comment()
+ }
v := p.vr()
op := p.expect(TokenAsgn)
- b := p.expression()
- return newBinary(op, v, b)
+ b, extra := p.expression(nil)
+ return newDecl(p.position(op.pos), v, b, c), extra
}
//parse a 'var ident' expression
-func (p *parser) vr() Node {
+func (p *parser) vr() *IdentifierNode {
p.expect(TokenVar)
ident := p.expect(TokenIdent)
- return newIdent(ident.pos, ident.val)
+ return newIdent(p.position(ident.pos), ident.val, nil)
}
//parse an expression
-func (p *parser) expression() Node {
+func (p *parser) expression(c Node) (Node, Node) {
+ if c == nil {
+ c = p.comment()
+ }
switch p.peek().typ {
case TokenIdent:
- term := p.funcOrIdent()
+ term := p.funcOrIdent(globalFunc, c)
return p.chain(term)
default:
- return p.primary()
+ return p.primary(c), nil
}
}
//parse a function or identifier invocation chain
-// '.' operator is left-associative
-func (p *parser) chain(lhs Node) Node {
- for look := p.peek().typ; look == TokenDot; look = p.peek().typ {
+// '|', '.' operators are left-associative.
+func (p *parser) chain(lhs Node) (Node, Node) {
+ c := p.comment()
+ if t := p.peek().typ; t == TokenDot || t == TokenPipe {
op := p.next()
- rhs := p.funcOrIdent()
- lhs = newBinary(op, lhs, rhs)
+ ft := propertyFunc
+ if op.typ == TokenPipe {
+ ft = chainFunc
+ }
+ rhs := p.funcOrIdent(ft, nil)
+ return p.chain(newChain(p.position(op.pos), op.typ, lhs, rhs, c))
}
- return lhs
+ return lhs, c
}
-func (p *parser) funcOrIdent() (n Node) {
+func (p *parser) funcOrIdent(ft funcType, c Node) (n Node) {
p.next()
- if p.peek().typ == TokenLParen {
+ if ft == chainFunc || p.peek().typ == TokenLParen {
p.backup()
- n = p.function()
+ n = p.function(ft, c)
} else {
p.backup()
- n = p.identifier()
+ n = p.identifier(c)
}
return
}
//parse an identifier
-func (p *parser) identifier() Node {
+func (p *parser) identifier(c Node) Node {
ident := p.expect(TokenIdent)
- n := newIdent(ident.pos, ident.val)
+ n := newIdent(p.position(ident.pos), ident.val, c)
return n
}
//parse a function call
-func (p *parser) function() Node {
+func (p *parser) function(ft funcType, c Node) Node {
ident := p.expect(TokenIdent)
p.expect(TokenLParen)
args := p.parameters()
p.expect(TokenRParen)
-
- n := newFunc(ident.pos, ident.val, args)
+ multiLine := false
+ if l := len(args); l > 0 {
+ multiLine = p.hasNewLine(ident.pos, args[l-1].Position())
+ }
+ n := newFunc(p.position(ident.pos), ft, ident.val, args, multiLine, c)
return n
}
@@ -260,22 +294,23 @@ func (p *parser) parameters() (args []Node) {
}
func (p *parser) parameter() (n Node) {
+ c := p.comment()
switch p.peek().typ {
case TokenIdent:
- n = p.expression()
+ n, _ = p.expression(c)
case TokenLambda:
lambda := p.next()
- l := p.lambdaExpr()
- n = newLambda(lambda.pos, l)
+ l := p.lambdaExpr(nil)
+ n = newLambda(p.position(lambda.pos), l, c)
default:
- n = p.primary()
+ n = p.primary(c)
}
return
}
// parse the lambda expression.
-func (p *parser) lambdaExpr() Node {
- return p.precedence(p.primary(), 0)
+func (p *parser) lambdaExpr(c Node) Node {
+ return p.precedence(p.primary(nil), 0, c)
}
// Operator Precedence parsing
@@ -299,18 +334,20 @@ var precedence = [...]int{
// parse the expression considering operator precedence.
// https://en.wikipedia.org/wiki/Operator-precedence_parser#Pseudo-code
-func (p *parser) precedence(lhs Node, minP int) Node {
+func (p *parser) precedence(lhs Node, minP int, c Node) Node {
look := p.peek()
- for isOperator(look.typ) && precedence[look.typ] >= minP {
+ for isExprOperator(look.typ) && precedence[look.typ] >= minP {
op := p.next()
- rhs := p.primary()
+ rhs := p.primary(nil)
look = p.peek()
// right-associative
- for isOperator(look.typ) && precedence[look.typ] >= precedence[op.typ] {
- rhs = p.precedence(rhs, precedence[look.typ])
+ for isExprOperator(look.typ) && precedence[look.typ] >= precedence[op.typ] {
+ rhs = p.precedence(rhs, precedence[look.typ], nil)
look = p.peek()
}
- lhs = newBinary(op, lhs, rhs)
+
+ multiLine := p.hasNewLine(lhs.Position(), rhs.Position())
+ lhs = newBinary(p.position(op.pos), op.typ, lhs, rhs, multiLine, c)
}
return lhs
}
@@ -322,7 +359,11 @@ func (p *parser) lfunction() Node {
args := p.lparameters()
p.expect(TokenRParen)
- n := newFunc(ident.pos, ident.val, args)
+ multiLine := false
+ if l := len(args); l > 0 {
+ multiLine = p.hasNewLine(ident.pos, args[l-1].Position())
+ }
+ n := newFunc(p.position(ident.pos), globalFunc, ident.val, args, multiLine, nil)
return n
}
@@ -341,35 +382,41 @@ func (p *parser) lparameters() (args []Node) {
}
func (p *parser) lparameter() (n Node) {
- n = p.primary()
- if isOperator(p.peek().typ) {
- n = p.precedence(n, 0)
+ n = p.primary(nil)
+ if isExprOperator(p.peek().typ) {
+ n = p.precedence(n, 0, nil)
}
return
}
// parse a primary expression
-func (p *parser) primary() Node {
+func (p *parser) primary(c Node) Node {
+ if c == nil {
+ c = p.comment()
+ }
switch tok := p.peek(); {
case tok.typ == TokenLParen:
p.next()
- n := p.lambdaExpr()
+ n := p.lambdaExpr(c)
+ if b, ok := n.(*BinaryNode); ok {
+ b.Parens = true
+ }
p.expect(TokenRParen)
return n
case tok.typ == TokenNumber:
- return p.number()
+ return p.number(c)
case tok.typ == TokenString:
- return p.string()
+ return p.string(c)
case tok.typ == TokenTrue, tok.typ == TokenFalse:
- return p.boolean()
+ return p.boolean(c)
case tok.typ == TokenDuration:
- return p.duration()
+ return p.duration(c)
case tok.typ == TokenRegex:
- return p.regex()
+ return p.regex(c)
case tok.typ == TokenMult:
- return p.star()
+ return p.star(c)
case tok.typ == TokenReference:
- return p.reference()
+ return p.reference(c)
case tok.typ == TokenIdent:
p.next()
if p.peek().typ == TokenLParen {
@@ -377,10 +424,10 @@ func (p *parser) primary() Node {
return p.lfunction()
}
p.backup()
- return p.identifier()
+ return p.identifier(c)
case tok.typ == TokenMinus, tok.typ == TokenNot:
p.next()
- return newUnary(tok, p.primary())
+ return newUnary(p.position(tok.pos), tok.typ, p.primary(nil), c)
default:
p.unexpected(
tok,
@@ -400,9 +447,9 @@ func (p *parser) primary() Node {
}
//parse a duration literal
-func (p *parser) duration() Node {
+func (p *parser) duration(c Node) Node {
token := p.expect(TokenDuration)
- num, err := newDur(token.pos, token.val)
+ num, err := newDur(p.position(token.pos), token.val, c)
if err != nil {
p.error(err)
}
@@ -410,9 +457,9 @@ func (p *parser) duration() Node {
}
//parse a number literal
-func (p *parser) number() Node {
+func (p *parser) number(c Node) Node {
token := p.expect(TokenNumber)
- num, err := newNumber(token.pos, token.val)
+ num, err := newNumber(p.position(token.pos), token.val, c)
if err != nil {
p.error(err)
}
@@ -420,16 +467,16 @@ func (p *parser) number() Node {
}
//parse a string literal
-func (p *parser) string() Node {
+func (p *parser) string(c Node) Node {
token := p.expect(TokenString)
- s := newString(token.pos, token.val)
+ s := newString(p.position(token.pos), token.val, c)
return s
}
//parse a regex literal
-func (p *parser) regex() Node {
+func (p *parser) regex(c Node) Node {
token := p.expect(TokenRegex)
- r, err := newRegex(token.pos, token.val)
+ r, err := newRegex(p.position(token.pos), token.val, c)
if err != nil {
p.error(err)
}
@@ -437,23 +484,39 @@ func (p *parser) regex() Node {
}
// parse '*' literal
-func (p *parser) star() Node {
+func (p *parser) star(c Node) Node {
tok := p.expect(TokenMult)
- return newStar(tok.pos)
+ return newStar(p.position(tok.pos), c)
}
//parse a reference literal
-func (p *parser) reference() Node {
+func (p *parser) reference(c Node) Node {
token := p.expect(TokenReference)
- r := newReference(token.pos, token.val)
+ r := newReference(p.position(token.pos), token.val, c)
return r
}
-func (p *parser) boolean() Node {
+func (p *parser) boolean(c Node) Node {
n := p.next()
- num, err := newBool(n.pos, n.val)
+ num, err := newBool(p.position(n.pos), n.val, c)
if err != nil {
p.error(err)
}
return num
}
+
+func (p *parser) comment() Node {
+ var comments []string
+ pos := -1
+ for p.peek().typ == TokenComment {
+ n := p.next()
+ if pos == -1 {
+ pos = n.pos
+ }
+ comments = append(comments, n.val)
+ }
+ if len(comments) > 0 {
+ return newComment(p.position(pos), comments)
+ }
+ return nil
+}
diff --git a/tick/parser_test.go b/tick/parser_test.go
index e2c83229c..799794e79 100644
--- a/tick/parser_test.go
+++ b/tick/parser_test.go
@@ -130,16 +130,32 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = 'str'`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &StringNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
Literal: "str",
},
},
@@ -149,16 +165,32 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = TRUE`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &BoolNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
Bool: true,
},
},
@@ -168,19 +200,39 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = !FALSE`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &UnaryNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
Operator: TokenNot,
Node: &BoolNode{
- pos: 9,
+ position: position{
+ pos: 9,
+ line: 1,
+ char: 10,
+ },
Bool: false,
},
},
@@ -191,16 +243,32 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = 1`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &NumberNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
IsInt: true,
Int64: 1,
},
@@ -211,19 +279,39 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = -1`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &UnaryNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
Operator: TokenMinus,
Node: &NumberNode{
- pos: 9,
+ position: position{
+ pos: 9,
+ line: 1,
+ char: 10,
+ },
IsInt: true,
Int64: 1,
},
@@ -235,16 +323,32 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = 1.0`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &NumberNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
IsFloat: true,
Float64: 1.0,
},
@@ -255,19 +359,39 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = -1.0`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &UnaryNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
Operator: TokenMinus,
Node: &NumberNode{
- pos: 9,
+ position: position{
+ pos: 9,
+ line: 1,
+ char: 10,
+ },
IsFloat: true,
Float64: 1.0,
},
@@ -279,16 +403,32 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = 5h`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &DurationNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
Dur: time.Hour * 5,
},
},
@@ -298,19 +438,39 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = -5h`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &UnaryNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
Operator: TokenMinus,
Node: &DurationNode{
- pos: 9,
+ position: position{
+ pos: 9,
+ line: 1,
+ char: 10,
+ },
Dur: time.Hour * 5,
},
},
@@ -321,16 +481,32 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = /.*\//`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &RegexNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
Regex: regexp.MustCompile(".*/"),
},
},
@@ -340,23 +516,48 @@ func TestParseStatements(t *testing.T) {
{
script: `var x = a.f()`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
- Right: &BinaryNode{
- pos: 9,
+ Right: &ChainNode{
+ position: position{
+ pos: 9,
+ line: 1,
+ char: 10,
+ },
Operator: TokenDot,
Left: &IdentifierNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
Ident: "a",
},
Right: &FunctionNode{
- pos: 10,
+ position: position{
+ pos: 10,
+ line: 1,
+ char: 11,
+ },
+ Type: propertyFunc,
Func: "f",
},
},
@@ -368,31 +569,62 @@ func TestParseStatements(t *testing.T) {
script: `var x = 3m
var y = -x`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
Ident: "x",
},
Right: &DurationNode{
- pos: 8,
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
Dur: time.Minute * 3,
},
},
- &BinaryNode{
- pos: 20,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 20,
+ line: 2,
+ char: 10,
+ },
Left: &IdentifierNode{
- pos: 18,
+ position: position{
+ pos: 18,
+ line: 2,
+ char: 8,
+ },
Ident: "y",
},
Right: &UnaryNode{
- pos: 22,
+ position: position{
+ pos: 22,
+ line: 2,
+ char: 12,
+ },
Operator: TokenMinus,
Node: &IdentifierNode{
- pos: 23,
+ position: position{
+ pos: 23,
+ line: 2,
+ char: 13,
+ },
Ident: "x",
},
},
@@ -400,47 +632,450 @@ func TestParseStatements(t *testing.T) {
},
},
},
+ {
+ script: `var x = a|b()`,
+ Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
+ Nodes: []Node{
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
+ Left: &IdentifierNode{
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
+ Ident: "x",
+ },
+ Right: &ChainNode{
+ position: position{
+ pos: 9,
+ line: 1,
+ char: 10,
+ },
+ Operator: TokenPipe,
+ Left: &IdentifierNode{
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
+ Ident: "a",
+ },
+ Right: &FunctionNode{
+ position: position{
+ pos: 10,
+ line: 1,
+ char: 11,
+ },
+ Type: chainFunc,
+ Func: "b",
+ },
+ },
+ },
+ },
+ },
+ },
{
script: `var t = 42
stream.where(lambda: "value" > t)
`,
Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
+ Nodes: []Node{
+ &DeclarationNode{
+ position: position{
+ pos: 6,
+ line: 1,
+ char: 7,
+ },
+ Left: &IdentifierNode{
+ position: position{
+ pos: 4,
+ line: 1,
+ char: 5,
+ },
+ Ident: "t",
+ },
+ Right: &NumberNode{
+ position: position{
+ pos: 8,
+ line: 1,
+ char: 9,
+ },
+ IsInt: true,
+ Int64: 42,
+ },
+ },
+ &ChainNode{
+ position: position{
+ pos: 20,
+ line: 2,
+ char: 10,
+ },
+ Operator: TokenDot,
+ Left: &IdentifierNode{
+ position: position{
+ pos: 14,
+ line: 2,
+ char: 4,
+ },
+ Ident: "stream",
+ },
+ Right: &FunctionNode{
+ position: position{
+ pos: 21,
+ line: 2,
+ char: 11,
+ },
+ Type: propertyFunc,
+ Func: "where",
+ Args: []Node{
+ &LambdaNode{
+ position: position{
+ pos: 27,
+ line: 2,
+ char: 17,
+ },
+ Node: &BinaryNode{
+ position: position{
+ pos: 43,
+ line: 2,
+ char: 33,
+ },
+ Operator: TokenGreater,
+ Left: &ReferenceNode{
+ position: position{
+ pos: 35,
+ line: 2,
+ char: 25,
+ },
+ Reference: "value",
+ },
+ Right: &IdentifierNode{
+ position: position{
+ pos: 45,
+ line: 2,
+ char: 35,
+ },
+ Ident: "t",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ script: `global(lambda:
+// If this
+// is less than that
+(1 + 2 - 3 * 4 / 5)
+< (sin(6))
+AND
+// more comments.
+(TRUE OR FALSE))`,
+ Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
+ Nodes: []Node{
+ &FunctionNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
+ Func: "global",
+ Type: globalFunc,
+ Args: []Node{
+ &LambdaNode{
+ position: position{
+ pos: 7,
+ line: 1,
+ char: 8,
+ },
+ Node: &BinaryNode{
+ position: position{
+ pos: 80,
+ line: 6,
+ char: 1,
+ },
+ Operator: TokenAnd,
+ MultiLine: true,
+ Left: &BinaryNode{
+ position: position{
+ pos: 69,
+ line: 5,
+ char: 1,
+ },
+ Operator: TokenLess,
+ MultiLine: true,
+ Left: &BinaryNode{
+ position: position{
+ pos: 51,
+ line: 4,
+ char: 4,
+ },
+ Operator: TokenPlus,
+ Parens: true,
+ Left: &NumberNode{
+ position: position{
+ pos: 49,
+ line: 4,
+ char: 2,
+ },
+ IsInt: true,
+ Int64: 1,
+ },
+ Right: &BinaryNode{
+ position: position{
+ pos: 55,
+ line: 4,
+ char: 8,
+ },
+ Operator: TokenMinus,
+ Left: &NumberNode{
+ position: position{
+ pos: 53,
+ line: 4,
+ char: 6,
+ },
+ IsInt: true,
+ Int64: 2,
+ },
+ Right: &BinaryNode{
+ position: position{
+ pos: 59,
+ line: 4,
+ char: 12,
+ },
+ Operator: TokenMult,
+ Left: &NumberNode{
+ position: position{
+ pos: 57,
+ line: 4,
+ char: 10,
+ },
+ IsInt: true,
+ Int64: 3,
+ },
+ Right: &BinaryNode{
+ position: position{
+ pos: 63,
+ line: 4,
+ char: 16,
+ },
+ Operator: TokenDiv,
+ Left: &NumberNode{
+ position: position{
+ pos: 61,
+ line: 4,
+ char: 14,
+ },
+ IsInt: true,
+ Int64: 4,
+ },
+ Right: &NumberNode{
+ position: position{
+ pos: 65,
+ line: 4,
+ char: 18,
+ },
+ IsInt: true,
+ Int64: 5,
+ },
+ },
+ },
+ },
+ Comment: &CommentNode{
+ position: position{
+ pos: 16,
+ line: 2,
+ char: 1,
+ },
+ Comments: []string{"If this", "is less than that"},
+ },
+ },
+ Right: &FunctionNode{
+ position: position{
+ pos: 72,
+ line: 5,
+ char: 4,
+ },
+ Type: globalFunc,
+ Func: "sin",
+ Args: []Node{&NumberNode{
+ position: position{
+ pos: 76,
+ line: 5,
+ char: 8,
+ },
+ IsInt: true,
+ Int64: 6,
+ }},
+ },
+ },
+ Right: &BinaryNode{
+ position: position{
+ pos: 109,
+ line: 8,
+ char: 7,
+ },
+ Operator: TokenOr,
+ Parens: true,
+ Left: &BoolNode{
+ position: position{
+ pos: 104,
+ line: 8,
+ char: 2,
+ },
+ Bool: true,
+ },
+ Right: &BoolNode{
+ position: position{
+ pos: 112,
+ line: 8,
+ char: 10,
+ },
+ Bool: false,
+ },
+ Comment: &CommentNode{
+ position: position{
+ pos: 85,
+ line: 7,
+ char: 1,
+ },
+ Comments: []string{"more comments."},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ script: `// set perfect threshold
+var t = 42
+// only select data above threshold
+stream.where(lambda: "value" > t)
+ `,
+ Root: &ListNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
Nodes: []Node{
- &BinaryNode{
- pos: 6,
- Operator: TokenAsgn,
+ &DeclarationNode{
+ position: position{
+ pos: 31,
+ line: 2,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 4,
+ position: position{
+ pos: 29,
+ line: 2,
+ char: 5,
+ },
Ident: "t",
},
Right: &NumberNode{
- pos: 8,
+ position: position{
+ pos: 33,
+ line: 2,
+ char: 9,
+ },
IsInt: true,
Int64: 42,
},
+ Comment: &CommentNode{
+ position: position{
+ pos: 0,
+ line: 1,
+ char: 1,
+ },
+ Comments: []string{"set perfect threshold"},
+ },
},
- &BinaryNode{
- pos: 20,
+ &ChainNode{
+ position: position{
+ pos: 78,
+ line: 4,
+ char: 7,
+ },
Operator: TokenDot,
Left: &IdentifierNode{
- pos: 14,
+ position: position{
+ pos: 72,
+ line: 4,
+ char: 1,
+ },
Ident: "stream",
+ Comment: &CommentNode{
+ position: position{
+ pos: 36,
+ line: 3,
+ char: 1,
+ },
+ Comments: []string{"only select data above threshold"},
+ },
},
Right: &FunctionNode{
- pos: 21,
+ position: position{
+ pos: 79,
+ line: 4,
+ char: 8,
+ },
+ Type: propertyFunc,
Func: "where",
Args: []Node{
&LambdaNode{
- pos: 27,
+ position: position{
+ pos: 85,
+ line: 4,
+ char: 14,
+ },
Node: &BinaryNode{
- pos: 43,
+ position: position{
+ pos: 101,
+ line: 4,
+ char: 30,
+ },
Operator: TokenGreater,
Left: &ReferenceNode{
- pos: 35,
+ position: position{
+ pos: 93,
+ line: 4,
+ char: 22,
+ },
Reference: "value",
},
Right: &IdentifierNode{
- pos: 45,
+ position: position{
+ pos: 103,
+ line: 4,
+ char: 32,
+ },
Ident: "t",
},
},
@@ -454,81 +1089,165 @@ func TestParseStatements(t *testing.T) {
{
script: `
var x = stream
- .window()
+ |window()
.period(5m)
.every(1m)
- .map(influxql.agg.mean('value'))`,
+ |map(influxql.agg.mean('value'))`,
Root: &ListNode{
- pos: 1,
- Nodes: []Node{&BinaryNode{
- pos: 7,
- Operator: TokenAsgn,
+ position: position{
+ pos: 1,
+ line: 2,
+ char: 1,
+ },
+ Nodes: []Node{&DeclarationNode{
+ position: position{
+ pos: 7,
+ line: 2,
+ char: 7,
+ },
Left: &IdentifierNode{
- pos: 5,
+ position: position{
+ pos: 5,
+ line: 2,
+ char: 5,
+ },
Ident: "x",
},
- Right: &BinaryNode{
- pos: 57,
- Operator: TokenDot,
- Left: &BinaryNode{
- pos: 44,
+ Right: &ChainNode{
+ position: position{
+ pos: 57,
+ line: 6,
+ char: 3,
+ },
+ Operator: TokenPipe,
+ Left: &ChainNode{
+ position: position{
+ pos: 44,
+ line: 5,
+ char: 3,
+ },
Operator: TokenDot,
- Left: &BinaryNode{
- pos: 30,
+ Left: &ChainNode{
+ position: position{
+ pos: 30,
+ line: 4,
+ char: 3,
+ },
Operator: TokenDot,
- Left: &BinaryNode{
- pos: 18,
- Operator: TokenDot,
+ Left: &ChainNode{
+ position: position{
+ pos: 18,
+ line: 3,
+ char: 3,
+ },
+ Operator: TokenPipe,
Left: &IdentifierNode{
- pos: 9,
+ position: position{
+ pos: 9,
+ line: 2,
+ char: 9,
+ },
Ident: "stream",
},
Right: &FunctionNode{
- pos: 19,
+ position: position{
+ pos: 19,
+ line: 3,
+ char: 4,
+ },
+ Type: chainFunc,
Func: "window",
},
},
Right: &FunctionNode{
- pos: 31,
+ position: position{
+ pos: 31,
+ line: 4,
+ char: 4,
+ },
+ Type: propertyFunc,
Func: "period",
Args: []Node{&DurationNode{
- pos: 38,
+ position: position{
+ pos: 38,
+ line: 4,
+ char: 11,
+ },
Dur: 5 * time.Minute,
}},
},
},
Right: &FunctionNode{
- pos: 45,
+ position: position{
+ pos: 45,
+ line: 5,
+ char: 4,
+ },
+ Type: propertyFunc,
Func: "every",
Args: []Node{&DurationNode{
- pos: 51,
+ position: position{
+ pos: 51,
+ line: 5,
+ char: 10,
+ },
Dur: time.Minute,
}},
},
},
Right: &FunctionNode{
- pos: 58,
+ position: position{
+ pos: 58,
+ line: 6,
+ char: 4,
+ },
+ Type: chainFunc,
Func: "map",
- Args: []Node{&BinaryNode{
- pos: 74,
+ Args: []Node{&ChainNode{
+ position: position{
+ pos: 74,
+ line: 6,
+ char: 20,
+ },
Operator: TokenDot,
- Left: &BinaryNode{
- pos: 70,
+ Left: &ChainNode{
+ position: position{
+ pos: 70,
+ line: 6,
+ char: 16,
+ },
Operator: TokenDot,
Left: &IdentifierNode{
- pos: 62,
+ position: position{
+ pos: 62,
+ line: 6,
+ char: 8,
+ },
Ident: "influxql",
},
Right: &IdentifierNode{
- pos: 71,
+ position: position{
+ pos: 71,
+ line: 6,
+ char: 17,
+ },
Ident: "agg",
},
},
Right: &FunctionNode{
- pos: 75,
+ position: position{
+ pos: 75,
+ line: 6,
+ char: 21,
+ },
+ Type: propertyFunc,
Func: "mean",
Args: []Node{&StringNode{
- pos: 80,
+ position: position{
+ pos: 80,
+ line: 6,
+ char: 26,
+ },
Literal: "value",
}},
},
@@ -547,7 +1266,7 @@ var x = stream
}
if !reflect.DeepEqual(root, tc.Root) {
- t.Fatalf("unequal trees: \ngot %v \nexp %v", root, tc.Root)
+ t.Fatalf("unequal trees: script:%s\ngot %v \nexp %v", tc.script, root, tc.Root)
}
}
}
diff --git a/tick/stateful_expr.go b/tick/stateful_expr.go
index c0908aa57..4649f3e38 100644
--- a/tick/stateful_expr.go
+++ b/tick/stateful_expr.go
@@ -56,7 +56,7 @@ func (s *StatefulExpr) EvalBool(scope *Scope) (bool, error) {
return false, ErrInvalidExpr
}
-func (s *StatefulExpr) EvalNum(scope *Scope) (float64, error) {
+func (s *StatefulExpr) EvalNum(scope *Scope) (interface{}, error) {
stck := &stack{}
err := s.eval(s.Node, scope, stck)
if err != nil {
@@ -71,10 +71,10 @@ func (s *StatefulExpr) EvalNum(scope *Scope) (float64, error) {
return math.NaN(), err
}
}
- n, ok := value.(float64)
- if ok {
- return n, nil
- } else {
+ switch value.(type) {
+ case float64, int64:
+ return value, nil
+ default:
return math.NaN(), fmt.Errorf("expression returned unexpected type %T", value)
}
}
diff --git a/tickdoc.conf b/tickdoc.conf
new file mode 100644
index 000000000..9ec44a8d7
--- /dev/null
+++ b/tickdoc.conf
@@ -0,0 +1,33 @@
+root = "/kapacitor/v0.12/nodes"
+page-header = '''---
+title: {{ .Title }}
+note: Auto generated by tickdoc
+
+menu:
+ kapacitor_012:
+ name: {{ .Name }}
+ identifier: {{ .Identifier }}
+ weight: {{ .Weight }}
+ parent: nodes
+---
+'''
+
+
+chain-method-desc = '''Chaining methods create a new node in the pipeline as a child of the calling node.
+They do not modify the calling node.
+Chaining methods are marked using the `|` operator.
+'''
+
+
+property-method-desc = '''Property methods modify state on the calling node.
+They do not add another node to the pipeline, and always return a reference to the calling node.
+Property methods are marked using the `.` operator.
+'''
+
+
+index-width = 10
+[weights]
+ BatchNode = 4
+ StreamNode = 5
+
+
diff --git a/update_tick_docs.sh b/update_tick_docs.sh
index c278a5829..eb73fe42c 100755
--- a/update_tick_docs.sh
+++ b/update_tick_docs.sh
@@ -5,14 +5,13 @@
# of structs into property methods and chaining methods.
dest=$1 # output path for the .md files
-docspath=${2-/kapacitor/v0.11/nodes}
if [ -z "$dest" ]
then
- echo "Usage: ./update_tick_docs.sh output_path [docspath]"
+ echo "Usage: ./update_tick_docs.sh output_path"
exit 1
fi
-tickdoc $docspath ./pipeline $dest
+tickdoc -config tickdoc.conf ./pipeline $dest