diff --git a/go.mod b/go.mod
index 3f522925e4f..36dd4985e15 100644
--- a/go.mod
+++ b/go.mod
@@ -105,6 +105,8 @@ require (
github.com/googleapis/gax-go/v2 v2.7.0
github.com/grafana/gomemcache v0.0.0-20230316202710-a081dae0aba9
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.74.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.74.0
github.com/stoewer/parquet-cli v0.0.4
go.opentelemetry.io/collector/exporter v0.74.0
go.opentelemetry.io/collector/receiver v0.74.0
@@ -125,9 +127,11 @@ require (
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Shopify/sarama v1.38.1 // indirect
github.com/VividCortex/gohistogram v1.0.0 // indirect
+ github.com/alecthomas/participle/v2 v2.0.0-beta.5 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
+ github.com/antonmedv/expr v1.12.3 // indirect
github.com/apache/thrift v0.18.1 // indirect
github.com/armon/go-metrics v0.4.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
@@ -159,6 +163,7 @@ require (
github.com/go-openapi/strfmt v0.21.3 // indirect
github.com/go-openapi/swag v0.22.1 // indirect
github.com/go-openapi/validate v0.22.0 // indirect
+ github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/google/btree v1.0.1 // indirect
@@ -180,6 +185,7 @@ require (
github.com/hashicorp/memberlist v0.3.1 // indirect
github.com/hashicorp/serf v0.9.8 // indirect
github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d // indirect
+ github.com/iancoleman/strcase v0.2.0 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jcmturner/aescts/v2 v2.0.0 // indirect
github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect
@@ -215,7 +221,9 @@ require (
github.com/oklog/ulid v1.3.1 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.74.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.74.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.74.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.74.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.74.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.74.0 // indirect
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.74.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20220512140940-7b36cea86235 // indirect
diff --git a/go.sum b/go.sum
index 59c3c635e2e..5d21abe23b6 100644
--- a/go.sum
+++ b/go.sum
@@ -442,6 +442,8 @@ github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/
github.com/alecthomas/assert/v2 v2.1.0 h1:tbredtNcQnoSd3QBhQWI7QZ3XHOVkw1Moklp2ojoH/0=
github.com/alecthomas/kong v0.7.1 h1:azoTh0IOfwlAX3qN9sHWTxACE2oV8Bg2gAwBsMwDQY4=
github.com/alecthomas/kong v0.7.1/go.mod h1:n1iCIO2xS46oE8ZfYCNDqdR0b0wZNrXAIAqro/2132U=
+github.com/alecthomas/participle/v2 v2.0.0-beta.5 h1:y6dsSYVb1G5eK6mgmy+BgI3Mw35a3WghArZ/Hbebrjo=
+github.com/alecthomas/participle/v2 v2.0.0-beta.5/go.mod h1:RC764t6n4L8D8ITAJv0qdokritYSNR3wV5cVwmIEaMM=
github.com/alecthomas/repr v0.1.0 h1:ENn2e1+J3k09gyj2shc0dHr/yjaWSHRlrJ4DPMevDqE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -457,6 +459,8 @@ github.com/alicebob/miniredis/v2 v2.21.0/go.mod h1:XNqvJdQJv5mSuVMc0ynneafpnL/zv
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antonmedv/expr v1.12.3 h1:bQwNFbmpIXKY/v4ZKuA4nPGuvuBVd9/zKiGS5ZsPePI=
+github.com/antonmedv/expr v1.12.3/go.mod h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU=
github.com/apache/thrift v0.18.1 h1:lNhK/1nqjbwbiOPDBPFJVKxgDEGSepKuTh6OLiXW8kg=
github.com/apache/thrift v0.18.1/go.mod h1:rdQn/dCcDKEWjjylUeueum4vQEjG2v8v2PqriUnbr+I=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -704,6 +708,8 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
@@ -932,6 +938,7 @@ github.com/hetznercloud/hcloud-go v1.35.3 h1:WCmFAhLRooih2QHAsbCbEdpIHnshQQmrPqs
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hjson/hjson-go/v4 v4.0.0 h1:wlm6IYYqHjOdXH1gHev4VoXCaW20HdQAGCxdOEEg2cs=
github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E=
+github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@@ -1118,16 +1125,23 @@ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/zipkinexporte
github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.74.0 h1:SUlZZPqhPs0FUtq8ck07P2jUnxiNY6iGdqoKVSVSoOU=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.74.0 h1:vU5ZebauzCuYNXFlQaWaYnOfjoOAnS+Sc8+oNWoHkbM=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.74.0/go.mod h1:TEu3TnUv1TuyHtjllrUDQ/ImpyD+GrkDejZv4hxl3G8=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.74.0 h1:MnN05OtFcx37fUILRRFji5zYj9F3PNQN2lvdMqEqlx4=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.74.0/go.mod h1:27sbUKPQtoOh+gS25dWXx9x54axhqV5R7EN/0TOO0NQ=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.74.0 h1:COFBWXiWnhRs9x1oYJbDg5cyiNAozp8sycriD9+1/7E=
github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.74.0/go.mod h1:cAKlYKU+/8mk6ETOnD+EAi5gpXZjDrGweAB9YTYrv/g=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.74.0 h1:HUDDdk1cjAYu4unzCq3PC8j+zuOsBIkLMFXjuOW975Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.74.0/go.mod h1:ste/ffn1fzB4Iaf/wIMsIvyFzIUkc/Rn6/f/Cf8FAoE=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.74.0 h1:DmOc+i5N1Ut23tJnHJUIcne5JWnYh6x2VL7YG4PP+tg=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.74.0 h1:9zWdiR9+bem0LvvWWoMZU6R3xTmu0WbcAPe8kI/jpyk=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.74.0/go.mod h1:3RViz8fguswWAFR+8W2Kzmch3eecOVK935QVsBdpUk4=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.74.0 h1:ww1pPXfAM0WHsymQnsN+s4B9DgwQC+GyoBq0t27JV/k=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.74.0/go.mod h1:OpEw7tyCg+iG1ywEgZ03qe5sP/8fhYdtWCMoqA8JCug=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.74.0 h1:0Fh6OjlUB9HlnX90/gGiyyFvnmNBv6inj7bSaVqQ7UQ=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.74.0/go.mod h1:13ekplz1UmvK99Vz2VjSBWPYqoRBEax5LPmA1tFHnhA=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.74.0 h1:A5xoBaMHX1WzLfvlqK6NBXq4XIbuSVJIpec5r6PDE7U=
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.74.0/go.mod h1:TJT7HkhFPrJic30Vk4seF/eRk8sa0VQ442Xq/qd+DLY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.74.0 h1:11kk61GJqLQ9lsveDUBfvJ3aN/Eq3nkDZoE7fzjztDY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.74.0/go.mod h1:Yt0Ve0tOdKE30Qu6pHGRXYn3FgCKlqaY/lrbDb2j/+8=
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.74.0 h1:pWNSPCKD+V4rC+MnZj8uErEbcsYUpEqU3InNYyafAPY=
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.74.0/go.mod h1:0lXcDf6LUbtDxZZO3zDbRzMuL7gL1Q0FPOR8/3IBwaQ=
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver v0.74.0 h1:NWd9+rQTd6pELLf3copo7CEuNgKp90kgyhPozpwax2U=
diff --git a/vendor/github.com/alecthomas/participle/v2/.golangci.yml b/vendor/github.com/alecthomas/participle/v2/.golangci.yml
new file mode 100644
index 00000000000..5229d875f53
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/.golangci.yml
@@ -0,0 +1,82 @@
+run:
+ tests: true
+ skip-dirs:
+ - _examples
+
+output:
+ print-issued-lines: false
+
+linters:
+ enable-all: true
+ disable:
+ - maligned
+ - lll
+ - gocyclo
+ - gochecknoglobals
+ - wsl
+ - whitespace
+ - godox
+ - funlen
+ - gocognit
+ - gomnd
+ - goerr113
+ - godot
+ - nestif
+ - testpackage
+ - nolintlint
+ - exhaustivestruct
+ - wrapcheck
+ - gci
+ - gofumpt
+ - gocritic
+ - nlreturn
+ - errorlint
+ - nakedret
+ - forbidigo
+ - revive
+ - cyclop
+ - ifshort
+ - paralleltest
+ - interfacer
+ - scopelint
+ - golint
+ - wastedassign
+ - forcetypeassert
+ - gomoddirectives
+ - varnamelen
+ - exhaustruct
+ - ireturn
+ - nonamedreturns
+ - errname
+ - nilnil
+ - maintidx
+ - unused # Does not work with type parameters
+
+linters-settings:
+ govet:
+ check-shadowing: true
+ gocyclo:
+ min-complexity: 10
+ dupl:
+ threshold: 100
+ goconst:
+ min-len: 8
+ min-occurrences: 3
+ exhaustive:
+ default-signifies-exhaustive: true
+
+issues:
+ max-per-linter: 0
+ max-same: 0
+ exclude-use-default: false
+ exclude:
+ # Captured by errcheck.
+ - '^(G104|G204|G307):'
+ # Very commonly not checked.
+ - 'Error return value of .(.*\.Help|.*\.MarkFlagRequired|(os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked'
+ - 'exported method `(.*\.MarshalJSON|.*\.UnmarshalJSON|.*\.EntityURN|.*\.GoString|.*\.Pos)` should have comment or be unexported'
+ - 'composite literal uses unkeyed fields'
+ - 'declaration of "err" shadows declaration'
+ - 'bad syntax for struct tag key'
+ - 'bad syntax for struct tag pair'
+ - '^ST1012'
diff --git a/vendor/github.com/alecthomas/participle/v2/CHANGES.md b/vendor/github.com/alecthomas/participle/v2/CHANGES.md
new file mode 100644
index 00000000000..a7e8887c2b2
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/CHANGES.md
@@ -0,0 +1,36 @@
+
+
+- [v2](#v2)
+
+
+
+
+## v2
+
+v2 was released in November 2020. It contains the following changes, some of
+which are backwards-incompatible:
+
+- Added optional `LexString()` and `LexBytes()` methods that lexer
+ definitions can implement to fast-path lexing of bytes and strings.
+- A new stateful lexer has been added.
+- A `filename` must now be passed to all `Parse*()` and `Lex*()` methods.
+- The `text/scanner` lexer no longer automatically unquotes strings or
+ supports arbitrary length single quoted strings. The tokens it produces are
+ identical to that of the `text/scanner` package. Use `Unquote()` to remove
+ quotes.
+- `Tok` and `EndTok` will no longer be populated.
+- If a field named `Token []lexer.Token` exists it will be populated with the
+ raw tokens that the node parsed from the lexer.
+- Support capturing directly into lexer.Token fields. eg.
+
+ type ast struct {
+ Head lexer.Token `@Ident`
+ Tail []lexer.Token `@(Ident*)`
+ }
+- Add an `experimental/codegen` package for stateful lexers. This provides ~10x
+ performance improvement with zero garbage when lexing strings.
+- The `regex` lexer has been removed.
+- The `ebnf` lexer has been removed.
+- All future work on lexing will be put into the stateful lexer.
+- The need for `DropToken` has been removed.
+
diff --git a/vendor/github.com/alecthomas/participle/v2/COPYING b/vendor/github.com/alecthomas/participle/v2/COPYING
new file mode 100644
index 00000000000..92dc39f7091
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/COPYING
@@ -0,0 +1,19 @@
+Copyright (C) 2017 Alec Thomas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/alecthomas/participle/v2/README.md b/vendor/github.com/alecthomas/participle/v2/README.md
new file mode 100644
index 00000000000..ebbb140fbae
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/README.md
@@ -0,0 +1,651 @@
+# A dead simple parser package for Go
+
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/alecthomas/participle/v2)](https://pkg.go.dev/github.com/alecthomas/participle/v2) [![GHA Build](https://github.com/alecthomas/participle/actions/workflows/ci.yml/badge.svg)](https://github.com/alecthomas/participle/actions)
+ [![Go Report Card](https://goreportcard.com/badge/github.com/alecthomas/participle/v2)](https://goreportcard.com/report/github.com/alecthomas/participle/v2) [![Slack chat](https://img.shields.io/static/v1?logo=slack&style=flat&label=slack&color=green&message=gophers)](https://gophers.slack.com/messages/CN9DS8YF3)
+
+
+
+- [V2](#v2)
+- [Introduction](#introduction)
+- [Tutorial](#tutorial)
+- [Tag syntax](#tag-syntax)
+- [Overview](#overview)
+- [Grammar syntax](#grammar-syntax)
+- [Capturing](#capturing)
+ - [Capturing boolean value](#capturing-boolean-value)
+- ["Union" types](#union-types)
+- [Custom parsing](#custom-parsing)
+- [Lexing](#lexing)
+ - [Stateful lexer](#stateful-lexer)
+ - [Example stateful lexer](#example-stateful-lexer)
+ - [Example simple/non-stateful lexer](#example-simplenon-stateful-lexer)
+ - [Experimental - code generation](#experimental---code-generation)
+- [Options](#options)
+- [Examples](#examples)
+- [Performance](#performance)
+- [Concurrency](#concurrency)
+- [Error reporting](#error-reporting)
+- [Comments](#comments)
+- [Limitations](#limitations)
+- [EBNF](#ebnf)
+- [Syntax/Railroad Diagrams](#syntaxrailroad-diagrams)
+
+
+
+## V2
+
+This is a beta version of version 2 of Participle. It is still subject to change but should be mostly stable at this point.
+
+See the [Change Log](CHANGES.md) for details.
+
+> **Note:** semantic versioning API guarantees do not apply to the [experimental](https://pkg.go.dev/github.com/alecthomas/participle/v2/experimental) packages - the API may break between minor point releases.
+
+It can be installed with:
+
+```shell
+$ go get github.com/alecthomas/participle/v2@latest
+```
+
+The latest version from v0 can be installed via:
+
+```shell
+$ go get github.com/alecthomas/participle@latest
+```
+
+## Introduction
+
+The goal of this package is to provide a simple, idiomatic and elegant way of
+defining parsers in Go.
+
+Participle's method of defining grammars should be familiar to any Go
+programmer who has used the `encoding/json` package: struct field tags define
+what and how input is mapped to those same fields. This is not unusual for Go
+encoders, but is unusual for a parser.
+
+## Tutorial
+
+A [tutorial](TUTORIAL.md) is available, walking through the creation of an .ini parser.
+
+## Tag syntax
+
+Participle supports two forms of struct tag grammar syntax.
+
+The easiest form to read is when the grammar uses the entire struct tag content, eg.
+
+```go
+Field string `@Ident @("," Ident)*`
+```
+
+However, this does not coexist well with other tags such as JSON, etc. and
+may cause issues with linters. If this is an issue then you can use the
+`parser:""` tag format. In this case single quotes can be used to quote
+literals making the tags somewhat easier to write, eg.
+
+```go
+Field string `parser:"@Ident (',' Ident)*" json:"field"`
+```
+
+
+
+## Overview
+
+A grammar is an annotated Go structure used to both define the parser grammar,
+and be the AST output by the parser. As an example, the following is the final INI
+parser from the tutorial.
+
+ ```go
+ type INI struct {
+ Properties []*Property `@@*`
+ Sections []*Section `@@*`
+ }
+
+ type Section struct {
+ Identifier string `"[" @Ident "]"`
+ Properties []*Property `@@*`
+ }
+
+ type Property struct {
+ Key string `@Ident "="`
+ Value *Value `@@`
+ }
+
+ type Value struct {
+ String *string ` @String`
+ Float *float64 `| @Float`
+ Int *int `| @Int`
+ }
+ ```
+
+> **Note:** Participle also supports named struct tags (eg. <code>Hello string &#96;parser:"@Ident"&#96;</code>).
+
+A parser is constructed from a grammar and a lexer:
+
+```go
+parser, err := participle.Build[INI]()
+```
+
+Once constructed, the parser is applied to input to produce an AST:
+
+```go
+ast, err := parser.ParseString("", "size = 10")
+// ast == &INI{
+// Properties: []*Property{
+// {Key: "size", Value: &Value{Int: &10}},
+// },
+// }
+```
+
+## Grammar syntax
+
+Participle grammars are defined as tagged Go structures. Participle will
+first look for tags in the form `parser:"..."`. It will then fall back to
+using the entire tag body.
+
+The grammar format is:
+
+- `@<expr>` Capture expression into the field.
+- `@@` Recursively capture using the field's own type.
+- `<identifier>` Match named lexer token.
+- `( ... )` Group.
+- `"..."` or `'...'` Match the literal (note that the lexer must emit tokens matching this literal exactly).
+- `"...":<identifier>` Match the literal, specifying the exact lexer token type to match.
+- `<expr> <expr> ...` Match expressions.
+- `<expr> | <expr> | ...` Match one of the alternatives. Each alternative is tried in order, with backtracking.
+- `~<expr>` Match any token that is _not_ the start of the expression (eg: `@~";"` matches anything but the `;` character into the field).
+- `(?= ... )` Positive lookahead group - requires the contents to match further input, without consuming it.
+- `(?! ... )` Negative lookahead group - requires the contents not to match further input, without consuming it.
+
+The following modifiers can be used after any expression:
+
+- `*` Expression can match zero or more times.
+- `+` Expression must match one or more times.
+- `?` Expression can match zero or once.
+- `!` Require a non-empty match (this is useful with a sequence of optional matches eg. `("a"? "b"? "c"?)!`).
+
+Notes:
+
+- Each struct is a single production, with each field applied in sequence.
+- `@` is the mechanism for capturing matches into the field.
+- If a struct field is not keyed with "parser", the entire struct tag
+ will be used as the grammar fragment. This allows the grammar syntax to remain
+ clear and simple to maintain.
+
+## Capturing
+
+Prefixing any expression in the grammar with `@` will capture matching values
+for that expression into the corresponding field.
+
+For example:
+
+```go
+// The grammar definition.
+type Grammar struct {
+ Hello string `@Ident`
+}
+
+// The source text to parse.
+source := "world"
+
+// After parsing, the resulting AST.
+result == &Grammar{
+ Hello: "world",
+}
+```
+
+For slice and string fields, each instance of `@` will accumulate into the
+field (including repeated patterns). Accumulation into other types is not
+supported.
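+
+For example, a minimal sketch of accumulation into a slice (the field and type
+names here are illustrative, not from the library):
+
+```go
+type Path struct {
+	// Each matched Ident is appended; for a string field the matched
+	// values would be concatenated instead.
+	Parts []string `@Ident ("." @Ident)*`
+}
+
+// Parsing "a.b.c" yields Path{Parts: []string{"a", "b", "c"}}.
+```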
+
+For integer and floating point types, a successful capture will be parsed
+with `strconv.ParseInt()` and `strconv.ParseFloat()` respectively.
+
+A successful capture match into a `bool` field will set the field to true.
+
+Tokens can also be captured directly into fields of type `lexer.Token` and
+`[]lexer.Token`.
+
+Custom control of how values are captured into fields can be achieved by a
+field type implementing the `Capture` interface (`Capture(values []string)
+error`).
+
+Additionally, any field implementing the `encoding.TextUnmarshaler` interface
+will be capturable too. One caveat is that `UnmarshalText()` is called once
+for each captured token, so with eg. `@(Ident Ident Ident)` it will be called three times.
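+
+A minimal sketch of a `TextUnmarshaler` capture (the `Level` type and its
+values are illustrative, not part of the library):
+
+```go
+type Level int
+
+func (l *Level) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "debug":
+		*l = Level(0)
+	case "info":
+		*l = Level(1)
+	default:
+		return fmt.Errorf("unknown level %q", text)
+	}
+	return nil
+}
+
+type Config struct {
+	Level Level `"level" "=" @Ident`
+}
+```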
+
+### Capturing boolean value
+
+By default, a boolean field is used to indicate that a match occurred, which
+turns out to be much more useful and common in Participle than parsing true
+or false literals. For example, parsing a variable declaration with a
+trailing optional syntax:
+
+```go
+type Var struct {
+ Name string `"var" @Ident`
+ Type string `":" @Ident`
+ Optional bool `@"?"?`
+}
+```
+
+In practice this gives more useful ASTs. If bool were to be parsed literally
+then you'd need to have some alternate type for Optional such as string or a
+custom type.
+
+To capture literal boolean values such as `true` or `false`, implement the
+Capture interface like so:
+
+```go
+type Boolean bool
+
+func (b *Boolean) Capture(values []string) error {
+ *b = values[0] == "true"
+ return nil
+}
+
+type Value struct {
+ Float *float64 ` @Float`
+ Int *int `| @Int`
+ String *string `| @String`
+ Bool *Boolean `| @("true" | "false")`
+}
+```
+
+## "Union" types
+
+A very common pattern in parsers is "union" types, an example of which is
+shown above in the `Value` type. A common way of expressing this in Go is via
+a sealed interface, with each member of the union implementing this
+interface.
+
+eg. this is how the `Value` type could be expressed in this way:
+
+```go
+type Value interface { value() }
+
+type Float struct { Value float64 `@Float` }
+func (f Float) value() {}
+
+type Int struct { Value int `@Int` }
+func (f Int) value() {}
+
+type String struct { Value string `@String` }
+func (f String) value() {}
+
+type Bool struct { Value Boolean `@("true" | "false")` }
+func (f Bool) value() {}
+```
+
+Thanks to the efforts of [Jacob Ryan McCollum](https://github.com/mccolljr), Participle
+now supports this pattern. Simply construct your parser with the `Union[T](members ...T)`
+option, eg.
+
+```go
+parser := participle.MustBuild[AST](participle.Union[Value](Float{}, Int{}, String{}, Bool{}))
+```
+
+Custom parsers may also be defined for union types with the [ParseTypeWith](https://pkg.go.dev/github.com/alecthomas/participle/v2#ParseTypeWith) option.
+
+## Custom parsing
+
+There are three ways of defining custom parsers for nodes in the grammar:
+
+1. Implement the [Capture](https://pkg.go.dev/github.com/alecthomas/participle/v2#Capture) interface.
+2. Implement the [Parseable](https://pkg.go.dev/github.com/alecthomas/participle/v2#Parseable) interface.
+3. Use the [ParseTypeWith](https://pkg.go.dev/github.com/alecthomas/participle/v2#ParseTypeWith) option to specify a custom parser for union interface types.
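+
+As a rough sketch of option 2 (hedged: `IdentList` is an illustrative type, not
+part of the library, and this assumes the default `text/scanner`-based lexer),
+a `Parseable` implementation consumes tokens itself and returns
+`participle.NextMatch` when nothing matched:
+
+```go
+// IdentList parses a run of identifiers itself instead of via struct tags.
+type IdentList struct {
+	Idents []string
+}
+
+func (l *IdentList) Parse(lex *lexer.PeekingLexer) error {
+	for {
+		tok := lex.Peek()
+		// Stop at EOF or at the first non-identifier token.
+		if tok.EOF() || tok.Type != lexer.TokenType(scanner.Ident) {
+			break
+		}
+		l.Idents = append(l.Idents, lex.Next().Value)
+	}
+	if len(l.Idents) == 0 {
+		return participle.NextMatch // nothing consumed; let another branch try
+	}
+	return nil
+}
+```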
+
+
+## Lexing
+
+Participle relies on distinct lexing and parsing phases. The lexer takes raw
+bytes and produces tokens which the parser consumes. The parser transforms
+these tokens into Go values.
+
+The default lexer, if one is not explicitly configured, is based on the Go
+`text/scanner` package and thus produces tokens for C/Go-like source code. This
+is surprisingly useful, but if you do require more control over lexing the
+included stateful [`participle/lexer`](#stateful-lexer) lexer should
+cover most other cases. If that in turn is not flexible enough, you can
+implement your own lexer.
+
+Configure your parser with a lexer using the `participle.Lexer()` option.
+
+To use your own Lexer you will need to implement two interfaces:
+[Definition](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#Definition)
+(and optionally [StringsDefinition](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#StringDefinition) and [BytesDefinition](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#BytesDefinition)) and [Lexer](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#Lexer).
+
+### Stateful lexer
+
+In addition to the default lexer, Participle includes an optional
+stateful/modal lexer which provides powerful yet convenient
+construction of most lexers. (Notably, indentation based lexers cannot
+be expressed using the `stateful` lexer -- for discussion of how these
+lexers can be implemented, see [#20](https://github.com/alecthomas/participle/issues/20)).
+
+It is sometimes the case that a simple lexer cannot fully express the tokens
+required by a parser. The canonical example of this is interpolated strings
+within a larger language. eg.
+
+```go
+let a = "hello ${name + ", ${last + "!"}"}"
+```
+
+This is impossible to tokenise with a normal lexer due to the arbitrarily
+deep nesting of expressions. To support this case Participle's lexer is now
+stateful by default.
+
+The lexer is a state machine defined by a map of rules keyed by the state
+name. Each rule within the state includes the name of the produced token, the
+regex to match, and an optional operation to apply when the rule matches.
+
+As a convenience, any `Rule` starting with a lowercase letter will be elided
+from output, though it is recommended to use `participle.Elide()` instead, as it
+better integrates with the parser.
+
+Lexing starts in the `Root` group. Each rule is matched in order, with the first
+successful match producing a lexeme. If the matching rule has an associated Action
+it will be executed.
+
+A state change can be introduced with the Action `Push(state)`. `Pop()` will
+return to the previous state.
+
+To reuse rules from another state, use `Include(state)`.
+
+A special named rule `Return()` can also be used as the final rule in a state
+to always return to the previous state.
+
+As a special case, regexes containing backrefs in the form `\N` (where `N` is
+a digit) will match the corresponding capture group from the immediate parent
+group. This can be used to parse, among other things, heredocs. See the
+[tests](https://github.com/alecthomas/participle/blob/master/lexer/stateful/stateful_test.go#L59)
+for an example of this, among others.
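+
+For instance, a hedged sketch of a heredoc lexer using such a backref (rule
+names here are illustrative; see the linked tests for the authoritative
+version):
+
+```go
+var heredocLexer = lexer.MustStateful(lexer.Rules{
+	"Root": {
+		// The capture group (\w+) records the heredoc delimiter.
+		{"Heredoc", `<<(\w+)`, lexer.Push("Heredoc")},
+	},
+	"Heredoc": {
+		// \1 matches the delimiter captured on entry to this state.
+		{"End", `\1`, lexer.Pop()},
+		{"EOL", `\n`, nil},
+		{"Body", `[^\n]+`, nil},
+	},
+})
+```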
+
+### Example stateful lexer
+
+Here's a cut down example of the string interpolation described above. Refer to
+the [stateful example](https://github.com/alecthomas/participle/tree/master/_examples/stateful)
+for the corresponding parser.
+
+```go
+var def = lexer.MustStateful(lexer.Rules{
+	"Root": {
+		{`String`, `"`, lexer.Push("String")},
+	},
+	"String": {
+		{"Escaped", `\\.`, nil},
+		{"StringEnd", `"`, lexer.Pop()},
+		{"Expr", `\${`, lexer.Push("Expr")},
+		{"Char", `[^$"\\]+`, nil},
+	},
+	"Expr": {
+		lexer.Include("Root"),
+		{`whitespace`, `\s+`, nil},
+		{`Oper`, `[-+/*%]`, nil},
+		{"Ident", `\w+`, nil},
+		{"ExprEnd", `}`, lexer.Pop()},
+	},
+})
+```
+
+### Example simple/non-stateful lexer
+
+Other than the default and stateful lexers, it's easy to define your
+own _stateless_ lexer using the `lexer.MustSimple()` and
+`lexer.NewSimple()` functions. These functions accept a slice of
+`lexer.SimpleRule{}` objects consisting of a key and a regex-style pattern.
+
+> **Note:** The stateful lexer replaces the old regex lexer.
+
+For example, the lexer for a form of BASIC:
+
+```go
+var basicLexer = lexer.MustSimple([]lexer.SimpleRule{
+ {"Comment", `(?i)rem[^\n]*`},
+ {"String", `"(\\"|[^"])*"`},
+ {"Number", `[-+]?(\d*\.)?\d+`},
+ {"Ident", `[a-zA-Z_]\w*`},
+ {"Punct", `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`},
+ {"EOL", `[\n\r]+`},
+ {"whitespace", `[ \t]+`},
+})
+```
+
+### Experimental - code generation
+
+Participle v2 now has experimental support for generating code to perform
+lexing. Use `participle/experimental/codegen.GenerateLexer()` to compile a
+`stateful` lexer to Go code.
+
+This will generally provide around a 10x improvement in lexing performance
+while producing O(1) garbage.
+
+## Options
+
+The Parser's behaviour can be configured via [Options](https://pkg.go.dev/github.com/alecthomas/participle/v2#Option).
+
+## Examples
+
+There are several [examples included](https://github.com/alecthomas/participle/tree/master/_examples),
+some of which are linked directly here. These examples should be run from the
+`_examples` subdirectory within a cloned copy of this repository.
+
+Example | Description
+--------|---------------
+[BASIC](https://github.com/alecthomas/participle/tree/master/_examples/basic) | A lexer, parser and interpreter for a [rudimentary dialect](https://caml.inria.fr/pub/docs/oreilly-book/html/book-ora058.html) of BASIC.
+[EBNF](https://github.com/alecthomas/participle/tree/master/_examples/ebnf) | Parser for the form of EBNF used by Go.
+[Expr](https://github.com/alecthomas/participle/tree/master/_examples/expr) | A basic mathematical expression parser and evaluator.
+[GraphQL](https://github.com/alecthomas/participle/tree/master/_examples/graphql) | Lexer+parser for GraphQL schemas
+[HCL](https://github.com/alecthomas/participle/tree/master/_examples/hcl) | A parser for the [HashiCorp Configuration Language](https://github.com/hashicorp/hcl).
+[INI](https://github.com/alecthomas/participle/tree/master/_examples/ini) | An INI file parser.
+[Protobuf](https://github.com/alecthomas/participle/tree/master/_examples/protobuf) | A full [Protobuf](https://developers.google.com/protocol-buffers/) version 2 and 3 parser.
+[SQL](https://github.com/alecthomas/participle/tree/master/_examples/sql) | A *very* rudimentary SQL SELECT parser.
+[Stateful](https://github.com/alecthomas/participle/tree/master/_examples/stateful) | A basic example of a stateful lexer and corresponding parser.
+[Thrift](https://github.com/alecthomas/participle/tree/master/_examples/thrift) | A full [Thrift](https://thrift.apache.org/docs/idl) parser.
+[TOML](https://github.com/alecthomas/participle/tree/master/_examples/toml) | A [TOML](https://github.com/toml-lang/toml) parser.
+
+Included below is a full GraphQL lexer and parser:
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/alecthomas/kong"
+ "github.com/alecthomas/repr"
+
+ "github.com/alecthomas/participle/v2"
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+type File struct {
+ Entries []*Entry `@@*`
+}
+
+type Entry struct {
+ Type *Type ` @@`
+ Schema *Schema `| @@`
+ Enum *Enum `| @@`
+ Scalar string `| "scalar" @Ident`
+}
+
+type Enum struct {
+ Name string `"enum" @Ident`
+ Cases []string `"{" @Ident* "}"`
+}
+
+type Schema struct {
+ Fields []*Field `"schema" "{" @@* "}"`
+}
+
+type Type struct {
+ Name string `"type" @Ident`
+ Implements string `( "implements" @Ident )?`
+ Fields []*Field `"{" @@* "}"`
+}
+
+type Field struct {
+ Name string `@Ident`
+ Arguments []*Argument `( "(" ( @@ ( "," @@ )* )? ")" )?`
+ Type *TypeRef `":" @@`
+ Annotation string `( "@" @Ident )?`
+}
+
+type Argument struct {
+ Name string `@Ident`
+ Type *TypeRef `":" @@`
+ Default *Value `( "=" @@ )`
+}
+
+type TypeRef struct {
+ Array *TypeRef `( "[" @@ "]"`
+ Type string ` | @Ident )`
+ NonNullable bool `( @"!" )?`
+}
+
+type Value struct {
+ Symbol string `@Ident`
+}
+
+var (
+	graphQLLexer = lexer.MustSimple([]lexer.SimpleRule{
+		{"Comment", `(?:#|//)[^\n]*\n?`},
+		{"Ident", `[a-zA-Z]\w*`},
+		{"Number", `(?:\d*\.)?\d+`},
+		{"Punct", `[-[!@#$%^&*()+_={}\|:;"'<,>.?/]|]`},
+		{"Whitespace", `[ \t\n\r]+`},
+	})
+ parser = participle.MustBuild[File](
+ participle.Lexer(graphQLLexer),
+ participle.Elide("Comment", "Whitespace"),
+ participle.UseLookahead(2),
+ )
+)
+
+var cli struct {
+	EBNF  bool     `help:"Dump EBNF."`
+ Files []string `arg:"" optional:"" type:"existingfile" help:"GraphQL schema files to parse."`
+}
+
+func main() {
+ ctx := kong.Parse(&cli)
+ if cli.EBNF {
+ fmt.Println(parser.String())
+ ctx.Exit(0)
+ }
+ for _, file := range cli.Files {
+ r, err := os.Open(file)
+ ctx.FatalIfErrorf(err)
+ ast, err := parser.Parse(file, r)
+ r.Close()
+ repr.Println(ast)
+ ctx.FatalIfErrorf(err)
+ }
+}
+```
+
+## Performance
+
+One of the included examples is a complete Thrift parser
+(shell-style comments are not supported). This gives
+a convenient baseline for comparing to the PEG based
+[pigeon](https://github.com/PuerkitoBio/pigeon), which is the parser used by
+[go-thrift](https://github.com/samuel/go-thrift). Additionally, the pigeon
+parser is utilising a generated parser, while the participle parser is built at
+run time.
+
+You can run the benchmarks yourself, but here's the output on my machine:
+
+ BenchmarkParticipleThrift-12 5941 201242 ns/op 178088 B/op 2390 allocs/op
+ BenchmarkGoThriftParser-12 3196 379226 ns/op 157560 B/op 2644 allocs/op
+
+On a real life codebase of 47K lines of Thrift, Participle takes 200ms and
+go-thrift takes 630ms, which aligns quite closely with the benchmarks.
+
+## Concurrency
+
+A compiled `Parser` instance can be used concurrently, and so can a `lexer.Definition`. A `Lexer` instance cannot be used concurrently.
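+
+For example (a minimal sketch; `parser` and `sources` are assumed from
+surrounding code), a single compiled parser can serve many goroutines:
+
+```go
+var wg sync.WaitGroup
+for _, src := range sources {
+	src := src
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		// Each Parse* call creates its own lexer state internally, so the
+		// shared Parser needs no extra locking.
+		if _, err := parser.ParseString("", src); err != nil {
+			log.Println(err)
+		}
+	}()
+}
+wg.Wait()
+```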
+
+## Error reporting
+
+There are a few areas where Participle can provide useful feedback to users of your parser.
+
+1. Errors returned by [Parser.Parse*()](https://pkg.go.dev/github.com/alecthomas/participle/v2#Parser.Parse) will be:
+ 1. Of type [Error](https://pkg.go.dev/github.com/alecthomas/participle/v2#Error). This will contain positional information where available.
+ 2. May either be [ParseError](https://pkg.go.dev/github.com/alecthomas/participle/v2#ParseError) or [lexer.Error](https://pkg.go.dev/github.com/alecthomas/participle/v2/lexer#Error)
+2. Participle will make a best effort to return as much of the AST up to the error location as possible.
+3. Any node in the AST containing a field `Pos lexer.Position` will be automatically
+ populated from the nearest matching token.
+4. Any node in the AST containing a field `EndPos lexer.Position` will be
+ automatically populated from the token at the end of the node.
+5. Any node in the AST containing a field `Tokens []lexer.Token` will be automatically
+ populated with _all_ tokens captured by the node, _including_ elided tokens.
+
+These related pieces of information can be combined to provide fairly comprehensive error reporting.
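+
+For example, a node that opts in to all three automatic fields (a sketch; the
+grammar itself is illustrative):
+
+```go
+type Property struct {
+	Pos    lexer.Position // filled from the nearest matching token
+	EndPos lexer.Position // filled from the token at the end of the node
+	Tokens []lexer.Token  // all tokens captured by the node, including elided ones
+
+	Key   string `@Ident "="`
+	Value string `@String`
+}
+```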
+
+## Comments
+
+Comments can be difficult to capture as in most languages they may appear almost
+anywhere. There are three ways of capturing comments, with decreasing fidelity.
+
+The first is to elide tokens in the parser, then add `Tokens []lexer.Token` as a
+field to each AST node. Comments will be included. This has the downside that
+there's no straightforward way to know where the comments are relative to
+non-comment tokens in that node.
+
+The second way is to _not_ elide comment tokens, and explicitly capture them at
+every location in the AST where they might occur. This has the downside that
+unless you place these captures in every possible valid location, users might
+insert valid comments that then fail to parse.
+
+The third way is to elide comment tokens and capture them where they're
+semantically meaningful, such as for documentation comments. Participle supports
+explicitly matching elided tokens for this purpose.
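+
+A sketch of the third approach (assuming a lexer that emits a `Comment` token
+elided via `participle.Elide("Comment")`): elided tokens can still be matched
+explicitly where they are semantically meaningful.
+
+```go
+type StructField struct {
+	// Doc comments are captured here even though Comment tokens are
+	// elided everywhere else in the grammar.
+	Doc  []string `@Comment*`
+	Name string   `@Ident`
+}
+```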
+
+## Limitations
+
+Internally, Participle is a recursive descent parser with backtracking (see
+`UseLookahead(K)`).
+
+Among other things, this means that Participle grammars do not support left
+recursion. Left recursion must be eliminated by restructuring your grammar.
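+
+For example, a left-recursive rule such as `Expr = Expr "+" Term | Term` can be
+restructured into repetition (a sketch; the type names are illustrative):
+
+```go
+type Expr struct {
+	Left *Term     `@@`
+	Rest []*OpTerm `@@*` // ("+" Term)* replaces the left recursion
+}
+
+type OpTerm struct {
+	Op   string `@"+"`
+	Term *Term  `@@`
+}
+
+type Term struct {
+	Value int `@Int`
+}
+```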
+
+## EBNF
+
+The old `EBNF` lexer was removed in a major refactoring at
+[362b26](https://github.com/alecthomas/participle/commit/362b26640fa3dc406aa60960f7d9a5b9a909414e).
+If you have an EBNF grammar you need to implement, you can either translate it
+into regex-style `lexer.Rule{}` syntax or implement your own EBNF lexer -- you
+might be able to use [the old EBNF lexer](https://github.com/alecthomas/participle/blob/2403858c8b2068b4b0cf96a6b36dd7069674039b/lexer/ebnf/ebnf.go)
+as a starting point.
+
+Participle supports outputting an EBNF grammar from a Participle parser. Once
+the parser is constructed simply call `String()`.
+
+Participle also [includes a parser](https://pkg.go.dev/github.com/alecthomas/participle/v2/ebnf) for this form of EBNF (naturally).
+
+eg. The [GraphQL example](https://github.com/alecthomas/participle/blob/master/_examples/graphql/main.go#L15-L62)
+produces the following EBNF:
+
+```ebnf
+File = Entry* .
+Entry = Type | Schema | Enum | "scalar" ident .
+Type = "type" ident ("implements" ident)? "{" Field* "}" .
+Field = ident ("(" (Argument ("," Argument)*)? ")")? ":" TypeRef ("@" ident)? .
+Argument = ident ":" TypeRef ("=" Value)? .
+TypeRef = "[" TypeRef "]" | ident "!"? .
+Value = ident .
+Schema = "schema" "{" Field* "}" .
+Enum = "enum" ident "{" ident* "}" .
+```
+
+## Syntax/Railroad Diagrams
+
+Participle includes a command-line utility to take an EBNF representation of a Participle grammar
+(as returned by `Parser.String()`) and produce a Railroad Diagram using
+[tabatkins/railroad-diagrams](https://github.com/tabatkins/railroad-diagrams).
+
+Here's what the GraphQL grammar looks like:
+
+![EBNF Railroad Diagram](railroad.png)
diff --git a/vendor/github.com/alecthomas/participle/v2/TUTORIAL.md b/vendor/github.com/alecthomas/participle/v2/TUTORIAL.md
new file mode 100644
index 00000000000..e7e7cc81214
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/TUTORIAL.md
@@ -0,0 +1,273 @@
+# Participle parser tutorial
+
+
+
+- [Introduction](#introduction)
+- [The complete grammar](#the-complete-grammar)
+- [Root of the .ini AST (structure, fields)](#root-of-the-ini-ast-structure-fields)
+- [.ini properties (named tokens, capturing, literals)](#ini-properties-named-tokens-capturing-literals)
+- [.ini property values (alternates, recursive structs, sequences)](#ini-property-values-alternates-recursive-structs-sequences)
+- [Complete, but limited, .ini grammar (top-level properties only)](#complete-but-limited-ini-grammar-top-level-properties-only)
+- [Extending our grammar to support sections](#extending-our-grammar-to-support-sections)
+- [(Optional) Source positional information](#optional-source-positional-information)
+- [Parsing using our grammar](#parsing-using-our-grammar)
+
+
+
+
+## Introduction
+
+Writing a parser in Participle typically involves starting from the "root" of
+the AST, annotating fields with the grammar, then recursively expanding until
+it is complete. The AST is expressed via Go data types and the grammar is
+expressed through struct field tags, as a form of EBNF.
+
+The parser we're going to create for this tutorial parses .ini files
+like this:
+
+```ini
+age = 21
+name = "Bob Smith"
+
+[address]
+city = "Beverly Hills"
+postal_code = 90210
+```
+
+
+## The complete grammar
+
+I think it's useful to see the complete grammar first, to see what we're
+working towards. Read on below for details.
+
+ ```go
+ type INI struct {
+ Properties []*Property `@@*`
+ Sections []*Section `@@*`
+ }
+
+ type Section struct {
+ Identifier string `"[" @Ident "]"`
+ Properties []*Property `@@*`
+ }
+
+ type Property struct {
+ Key string `@Ident "="`
+ Value *Value `@@`
+ }
+
+ type Value struct {
+ String *string ` @String`
+ Number *float64 `| (@Float | @Int)`
+ }
+ ```
+
+
+## Root of the .ini AST (structure, fields)
+
+The first step is to create a root struct for our grammar. In the case of our
+.ini parser, this struct will contain a sequence of properties:
+
+```go
+type INI struct {
+ Properties []*Property
+}
+
+type Property struct {
+}
+```
+
+
+## .ini properties (named tokens, capturing, literals)
+
+Each property in an .ini file has an identifier key:
+
+```go
+type Property struct {
+ Key string
+}
+```
+
+The default lexer tokenises Go source code, and includes an `Ident` token type
+that matches identifiers. To match this token we simply use the token type
+name:
+
+```go
+type Property struct {
+ Key string `Ident`
+}
+```
+
+This will *match* identifiers, but not *capture* them into the `Key` field. To
+capture input tokens into AST fields, prefix any grammar node with `@`:
+
+```go
+type Property struct {
+ Key string `@Ident`
+}
+```
+
+In .ini files, each key is separated from its value with a literal `=`. To
+match a literal, enclose the literal in double quotes:
+
+```go
+type Property struct {
+ Key string `@Ident "="`
+}
+```
+
+> Note: literals in the grammar must match tokens from the lexer *exactly*. In
+> this example if the lexer does not output `=` as a distinct token the
+> grammar will not match.
+
+
+## .ini property values (alternates, recursive structs, sequences)
+
+For the purposes of our example we are only going to support quoted string
+and numeric property values. As each value can be *either* a string or a float
+we'll need something akin to a sum type. Go's type system cannot express this
+directly, so we'll use the common approach of making each element a pointer.
+The selected "case" will *not* be nil.
+
+```go
+type Value struct {
+ String *string
+ Number *float64
+}
+```
+
+> Note: Participle will hydrate pointers as necessary.
+
+To express matching a set of alternatives we use the `|` operator:
+
+```go
+type Value struct {
+ String *string ` @String`
+ Number *float64 `| @Float`
+}
+```
+
+Since we also want to parse integers, and the default lexer emits distinct
+`Int` and `Float` tokens, we need to be explicit:
+
+```go
+type Value struct {
+ String *string ` @String`
+ Number *float64 `| (@Float | @Int)`
+}
+```
+
+> Note: the grammar can cross fields.
+
+Next, we'll match values and capture them into the `Property`. To recursively
+capture structs use `@@` (capture self):
+
+```go
+type Property struct {
+ Key string `@Ident "="`
+ Value *Value `@@`
+}
+```
+
+Now that we can parse a `Property` we need to go back to the root of the
+grammar. We want to parse 0 or more properties. To do this, we use `*`.
+Participle will accumulate each match into the slice until matching fails,
+then move to the next node in the grammar.
+
+```go
+type INI struct {
+ Properties []*Property `@@*`
+}
+```
+
+> Note: tokens can also be accumulated into strings, appending each match.
+
+
+## Complete, but limited, .ini grammar (top-level properties only)
+
+We now have a functional, but limited, .ini parser!
+
+```go
+type INI struct {
+ Properties []*Property `@@*`
+}
+
+type Property struct {
+ Key string `@Ident "="`
+ Value *Value `@@`
+}
+
+type Value struct {
+ String *string ` @String`
+ Number *float64 `| (@Float | @Int)`
+}
+```
+
+
+## Extending our grammar to support sections
+
+Adding support for sections is simply a matter of utilising the constructs
+we've just learnt. A section consists of a header identifier, and a sequence
+of properties:
+
+```go
+type Section struct {
+ Identifier string `"[" @Ident "]"`
+ Properties []*Property `@@*`
+}
+```
+
+Simple!
+
+Now we just add a sequence of `Section`s to our root node:
+
+```go
+type INI struct {
+ Properties []*Property `@@*`
+ Sections []*Section `@@*`
+}
+```
+
+And we're done!
+
+
+## (Optional) Source positional information
+
+If a grammar node includes a field with the name `Pos` and type `lexer.Position`, it will be automatically populated with positional information. eg.
+
+```go
+type Value struct {
+ Pos lexer.Position
+ String *string ` @String`
+ Number *float64 `| (@Float | @Int)`
+}
+```
+
+This is useful for error reporting.
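+
+For example (a sketch; `src` is assumed to hold the input), positional
+information can be surfaced through the `participle.Error` interface:
+
+```go
+ini, err := parser.ParseString("config.ini", src)
+if err != nil {
+	var perr participle.Error
+	if errors.As(err, &perr) {
+		fmt.Printf("%s: %s\n", perr.Position(), perr.Message())
+	}
+}
+```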
+
+
+## Parsing using our grammar
+
+To parse with this grammar we first construct the parser (we'll use the
+default lexer for now):
+
+```go
+parser, err := participle.Build[INI]()
+```
+
+Then parse a new INI file with `parser.Parse{,String,Bytes}()`:
+
+```go
+ini, err := parser.ParseString("", `
+age = 21
+name = "Bob Smith"
+
+[address]
+city = "Beverly Hills"
+postal_code = 90210
+`)
+```
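+
+The returned `ini` value is the fully populated AST, which can be walked
+directly, eg.
+
+```go
+for _, p := range ini.Properties {
+	fmt.Println(p.Key)
+}
+for _, s := range ini.Sections {
+	fmt.Printf("[%s] has %d properties\n", s.Identifier, len(s.Properties))
+}
+```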
+
+You can find the full example [here](_examples/ini/main.go), alongside
+other examples including an SQL `SELECT` parser and a full
+[Thrift](https://thrift.apache.org/) parser.
diff --git a/vendor/github.com/alecthomas/participle/v2/api.go b/vendor/github.com/alecthomas/participle/v2/api.go
new file mode 100644
index 00000000000..1fbcbc21dd1
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/api.go
@@ -0,0 +1,19 @@
+package participle
+
+import (
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+// Capture can be implemented by fields in order to transform captured tokens into field values.
+type Capture interface {
+ Capture(values []string) error
+}
+
+// The Parseable interface can be implemented by any element in the grammar to provide custom parsing.
+type Parseable interface {
+ // Parse into the receiver.
+ //
+ // Should return NextMatch if no tokens matched and parsing should continue.
+ // Nil should be returned if parsing was successful.
+ Parse(lex *lexer.PeekingLexer) error
+}
diff --git a/vendor/github.com/alecthomas/participle/v2/context.go b/vendor/github.com/alecthomas/participle/v2/context.go
new file mode 100644
index 00000000000..025fb3c7109
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/context.go
@@ -0,0 +1,126 @@
+package participle
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+type contextFieldSet struct {
+ tokens []lexer.Token
+ strct reflect.Value
+ field structLexerField
+ fieldValue []reflect.Value
+}
+
+// Context for a single parse.
+type parseContext struct {
+ *lexer.PeekingLexer
+ depth int
+ trace io.Writer
+ deepestError error
+ deepestErrorDepth int
+ lookahead int
+ caseInsensitive map[lexer.TokenType]bool
+ apply []*contextFieldSet
+ allowTrailing bool
+}
+
+func newParseContext(lex *lexer.PeekingLexer, lookahead int, caseInsensitive map[lexer.TokenType]bool) *parseContext {
+ return &parseContext{
+ PeekingLexer: lex,
+ caseInsensitive: caseInsensitive,
+ lookahead: lookahead,
+ }
+}
+
+func (p *parseContext) DeepestError(err error) error {
+ if p.PeekingLexer.Cursor() >= p.deepestErrorDepth {
+ return err
+ }
+ if p.deepestError != nil {
+ return p.deepestError
+ }
+ return err
+}
+
+// Defer adds a function to be applied once a branch has been picked.
+func (p *parseContext) Defer(tokens []lexer.Token, strct reflect.Value, field structLexerField, fieldValue []reflect.Value) {
+ p.apply = append(p.apply, &contextFieldSet{tokens, strct, field, fieldValue})
+}
+
+// Apply deferred functions.
+func (p *parseContext) Apply() error {
+ for _, apply := range p.apply {
+ if err := setField(apply.tokens, apply.strct, apply.field, apply.fieldValue); err != nil {
+ return err
+ }
+ }
+ p.apply = nil
+ return nil
+}
+
+// Branch accepts the branch as the correct branch.
+func (p *parseContext) Accept(branch *parseContext) {
+ p.apply = append(p.apply, branch.apply...)
+ p.PeekingLexer = branch.PeekingLexer
+ if branch.deepestErrorDepth >= p.deepestErrorDepth {
+ p.deepestErrorDepth = branch.deepestErrorDepth
+ p.deepestError = branch.deepestError
+ }
+}
+
+// Branch starts a new lookahead branch.
+func (p *parseContext) Branch() *parseContext {
+ branch := &parseContext{}
+ *branch = *p
+ branch.apply = nil
+ branch.PeekingLexer = p.PeekingLexer.Clone()
+ return branch
+}
+
+func (p *parseContext) MaybeUpdateError(err error) {
+ if p.PeekingLexer.Cursor() >= p.deepestErrorDepth {
+ p.deepestError = err
+ p.deepestErrorDepth = p.PeekingLexer.Cursor()
+ }
+}
+
+// Stop returns true if parsing should terminate after the given "branch" failed to match.
+//
+// Additionally, "err" should be the branch error, if any. This will be tracked to
+// aid in error reporting under the assumption that the deepest occurring error is more
+// useful than errors further up.
+func (p *parseContext) Stop(err error, branch *parseContext) bool {
+ if branch.PeekingLexer.Cursor() >= p.deepestErrorDepth {
+ p.deepestError = err
+ p.deepestErrorDepth = maxInt(branch.PeekingLexer.Cursor(), branch.deepestErrorDepth)
+ }
+ if !p.hasInfiniteLookahead() && branch.PeekingLexer.Cursor() > p.PeekingLexer.Cursor()+p.lookahead {
+ p.Accept(branch)
+ return true
+ }
+ return false
+}
+
+func (p *parseContext) hasInfiniteLookahead() bool { return p.lookahead < 0 }
+
+func (p *parseContext) printTrace(n node) func() {
+ if p.trace != nil {
+ tok := p.PeekingLexer.Peek()
+ fmt.Fprintf(p.trace, "%s%q %s\n", strings.Repeat(" ", p.depth*2), tok, n.GoString())
+ p.depth += 1
+ return func() { p.depth -= 1 }
+ }
+ return func() {}
+}
+
+func maxInt(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/alecthomas/participle/v2/doc.go b/vendor/github.com/alecthomas/participle/v2/doc.go
new file mode 100644
index 00000000000..48d003dceb5
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/doc.go
@@ -0,0 +1,68 @@
+// Package participle constructs parsers from definitions in struct tags and parses directly into
+// those structs. The approach is philosophically similar to how other marshallers work in Go,
+// "unmarshalling" an instance of a grammar into a struct.
+//
+// The supported annotation syntax is:
+//
+// - `@<expr>` Capture expression into the field.
+// - `@@` Recursively capture using the field's own type.
+// - `<identifier>` Match named lexer token.
+// - `( ... )` Group.
+// - `"..."` Match the literal (note that the lexer must emit tokens matching this literal exactly).
+// - `"...":<identifier>` Match the literal, specifying the exact lexer token type to match.
+// - `<expr> <expr> ...` Match expressions.
+// - `<expr> | <expr>` Match one of the alternatives.
+//
+// The following modifiers can be used after any expression:
+//
+// - `*` Expression can match zero or more times.
+// - `+` Expression must match one or more times.
+// - `?` Expression can match zero or once.
+// - `!` Require a non-empty match (this is useful with a sequence of optional matches eg. `("a"? "b"? "c"?)!`).
+//
+// Here's an example of an EBNF grammar.
+//
+// type Group struct {
+// Expression *Expression `"(" @@ ")"`
+// }
+//
+// type Option struct {
+// Expression *Expression `"[" @@ "]"`
+// }
+//
+// type Repetition struct {
+// Expression *Expression `"{" @@ "}"`
+// }
+//
+// type Literal struct {
+// Start string `@String` // lexer.Lexer token "String"
+// End string `("…" @String)?`
+// }
+//
+// type Term struct {
+// Name string ` @Ident`
+// Literal *Literal `| @@`
+// Group *Group `| @@`
+// Option *Option `| @@`
+// Repetition *Expression `| "(" @@ ")"`
+// }
+//
+// type Sequence struct {
+// Terms []*Term `@@+`
+// }
+//
+// type Expression struct {
+// Alternatives []*Sequence `@@ ("|" @@)*`
+// }
+//
+// type Expressions []*Expression
+//
+// type Production struct {
+// Name string `@Ident "="`
+// Expressions Expressions `@@+ "."`
+// }
+//
+// type EBNF struct {
+// Productions []*Production `@@*`
+// }
+package participle
diff --git a/vendor/github.com/alecthomas/participle/v2/ebnf.go b/vendor/github.com/alecthomas/participle/v2/ebnf.go
new file mode 100644
index 00000000000..1698f753ae8
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/ebnf.go
@@ -0,0 +1,165 @@
+package participle
+
+import (
+ "fmt"
+ "strings"
+)
+
+// String returns the EBNF for the grammar.
+//
+// Productions are always upper cased. Lexer tokens are always lower case.
+func (p *Parser[G]) String() string {
+ return ebnf(p.typeNodes[p.rootType])
+}
+
+// ebnfp accumulates the EBNF text for a single named production.
+type ebnfp struct {
+ name string
+ out string
+}
+
+func ebnf(n node) string {
+ outp := []*ebnfp{}
+ switch n.(type) {
+ case *strct:
+ buildEBNF(true, n, map[node]bool{}, nil, &outp)
+ out := []string{}
+ for _, p := range outp {
+ out = append(out, fmt.Sprintf("%s = %s .", p.name, p.out))
+ }
+ return strings.Join(out, "\n")
+
+ default:
+ out := &ebnfp{}
+ buildEBNF(true, n, map[node]bool{}, out, &outp)
+ return out.out
+ }
+}
+
+// buildEBNF recursively renders the EBNF for node n into p, appending any
+// newly encountered productions to outp.
+func buildEBNF(root bool, n node, seen map[node]bool, p *ebnfp, outp *[]*ebnfp) {
+ switch n := n.(type) {
+ case *disjunction:
+ if !root {
+ p.out += "("
+ }
+ for i, next := range n.nodes {
+ if i > 0 {
+ p.out += " | "
+ }
+ buildEBNF(false, next, seen, p, outp)
+ }
+ if !root {
+ p.out += ")"
+ }
+
+ case *union:
+ name := strings.ToUpper(n.typ.Name()[:1]) + n.typ.Name()[1:]
+ if p != nil {
+ p.out += name
+ }
+ if seen[n] {
+ return
+ }
+ p = &ebnfp{name: name}
+ *outp = append(*outp, p)
+ seen[n] = true
+ for i, next := range n.nodeMembers {
+ if i > 0 {
+ p.out += " | "
+ }
+ buildEBNF(false, next, seen, p, outp)
+ }
+
+ case *custom:
+ name := strings.ToUpper(n.typ.Name()[:1]) + n.typ.Name()[1:]
+ p.out += name
+
+ case *strct:
+ name := strings.ToUpper(n.typ.Name()[:1]) + n.typ.Name()[1:]
+ if p != nil {
+ p.out += name
+ }
+ if seen[n] {
+ return
+ }
+ seen[n] = true
+ p = &ebnfp{name: name}
+ *outp = append(*outp, p)
+ buildEBNF(true, n.expr, seen, p, outp)
+
+ case *sequence:
+ group := n.next != nil && !root
+ if group {
+ p.out += "("
+ }
+ for n != nil {
+ buildEBNF(false, n.node, seen, p, outp)
+ n = n.next
+ if n != nil {
+ p.out += " "
+ }
+ }
+ if group {
+ p.out += ")"
+ }
+
+ case *parseable:
+ p.out += n.t.Name()
+
+ case *capture:
+ buildEBNF(false, n.node, seen, p, outp)
+
+ case *reference:
+ p.out += "<" + strings.ToLower(n.identifier) + ">"
+
+ case *optional:
+ buildEBNF(false, n.node, seen, p, outp)
+ p.out += "?"
+
+ case *repetition:
+ buildEBNF(false, n.node, seen, p, outp)
+ p.out += "*"
+
+ case *negation:
+ p.out += "~"
+ buildEBNF(false, n.node, seen, p, outp)
+
+ case *literal:
+ p.out += fmt.Sprintf("%q", n.s)
+
+ case *group:
+ if child, ok := n.expr.(*group); ok && child.mode == groupMatchOnce {
+ buildEBNF(false, child.expr, seen, p, outp)
+ } else if child, ok := n.expr.(*capture); ok {
+ if grandchild, ok := child.node.(*group); ok && grandchild.mode == groupMatchOnce {
+ buildEBNF(false, grandchild.expr, seen, p, outp)
+ } else {
+ buildEBNF(false, n.expr, seen, p, outp)
+ }
+ } else {
+ buildEBNF(false, n.expr, seen, p, outp)
+ }
+ switch n.mode {
+ case groupMatchNonEmpty:
+ p.out += "!"
+ case groupMatchZeroOrOne:
+ p.out += "?"
+ case groupMatchZeroOrMore:
+ p.out += "*"
+ case groupMatchOneOrMore:
+ p.out += "+"
+ case groupMatchOnce:
+ }
+
+ case *lookaheadGroup:
+ if !n.negative {
+ p.out += "(?= "
+ } else {
+ p.out += "(?! "
+ }
+ buildEBNF(true, n.expr, seen, p, outp)
+ p.out += ")"
+
+ default:
+ panic(fmt.Sprintf("unsupported node type %T", n))
+ }
+}
diff --git a/vendor/github.com/alecthomas/participle/v2/error.go b/vendor/github.com/alecthomas/participle/v2/error.go
new file mode 100644
index 00000000000..33827380024
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/error.go
@@ -0,0 +1,99 @@
+package participle
+
+import (
+ "fmt"
+
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+// Error represents an error while parsing.
+//
+// The format of an Error is in the form "[<filename>:][<line>:<col>:] <message>".
+//
+// The error will contain positional information if available.
+type Error interface {
+ error
+ // Unadorned message.
+ Message() string
+ // Closest position to error location.
+ Position() lexer.Position
+}
+
+// FormatError formats an error in the form "[<filename>:][<line>:<col>:] <message>".
+func FormatError(err Error) string {
+ msg := ""
+ pos := err.Position()
+ if pos.Filename != "" {
+ msg += pos.Filename + ":"
+ }
+ if pos.Line != 0 || pos.Column != 0 {
+ msg += fmt.Sprintf("%d:%d:", pos.Line, pos.Column)
+ }
+ if msg != "" {
+ msg += " " + err.Message()
+ } else {
+ msg = err.Message()
+ }
+ return msg
+}
+
+// UnexpectedTokenError is returned by Parse when an unexpected token is encountered.
+//
+// This is useful for composing parsers in order to detect when a sub-parser has terminated.
+type UnexpectedTokenError struct {
+ Unexpected lexer.Token
+ at node
+}
+
+func (u *UnexpectedTokenError) Error() string { return FormatError(u) }
+
+func (u *UnexpectedTokenError) Message() string { // nolint: golint
+ var expected string
+ if u.at != nil {
+ expected = fmt.Sprintf(" (expected %s)", u.at)
+ }
+ return fmt.Sprintf("unexpected token %q%s", u.Unexpected, expected)
+}
+func (u *UnexpectedTokenError) Position() lexer.Position { return u.Unexpected.Pos } // nolint: golint
+
+// ParseError is returned when a parse error occurs.
+//
+// It is useful for differentiating between parse errors and other errors such
+// as lexing and IO errors.
+type ParseError struct {
+ Msg string
+ Pos lexer.Position
+}
+
+func (p *ParseError) Error() string { return FormatError(p) }
+func (p *ParseError) Message() string { return p.Msg }
+func (p *ParseError) Position() lexer.Position { return p.Pos }
+
+// Errorf creates a new Error at the given position.
+func Errorf(pos lexer.Position, format string, args ...interface{}) Error {
+ return &ParseError{Msg: fmt.Sprintf(format, args...), Pos: pos}
+}
+
+type wrappingParseError struct {
+ err error
+ ParseError
+}
+
+func (w *wrappingParseError) Unwrap() error { return w.err }
+
+// Wrapf attempts to wrap an existing error in a new message.
+//
+// If "err" is a participle.Error, its positional information will be used and
+// "pos" will be ignored.
+//
+// The returned error implements the Unwrap() method supported by the errors package.
+func Wrapf(pos lexer.Position, err error, format string, args ...interface{}) Error {
+ var msg string
+ if perr, ok := err.(Error); ok {
+ pos = perr.Position()
+ msg = fmt.Sprintf("%s: %s", fmt.Sprintf(format, args...), perr.Message())
+ } else {
+ msg = fmt.Sprintf("%s: %s", fmt.Sprintf(format, args...), err.Error())
+ }
+ return &wrappingParseError{err: err, ParseError: ParseError{Msg: msg, Pos: pos}}
+}
diff --git a/vendor/github.com/alecthomas/participle/v2/grammar.go b/vendor/github.com/alecthomas/participle/v2/grammar.go
new file mode 100644
index 00000000000..45d5e069475
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/grammar.go
@@ -0,0 +1,418 @@
+package participle
+
+import (
+ "fmt"
+ "reflect"
+ "text/scanner"
+
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+type generatorContext struct {
+ lexer.Definition
+ typeNodes map[reflect.Type]node
+ symbolsToIDs map[lexer.TokenType]string
+}
+
+func newGeneratorContext(lex lexer.Definition) *generatorContext {
+ return &generatorContext{
+ Definition: lex,
+ typeNodes: map[reflect.Type]node{},
+ symbolsToIDs: lexer.SymbolsByRune(lex),
+ }
+}
+
+func (g *generatorContext) addUnionDefs(defs []unionDef) error {
+ unionNodes := make([]*union, len(defs))
+ for i, def := range defs {
+ if _, exists := g.typeNodes[def.typ]; exists {
+ return fmt.Errorf("duplicate definition for interface or union type %s", def.typ)
+ }
+ unionNode := &union{
+ unionDef: def,
+ nodeMembers: make([]node, 0, len(def.members)),
+ }
+ g.typeNodes[def.typ], unionNodes[i] = unionNode, unionNode
+ }
+ for i, def := range defs {
+ unionNode := unionNodes[i]
+ for _, memberType := range def.members {
+ memberNode, err := g.parseType(memberType)
+ if err != nil {
+ return err
+ }
+ unionNode.nodeMembers = append(unionNode.nodeMembers, memberNode)
+ }
+ }
+ return nil
+}
+
+func (g *generatorContext) addCustomDefs(defs []customDef) error {
+ for _, def := range defs {
+ if _, exists := g.typeNodes[def.typ]; exists {
+ return fmt.Errorf("duplicate definition for interface or union type %s", def.typ)
+ }
+ g.typeNodes[def.typ] = &custom{typ: def.typ, parseFn: def.parseFn}
+ }
+ return nil
+}
+
+// Takes a type and builds a tree of nodes out of it.
+func (g *generatorContext) parseType(t reflect.Type) (_ node, returnedError error) {
+ t = indirectType(t)
+ if n, ok := g.typeNodes[t]; ok {
+ return n, nil
+ }
+ if t.Implements(parseableType) {
+ return &parseable{t.Elem()}, nil
+ }
+ if reflect.PtrTo(t).Implements(parseableType) {
+ return &parseable{t}, nil
+ }
+ switch t.Kind() { // nolint: exhaustive
+ case reflect.Slice, reflect.Ptr:
+ t = indirectType(t.Elem())
+ if t.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("expected a struct but got %T", t)
+ }
+ fallthrough
+
+ case reflect.Struct:
+ slexer, err := lexStruct(t)
+ if err != nil {
+ return nil, err
+ }
+ out := newStrct(t)
+ g.typeNodes[t] = out // Ensure we avoid infinite recursion.
+ if slexer.NumField() == 0 {
+ return nil, fmt.Errorf("can not parse into empty struct %s", t)
+ }
+ defer decorate(&returnedError, func() string { return slexer.Field().Name })
+ e, err := g.parseDisjunction(slexer)
+ if err != nil {
+ return nil, err
+ }
+ if e == nil {
+ return nil, fmt.Errorf("no grammar found in %s", t)
+ }
+ if token, _ := slexer.Peek(); !token.EOF() {
+ return nil, fmt.Errorf("unexpected input %q", token.Value)
+ }
+ out.expr = e
+ return out, nil
+ }
+ return nil, fmt.Errorf("%s should be a struct or should implement the Parseable interface", t)
+}
+
+func (g *generatorContext) parseDisjunction(slexer *structLexer) (node, error) {
+ out := &disjunction{}
+ for {
+ n, err := g.parseSequence(slexer)
+ if err != nil {
+ return nil, err
+ }
+ if n == nil {
+ return nil, fmt.Errorf("alternative expression %d cannot be empty", len(out.nodes)+1)
+ }
+ out.nodes = append(out.nodes, n)
+ if token, _ := slexer.Peek(); token.Type != '|' {
+ break
+ }
+ _, err = slexer.Next() // |
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(out.nodes) == 1 {
+ return out.nodes[0], nil
+ }
+ return out, nil
+}
+
+func (g *generatorContext) parseSequence(slexer *structLexer) (node, error) {
+ head := &sequence{}
+ cursor := head
+loop:
+ for {
+ if token, err := slexer.Peek(); err != nil {
+ return nil, err
+ } else if token.Type == lexer.EOF {
+ break loop
+ }
+ term, err := g.parseTerm(slexer, true)
+ if err != nil {
+ return nil, err
+ }
+ if term == nil {
+ break loop
+ }
+ if cursor.node == nil {
+ cursor.head = true
+ cursor.node = term
+ } else {
+ cursor.next = &sequence{node: term}
+ cursor = cursor.next
+ }
+ }
+ if head.node == nil {
+ return nil, nil
+ }
+ if head.next == nil {
+ return head.node, nil
+ }
+ return head, nil
+}
+
+func (g *generatorContext) parseTermNoModifiers(slexer *structLexer, allowUnknown bool) (node, error) {
+ t, err := slexer.Peek()
+ if err != nil {
+ return nil, err
+ }
+ switch t.Type {
+ case '@':
+ return g.parseCapture(slexer)
+ case scanner.String, scanner.RawString, scanner.Char:
+ return g.parseLiteral(slexer)
+ case '!', '~':
+ return g.parseNegation(slexer)
+ case '[':
+ return g.parseOptional(slexer)
+ case '{':
+ return g.parseRepetition(slexer)
+ case '(':
+ // Also handles (? used for lookahead groups
+ return g.parseGroup(slexer)
+ case scanner.Ident:
+ return g.parseReference(slexer)
+ case lexer.EOF:
+ _, _ = slexer.Next()
+ return nil, nil
+ default:
+ if allowUnknown {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("unexpected token %v", t)
+ }
+}
+
+func (g *generatorContext) parseTerm(slexer *structLexer, allowUnknown bool) (node, error) {
+ out, err := g.parseTermNoModifiers(slexer, allowUnknown)
+ if err != nil {
+ return nil, err
+ }
+ return g.parseModifier(slexer, out)
+}
+
+// Parse modifiers: ?, *, + and/or !
+func (g *generatorContext) parseModifier(slexer *structLexer, expr node) (node, error) {
+ out := &group{expr: expr}
+ t, err := slexer.Peek()
+ if err != nil {
+ return nil, err
+ }
+ switch t.Type {
+ case '!':
+ out.mode = groupMatchNonEmpty
+ case '+':
+ out.mode = groupMatchOneOrMore
+ case '*':
+ out.mode = groupMatchZeroOrMore
+ case '?':
+ out.mode = groupMatchZeroOrOne
+ default:
+ return expr, nil
+ }
+ _, _ = slexer.Next()
+ return out, nil
+}
+
+// @ captures into the current field.
+func (g *generatorContext) parseCapture(slexer *structLexer) (node, error) {
+ _, _ = slexer.Next()
+ token, err := slexer.Peek()
+ if err != nil {
+ return nil, err
+ }
+ field := slexer.Field()
+ if token.Type == '@' {
+ _, _ = slexer.Next()
+ n, err := g.parseType(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ return &capture{field, n}, nil
+ }
+ ft := indirectType(field.Type)
+ if ft.Kind() == reflect.Struct && ft != tokenType && ft != tokensType && !implements(ft, captureType) && !implements(ft, textUnmarshalerType) {
+ return nil, fmt.Errorf("structs can only be parsed with @@ or by implementing the Capture or encoding.TextUnmarshaler interfaces")
+ }
+ n, err := g.parseTermNoModifiers(slexer, false)
+ if err != nil {
+ return nil, err
+ }
+ return &capture{field, n}, nil
+}
+
+// A reference in the form <identifier> refers to a named token from the lexer.
+func (g *generatorContext) parseReference(slexer *structLexer) (node, error) { // nolint: interfacer
+ token, err := slexer.Next()
+ if err != nil {
+ return nil, err
+ }
+ if token.Type != scanner.Ident {
+ return nil, fmt.Errorf("expected identifier but got %q", token)
+ }
+ typ, ok := g.Symbols()[token.Value]
+ if !ok {
+ return nil, fmt.Errorf("unknown token type %q", token)
+ }
+ return &reference{typ: typ, identifier: token.Value}, nil
+}
+
+// [ <expression> ] optionally matches <expression>.
+func (g *generatorContext) parseOptional(slexer *structLexer) (node, error) {
+ _, _ = slexer.Next() // [
+ disj, err := g.parseDisjunction(slexer)
+ if err != nil {
+ return nil, err
+ }
+ n := &group{expr: disj, mode: groupMatchZeroOrOne}
+ next, err := slexer.Next()
+ if err != nil {
+ return nil, err
+ }
+ if next.Type != ']' {
+ return nil, fmt.Errorf("expected ] but got %q", next)
+ }
+ return n, nil
+}
+
+// { <expression> } matches zero or more repetitions of <expression>.
+func (g *generatorContext) parseRepetition(slexer *structLexer) (node, error) {
+ _, _ = slexer.Next() // {
+ disj, err := g.parseDisjunction(slexer)
+ if err != nil {
+ return nil, err
+ }
+ n := &group{expr: disj, mode: groupMatchZeroOrMore}
+ next, err := slexer.Next()
+ if err != nil {
+ return nil, err
+ }
+ if next.Type != '}' {
+ return nil, fmt.Errorf("expected } but got %q", next)
+ }
+ return n, nil
+}
+
+// ( <expression> ) groups a sub-expression.
+func (g *generatorContext) parseGroup(slexer *structLexer) (node, error) {
+ _, _ = slexer.Next() // (
+ peek, err := slexer.Peek()
+ if err != nil {
+ return nil, err
+ }
+ if peek.Type == '?' {
+ return g.subparseLookaheadGroup(slexer) // Any error peeking was already returned above.
+ }
+ expr, err := g.subparseGroup(slexer)
+ if err != nil {
+ return nil, err
+ }
+ return &group{expr: expr}, nil
+}
+
+// (?= <expression> ) and (?! <expression> ) are lookahead groups that require the grouped sub-expression to match (or not match, respectively) without consuming any input.
+func (g *generatorContext) subparseLookaheadGroup(slexer *structLexer) (node, error) {
+ _, _ = slexer.Next() // ? - the opening ( was already consumed in parseGroup
+ var negative bool
+ next, err := slexer.Next()
+ if err != nil {
+ return nil, err
+ }
+ switch next.Type {
+ case '=':
+ negative = false
+ case '!':
+ negative = true
+ default:
+ return nil, fmt.Errorf("expected = or ! but got %q", next)
+ }
+ expr, err := g.subparseGroup(slexer)
+ if err != nil {
+ return nil, err
+ }
+ return &lookaheadGroup{expr: expr, negative: negative}, nil
+}
+
+// Helper that parses a disjunction and the closing ) shared by groups and lookahead groups.
+func (g *generatorContext) subparseGroup(slexer *structLexer) (node, error) {
+ disj, err := g.parseDisjunction(slexer)
+ if err != nil {
+ return nil, err
+ }
+ next, err := slexer.Next() // )
+ if err != nil {
+ return nil, err
+ }
+ if next.Type != ')' {
+ return nil, fmt.Errorf("expected ) but got %q", next)
+ }
+ return disj, nil
+}
+
+// A token negation
+//
+// Accepts both the form !"some-literal" and !SomeNamedToken
+func (g *generatorContext) parseNegation(slexer *structLexer) (node, error) {
+ _, _ = slexer.Next() // advance the parser since we have '!' right now.
+ next, err := g.parseTermNoModifiers(slexer, false)
+ if err != nil {
+ return nil, err
+ }
+ return &negation{next}, nil
+}
+
+// A literal string.
+//
+// Note that for this to match, the tokeniser must be able to produce this string. For example,
+// if the tokeniser only produces individual characters but the literal is "hello", it will never match.
+func (g *generatorContext) parseLiteral(lex *structLexer) (node, error) { // nolint: interfacer
+ token, err := lex.Next()
+ if err != nil {
+ return nil, err
+ }
+ s := token.Value
+ t := lexer.TokenType(-1)
+ token, err = lex.Peek()
+ if err != nil {
+ return nil, err
+ }
+ if token.Type == ':' {
+ _, _ = lex.Next()
+ token, err = lex.Next()
+ if err != nil {
+ return nil, err
+ }
+ if token.Type != scanner.Ident {
+ return nil, fmt.Errorf("expected identifier for literal type constraint but got %q", token)
+ }
+ var ok bool
+ t, ok = g.Symbols()[token.Value]
+ if !ok {
+ return nil, fmt.Errorf("unknown token type %q in literal type constraint", token)
+ }
+ }
+ return &literal{s: s, t: t, tt: g.symbolsToIDs[t]}, nil
+}
+
+func indirectType(t reflect.Type) reflect.Type {
+ if t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice {
+ return indirectType(t.Elem())
+ }
+ return t
+}
+
+func implements(t, i reflect.Type) bool {
+ return t.Implements(i) || reflect.PtrTo(t).Implements(i)
+}
diff --git a/vendor/github.com/alecthomas/participle/v2/lexer/api.go b/vendor/github.com/alecthomas/participle/v2/lexer/api.go
new file mode 100644
index 00000000000..9f89d7aaffc
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/lexer/api.go
@@ -0,0 +1,171 @@
+package lexer
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "unicode/utf8"
+)
+
+// TokenType is the identifier for a type of token.
+type TokenType int
+
+const (
+ // EOF represents an end of file.
+ EOF TokenType = -(iota + 1)
+)
+
+// EOFToken creates a new EOF token at the given position.
+func EOFToken(pos Position) Token {
+ return Token{Type: EOF, Pos: pos}
+}
+
+// Definition is the main entry point for lexing.
+type Definition interface {
+ // Symbols returns a map of symbolic names to the corresponding pseudo-runes for those symbols.
+ // This is the same approach as used by text/scanner. For example, "EOF" might have the rune
+ // value of -1, "Ident" might be -2, and so on.
+ Symbols() map[string]TokenType
+ // Lex an io.Reader.
+ Lex(filename string, r io.Reader) (Lexer, error)
+}
+
+// StringDefinition is an optional interface lexer Definitions can implement
+// to offer a fast path for lexing strings.
+type StringDefinition interface {
+ LexString(filename string, input string) (Lexer, error)
+}
+
+// BytesDefinition is an optional interface lexer Definitions can implement
+// to offer a fast path for lexing byte slices.
+type BytesDefinition interface {
+ LexBytes(filename string, input []byte) (Lexer, error)
+}
+
+// A Lexer returns tokens from a source.
+type Lexer interface {
+ // Next consumes and returns the next token.
+ Next() (Token, error)
+}
+
+// SymbolsByRune returns a map of lexer symbol names keyed by rune.
+func SymbolsByRune(def Definition) map[TokenType]string {
+ symbols := def.Symbols()
+ out := make(map[TokenType]string, len(symbols))
+ for s, r := range symbols {
+ out[r] = s
+ }
+ return out
+}
+
+// NameOfReader attempts to retrieve the filename of a reader.
+func NameOfReader(r interface{}) string {
+ if nr, ok := r.(interface{ Name() string }); ok {
+ return nr.Name()
+ }
+ return ""
+}
+
+// Must takes the result of a Definition constructor call and returns the definition, but panics if
+// it errors.
+//
+// eg.
+//
+// lex = lexer.Must(lexer.Build(`Symbol = "symbol" .`))
+func Must(def Definition, err error) Definition {
+ if err != nil {
+ panic(err)
+ }
+ return def
+}
+
+// ConsumeAll reads all tokens from a Lexer.
+func ConsumeAll(lexer Lexer) ([]Token, error) {
+ tokens := make([]Token, 0, 1024)
+ for {
+ token, err := lexer.Next()
+ if err != nil {
+ return nil, err
+ }
+ tokens = append(tokens, token)
+ if token.Type == EOF {
+ return tokens, nil
+ }
+ }
+}
+
+// Position of a token.
+type Position struct {
+ Filename string
+ Offset int
+ Line int
+ Column int
+}
+
+// Advance the Position based on the number of characters and newlines in "span".
+func (p *Position) Advance(span string) {
+ p.Offset += len(span)
+ lines := strings.Count(span, "\n")
+ p.Line += lines
+ // Update column.
+ if lines == 0 {
+ p.Column += utf8.RuneCountInString(span)
+ } else {
+ p.Column = utf8.RuneCountInString(span[strings.LastIndex(span, "\n"):])
+ }
+}
+
+func (p Position) GoString() string {
+ return fmt.Sprintf("Position{Filename: %q, Offset: %d, Line: %d, Column: %d}",
+ p.Filename, p.Offset, p.Line, p.Column)
+}
+
+func (p Position) String() string {
+ filename := p.Filename
+ if filename == "" {
+ return fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ return fmt.Sprintf("%s:%d:%d", filename, p.Line, p.Column)
+}
+
+// A Token returned by a Lexer.
+type Token struct {
+ // Type of token. This is the value keyed by symbol as returned by Definition.Symbols().
+ Type TokenType
+ Value string
+ Pos Position
+}
+
+// EOF returns true if this Token is an EOF token.
+func (t Token) EOF() bool {
+ return t.Type == EOF
+}
+
+func (t Token) String() string {
+ if t.EOF() {
+ return ""
+ }
+ return t.Value
+}
+
+func (t Token) GoString() string {
+ if t.Pos == (Position{}) {
+ return fmt.Sprintf("Token{%d, %q}", t.Type, t.Value)
+ }
+ return fmt.Sprintf("Token@%s{%d, %q}", t.Pos.String(), t.Type, t.Value)
+}
+
+// MakeSymbolTable builds a lookup table for checking token ID existence.
+//
+// For each symbolic name in "types", the returned map will contain the corresponding token ID as a key.
+func MakeSymbolTable(def Definition, types ...string) (map[TokenType]bool, error) {
+ symbols := def.Symbols()
+ table := make(map[TokenType]bool, len(types))
+ for _, symbol := range types {
+ rn, ok := symbols[symbol]
+ if !ok {
+ return nil, fmt.Errorf("lexer does not support symbol %q", symbol)
+ }
+ table[rn] = true
+ }
+ return table, nil
+}
diff --git a/vendor/github.com/alecthomas/participle/v2/lexer/codegen.go b/vendor/github.com/alecthomas/participle/v2/lexer/codegen.go
new file mode 100644
index 00000000000..1acadc48a76
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/lexer/codegen.go
@@ -0,0 +1,439 @@
+package lexer
+
+import (
+ "fmt"
+ "io"
+ "regexp"
+ "regexp/syntax"
+ "sort"
+ "text/template"
+ "unicode/utf8"
+)
+
+var codegenBackrefRe = regexp.MustCompile(`(\\+)(\d)`)
+
+var codegenTemplate *template.Template = template.Must(template.New("lexgen").Funcs(template.FuncMap{
+ "IsPush": func(r Rule) string {
+ if p, ok := r.Action.(ActionPush); ok {
+ return p.State
+ }
+ return ""
+ },
+ "IsPop": func(r Rule) bool {
+ _, ok := r.Action.(ActionPop)
+ return ok
+ },
+ "IsReturn": func(r Rule) bool {
+ return r == ReturnRule
+ },
+ "OrderRules": orderRules,
+ "HaveBackrefs": func(def *StatefulDefinition, state string) bool {
+ for _, rule := range def.Rules()[state] {
+ if codegenBackrefRe.MatchString(rule.Pattern) {
+ return true
+ }
+ }
+ return false
+ },
+}).Parse(`
+// Code generated by Participle. DO NOT EDIT.
+package {{.Package}}
+
+import (
+ "io"
+ "strings"
+ "unicode/utf8"
+ "regexp/syntax"
+
+ "github.com/alecthomas/participle/v2"
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+var _ syntax.Op
+
+var Lexer lexer.Definition = definitionImpl{}
+
+type definitionImpl struct {}
+
+func (definitionImpl) Symbols() map[string]lexer.TokenType {
+ return map[string]lexer.TokenType{
+{{- range $sym, $rn := .Def.Symbols}}
+ "{{$sym}}": {{$rn}},
+{{- end}}
+ }
+}
+
+func (definitionImpl) LexString(filename string, s string) (lexer.Lexer, error) {
+ return &lexerImpl{
+ s: s,
+ pos: lexer.Position{
+ Filename: filename,
+ Line: 1,
+ Column: 1,
+ },
+ states: []lexerState{lexerState{name: "Root"}},
+ }, nil
+}
+
+func (d definitionImpl) LexBytes(filename string, b []byte) (lexer.Lexer, error) {
+ return d.LexString(filename, string(b))
+}
+
+func (d definitionImpl) Lex(filename string, r io.Reader) (lexer.Lexer, error) {
+ s := &strings.Builder{}
+ _, err := io.Copy(s, r)
+ if err != nil {
+ return nil, err
+ }
+ return d.LexString(filename, s.String())
+}
+
+type lexerState struct {
+ name string
+ groups []string
+}
+
+type lexerImpl struct {
+ s string
+ p int
+ pos lexer.Position
+ states []lexerState
+}
+
+func (l *lexerImpl) Next() (lexer.Token, error) {
+ if l.p == len(l.s) {
+ return lexer.EOFToken(l.pos), nil
+ }
+ var (
+ state = l.states[len(l.states)-1]
+ groups []int
+ sym lexer.TokenType
+ )
+ switch state.name {
+{{- range $state := .Def.Rules|OrderRules}}
+ case "{{$state.Name}}":
+{{- range $i, $rule := $state.Rules}}
+ {{- if $i}} else {{end -}}
+{{- if .Pattern -}}
+ if match := match{{.Name}}(l.s, l.p); match[1] != 0 {
+ sym = {{index $.Def.Symbols .Name}}
+ groups = match[:]
+{{- else if .|IsReturn -}}
+ if true {
+{{- end}}
+{{- if .|IsPush}}
+ l.states = append(l.states, lexerState{name: "{{.|IsPush}}"{{if HaveBackrefs $.Def $state.Name}}, groups: l.sgroups(groups){{end}}})
+{{- else if (or (.|IsPop) (.|IsReturn))}}
+ l.states = l.states[:len(l.states)-1]
+{{- if .|IsReturn}}
+ return l.Next()
+{{- end}}
+{{- else if not .Action}}
+{{- else}}
+ Unsupported action {{.Action}}
+{{- end}}
+ }
+{{- end}}
+{{- end}}
+ }
+ if groups == nil {
+ sample := []rune(l.s[l.p:])
+ if len(sample) > 16 {
+ sample = append(sample[:16], []rune("...")...)
+ }
+ return lexer.Token{}, participle.Errorf(l.pos, "invalid input text %q", sample)
+ }
+ pos := l.pos
+ span := l.s[groups[0]:groups[1]]
+ l.p = groups[1]
+ l.pos.Advance(span)
+ return lexer.Token{
+ Type: sym,
+ Value: span,
+ Pos: pos,
+ }, nil
+}
+
+func (l *lexerImpl) sgroups(match []int) []string {
+ sgroups := make([]string, len(match)/2)
+ for i := 0; i < len(match)-1; i += 2 {
+ sgroups[i/2] = l.s[l.p+match[i]:l.p+match[i+1]]
+ }
+ return sgroups
+}
+
+`))
+
+// ExperimentalGenerateLexer generates Go code implementing the given stateful lexer.
+//
+// The generated code should in general be around 10x faster and produce zero garbage per token.
+//
+// NOTE: This is an experimental interface and subject to change.
+func ExperimentalGenerateLexer(w io.Writer, pkg string, def *StatefulDefinition) error {
+ type ctx struct {
+ Package string
+ Def *StatefulDefinition
+ }
+ rules := def.Rules()
+ err := codegenTemplate.Execute(w, ctx{pkg, def})
+ if err != nil {
+ return err
+ }
+ seen := map[string]bool{} // Rules can be duplicated by Include().
+ for _, rules := range orderRules(rules) {
+ for _, rule := range rules.Rules {
+ if rule.Name == "" {
+ panic(rule)
+ }
+ if seen[rule.Name] {
+ continue
+ }
+ seen[rule.Name] = true
+ fmt.Fprintf(w, "\n")
+ err := generateRegexMatch(w, rule.Name, rule.Pattern)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+type orderedRule struct {
+ Name string
+ Rules []Rule
+}
+
+func orderRules(rules Rules) []orderedRule {
+ orderedRules := []orderedRule{}
+ for name, rules := range rules {
+ orderedRules = append(orderedRules, orderedRule{
+ Name: name,
+ Rules: rules,
+ })
+ }
+ sort.Slice(orderedRules, func(i, j int) bool {
+ return orderedRules[i].Name < orderedRules[j].Name
+ })
+ return orderedRules
+}
+
+func generateRegexMatch(w io.Writer, name, pattern string) error {
+ re, err := syntax.Parse(pattern, syntax.Perl)
+ if err != nil {
+ return err
+ }
+ ids := map[string]int{}
+ idn := 0
+ reid := func(re *syntax.Regexp) int {
+ key := re.Op.String() + ":" + re.String()
+ id, ok := ids[key]
+ if ok {
+ return id
+ }
+ id = idn
+ idn++
+ ids[key] = id
+ return id
+ }
+ exists := func(re *syntax.Regexp) bool {
+ key := re.Op.String() + ":" + re.String()
+ _, ok := ids[key]
+ return ok
+ }
+ re = re.Simplify()
+ fmt.Fprintf(w, "// %s\n", re)
+ fmt.Fprintf(w, "func match%s(s string, p int) (groups [%d]int) {\n", name, 2*re.MaxCap()+2)
+ flattened := flatten(re)
+
+ // Fast-path a single literal.
+ if len(flattened) == 1 && re.Op == syntax.OpLiteral {
+ n := utf8.RuneCountInString(string(re.Rune))
+ if n == 1 {
+ fmt.Fprintf(w, "if p < len(s) && s[p] == %q {\n", re.Rune[0])
+ } else {
+ fmt.Fprintf(w, "if p+%d < len(s) && s[p:p+%d] == %q {\n", n, n, string(re.Rune))
+ }
+ fmt.Fprintf(w, "groups[0] = p\n")
+ fmt.Fprintf(w, "groups[1] = p + %d\n", n)
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "return\n")
+ fmt.Fprintf(w, "}\n")
+ return nil
+ }
+ for _, re := range flattened {
+ if exists(re) {
+ continue
+ }
+ fmt.Fprintf(w, "// %s (%s)\n", re, re.Op)
+ fmt.Fprintf(w, "l%d := func(s string, p int) int {\n", reid(re))
+ if re.Flags&syntax.NonGreedy != 0 {
+ panic("non-greedy match not supported: " + re.String())
+ }
+ switch re.Op {
+ case syntax.OpNoMatch: // matches no strings
+ fmt.Fprintf(w, "return p\n")
+
+ case syntax.OpEmptyMatch: // matches empty string
+ fmt.Fprintf(w, "if len(s) == 0 { return p }\n")
+ fmt.Fprintf(w, "return -1\n")
+
+ case syntax.OpLiteral: // matches Runes sequence
+ n := utf8.RuneCountInString(string(re.Rune))
+ if n == 1 {
+ fmt.Fprintf(w, "if p < len(s) && s[p] == %q { return p+1 }\n", re.Rune[0])
+ } else {
+ fmt.Fprintf(w, "if p+%d < len(s) && s[p:p+%d] == %q { return p+%d }\n", n, n, string(re.Rune), n)
+ }
+ fmt.Fprintf(w, "return -1\n")
+
+ case syntax.OpCharClass: // matches Runes interpreted as range pair list
+ fmt.Fprintf(w, "if len(s) <= p { return -1 }\n")
+ needDecode := false
+ for i := 0; i < len(re.Rune); i += 2 {
+ l, r := re.Rune[i], re.Rune[i+1]
+ ln, rn := utf8.RuneLen(l), utf8.RuneLen(r)
+ if ln != 1 || rn != 1 {
+ needDecode = true
+ break
+ }
+ }
+ if needDecode {
+ fmt.Fprintf(w, "var (rn rune; n int)\n")
+ decodeRune(w, "p", "rn", "n")
+ } else {
+ fmt.Fprintf(w, "rn := s[p]\n")
+ }
+ fmt.Fprintf(w, "switch {\n")
+ for i := 0; i < len(re.Rune); i += 2 {
+ l, r := re.Rune[i], re.Rune[i+1]
+ ln, rn := utf8.RuneLen(l), utf8.RuneLen(r)
+ if ln == 1 && rn == 1 {
+ if l == r {
+ fmt.Fprintf(w, "case rn == %q: return p+1\n", l)
+ } else {
+ fmt.Fprintf(w, "case rn >= %q && rn <= %q: return p+1\n", l, r)
+ }
+ } else {
+ if l == r {
+ fmt.Fprintf(w, "case rn == %q: return p+n\n", l)
+ } else {
+ fmt.Fprintf(w, "case rn >= %q && rn <= %q: return p+n\n", l, r)
+ }
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "return -1\n")
+
+ case syntax.OpAnyCharNotNL: // matches any character except newline
+ fmt.Fprintf(w, "var (rn rune; n int)\n")
+ decodeRune(w, "p", "rn", "n")
+ fmt.Fprintf(w, "if len(s) <= p+n || rn == '\\n' { return -1 }\n")
+ fmt.Fprintf(w, "return p+n\n")
+
+ case syntax.OpAnyChar: // matches any character
+ fmt.Fprintf(w, "var n int\n")
+ fmt.Fprintf(w, "if s[p] < utf8.RuneSelf {\n")
+ fmt.Fprintf(w, " n = 1\n")
+ fmt.Fprintf(w, "} else {\n")
+ fmt.Fprintf(w, " _, n = utf8.DecodeRuneInString(s[p:])\n")
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "if len(s) <= p+n { return -1 }\n")
+ fmt.Fprintf(w, "return p+n\n")
+
+ case syntax.OpWordBoundary, syntax.OpNoWordBoundary,
+ syntax.OpBeginText, syntax.OpEndText,
+ syntax.OpBeginLine, syntax.OpEndLine:
+ fmt.Fprintf(w, "var l, u rune = -1, -1\n")
+ fmt.Fprintf(w, "if p == 0 {\n")
+ decodeRune(w, "0", "u", "_")
+ fmt.Fprintf(w, "} else if p == len(s) {\n")
+ fmt.Fprintf(w, " l, _ = utf8.DecodeLastRuneInString(s)\n")
+ fmt.Fprintf(w, "} else {\n")
+ fmt.Fprintf(w, " var ln int\n")
+ decodeRune(w, "p", "l", "ln")
+ fmt.Fprintf(w, " if p+ln <= len(s) {\n")
+ decodeRune(w, "p+ln", "u", "_")
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "op := syntax.EmptyOpContext(l, u)\n")
+ lut := map[syntax.Op]string{
+ syntax.OpWordBoundary: "EmptyWordBoundary",
+ syntax.OpNoWordBoundary: "EmptyNoWordBoundary",
+ syntax.OpBeginText: "EmptyBeginText",
+ syntax.OpEndText: "EmptyEndText",
+ syntax.OpBeginLine: "EmptyBeginLine",
+ syntax.OpEndLine: "EmptyEndLine",
+ }
+ fmt.Fprintf(w, "if op & syntax.%s != 0 { return p }\n", lut[re.Op])
+ fmt.Fprintf(w, "return -1\n")
+
+ case syntax.OpCapture: // capturing subexpression with index Cap, optional name Name
+ fmt.Fprintf(w, "np := l%d(s, p)\n", reid(re.Sub0[0]))
+ fmt.Fprintf(w, "if np != -1 {\n")
+ fmt.Fprintf(w, " groups[%d] = p\n", re.Cap*2)
+ fmt.Fprintf(w, " groups[%d] = np\n", re.Cap*2+1)
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "return np")
+
+ case syntax.OpStar: // matches Sub[0] zero or more times
+ fmt.Fprintf(w, "for len(s) > p {\n")
+ fmt.Fprintf(w, "if np := l%d(s, p); np == -1 { return p } else { p = np }\n", reid(re.Sub0[0]))
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "return p\n")
+
+ case syntax.OpPlus: // matches Sub[0] one or more times
+ fmt.Fprintf(w, "if p = l%d(s, p); p == -1 { return -1 }\n", reid(re.Sub0[0]))
+ fmt.Fprintf(w, "for len(s) > p {\n")
+ fmt.Fprintf(w, "if np := l%d(s, p); np == -1 { return p } else { p = np }\n", reid(re.Sub0[0]))
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "return p\n")
+
+ case syntax.OpQuest: // matches Sub[0] zero or one times
+ fmt.Fprintf(w, "if np := l%d(s, p); np != -1 { return np }\n", reid(re.Sub0[0]))
+ fmt.Fprintf(w, "return p\n")
+
+ case syntax.OpRepeat: // matches Sub[0] at least Min times, at most Max (Max == -1 is no limit)
+ panic("??")
+
+ case syntax.OpConcat: // matches concatenation of Subs
+ for _, sub := range re.Sub {
+ fmt.Fprintf(w, "if p = l%d(s, p); p == -1 { return -1 }\n", reid(sub))
+ }
+ fmt.Fprintf(w, "return p\n")
+
+ case syntax.OpAlternate: // matches alternation of Subs
+ for _, sub := range re.Sub {
+ fmt.Fprintf(w, "if np := l%d(s, p); np != -1 { return np }\n", reid(sub))
+ }
+ fmt.Fprintf(w, "return -1\n")
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ fmt.Fprintf(w, "np := l%d(s, p)\n", reid(re))
+ fmt.Fprintf(w, "if np == -1 {\n")
+ fmt.Fprintf(w, " return\n")
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, "groups[0] = p\n")
+ fmt.Fprintf(w, "groups[1] = np\n")
+ fmt.Fprintf(w, "return\n")
+ fmt.Fprintf(w, "}\n")
+ return nil
+}
+
+// This exists because of https://github.com/golang/go/issues/31666
+func decodeRune(w io.Writer, offset string, rn string, n string) {
+ fmt.Fprintf(w, "if s[%s] < utf8.RuneSelf {\n", offset)
+ fmt.Fprintf(w, " %s, %s = rune(s[%s]), 1\n", rn, n, offset)
+ fmt.Fprintf(w, "} else {\n")
+ fmt.Fprintf(w, " %s, %s = utf8.DecodeRuneInString(s[%s:])\n", rn, n, offset)
+ fmt.Fprintf(w, "}\n")
+}
+
+func flatten(re *syntax.Regexp) (out []*syntax.Regexp) {
+ for _, sub := range re.Sub {
+ out = append(out, flatten(sub)...)
+ }
+ out = append(out, re)
+ return
+}
diff --git a/vendor/github.com/alecthomas/participle/v2/lexer/doc.go b/vendor/github.com/alecthomas/participle/v2/lexer/doc.go
new file mode 100644
index 00000000000..a038a167c24
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/lexer/doc.go
@@ -0,0 +1,28 @@
+// Package lexer defines interfaces and implementations used by Participle to perform lexing.
+//
+// The primary interfaces are Definition and Lexer. There are two concrete implementations
+// included. The first is one based on Go's text/scanner package. The second is Participle's
+// default stateful/modal lexer.
+//
+// The stateful lexer is based heavily on the approach used by Chroma (and Pygments).
+//
+// It is a state machine defined by a map of rules keyed by state. Each rule
+// is a named regex and optional operation to apply when the rule matches.
+//
+// As a convenience, any Rule starting with a lowercase letter will be elided from output.
+//
+// Lexing starts in the "Root" group. Each rule is matched in order, with the first
+// successful match producing a lexeme. If the matching rule has an associated Action
+// it will be executed.
+//
+// A state change can be introduced with the Action `Push(state)`. `Pop()` will
+// return to the previous state.
+//
+// To reuse rules from another state, use `Include(state)`.
+//
+// As a special case, regexes containing backrefs in the form \N (where N is a digit)
+// will match the corresponding capture group from the immediate parent group. This
+// can be used to parse, among other things, heredocs.
+//
+// See the README, example and tests in this package for details.
+package lexer
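A sketch of the state machine described above (illustrative, not part of the vendored sources): a two-state lexer for double-quoted strings with escapes.

    package main

    import (
        "fmt"

        "github.com/alecthomas/participle/v2/lexer"
    )

    // Root pushes into the String state on an opening quote; String pops back
    // to Root on the closing quote. The lower-cased "whitespace" rule is
    // elided from the output.
    var def = lexer.MustStateful(lexer.Rules{
        "Root": {
            {"StringStart", `"`, lexer.Push("String")},
            {"Ident", `\w+`, nil},
            {"whitespace", `\s+`, nil},
        },
        "String": {
            {"Escaped", `\\.`, nil},
            {"StringEnd", `"`, lexer.Pop()},
            {"Chars", `[^"\\]+`, nil},
        },
    })

    func main() {
        l, err := def.LexString("example", `say "hi\n"`)
        if err != nil {
            panic(err)
        }
        tokens, err := lexer.ConsumeAll(l)
        if err != nil {
            panic(err)
        }
        for _, t := range tokens {
            fmt.Printf("%q ", t.Value) // "say" "\"" "hi" "\\n" "\"" ""
        }
    }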
diff --git a/vendor/github.com/alecthomas/participle/v2/lexer/errors.go b/vendor/github.com/alecthomas/participle/v2/lexer/errors.go
new file mode 100644
index 00000000000..6734a6caa5d
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/lexer/errors.go
@@ -0,0 +1,50 @@
+package lexer
+
+import "fmt"
+
+// This file exists to break circular imports. The types and functions in here
+// mirror those in the participle package.
+
+type errorInterface interface {
+ error
+ Message() string
+ Position() Position
+}
+
+// Error represents an error while lexing.
+//
+// It complies with the participle.Error interface.
+type Error struct {
+ Msg string
+ Pos Position
+}
+
+var _ errorInterface = &Error{}
+
+// errorf creates a new Error at the given position.
+func errorf(pos Position, format string, args ...interface{}) *Error {
+ return &Error{Msg: fmt.Sprintf(format, args...), Pos: pos}
+}
+
+func (e *Error) Message() string { return e.Msg } // nolint: golint
+func (e *Error) Position() Position { return e.Pos } // nolint: golint
+
+// Error formats the error with FormatError.
+func (e *Error) Error() string { return formatError(e.Pos, e.Msg) }
+
+// Formats an error in the form "[<filename>:][<line>:<column>:] <message>"
+func formatError(pos Position, message string) string {
+ msg := ""
+ if pos.Filename != "" {
+ msg += pos.Filename + ":"
+ }
+ if pos.Line != 0 || pos.Column != 0 {
+ msg += fmt.Sprintf("%d:%d:", pos.Line, pos.Column)
+ }
+ if msg != "" {
+ msg += " " + message
+ } else {
+ msg = message
+ }
+ return msg
+}
diff --git a/vendor/github.com/alecthomas/participle/v2/lexer/peek.go b/vendor/github.com/alecthomas/participle/v2/lexer/peek.go
new file mode 100644
index 00000000000..4e7324daa5b
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/lexer/peek.go
@@ -0,0 +1,126 @@
+package lexer
+
+// PeekingLexer supports arbitrary lookahead as well as cloning.
+type PeekingLexer struct {
+ rawCursor RawCursor
+ cursor int
+ eof Token
+ tokens []Token
+ elide map[TokenType]bool
+}
+
+// RawCursor index in the token stream.
+type RawCursor int
+
+// Upgrade a Lexer to a PeekingLexer with arbitrary lookahead.
+//
+// "elide" is a slice of token types to elide from processing.
+func Upgrade(lex Lexer, elide ...TokenType) (*PeekingLexer, error) {
+ r := &PeekingLexer{
+ elide: make(map[TokenType]bool, len(elide)),
+ }
+ for _, rn := range elide {
+ r.elide[rn] = true
+ }
+ for {
+ t, err := lex.Next()
+ if err != nil {
+ return r, err
+ }
+ if t.EOF() {
+ r.eof = t
+ break
+ }
+ r.tokens = append(r.tokens, t)
+ }
+ return r, nil
+}
+
+// Range returns the slice of tokens between the two cursor points.
+func (p *PeekingLexer) Range(rawStart, rawEnd RawCursor) []Token {
+ return p.tokens[rawStart:rawEnd]
+}
+
+// Cursor position in tokens, excluding elided tokens.
+func (p *PeekingLexer) Cursor() int {
+ return p.cursor
+}
+
+// RawCursor position in tokens, including elided tokens.
+func (p *PeekingLexer) RawCursor() RawCursor {
+ return p.rawCursor
+}
+
+// Next consumes and returns the next token.
+func (p *PeekingLexer) Next() Token {
+ for int(p.rawCursor) < len(p.tokens) {
+ t := p.tokens[p.rawCursor]
+ p.rawCursor++
+ if p.elide[t.Type] {
+ continue
+ }
+ p.cursor++
+ return t
+ }
+ return p.eof
+}
+
+// Peek ahead at the next token.
+func (p *PeekingLexer) Peek() Token {
+ for i := int(p.rawCursor); i < len(p.tokens); i++ {
+ t := p.tokens[i]
+ if p.elide[t.Type] {
+ continue
+ }
+ return t
+ }
+ return p.eof
+}
+
+// PeekAny peeks forward over elided and non-elided tokens.
+//
+// Elided tokens will be returned if they match, otherwise the next
+// non-elided token will be returned.
+//
+// The returned RawCursor position is the location of the returned token.
+// Use FastForward to move the internal cursors forward.
+func (p *PeekingLexer) PeekAny(match func(Token) bool) (t Token, rawCursor RawCursor) {
+ tokenCount := RawCursor(len(p.tokens))
+ for i := p.rawCursor; i < tokenCount; i++ {
+ t = p.tokens[i]
+ if match(t) || !p.elide[t.Type] {
+ return t, i
+ }
+ }
+ return p.eof, tokenCount
+}
+
+// FastForward the internal cursors to this RawCursor position.
+func (p *PeekingLexer) FastForward(rawCursor RawCursor) {
+ tokenCount := RawCursor(len(p.tokens))
+ for ; p.rawCursor <= rawCursor && p.rawCursor < tokenCount; p.rawCursor++ {
+ t := p.tokens[p.rawCursor]
+ if p.elide[t.Type] {
+ continue
+ }
+ p.cursor++
+ }
+}
+
+// RawPeek peeks ahead at the next raw token.
+//
+// Unlike Peek, this will include elided tokens.
+func (p *PeekingLexer) RawPeek() Token {
+ if int(p.rawCursor) < len(p.tokens) {
+ return p.tokens[p.rawCursor]
+ }
+ return p.eof
+}
+
+// Clone creates a clone of this PeekingLexer at its current token.
+//
+// The parent and clone are completely independent.
+func (p *PeekingLexer) Clone() *PeekingLexer {
+ clone := *p
+ return &clone
+}
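A sketch of the lookahead and backtracking workflow PeekingLexer enables (illustrative, not part of the vendored sources):

    package main

    import (
        "fmt"

        "github.com/alecthomas/participle/v2/lexer"
    )

    func main() {
        def := lexer.MustSimple([]lexer.SimpleRule{
            {"Ident", `\w+`},
            {"Whitespace", `\s+`},
        })
        l, err := def.LexString("", "a b c")
        if err != nil {
            panic(err)
        }
        // Elide whitespace so Next/Peek only ever see identifiers.
        plex, err := lexer.Upgrade(l, def.Symbols()["Whitespace"])
        if err != nil {
            panic(err)
        }
        checkpoint := plex.Clone() // independent cursor, useful for backtracking
        fmt.Println(plex.Next().Value, plex.Next().Value) // a b
        fmt.Println(checkpoint.Peek().Value)              // a
    }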
diff --git a/vendor/github.com/alecthomas/participle/v2/lexer/simple.go b/vendor/github.com/alecthomas/participle/v2/lexer/simple.go
new file mode 100644
index 00000000000..a8f9305a318
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/lexer/simple.go
@@ -0,0 +1,27 @@
+package lexer
+
+// SimpleRule is a named regular expression.
+type SimpleRule struct {
+ Name string
+ Pattern string
+}
+
+// MustSimple creates a new Stateful lexer with only a single root state.
+//
+// It panics if there is an error.
+func MustSimple(rules []SimpleRule, options ...Option) *StatefulDefinition {
+ def, err := NewSimple(rules, options...)
+ if err != nil {
+ panic(err)
+ }
+ return def
+}
+
+// NewSimple creates a new Stateful lexer with only a single root state.
+func NewSimple(rules []SimpleRule, options ...Option) (*StatefulDefinition, error) {
+ fullRules := make([]Rule, len(rules))
+ for i, rule := range rules {
+ fullRules[i] = Rule{Name: rule.Name, Pattern: rule.Pattern}
+ }
+ return New(Rules{"Root": fullRules}, options...)
+}
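The equivalence implemented above, as a sketch: NewSimple is sugar for a Rules map with a single "Root" state (illustrative):

    package main

    import (
        "fmt"
        "reflect"

        "github.com/alecthomas/participle/v2/lexer"
    )

    func main() {
        simple := lexer.MustSimple([]lexer.SimpleRule{
            {"Int", `\d+`},
            {"whitespace", `\s+`},
        })
        full := lexer.MustStateful(lexer.Rules{
            "Root": {
                {Name: "Int", Pattern: `\d+`},
                {Name: "whitespace", Pattern: `\s+`},
            },
        })
        // Both definitions expose the same symbol table.
        fmt.Println(reflect.DeepEqual(simple.Symbols(), full.Symbols())) // true
    }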
diff --git a/vendor/github.com/alecthomas/participle/v2/lexer/stateful.go b/vendor/github.com/alecthomas/participle/v2/lexer/stateful.go
new file mode 100644
index 00000000000..d9e4837dc81
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/lexer/stateful.go
@@ -0,0 +1,384 @@
+package lexer
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+)
+
+var (
+ backrefReplace = regexp.MustCompile(`(\\+)(\d)`)
+)
+
+// Option for modifying how the Lexer works.
+type Option func(d *StatefulDefinition)
+
+// A Rule matching input and possibly changing state.
+type Rule struct {
+ Name string
+ Pattern string
+ Action Action
+}
+
+// Rules grouped by name.
+type Rules map[string][]Rule
+
+// compiledRule is a Rule with its pattern compiled.
+type compiledRule struct {
+ Rule
+ ignore bool
+ RE *regexp.Regexp
+}
+
+// compiledRules grouped by name.
+type compiledRules map[string][]compiledRule
+
+// An Action is applied when a rule matches.
+type Action interface {
+ // Actions are responsible for validating the match, e.g. that it consumed some input.
+ applyAction(lexer *StatefulLexer, groups []string) error
+}
+
+// RulesAction is an optional interface that Actions can implement.
+//
+// It is applied during rule construction to mutate the rule map.
+type RulesAction interface {
+ applyRules(state string, rule int, rules compiledRules) error
+}
+
+// InitialState overrides the default initial state of "Root".
+func InitialState(state string) Option {
+ return func(d *StatefulDefinition) {
+ d.initialState = state
+ }
+}
+
+// MatchLongest causes the Lexer to continue checking rules past the first match.
+// If any subsequent rule has a longer match, it will be used instead.
+func MatchLongest() Option {
+ return func(d *StatefulDefinition) {
+ d.matchLongest = true
+ }
+}
+
+// ActionPop pops to the previous state when the Rule matches.
+type ActionPop struct{}
+
+func (p ActionPop) applyAction(lexer *StatefulLexer, groups []string) error {
+ if groups[0] == "" {
+ return errors.New("did not consume any input")
+ }
+ lexer.stack = lexer.stack[:len(lexer.stack)-1]
+ return nil
+}
+
+// Pop to the previous state.
+func Pop() Action {
+ return ActionPop{}
+}
+
+// ReturnRule signals the lexer to return immediately.
+var ReturnRule = Rule{"returnToParent", "", nil}
+
+// Return to the parent state.
+//
+// Useful as the last rule in a sub-state.
+func Return() Rule { return ReturnRule }
+
+// ActionPush pushes the current state and switches to "State" when the Rule matches.
+type ActionPush struct{ State string }
+
+func (p ActionPush) applyAction(lexer *StatefulLexer, groups []string) error {
+ if groups[0] == "" {
+ return errors.New("did not consume any input")
+ }
+ lexer.stack = append(lexer.stack, lexerState{name: p.State, groups: groups})
+ return nil
+}
+
+// Push to the given state.
+//
+// The target state will then be the set of rules used for matching
+// until another Push or Pop is encountered.
+func Push(state string) Action {
+ return ActionPush{state}
+}
+
+type include struct{ state string }
+
+func (i include) applyAction(lexer *StatefulLexer, groups []string) error {
+ panic("should not be called")
+}
+
+func (i include) applyRules(state string, rule int, rules compiledRules) error {
+ includedRules, ok := rules[i.state]
+ if !ok {
+ return fmt.Errorf("invalid include state %q", i.state)
+ }
+ clone := make([]compiledRule, len(includedRules))
+ copy(clone, includedRules)
+ rules[state] = append(rules[state][:rule], append(clone, rules[state][rule+1:]...)...) // nolint: makezero
+ return nil
+}
+
+// Include rules from another state in this one.
+func Include(state string) Rule {
+ return Rule{Action: include{state}}
+}
+
+// StatefulDefinition is the lexer.Definition.
+type StatefulDefinition struct {
+ rules compiledRules
+ symbols map[string]TokenType
+ // Map of key->*regexp.Regexp
+ backrefCache sync.Map
+ initialState string
+ matchLongest bool
+}
+
+// MustStateful creates a new stateful lexer and panics if it is incorrect.
+func MustStateful(rules Rules, options ...Option) *StatefulDefinition {
+ def, err := New(rules, options...)
+ if err != nil {
+ panic(err)
+ }
+ return def
+}
+
+// New constructs a new stateful lexer from rules.
+func New(rules Rules, options ...Option) (*StatefulDefinition, error) {
+ compiled := compiledRules{}
+ for key, set := range rules {
+ for i, rule := range set {
+ pattern := "^(?:" + rule.Pattern + ")"
+ var (
+ re *regexp.Regexp
+ err error
+ )
+ var match = backrefReplace.FindStringSubmatch(rule.Pattern)
+ if match == nil || len(match[1])%2 == 0 {
+ re, err = regexp.Compile(pattern)
+ if err != nil {
+ return nil, fmt.Errorf("%s.%d: %s", key, i, err)
+ }
+ }
+ compiled[key] = append(compiled[key], compiledRule{
+ Rule: rule,
+ ignore: len(rule.Name) > 0 && unicode.IsLower(rune(rule.Name[0])),
+ RE: re,
+ })
+ }
+ }
+restart:
+ for state, rules := range compiled {
+ for i, rule := range rules {
+ if action, ok := rule.Action.(RulesAction); ok {
+ if err := action.applyRules(state, i, compiled); err != nil {
+ return nil, fmt.Errorf("%s.%d: %s", state, i, err)
+ }
+ goto restart
+ }
+ }
+ }
+ keys := make([]string, 0, len(compiled))
+ for key := range compiled {
+ keys = append(keys, key)
+ }
+ symbols := map[string]TokenType{
+ "EOF": EOF,
+ }
+ sort.Strings(keys)
+ duplicates := map[string]compiledRule{}
+ rn := EOF - 1
+ for _, key := range keys {
+ for i, rule := range compiled[key] {
+ if dup, ok := duplicates[rule.Name]; ok && rule.Pattern != dup.Pattern {
+ panic(fmt.Sprintf("duplicate key %q with different patterns %q != %q", rule.Name, rule.Pattern, dup.Pattern))
+ }
+ duplicates[rule.Name] = rule
+ compiled[key][i] = rule
+ symbols[rule.Name] = rn
+ rn--
+ }
+ }
+ d := &StatefulDefinition{
+ initialState: "Root",
+ rules: compiled,
+ symbols: symbols,
+ }
+ for _, option := range options {
+ option(d)
+ }
+ return d, nil
+}
+
+// Rules returns the user-provided Rules used to construct the lexer.
+func (d *StatefulDefinition) Rules() Rules {
+ out := Rules{}
+ for state, rules := range d.rules {
+ for _, rule := range rules {
+ out[state] = append(out[state], rule.Rule)
+ }
+ }
+ return out
+}
+
+// LexString is a fast-path implementation for lexing strings.
+func (d *StatefulDefinition) LexString(filename string, s string) (Lexer, error) {
+ return &StatefulLexer{
+ def: d,
+ data: s,
+ stack: []lexerState{{name: d.initialState}},
+ pos: Position{
+ Filename: filename,
+ Line: 1,
+ Column: 1,
+ },
+ }, nil
+}
+
+func (d *StatefulDefinition) Lex(filename string, r io.Reader) (Lexer, error) { // nolint: golint
+ w := &strings.Builder{}
+ _, err := io.Copy(w, r)
+ if err != nil {
+ return nil, err
+ }
+ return d.LexString(filename, w.String())
+}
+
+func (d *StatefulDefinition) Symbols() map[string]TokenType { // nolint: golint
+ return d.symbols
+}
+
+type lexerState struct {
+ name string
+ groups []string
+}
+
+// StatefulLexer implementation.
+type StatefulLexer struct {
+ stack []lexerState
+ def *StatefulDefinition
+ data string
+ pos Position
+}
+
+func (l *StatefulLexer) Next() (Token, error) { // nolint: golint
+ parent := l.stack[len(l.stack)-1]
+ rules := l.def.rules[parent.name]
+next:
+ for len(l.data) > 0 {
+ var (
+ rule *compiledRule
+ m []int
+ match []int
+ )
+ for i, candidate := range rules {
+ // Special case "Return()".
+ if candidate.Rule == ReturnRule {
+ l.stack = l.stack[:len(l.stack)-1]
+ parent = l.stack[len(l.stack)-1]
+ rules = l.def.rules[parent.name]
+ continue next
+ }
+ re, err := l.getPattern(candidate)
+ if err != nil {
+ return Token{}, errorf(l.pos, "rule %q: %s", candidate.Name, err)
+ }
+ m = re.FindStringSubmatchIndex(l.data)
+ if m != nil && (match == nil || m[1] > match[1]) {
+ match = m
+ rule = &rules[i]
+ if !l.def.matchLongest {
+ break
+ }
+ }
+ }
+ if match == nil || rule == nil {
+ sample := []rune(l.data)
+ if len(sample) > 16 {
+ sample = append(sample[:16], []rune("...")...)
+ }
+ return Token{}, errorf(l.pos, "invalid input text %q", string(sample))
+ }
+
+ if rule.Action != nil {
+ groups := make([]string, 0, len(match)/2)
+ for i := 0; i < len(match); i += 2 {
+ groups = append(groups, l.data[match[i]:match[i+1]])
+ }
+ if err := rule.Action.applyAction(l, groups); err != nil {
+ return Token{}, errorf(l.pos, "rule %q: %s", rule.Name, err)
+ }
+ } else if match[0] == match[1] {
+ return Token{}, errorf(l.pos, "rule %q did not match any input", rule.Name)
+ }
+
+ span := l.data[match[0]:match[1]]
+ l.data = l.data[match[1]:]
+ // l.groups = groups
+
+ // Update position.
+ pos := l.pos
+ l.pos.Advance(span)
+ if rule.ignore {
+ parent = l.stack[len(l.stack)-1]
+ rules = l.def.rules[parent.name]
+ continue
+ }
+ return Token{
+ Type: l.def.symbols[rule.Name],
+ Value: span,
+ Pos: pos,
+ }, nil
+ }
+ return EOFToken(l.pos), nil
+}
+
+func (l *StatefulLexer) getPattern(candidate compiledRule) (*regexp.Regexp, error) {
+ if candidate.RE != nil {
+ return candidate.RE, nil
+ }
+
+ // We don't have a compiled RE. This means there are back-references
+ // that need to be substituted first.
+ parent := l.stack[len(l.stack)-1]
+ key := candidate.Pattern + "\000" + strings.Join(parent.groups, "\000")
+ cached, ok := l.def.backrefCache.Load(key)
+ if ok {
+ return cached.(*regexp.Regexp), nil
+ }
+
+ var (
+ re *regexp.Regexp
+ err error
+ )
+ pattern := backrefReplace.ReplaceAllStringFunc(candidate.Pattern, func(s string) string {
+ var rematch = backrefReplace.FindStringSubmatch(s)
+ n, nerr := strconv.ParseInt(rematch[2], 10, 64)
+ if nerr != nil {
+ err = nerr
+ return s
+ }
+ if len(parent.groups) == 0 || int(n) >= len(parent.groups) {
+ err = fmt.Errorf("invalid group %d from parent with %d groups", n, len(parent.groups))
+ return s
+ }
+ // Splice the (quoted) parent group onto the leading backslashes, dropping the backslash that introduced the back-reference.
+ return rematch[1][:len(rematch[1])-1] + regexp.QuoteMeta(parent.groups[n])
+ })
+ if err == nil {
+ re, err = regexp.Compile("^(?:" + pattern + ")")
+ }
+ if err != nil {
+ return nil, fmt.Errorf("invalid backref expansion: %q: %s", pattern, err)
+ }
+ l.def.backrefCache.Store(key, re)
+ return re, nil
+}
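A sketch of the back-reference expansion handled by getPattern above, using the heredoc case mentioned in the package docs (illustrative, not part of the vendored sources):

    package main

    import (
        "fmt"

        "github.com/alecthomas/participle/v2/lexer"
    )

    // \1 in the Heredoc state is rewritten at lex time into the first capture
    // group of the rule that pushed the state -- here, the heredoc delimiter.
    var def = lexer.MustStateful(lexer.Rules{
        "Root": {
            {"HeredocStart", `<<(\w+)`, lexer.Push("Heredoc")},
            {"Ident", `\w+`, nil},
            {"whitespace", `\s+`, nil},
        },
        "Heredoc": {
            {"HeredocEnd", `\b\1\b`, lexer.Pop()},
            {"whitespace", `\s+`, nil},
            {"Line", `[^\n]+`, nil},
        },
    })

    func main() {
        l, err := def.LexString("", "<<EOF\nhello world\nEOF")
        if err != nil {
            panic(err)
        }
        tokens, err := lexer.ConsumeAll(l)
        if err != nil {
            panic(err)
        }
        for _, t := range tokens {
            fmt.Printf("%q ", t.Value) // "<<EOF" "hello world" "EOF" ""
        }
    }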
diff --git a/vendor/github.com/alecthomas/participle/v2/lexer/text_scanner.go b/vendor/github.com/alecthomas/participle/v2/lexer/text_scanner.go
new file mode 100644
index 00000000000..5607c333096
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/lexer/text_scanner.go
@@ -0,0 +1,112 @@
+package lexer
+
+import (
+ "bytes"
+ "io"
+ "strings"
+ "text/scanner"
+)
+
+// TextScannerLexer is a lexer that uses the text/scanner module.
+var (
+ TextScannerLexer Definition = &textScannerLexerDefinition{}
+
+ // DefaultDefinition defines properties for the default lexer.
+ DefaultDefinition = TextScannerLexer
+)
+
+// NewTextScannerLexer constructs a Definition that uses an underlying scanner.Scanner
+//
+// "configure" will be called after the scanner.Scanner.Init(r) is called. If "configure"
+// is nil a default scanner.Scanner will be used.
+func NewTextScannerLexer(configure func(*scanner.Scanner)) Definition {
+ return &textScannerLexerDefinition{configure: configure}
+}
+
+type textScannerLexerDefinition struct {
+ configure func(*scanner.Scanner)
+}
+
+func (d *textScannerLexerDefinition) Lex(filename string, r io.Reader) (Lexer, error) {
+ l := Lex(filename, r)
+ if d.configure != nil {
+ d.configure(l.(*textScannerLexer).scanner)
+ }
+ return l, nil
+}
+
+func (d *textScannerLexerDefinition) Symbols() map[string]TokenType {
+ return map[string]TokenType{
+ "EOF": EOF,
+ "Char": scanner.Char,
+ "Ident": scanner.Ident,
+ "Int": scanner.Int,
+ "Float": scanner.Float,
+ "String": scanner.String,
+ "RawString": scanner.RawString,
+ "Comment": scanner.Comment,
+ }
+}
+
+// textScannerLexer is a Lexer based on text/scanner.Scanner
+type textScannerLexer struct {
+ scanner *scanner.Scanner
+ filename string
+ err error
+}
+
+// Lex an io.Reader with text/scanner.Scanner.
+//
+// This provides very fast lexing of source code compatible with Go tokens.
+//
+// Note that string tokens are returned verbatim, including their quotes; use the
+// parser's Unquote option to unquote them.
+func Lex(filename string, r io.Reader) Lexer {
+ s := &scanner.Scanner{}
+ s.Init(r)
+ lexer := lexWithScanner(filename, s)
+ lexer.scanner.Error = func(s *scanner.Scanner, msg string) {
+ lexer.err = errorf(Position(lexer.scanner.Pos()), msg)
+ }
+ return lexer
+}
+
+// LexWithScanner creates a Lexer from a user-provided scanner.Scanner.
+//
+// Useful if you need to customise the Scanner.
+func LexWithScanner(filename string, scan *scanner.Scanner) Lexer {
+ return lexWithScanner(filename, scan)
+}
+
+func lexWithScanner(filename string, scan *scanner.Scanner) *textScannerLexer {
+ scan.Filename = filename
+ lexer := &textScannerLexer{
+ filename: filename,
+ scanner: scan,
+ }
+ return lexer
+}
+
+// LexBytes returns a new default lexer over bytes.
+func LexBytes(filename string, b []byte) Lexer {
+ return Lex(filename, bytes.NewReader(b))
+}
+
+// LexString returns a new default lexer over a string.
+func LexString(filename, s string) Lexer {
+ return Lex(filename, strings.NewReader(s))
+}
+
+func (t *textScannerLexer) Next() (Token, error) {
+ typ := t.scanner.Scan()
+ text := t.scanner.TokenText()
+ pos := Position(t.scanner.Position)
+ pos.Filename = t.filename
+ if t.err != nil {
+ return Token{}, t.err
+ }
+ return Token{
+ Type: TokenType(typ),
+ Value: text,
+ Pos: pos,
+ }, nil
+}
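A sketch of the default text/scanner-backed lexer in use (illustrative):

    package main

    import (
        "fmt"

        "github.com/alecthomas/participle/v2/lexer"
    )

    func main() {
        // Whitespace and comments are skipped by text/scanner's defaults;
        // operators come through as single-rune tokens.
        l := lexer.LexString("example.go", `x = 42 // answer`)
        for {
            tok, err := l.Next()
            if err != nil {
                panic(err)
            }
            if tok.EOF() {
                break
            }
            fmt.Printf("%s %q\n", tok.Pos, tok.Value) // x, =, 42 with positions
        }
    }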
diff --git a/vendor/github.com/alecthomas/participle/v2/map.go b/vendor/github.com/alecthomas/participle/v2/map.go
new file mode 100644
index 00000000000..c80290c9c1a
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/map.go
@@ -0,0 +1,111 @@
+package participle
+
+import (
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+type mapperByToken struct {
+ symbols []string
+ mapper Mapper
+}
+
+// Mapper function for mutating tokens before being applied to the AST.
+type Mapper func(token lexer.Token) (lexer.Token, error)
+
+// Map is an Option that configures the Parser to apply a mapping function to each Token from the lexer.
+//
+// This can be useful to eg. upper-case all tokens of a certain type, or dequote strings.
+//
+// "symbols" specifies the token symbols that the Mapper will be applied to. If empty, all tokens will be mapped.
+func Map(mapper Mapper, symbols ...string) Option {
+ return func(p *parserOptions) error {
+ p.mappers = append(p.mappers, mapperByToken{
+ mapper: mapper,
+ symbols: symbols,
+ })
+ return nil
+ }
+}
+
+// Unquote applies strconv.Unquote() to tokens of the given types.
+//
+// Tokens of type "String" will be unquoted if no other types are provided.
+func Unquote(types ...string) Option {
+ if len(types) == 0 {
+ types = []string{"String"}
+ }
+ return Map(func(t lexer.Token) (lexer.Token, error) {
+ value, err := unquote(t.Value)
+ if err != nil {
+ return t, Errorf(t.Pos, "invalid quoted string %q: %s", t.Value, err.Error())
+ }
+ t.Value = value
+ return t, nil
+ }, types...)
+}
+
+func unquote(s string) (string, error) {
+ quote := s[0]
+ s = s[1 : len(s)-1]
+ out := ""
+ for s != "" {
+ value, _, tail, err := strconv.UnquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = tail
+ out += string(value)
+ }
+ return out, nil
+}
+
+// Upper is an Option that upper-cases all tokens of the given type. Useful for case normalisation.
+func Upper(types ...string) Option {
+ return Map(func(token lexer.Token) (lexer.Token, error) {
+ token.Value = strings.ToUpper(token.Value)
+ return token, nil
+ }, types...)
+}
+
+// Elide drops tokens of the specified types.
+func Elide(types ...string) Option {
+ return func(p *parserOptions) error {
+ p.elide = append(p.elide, types...)
+ return nil
+ }
+}
+
+// Apply a Mapping to all tokens coming out of a Lexer.
+type mappingLexerDef struct {
+ l lexer.Definition
+ mapper Mapper
+}
+
+var _ lexer.Definition = &mappingLexerDef{}
+
+func (m *mappingLexerDef) Symbols() map[string]lexer.TokenType { return m.l.Symbols() }
+
+func (m *mappingLexerDef) Lex(filename string, r io.Reader) (lexer.Lexer, error) {
+ l, err := m.l.Lex(filename, r)
+ if err != nil {
+ return nil, err
+ }
+ return &mappingLexer{l, m.mapper}, nil
+}
+
+type mappingLexer struct {
+ lexer.Lexer
+ mapper Mapper
+}
+
+func (m *mappingLexer) Next() (lexer.Token, error) {
+ t, err := m.Lexer.Next()
+ if err != nil {
+ return t, err
+ }
+ return m.mapper(t)
+}
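A sketch of these options applied to a parser (illustrative and hedged: MustBuild and ParseString are assumed from this library version's generic parser API, which is not part of this file):

    package main

    import (
        "fmt"

        "github.com/alecthomas/participle/v2"
    )

    // Toy grammar for `name = "value"` using the default text/scanner lexer.
    type Assignment struct {
        Name  string `@Ident "="`
        Value string `@String`
    }

    // Unquote maps String tokens through strconv.Unquote before capture.
    // MustBuild/ParseString are assumed from the parser API elsewhere in
    // this library, not from the files in this diff.
    var parser = participle.MustBuild[Assignment](
        participle.Unquote("String"),
    )

    func main() {
        ast, err := parser.ParseString("", `name = "participle"`)
        if err != nil {
            panic(err)
        }
        fmt.Println(ast.Name, "=", ast.Value) // name = participle
    }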
diff --git a/vendor/github.com/alecthomas/participle/v2/nodes.go b/vendor/github.com/alecthomas/participle/v2/nodes.go
new file mode 100644
index 00000000000..3d609ca75dd
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/nodes.go
@@ -0,0 +1,797 @@
+package participle
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+var (
+ // MaxIterations limits the number of elements capturable by {}.
+ MaxIterations = 1000000
+
+ positionType = reflect.TypeOf(lexer.Position{})
+ tokenType = reflect.TypeOf(lexer.Token{})
+ tokensType = reflect.TypeOf([]lexer.Token{})
+ captureType = reflect.TypeOf((*Capture)(nil)).Elem()
+ textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ parseableType = reflect.TypeOf((*Parseable)(nil)).Elem()
+
+ // NextMatch should be returned by Parseable.Parse() method implementations to indicate
+ // that the node did not match and that other matches should be attempted, if appropriate.
+ NextMatch = errors.New("no match") // nolint: golint
+)
+
+// A node in the grammar.
+type node interface {
+ // Parse from scanner into value.
+ //
+ // Returned slice will be nil if the node does not match.
+ Parse(ctx *parseContext, parent reflect.Value) ([]reflect.Value, error)
+
+ // Return a decent string representation of the Node.
+ fmt.Stringer
+
+ fmt.GoStringer
+}
+
+func decorate(err *error, name func() string) {
+ if *err == nil {
+ return
+ }
+ if perr, ok := (*err).(Error); ok {
+ *err = Errorf(perr.Position(), "%s: %s", name(), perr.Message())
+ } else {
+ *err = &ParseError{Msg: fmt.Sprintf("%s: %s", name(), *err)}
+ }
+}
+
+// A node that proxies to an implementation that implements the Parseable interface.
+type parseable struct {
+ t reflect.Type
+}
+
+func (p *parseable) String() string { return ebnf(p) }
+func (p *parseable) GoString() string { return p.t.String() }
+
+func (p *parseable) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(p)()
+ rv := reflect.New(p.t)
+ v := rv.Interface().(Parseable)
+ err = v.Parse(ctx.PeekingLexer)
+ if err != nil {
+ if err == NextMatch {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return []reflect.Value{rv.Elem()}, nil
+}
+
+// @@ (but for a custom production)
+type custom struct {
+ typ reflect.Type
+ parseFn reflect.Value
+}
+
+func (c *custom) String() string { return ebnf(c) }
+func (c *custom) GoString() string { return c.typ.Name() }
+
+func (c *custom) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(c)()
+ results := c.parseFn.Call([]reflect.Value{reflect.ValueOf(ctx.PeekingLexer)})
+ if err, _ := results[1].Interface().(error); err != nil {
+ if err == NextMatch {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return []reflect.Value{results[0]}, nil
+}
+
+// @@ (for a union)
+type union struct {
+ unionDef
+ nodeMembers []node
+}
+
+func (u *union) String() string { return ebnf(u) }
+func (u *union) GoString() string { return u.typ.Name() }
+
+func (u *union) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(u)()
+ temp := disjunction{u.nodeMembers}
+ vals, err := temp.Parse(ctx, parent)
+ if err != nil {
+ return nil, err
+ }
+ for i := range vals {
+ vals[i] = maybeRef(u.members[i], vals[i]).Convert(u.typ)
+ }
+ return vals, nil
+}
+
+// @@
+type strct struct {
+ typ reflect.Type
+ expr node
+ tokensFieldIndex []int
+ posFieldIndex []int
+ endPosFieldIndex []int
+}
+
+func newStrct(typ reflect.Type) *strct {
+ s := &strct{
+ typ: typ,
+ }
+ field, ok := typ.FieldByName("Pos")
+ if ok && field.Type == positionType {
+ s.posFieldIndex = field.Index
+ }
+ field, ok = typ.FieldByName("EndPos")
+ if ok && field.Type == positionType {
+ s.endPosFieldIndex = field.Index
+ }
+ field, ok = typ.FieldByName("Tokens")
+ if ok && field.Type == tokensType {
+ s.tokensFieldIndex = field.Index
+ }
+ return s
+}
+
+func (s *strct) String() string { return ebnf(s) }
+func (s *strct) GoString() string { return s.typ.Name() }
+
+func (s *strct) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(s)()
+ sv := reflect.New(s.typ).Elem()
+ start := ctx.RawCursor()
+ t := ctx.Peek()
+ s.maybeInjectStartToken(t, sv)
+ if out, err = s.expr.Parse(ctx, sv); err != nil {
+ _ = ctx.Apply() // Best effort to give partial AST.
+ ctx.MaybeUpdateError(err)
+ return []reflect.Value{sv}, err
+ } else if out == nil {
+ return nil, nil
+ }
+ end := ctx.RawCursor()
+ t = ctx.RawPeek()
+ s.maybeInjectEndToken(t, sv)
+ s.maybeInjectTokens(ctx.Range(start, end), sv)
+ return []reflect.Value{sv}, ctx.Apply()
+}
+
+func (s *strct) maybeInjectStartToken(token lexer.Token, v reflect.Value) {
+ if s.posFieldIndex == nil {
+ return
+ }
+ v.FieldByIndex(s.posFieldIndex).Set(reflect.ValueOf(token.Pos))
+}
+
+func (s *strct) maybeInjectEndToken(token lexer.Token, v reflect.Value) {
+ if s.endPosFieldIndex == nil {
+ return
+ }
+ v.FieldByIndex(s.endPosFieldIndex).Set(reflect.ValueOf(token.Pos))
+}
+
+func (s *strct) maybeInjectTokens(tokens []lexer.Token, v reflect.Value) {
+ if s.tokensFieldIndex == nil {
+ return
+ }
+ v.FieldByIndex(s.tokensFieldIndex).Set(reflect.ValueOf(tokens))
+}
+
+type groupMatchMode int
+
+func (g groupMatchMode) String() string {
+ switch g {
+ case groupMatchOnce:
+ return "n"
+ case groupMatchZeroOrOne:
+ return "n?"
+ case groupMatchZeroOrMore:
+ return "n*"
+ case groupMatchOneOrMore:
+ return "n+"
+ case groupMatchNonEmpty:
+ return "n!"
+ }
+ panic("??")
+}
+
+const (
+	groupMatchOnce groupMatchMode = iota
+	groupMatchZeroOrOne
+	groupMatchZeroOrMore
+	groupMatchOneOrMore
+	groupMatchNonEmpty
+)
+
+// ( <expr> ) - match once
+// ( <expr> )* - match zero or more times
+// ( <expr> )+ - match one or more times
+// ( <expr> )? - match zero or once
+// ( <expr> )! - must be a non-empty match
+//
+// The additional modifier "!" forces the content of the group to be non-empty if it does match.
+type group struct {
+ expr node
+ mode groupMatchMode
+}
+
+func (g *group) String() string { return ebnf(g) }
+func (g *group) GoString() string { return fmt.Sprintf("group{%s}", g.mode) }
+func (g *group) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(g)()
+ // Configure min/max matches.
+ min := 1
+ max := 1
+ switch g.mode {
+ case groupMatchNonEmpty:
+ out, err = g.expr.Parse(ctx, parent)
+ if err != nil {
+ return out, err
+ }
+ if len(out) == 0 {
+ t := ctx.Peek()
+ return out, Errorf(t.Pos, "sub-expression %s cannot be empty", g)
+ }
+ return out, nil
+ case groupMatchOnce:
+ return g.expr.Parse(ctx, parent)
+ case groupMatchZeroOrOne:
+ min = 0
+ case groupMatchZeroOrMore:
+ min = 0
+ max = MaxIterations
+ case groupMatchOneOrMore:
+ min = 1
+ max = MaxIterations
+ }
+ matches := 0
+ for ; matches < max; matches++ {
+ branch := ctx.Branch()
+ v, err := g.expr.Parse(branch, parent)
+ if err != nil {
+ ctx.MaybeUpdateError(err)
+ // Optional part failed to match.
+ if ctx.Stop(err, branch) {
+ out = append(out, v...) // Try to return as much of the parse tree as possible
+ return out, err
+ }
+ break
+ }
+ out = append(out, v...)
+ ctx.Accept(branch)
+ if v == nil {
+ break
+ }
+ }
+ // fmt.Printf("%d < %d < %d: out == nil? %v\n", min, matches, max, out == nil)
+ t := ctx.Peek()
+ if matches >= MaxIterations {
+ return nil, Errorf(t.Pos, "too many iterations of %s (> %d)", g, MaxIterations)
+ }
+ if matches < min {
+ return out, Errorf(t.Pos, "sub-expression %s must match at least once", g)
+ }
+ // The idea here is that something like "a"? is a successful match and that parsing should proceed.
+ if min == 0 && out == nil {
+ out = []reflect.Value{}
+ }
+ return out, nil
+}
+
+// (?= <expr> ) for positive lookahead, (?! <expr> ) for negative lookahead; neither consumes input
+type lookaheadGroup struct {
+ expr node
+ negative bool
+}
+
+func (n *lookaheadGroup) String() string { return ebnf(n) }
+func (n *lookaheadGroup) GoString() string { return "lookaheadGroup{}" }
+
+func (n *lookaheadGroup) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(n)()
+ // Create a branch to avoid advancing the parser as any match will be discarded
+ branch := ctx.Branch()
+ out, err = n.expr.Parse(branch, parent)
+ matchedLookahead := err == nil && out != nil
+ expectingMatch := !n.negative
+ if matchedLookahead != expectingMatch {
+ peek := ctx.Peek()
+ return nil, Errorf(peek.Pos, "unexpected '%s'", peek.Value)
+ }
+ return []reflect.Value{}, nil // Empty match slice means a match, unlike nil
+}
+
+// {"|" }
+type disjunction struct {
+ nodes []node
+}
+
+func (d *disjunction) String() string { return ebnf(d) }
+func (d *disjunction) GoString() string { return "disjunction{}" }
+
+func (d *disjunction) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(d)()
+ var (
+ deepestError = 0
+ firstError error
+ firstValues []reflect.Value
+ )
+ for _, a := range d.nodes {
+ branch := ctx.Branch()
+ if value, err := a.Parse(branch, parent); err != nil {
+ // If this branch progressed too far and still didn't match, error out.
+ if ctx.Stop(err, branch) {
+ return value, err
+ }
+			// Show the error from the deepest branch. The idea here is that the further the parser progresses
+			// without error, the more likely its error reflects the true cause of the failure.
+ if branch.Cursor() >= deepestError {
+ firstError = err
+ firstValues = value
+ deepestError = branch.Cursor()
+ }
+ } else if value != nil {
+ bt := branch.RawPeek()
+ ct := ctx.RawPeek()
+ if bt == ct && bt.Type != lexer.EOF {
+ panic(Errorf(bt.Pos, "branch %s was accepted but did not progress the lexer at %s (%q)", a, bt.Pos, bt.Value))
+ }
+ ctx.Accept(branch)
+ return value, nil
+ }
+ }
+ if firstError != nil {
+ ctx.MaybeUpdateError(firstError)
+ return firstValues, firstError
+ }
+ return nil, nil
+}
+
+// <expr> <expr> ...
+type sequence struct {
+ head bool // True if this is the head node.
+ node node
+ next *sequence
+}
+
+func (s *sequence) String() string { return ebnf(s) }
+func (s *sequence) GoString() string { return "sequence{}" }
+
+func (s *sequence) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(s)()
+ for n := s; n != nil; n = n.next {
+ child, err := n.node.Parse(ctx, parent)
+ out = append(out, child...)
+ if err != nil {
+ return out, err
+ }
+ if child == nil {
+ // Early exit if first value doesn't match, otherwise all values must match.
+ if n == s {
+ return nil, nil
+ }
+ token := ctx.Peek()
+ return out, &UnexpectedTokenError{Unexpected: token, at: n}
+ }
+ // Special-case for when children return an empty match.
+ // Appending an empty, non-nil slice to a nil slice returns a nil slice.
+ // https://go.dev/play/p/lV1Xk-IP6Ta
+ if out == nil {
+ out = []reflect.Value{}
+ }
+ }
+ return out, nil
+}
+
+// @<expr>
+type capture struct {
+ field structLexerField
+ node node
+}
+
+func (c *capture) String() string { return ebnf(c) }
+func (c *capture) GoString() string { return "capture{}" }
+
+func (c *capture) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(c)()
+ start := ctx.RawCursor()
+ v, err := c.node.Parse(ctx, parent)
+ if v != nil {
+ ctx.Defer(ctx.Range(start, ctx.RawCursor()), parent, c.field, v)
+ }
+ if err != nil {
+ return []reflect.Value{parent}, err
+ }
+ if v == nil {
+ return nil, nil
+ }
+ return []reflect.Value{parent}, nil
+}
+
+// <identifier> - named lexer token reference
+type reference struct {
+ typ lexer.TokenType
+ identifier string // Used for informational purposes.
+}
+
+func (r *reference) String() string { return ebnf(r) }
+func (r *reference) GoString() string { return fmt.Sprintf("reference{%s}", r.identifier) }
+
+func (r *reference) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(r)()
+ token, cursor := ctx.PeekAny(func(t lexer.Token) bool {
+ return t.Type == r.typ
+ })
+ if token.Type != r.typ {
+ return nil, nil
+ }
+ ctx.FastForward(cursor)
+ return []reflect.Value{reflect.ValueOf(token.Value)}, nil
+}
+
+// [ <expr> ]
+type optional struct {
+ node node
+}
+
+func (o *optional) String() string { return ebnf(o) }
+func (o *optional) GoString() string { return "optional{}" }
+
+func (o *optional) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(o)()
+ branch := ctx.Branch()
+ out, err = o.node.Parse(branch, parent)
+ if err != nil {
+ // Optional part failed to match.
+ if ctx.Stop(err, branch) {
+ return out, err
+ }
+ } else {
+ ctx.Accept(branch)
+ }
+ if out == nil {
+ out = []reflect.Value{}
+ }
+ return out, nil
+}
+
+// { <expr> }
+type repetition struct {
+ node node
+}
+
+func (r *repetition) String() string { return ebnf(r) }
+func (r *repetition) GoString() string { return "repetition{}" }
+
+// Parse a repetition. Once a repetition is encountered it will always match, so grammars
+// should ensure that branches are differentiated prior to the repetition.
+func (r *repetition) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(r)()
+ i := 0
+ for ; i < MaxIterations; i++ {
+ branch := ctx.Branch()
+ v, err := r.node.Parse(branch, parent)
+ out = append(out, v...)
+ if err != nil {
+ // Optional part failed to match.
+ if ctx.Stop(err, branch) {
+ return out, err
+ }
+ break
+ } else {
+ ctx.Accept(branch)
+ }
+ if v == nil {
+ break
+ }
+ }
+ if i >= MaxIterations {
+ t := ctx.Peek()
+ return nil, Errorf(t.Pos, "too many iterations of %s (> %d)", r, MaxIterations)
+ }
+ if out == nil {
+ out = []reflect.Value{}
+ }
+ return out, nil
+}
+
+// Match a token literal exactly "..."[:<type>].
+type literal struct {
+ s string
+ t lexer.TokenType
+ tt string // Used for display purposes - symbolic name of t.
+}
+
+func (l *literal) String() string { return ebnf(l) }
+func (l *literal) GoString() string { return fmt.Sprintf("literal{%q, %q}", l.s, l.tt) }
+
+func (l *literal) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(l)()
+ match := func(t lexer.Token) bool {
+ var equal bool
+ if ctx.caseInsensitive[t.Type] {
+ equal = l.s == "" || strings.EqualFold(t.Value, l.s)
+ } else {
+ equal = l.s == "" || t.Value == l.s
+ }
+ return (l.t == lexer.EOF || l.t == t.Type) && equal
+ }
+ token, cursor := ctx.PeekAny(match)
+ if match(token) {
+ ctx.FastForward(cursor)
+ return []reflect.Value{reflect.ValueOf(token.Value)}, nil
+ }
+ return nil, nil
+}
+
+type negation struct {
+ node node
+}
+
+func (n *negation) String() string { return ebnf(n) }
+func (n *negation) GoString() string { return "negation{}" }
+
+func (n *negation) Parse(ctx *parseContext, parent reflect.Value) (out []reflect.Value, err error) {
+ defer ctx.printTrace(n)()
+ // Create a branch to avoid advancing the parser, but call neither Stop nor Accept on it
+ // since we will discard a match.
+ branch := ctx.Branch()
+ notEOF := ctx.Peek()
+ if notEOF.EOF() {
+ // EOF cannot match a negation, which expects something
+ return nil, nil
+ }
+
+ out, err = n.node.Parse(branch, parent)
+ if out != nil && err == nil {
+		// out being non-nil means that what we don't want is actually here, so we report no match.
+ return nil, Errorf(notEOF.Pos, "unexpected '%s'", notEOF.Value)
+ }
+
+ // Just give the next token
+ next := ctx.Next()
+ return []reflect.Value{reflect.ValueOf(next.Value)}, nil
+}
+
+// Attempt to transform values to given type.
+//
+// This will dereference pointers, and attempt to parse strings into integer values, floats, etc.
+func conform(t reflect.Type, values []reflect.Value) (out []reflect.Value, err error) {
+ for _, v := range values {
+ for t != v.Type() && t.Kind() == reflect.Ptr && v.Kind() != reflect.Ptr {
+ // This can occur during partial failure.
+ if !v.CanAddr() {
+ return
+ }
+ v = v.Addr()
+ }
+
+ // Already of the right kind, don't bother converting.
+ if v.Kind() == t.Kind() {
+ if v.Type() != t {
+ v = v.Convert(t)
+ }
+ out = append(out, v)
+ continue
+ }
+
+ kind := t.Kind()
+ switch kind { // nolint: exhaustive
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(v.String(), 0, sizeOfKind(kind))
+ if err != nil {
+ return nil, fmt.Errorf("invalid integer %q: %s", v.String(), err)
+ }
+ v = reflect.New(t).Elem()
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ n, err := strconv.ParseUint(v.String(), 0, sizeOfKind(kind))
+ if err != nil {
+ return nil, fmt.Errorf("invalid integer %q: %s", v.String(), err)
+ }
+ v = reflect.New(t).Elem()
+ v.SetUint(n)
+
+ case reflect.Bool:
+ v = reflect.ValueOf(true)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(v.String(), sizeOfKind(kind))
+ if err != nil {
+ return nil, fmt.Errorf("invalid integer %q: %s", v.String(), err)
+ }
+ v = reflect.New(t).Elem()
+ v.SetFloat(n)
+ }
+
+ out = append(out, v)
+ }
+ return out, nil
+}
+
+func sizeOfKind(kind reflect.Kind) int {
+ switch kind { // nolint: exhaustive
+ case reflect.Int8, reflect.Uint8:
+ return 8
+ case reflect.Int16, reflect.Uint16:
+ return 16
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ return 32
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ return 64
+ case reflect.Int, reflect.Uint:
+ return strconv.IntSize
+ }
+ panic("unsupported kind " + kind.String())
+}
+
+func maybeRef(tmpl reflect.Type, strct reflect.Value) reflect.Value {
+ if strct.Type() == tmpl {
+ return strct
+ }
+ if tmpl.Kind() == reflect.Ptr {
+ if strct.CanAddr() {
+ return strct.Addr()
+ }
+ ptr := reflect.New(tmpl)
+ ptr.Set(strct)
+ return ptr
+ }
+ return strct
+}
+
+// Set field.
+//
+// If field is a pointer the pointer will be set to the value. If field is a string, value will be
+// appended. If field is a slice, value will be appended to slice.
+//
+// For all other types, an attempt will be made to convert the string to the corresponding
+// type (int, float32, etc.).
+func setField(tokens []lexer.Token, strct reflect.Value, field structLexerField, fieldValue []reflect.Value) (err error) { // nolint: gocognit
+ defer decorate(&err, func() string { return strct.Type().Name() + "." + field.Name })
+
+ f := strct.FieldByIndex(field.Index)
+
+ // Any kind of pointer, hydrate it first.
+ if f.Kind() == reflect.Ptr {
+ if f.IsNil() {
+ fv := reflect.New(f.Type().Elem()).Elem()
+ f.Set(fv.Addr())
+ f = fv
+ } else {
+ f = f.Elem()
+ }
+ }
+
+ if f.Type() == tokenType {
+ f.Set(reflect.ValueOf(tokens[0]))
+ return nil
+ }
+
+ if f.Type() == tokensType {
+ f.Set(reflect.ValueOf(tokens))
+ return nil
+ }
+
+ if f.CanAddr() {
+ if d, ok := f.Addr().Interface().(Capture); ok {
+ ifv := make([]string, 0, len(fieldValue))
+ for _, v := range fieldValue {
+ ifv = append(ifv, v.Interface().(string))
+ }
+ return d.Capture(ifv)
+ } else if d, ok := f.Addr().Interface().(encoding.TextUnmarshaler); ok {
+ for _, v := range fieldValue {
+ if err := d.UnmarshalText([]byte(v.Interface().(string))); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ }
+
+ if f.Kind() == reflect.Slice {
+ sliceElemType := f.Type().Elem()
+ if sliceElemType.Implements(captureType) || reflect.PtrTo(sliceElemType).Implements(captureType) {
+ if sliceElemType.Kind() == reflect.Ptr {
+ sliceElemType = sliceElemType.Elem()
+ }
+ for _, v := range fieldValue {
+ d := reflect.New(sliceElemType).Interface().(Capture)
+ if err := d.Capture([]string{v.Interface().(string)}); err != nil {
+ return err
+ }
+ eltValue := reflect.ValueOf(d)
+ if f.Type().Elem().Kind() != reflect.Ptr {
+ eltValue = eltValue.Elem()
+ }
+ f.Set(reflect.Append(f, eltValue))
+ }
+ } else {
+ fieldValue, err = conform(sliceElemType, fieldValue)
+ if err != nil {
+ return err
+ }
+ f.Set(reflect.Append(f, fieldValue...))
+ }
+ return nil
+ }
+
+ // Strings concatenate all captured tokens.
+ if f.Kind() == reflect.String {
+ fieldValue, err = conform(f.Type(), fieldValue)
+ if err != nil {
+ return err
+ }
+ for _, v := range fieldValue {
+ f.Set(reflect.ValueOf(f.String() + v.String()).Convert(f.Type()))
+ }
+ return nil
+ }
+
+ // Coalesce multiple tokens into one. This allows eg. ["-", "10"] to be captured as separate tokens but
+ // parsed as a single string "-10".
+ if len(fieldValue) > 1 {
+ out := []string{}
+ for _, v := range fieldValue {
+ out = append(out, v.String())
+ }
+ fieldValue = []reflect.Value{reflect.ValueOf(strings.Join(out, ""))}
+ }
+
+ fieldValue, err = conform(f.Type(), fieldValue)
+ if err != nil {
+ return err
+ }
+
+ fv := fieldValue[0]
+
+ switch f.Kind() { // nolint: exhaustive
+	// Numeric types will increment if the token cannot be coerced.
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ if fv.Type() != f.Type() {
+ f.SetInt(f.Int() + 1)
+ } else {
+ f.Set(fv)
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ if fv.Type() != f.Type() {
+ f.SetUint(f.Uint() + 1)
+ } else {
+ f.Set(fv)
+ }
+
+ case reflect.Float32, reflect.Float64:
+ if fv.Type() != f.Type() {
+ f.SetFloat(f.Float() + 1)
+ } else {
+ f.Set(fv)
+ }
+
+ case reflect.Bool, reflect.Struct, reflect.Interface:
+ if f.Kind() == reflect.Bool && fv.Kind() == reflect.Bool {
+ f.SetBool(fv.Bool())
+ break
+ }
+ if fv.Type() != f.Type() {
+ return fmt.Errorf("value %q is not correct type %s", fv, f.Type())
+ }
+ f.Set(fv)
+
+ default:
+ return fmt.Errorf("unsupported field type %s for field %s", f.Type(), field.Name)
+ }
+ return nil
+}
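
As newStrct above shows, struct fields named Pos, EndPos and Tokens (with matching types) are not part of the grammar but are injected by strct.Parse. A sketch of a hypothetical node relying on that behaviour:

```go
import "github.com/alecthomas/participle/v2/lexer"

// Assignment is a hypothetical grammar node. Pos/EndPos/Tokens carry no
// parser tags: they are filled by maybeInjectStartToken, maybeInjectEndToken
// and maybeInjectTokens during strct.Parse.
type Assignment struct {
	Pos    lexer.Position // position of the first token of the match
	Name   string         `parser:"@Ident '='"`
	Value  string         `parser:"@String"`
	EndPos lexer.Position // position immediately after the match
	Tokens []lexer.Token  // raw tokens consumed by this struct
}
```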
diff --git a/vendor/github.com/alecthomas/participle/v2/options.go b/vendor/github.com/alecthomas/participle/v2/options.go
new file mode 100644
index 00000000000..4842ecba7e1
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/options.go
@@ -0,0 +1,130 @@
+package participle
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+// MaxLookahead can be used with UseLookahead to get pseudo-infinite
+// lookahead without the risk of pathological cases causing a stack
+// overflow.
+const MaxLookahead = 99999
+
+// An Option to modify the behaviour of the Parser.
+type Option func(p *parserOptions) error
+
+// Lexer is an Option that sets the lexer to use with the given grammar.
+func Lexer(def lexer.Definition) Option {
+ return func(p *parserOptions) error {
+ p.lex = def
+ return nil
+ }
+}
+
+// UseLookahead allows branch lookahead up to "n" tokens.
+//
+// If parsing cannot be disambiguated before "n" tokens of lookahead, parsing will fail.
+//
+// Note that increasing lookahead has a minor performance impact, but also
+// reduces the accuracy of error reporting.
+//
+// If "n" is negative, it will be treated as "infinite" lookahead.
+// This can have a large impact on performance, and does not provide any
+// protection against stack overflow during parsing.
+// In most cases, using MaxLookahead will achieve the same results in practice,
+// but with a concrete upper bound to prevent pathological behavior in the parser.
+// Using infinite lookahead can be useful for testing, or for parsing especially
+// ambiguous grammars. Use at your own risk!
+func UseLookahead(n int) Option {
+ return func(p *parserOptions) error {
+ p.useLookahead = n
+ return nil
+ }
+}
+
+// CaseInsensitive allows the specified token types to be matched case-insensitively.
+//
+// Note that the lexer itself will also have to be case-insensitive; this option
+// just controls whether literals in the grammar are matched case insensitively.
+func CaseInsensitive(tokens ...string) Option {
+ return func(p *parserOptions) error {
+ for _, token := range tokens {
+ p.caseInsensitive[token] = true
+ }
+ return nil
+ }
+}
+
+// ParseTypeWith associates a custom parsing function with some interface type T.
+// When the parser encounters a value of type T, it will use the given parse function to
+// parse a value from the input.
+//
+// The parse function may return anything it wishes as long as that value satisfies the interface T.
+// However, only a single function can be defined for any type T.
+// If you want to have multiple parse functions returning types that satisfy the same interface, you'll
+// need to define new wrapper types for each one.
+//
+// This can be useful if you want to parse a DSL within the larger grammar, or if you want
+// to implement an optimized parsing scheme for some portion of the grammar.
+func ParseTypeWith[T any](parseFn func(*lexer.PeekingLexer) (T, error)) Option {
+ return func(p *parserOptions) error {
+ parseFnVal := reflect.ValueOf(parseFn)
+ parseFnType := parseFnVal.Type()
+ if parseFnType.Out(0).Kind() != reflect.Interface {
+ return fmt.Errorf("ParseTypeWith: T must be an interface type (got %s)", parseFnType.Out(0))
+ }
+ prodType := parseFnType.Out(0)
+ p.customDefs = append(p.customDefs, customDef{prodType, parseFnVal})
+ return nil
+ }
+}
+
+// Union associates several member productions with some interface type T.
+// Given members X, Y, Z, and W for a union type U, the EBNF rule is:
+// U = X | Y | Z | W .
+// When the parser encounters a field of type T, it will attempt to parse each member
+// in sequence and take the first match. Because of this, the order in which the
+// members are defined is important.
+//
+// An example of a bad parse that can happen if members are out of order:
+//
+// If the first member matches A, and the second member matches A B,
+// and the source string is "AB", then the parser will only match A, and will not
+// try to parse the second member at all.
+func Union[T any](members ...T) Option {
+ return func(p *parserOptions) error {
+ var t T
+ unionType := reflect.TypeOf(&t).Elem()
+ if unionType.Kind() != reflect.Interface {
+ return fmt.Errorf("union: union type must be an interface (got %s)", unionType)
+ }
+ memberTypes := make([]reflect.Type, 0, len(members))
+ for _, m := range members {
+ memberTypes = append(memberTypes, reflect.TypeOf(m))
+ }
+ p.unionDefs = append(p.unionDefs, unionDef{unionType, memberTypes})
+ return nil
+ }
+}
+
+// ParseOption modifies how an individual parse is applied.
+type ParseOption func(p *parseContext)
+
+// Trace the parse to "w".
+func Trace(w io.Writer) ParseOption {
+ return func(p *parseContext) {
+ p.trace = w
+ }
+}
+
+// AllowTrailing tokens without erroring.
+//
+// That is, do not error if a full parse completes but additional tokens remain.
+func AllowTrailing(ok bool) ParseOption {
+ return func(p *parseContext) {
+ p.allowTrailing = ok
+ }
+}
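
The ordering caveat in the Union documentation is easiest to see in code. A sketch with hypothetical members, using the default text/scanner lexer:

```go
type Value interface{ value() }

// Call matches e.g. `f()`. It must be listed before Ident: if Ident came
// first it would match the bare `f` and the trailing `(` would then be
// reported as an unexpected token.
type Call struct {
	Name string `parser:"@Ident '(' ')'"`
}

func (Call) value() {}

type Ident struct {
	Name string `parser:"@Ident"`
}

func (Ident) value() {}

type Expr struct {
	Value Value `parser:"@@"`
}

var exprParser = participle.MustBuild[Expr](participle.Union[Value](Call{}, Ident{}))
```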
diff --git a/vendor/github.com/alecthomas/participle/v2/parser.go b/vendor/github.com/alecthomas/participle/v2/parser.go
new file mode 100644
index 00000000000..1da5004d28e
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/parser.go
@@ -0,0 +1,316 @@
+package participle
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+type unionDef struct {
+ typ reflect.Type
+ members []reflect.Type
+}
+
+type customDef struct {
+ typ reflect.Type
+ parseFn reflect.Value
+}
+
+type parserOptions struct {
+ lex lexer.Definition
+ rootType reflect.Type
+ typeNodes map[reflect.Type]node
+ useLookahead int
+ caseInsensitive map[string]bool
+ mappers []mapperByToken
+ unionDefs []unionDef
+ customDefs []customDef
+ elide []string
+}
+
+// A Parser for a particular grammar and lexer.
+type Parser[G any] struct {
+ parserOptions
+}
+
+// ParserForProduction returns a new parser for the given production in grammar G.
+func ParserForProduction[P, G any](parser *Parser[G]) (*Parser[P], error) {
+ t := reflect.TypeOf(*new(P))
+ _, ok := parser.typeNodes[t]
+ if !ok {
+ return nil, fmt.Errorf("parser does not contain a production of type %s", t)
+ }
+ return (*Parser[P])(parser), nil
+}
+
+// MustBuild calls Build[G](options...) and panics if an error occurs.
+func MustBuild[G any](options ...Option) *Parser[G] {
+ parser, err := Build[G](options...)
+ if err != nil {
+ panic(err)
+ }
+ return parser
+}
+
+// Build constructs a parser for the given grammar.
+//
+// If "Lexer()" is not provided as an option, a default lexer based on text/scanner will be used. This scans typical Go-
+// like tokens.
+//
+// See documentation for details.
+func Build[G any](options ...Option) (parser *Parser[G], err error) {
+ // Configure Parser[G] struct with defaults + options.
+ p := &Parser[G]{
+ parserOptions: parserOptions{
+ lex: lexer.TextScannerLexer,
+ caseInsensitive: map[string]bool{},
+ useLookahead: 1,
+ },
+ }
+ for _, option := range options {
+ if err = option(&p.parserOptions); err != nil {
+ return nil, err
+ }
+ }
+
+ symbols := p.lex.Symbols()
+ if len(p.mappers) > 0 {
+ mappers := map[lexer.TokenType][]Mapper{}
+ for _, mapper := range p.mappers {
+ if len(mapper.symbols) == 0 {
+ mappers[lexer.EOF] = append(mappers[lexer.EOF], mapper.mapper)
+ } else {
+ for _, symbol := range mapper.symbols {
+ if rn, ok := symbols[symbol]; !ok {
+ return nil, fmt.Errorf("mapper %#v uses unknown token %q", mapper, symbol)
+ } else { // nolint: golint
+ mappers[rn] = append(mappers[rn], mapper.mapper)
+ }
+ }
+ }
+ }
+ p.lex = &mappingLexerDef{p.lex, func(t lexer.Token) (lexer.Token, error) {
+ combined := make([]Mapper, 0, len(mappers[t.Type])+len(mappers[lexer.EOF]))
+ combined = append(combined, mappers[lexer.EOF]...)
+ combined = append(combined, mappers[t.Type]...)
+
+ var err error
+ for _, m := range combined {
+ t, err = m(t)
+ if err != nil {
+ return t, err
+ }
+ }
+ return t, nil
+ }}
+ }
+
+ context := newGeneratorContext(p.lex)
+ if err := context.addCustomDefs(p.customDefs); err != nil {
+ return nil, err
+ }
+ if err := context.addUnionDefs(p.unionDefs); err != nil {
+ return nil, err
+ }
+
+ var grammar G
+ v := reflect.ValueOf(&grammar)
+ if v.Kind() == reflect.Interface {
+ v = v.Elem()
+ }
+ p.rootType = v.Type()
+ rootNode, err := context.parseType(p.rootType)
+ if err != nil {
+ return nil, err
+ }
+ if err := validate(rootNode); err != nil {
+ return nil, err
+ }
+ p.typeNodes = context.typeNodes
+ p.typeNodes[p.rootType] = rootNode
+ return p, nil
+}
+
+// Lexer returns the parser's builtin lexer.
+func (p *Parser[G]) Lexer() lexer.Definition {
+ return p.lex
+}
+
+// Lex uses the parser's lexer to tokenise input.
+// Parameter filename is used as an opaque prefix in error messages.
+func (p *Parser[G]) Lex(filename string, r io.Reader) ([]lexer.Token, error) {
+ lex, err := p.lex.Lex(filename, r)
+ if err != nil {
+ return nil, err
+ }
+ tokens, err := lexer.ConsumeAll(lex)
+ return tokens, err
+}
+
+// ParseFromLexer into grammar v which must be of the same type as the grammar passed to
+// Build().
+//
+// This may return an Error.
+func (p *Parser[G]) ParseFromLexer(lex *lexer.PeekingLexer, options ...ParseOption) (*G, error) {
+ v := new(G)
+ rv := reflect.ValueOf(v)
+ parseNode, err := p.parseNodeFor(rv)
+ if err != nil {
+ return nil, err
+ }
+ caseInsensitive := map[lexer.TokenType]bool{}
+ for sym, tt := range p.lex.Symbols() {
+ if p.caseInsensitive[sym] {
+ caseInsensitive[tt] = true
+ }
+ }
+ ctx := newParseContext(lex, p.useLookahead, caseInsensitive)
+ defer func() { *lex = *ctx.PeekingLexer }()
+ for _, option := range options {
+ option(ctx)
+ }
+ // If the grammar implements Parseable, use it.
+ if parseable, ok := any(v).(Parseable); ok {
+ return v, p.rootParseable(ctx, parseable)
+ }
+ return v, p.parseOne(ctx, parseNode, rv)
+}
+
+func (p *Parser[G]) parse(lex lexer.Lexer, options ...ParseOption) (v *G, err error) {
+ peeker, err := lexer.Upgrade(lex, p.getElidedTypes()...)
+ if err != nil {
+ return nil, err
+ }
+ return p.ParseFromLexer(peeker, options...)
+}
+
+// Parse from r into grammar v which must be of the same type as the grammar passed to
+// Build(). Parameter filename is used as an opaque prefix in error messages.
+//
+// This may return an Error.
+func (p *Parser[G]) Parse(filename string, r io.Reader, options ...ParseOption) (v *G, err error) {
+ if filename == "" {
+ filename = lexer.NameOfReader(r)
+ }
+ lex, err := p.lex.Lex(filename, r)
+ if err != nil {
+ return nil, err
+ }
+ return p.parse(lex, options...)
+}
+
+// ParseString from s into grammar v which must be of the same type as the grammar passed to
+// Build(). Parameter filename is used as an opaque prefix in error messages.
+//
+// This may return an Error.
+func (p *Parser[G]) ParseString(filename string, s string, options ...ParseOption) (v *G, err error) {
+ var lex lexer.Lexer
+ if sl, ok := p.lex.(lexer.StringDefinition); ok {
+ lex, err = sl.LexString(filename, s)
+ } else {
+ lex, err = p.lex.Lex(filename, strings.NewReader(s))
+ }
+ if err != nil {
+ return nil, err
+ }
+ return p.parse(lex, options...)
+}
+
+// ParseBytes from b into grammar v which must be of the same type as the grammar passed to
+// Build(). Parameter filename is used as an opaque prefix in error messages.
+//
+// This may return an Error.
+func (p *Parser[G]) ParseBytes(filename string, b []byte, options ...ParseOption) (v *G, err error) {
+ var lex lexer.Lexer
+ if sl, ok := p.lex.(lexer.BytesDefinition); ok {
+ lex, err = sl.LexBytes(filename, b)
+ } else {
+ lex, err = p.lex.Lex(filename, bytes.NewReader(b))
+ }
+ if err != nil {
+ return nil, err
+ }
+ return p.parse(lex, options...)
+}
+
+func (p *Parser[G]) parseOne(ctx *parseContext, parseNode node, rv reflect.Value) error {
+ err := p.parseInto(ctx, parseNode, rv)
+ if err != nil {
+ return err
+ }
+ token := ctx.Peek()
+ if !token.EOF() && !ctx.allowTrailing {
+ return ctx.DeepestError(&UnexpectedTokenError{Unexpected: token})
+ }
+ return nil
+}
+
+func (p *Parser[G]) parseInto(ctx *parseContext, parseNode node, rv reflect.Value) error {
+ if rv.IsNil() {
+ return fmt.Errorf("target must be a non-nil pointer to a struct or interface, but is a nil %s", rv.Type())
+ }
+ pv, err := p.typeNodes[rv.Type().Elem()].Parse(ctx, rv.Elem())
+ if len(pv) > 0 && pv[0].Type() == rv.Elem().Type() {
+ rv.Elem().Set(reflect.Indirect(pv[0]))
+ }
+ if err != nil {
+ return err
+ }
+ if pv == nil {
+ token := ctx.Peek()
+ return ctx.DeepestError(&UnexpectedTokenError{Unexpected: token})
+ }
+ return nil
+}
+
+func (p *Parser[G]) rootParseable(ctx *parseContext, parseable Parseable) error {
+ if err := parseable.Parse(ctx.PeekingLexer); err != nil {
+ if err == NextMatch {
+ err = &UnexpectedTokenError{Unexpected: ctx.Peek()}
+ } else {
+ err = &ParseError{Msg: err.Error(), Pos: ctx.Peek().Pos}
+ }
+ return ctx.DeepestError(err)
+ }
+ peek := ctx.Peek()
+ if !peek.EOF() && !ctx.allowTrailing {
+ return ctx.DeepestError(&UnexpectedTokenError{Unexpected: peek})
+ }
+ return nil
+}
+
+func (p *Parser[G]) getElidedTypes() []lexer.TokenType {
+ symbols := p.lex.Symbols()
+ elideTypes := make([]lexer.TokenType, 0, len(p.elide))
+ for _, elide := range p.elide {
+ rn, ok := symbols[elide]
+ if !ok {
+ panic(fmt.Errorf("Elide() uses unknown token %q", elide))
+ }
+ elideTypes = append(elideTypes, rn)
+ }
+ return elideTypes
+}
+
+func (p *Parser[G]) parseNodeFor(v reflect.Value) (node, error) {
+ t := v.Type()
+ if t.Kind() == reflect.Interface {
+ t = t.Elem()
+ }
+ if t.Kind() != reflect.Ptr || (t.Elem().Kind() != reflect.Struct && t.Elem().Kind() != reflect.Interface) {
+ return nil, fmt.Errorf("expected a pointer to a struct or interface, but got %s", t)
+ }
+ parseNode := p.typeNodes[t]
+ if parseNode == nil {
+ t = t.Elem()
+ parseNode = p.typeNodes[t]
+ }
+ if parseNode == nil {
+ return nil, fmt.Errorf("parser does not know how to parse values of type %s", t)
+ }
+ return parseNode, nil
+}
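
Tying the entry points together, a usage sketch; the Config grammar and configLexer definition here are hypothetical:

```go
p := participle.MustBuild[Config](
	participle.Lexer(configLexer), // hypothetical lexer.Definition
	participle.Elide("Whitespace"),
)

// The filename is only used as an opaque prefix in error messages.
cfg, err := p.ParseString("app.conf", `retries = 3`)

// The same parse, traced to stderr and tolerating trailing tokens.
cfg, err = p.ParseString("app.conf", `retries = 3 # comment`,
	participle.Trace(os.Stderr),
	participle.AllowTrailing(true),
)
```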
diff --git a/vendor/github.com/alecthomas/participle/v2/railroad.png b/vendor/github.com/alecthomas/participle/v2/railroad.png
new file mode 100644
index 00000000000..3388484e23a
Binary files /dev/null and b/vendor/github.com/alecthomas/participle/v2/railroad.png differ
diff --git a/vendor/github.com/alecthomas/participle/v2/struct.go b/vendor/github.com/alecthomas/participle/v2/struct.go
new file mode 100644
index 00000000000..882d6d2dd22
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/struct.go
@@ -0,0 +1,205 @@
+package participle
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "text/scanner"
+ "unicode/utf8"
+
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+// A structLexer lexes over the tags of struct fields while tracking the current field.
+type structLexer struct {
+ s reflect.Type
+ field int
+ indexes [][]int
+ lexer *lexer.PeekingLexer
+}
+
+func lexStruct(s reflect.Type) (*structLexer, error) {
+ indexes, err := collectFieldIndexes(s)
+ if err != nil {
+ return nil, err
+ }
+ slex := &structLexer{
+ s: s,
+ indexes: indexes,
+ }
+ if len(slex.indexes) > 0 {
+ tag := fieldLexerTag(slex.Field().StructField)
+ slex.lexer, err = lexer.Upgrade(newTagLexer(s.Name(), tag))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return slex, nil
+}
+
+// NumField returns the number of fields in the struct associated with this structLexer.
+func (s *structLexer) NumField() int {
+ return len(s.indexes)
+}
+
+type structLexerField struct {
+ reflect.StructField
+ Index []int
+}
+
+// Field returns the field associated with the current token.
+func (s *structLexer) Field() structLexerField {
+ return s.GetField(s.field)
+}
+
+func (s *structLexer) GetField(field int) structLexerField {
+ if field >= len(s.indexes) {
+ field = len(s.indexes) - 1
+ }
+ return structLexerField{
+ StructField: s.s.FieldByIndex(s.indexes[field]),
+ Index: s.indexes[field],
+ }
+}
+
+func (s *structLexer) Peek() (lexer.Token, error) {
+ field := s.field
+ lex := s.lexer
+ for {
+ token := lex.Peek()
+ if !token.EOF() {
+ token.Pos.Line = field + 1
+ return token, nil
+ }
+ field++
+ if field >= s.NumField() {
+ return lexer.EOFToken(token.Pos), nil
+ }
+ ft := s.GetField(field).StructField
+ tag := fieldLexerTag(ft)
+ var err error
+ lex, err = lexer.Upgrade(newTagLexer(ft.Name, tag))
+ if err != nil {
+ return token, err
+ }
+ }
+}
+
+func (s *structLexer) Next() (lexer.Token, error) {
+ token := s.lexer.Next()
+ if !token.EOF() {
+ token.Pos.Line = s.field + 1
+ return token, nil
+ }
+ if s.field+1 >= s.NumField() {
+ return lexer.EOFToken(token.Pos), nil
+ }
+ s.field++
+ ft := s.Field().StructField
+ tag := fieldLexerTag(ft)
+ var err error
+ s.lexer, err = lexer.Upgrade(newTagLexer(ft.Name, tag))
+ if err != nil {
+ return token, err
+ }
+ return s.Next()
+}
+
+func fieldLexerTag(field reflect.StructField) string {
+ if tag, ok := field.Tag.Lookup("parser"); ok {
+ return tag
+ }
+ return string(field.Tag)
+}
+
+// Recursively collect flattened indices for top-level fields and embedded fields.
+func collectFieldIndexes(s reflect.Type) (out [][]int, err error) {
+ if s.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("expected a struct but got %q", s)
+ }
+ defer decorate(&err, s.String)
+ for i := 0; i < s.NumField(); i++ {
+ f := s.Field(i)
+ switch {
+ case f.Anonymous: // nolint: gocritic
+ children, err := collectFieldIndexes(f.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, idx := range children {
+ out = append(out, append(f.Index, idx...))
+ }
+
+ case f.PkgPath != "":
+ continue
+
+ case fieldLexerTag(f) != "":
+ out = append(out, f.Index)
+ }
+ }
+ return
+}
+
+// tagLexer is a Lexer based on text/scanner.Scanner
+type tagLexer struct {
+ scanner *scanner.Scanner
+ filename string
+ err error
+}
+
+func newTagLexer(filename string, tag string) *tagLexer {
+ s := &scanner.Scanner{}
+ s.Init(strings.NewReader(tag))
+ lexer := &tagLexer{
+ filename: filename,
+ scanner: s,
+ }
+ lexer.scanner.Error = func(s *scanner.Scanner, msg string) {
+ // This is to support single quoted strings. Hacky.
+ if !strings.HasSuffix(msg, "char literal") {
+ lexer.err = fmt.Errorf("%s: %s", lexer.scanner.Pos(), msg)
+ }
+ }
+ return lexer
+}
+
+func (t *tagLexer) Next() (lexer.Token, error) {
+ typ := t.scanner.Scan()
+ text := t.scanner.TokenText()
+ pos := lexer.Position(t.scanner.Position)
+ pos.Filename = t.filename
+ if t.err != nil {
+ return lexer.Token{}, t.err
+ }
+ return textScannerTransform(lexer.Token{
+ Type: lexer.TokenType(typ),
+ Value: text,
+ Pos: pos,
+ })
+}
+
+func textScannerTransform(token lexer.Token) (lexer.Token, error) {
+ // Unquote strings.
+ switch token.Type {
+ case scanner.Char:
+ // FIXME(alec): This is pretty hacky...we convert a single quoted char into a double
+ // quoted string in order to support single quoted strings.
+ token.Value = fmt.Sprintf("\"%s\"", token.Value[1:len(token.Value)-1])
+ fallthrough
+ case scanner.String:
+ s, err := strconv.Unquote(token.Value)
+ if err != nil {
+ return lexer.Token{}, Errorf(token.Pos, "%s: %q", err.Error(), token.Value)
+ }
+ token.Value = s
+ if token.Type == scanner.Char && utf8.RuneCountInString(s) > 1 {
+ token.Type = scanner.String
+ }
+ case scanner.RawString:
+ token.Value = token.Value[1 : len(token.Value)-1]
+
+ default:
+ }
+ return token, nil
+}
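
fieldLexerTag above means a grammar fragment can live under the "parser" struct-tag key or be the entire tag, and textScannerTransform accepts single-quoted literals. A sketch of both forms for a hypothetical Pair type:

```go
// Preferred form: the grammar sits under the "parser" key. The single-quoted
// '=' literal is converted to a double-quoted string by textScannerTransform.
type Pair struct {
	Key   string `parser:"@Ident '='"`
	Value string `parser:"@String"`
}

// Equivalent legacy form: the whole struct tag is the grammar.
type PairLegacy struct {
	Key   string `@Ident "="`
	Value string `@String`
}
```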
diff --git a/vendor/github.com/alecthomas/participle/v2/validate.go b/vendor/github.com/alecthomas/participle/v2/validate.go
new file mode 100644
index 00000000000..3785e3ce1a6
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/validate.go
@@ -0,0 +1,59 @@
+package participle
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Perform some post-construction validation.
+//
+// Currently this checks for left recursion.
+func validate(n node) error {
+ checked := map[*strct]bool{}
+ seen := map[node]bool{}
+
+ return visit(n, func(n node, next func() error) error {
+ if n, ok := n.(*strct); ok {
+ if !checked[n] && isLeftRecursive(n) {
+ return fmt.Errorf("left recursion detected on\n\n%s", indent(n.String()))
+ }
+ checked[n] = true
+ if seen[n] {
+ return nil
+ }
+ }
+ seen[n] = true
+ return next()
+ })
+}
+
+func isLeftRecursive(root *strct) (found bool) {
+ defer func() { _ = recover() }()
+ seen := map[node]bool{}
+ _ = visit(root.expr, func(n node, next func() error) error {
+ if found {
+ return nil
+ }
+ switch n := n.(type) {
+ case *strct:
+ if root.typ == n.typ {
+ found = true
+ }
+
+ case *sequence:
+ if !n.head {
+ panic("done")
+ }
+ }
+ if seen[n] {
+ return nil
+ }
+ seen[n] = true
+ return next()
+ })
+ return
+}
+
+func indent(s string) string {
+ return " " + strings.Join(strings.Split(s, "\n"), "\n ")
+}
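
For illustration, a hypothetical grammar that this check rejects: the first node of Expr refers back to Expr itself, so parsing could recurse without consuming any input.

```go
// Build[Expr]() would fail with "left recursion detected" because the head
// of the sequence is a reference back to the same struct.
type Expr struct {
	Left  *Expr  `parser:"@@"`
	Op    string `parser:"@('+' | '-')"`
	Right int    `parser:"@Int"`
}
```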
diff --git a/vendor/github.com/alecthomas/participle/v2/visit.go b/vendor/github.com/alecthomas/participle/v2/visit.go
new file mode 100644
index 00000000000..e4186b18f1c
--- /dev/null
+++ b/vendor/github.com/alecthomas/participle/v2/visit.go
@@ -0,0 +1,59 @@
+package participle
+
+import "fmt"
+
+// Visit all nodes.
+//
+// Cycles are deliberately not detected; it is up to the visitor function to handle them.
+func visit(n node, visitor func(n node, next func() error) error) error {
+ return visitor(n, func() error {
+ switch n := n.(type) {
+ case *disjunction:
+ for _, child := range n.nodes {
+ if err := visit(child, visitor); err != nil {
+ return err
+ }
+ }
+ return nil
+ case *strct:
+ return visit(n.expr, visitor)
+ case *custom:
+ return nil
+ case *union:
+ for _, member := range n.nodeMembers {
+ if err := visit(member, visitor); err != nil {
+ return err
+ }
+ }
+ return nil
+ case *sequence:
+ if err := visit(n.node, visitor); err != nil {
+ return err
+ }
+ if n.next != nil {
+ return visit(n.next, visitor)
+ }
+ return nil
+ case *parseable:
+ return nil
+ case *capture:
+ return visit(n.node, visitor)
+ case *reference:
+ return nil
+ case *optional:
+ return visit(n.node, visitor)
+ case *repetition:
+ return visit(n.node, visitor)
+ case *negation:
+ return visit(n.node, visitor)
+ case *literal:
+ return nil
+ case *group:
+ return visit(n.expr, visitor)
+ case *lookaheadGroup:
+ return visit(n.expr, visitor)
+ default:
+ panic(fmt.Sprintf("%T", n))
+ }
+ })
+}
diff --git a/vendor/github.com/antonmedv/expr/.gitignore b/vendor/github.com/antonmedv/expr/.gitignore
new file mode 100644
index 00000000000..b0df3eb4442
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/.gitignore
@@ -0,0 +1,8 @@
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+*.test
+*.out
+*.html
diff --git a/vendor/github.com/antonmedv/expr/LICENSE b/vendor/github.com/antonmedv/expr/LICENSE
new file mode 100644
index 00000000000..7d058f841cb
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Anton Medvedev
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/antonmedv/expr/README.md b/vendor/github.com/antonmedv/expr/README.md
new file mode 100644
index 00000000000..242431f2ceb
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/README.md
@@ -0,0 +1,160 @@
+# Expr
+[![test](https://github.com/antonmedv/expr/actions/workflows/test.yml/badge.svg)](https://github.com/antonmedv/expr/actions/workflows/test.yml)
+[![Go Report Card](https://goreportcard.com/badge/github.com/antonmedv/expr)](https://goreportcard.com/report/github.com/antonmedv/expr)
+[![GoDoc](https://godoc.org/github.com/antonmedv/expr?status.svg)](https://godoc.org/github.com/antonmedv/expr)
+
+
+
+The **Expr** package provides an engine that can compile and evaluate expressions.
+An expression is a one-liner that returns a value (mostly, but not limited to, booleans).
+It is designed for simplicity, speed and safety.
+
+The purpose of the package is to let users write expressions inside configuration to express more complex logic.
+It is a perfect candidate for the foundation of a _business rule engine_.
+The idea is to let users configure behaviour dynamically, without recompiling the program:
+
+```coffeescript
+# Get the special price if
+user.Group in ["good_customers", "collaborator"]
+
+# Promote article to the homepage when
+len(article.Comments) > 100 and article.Category not in ["misc"]
+
+# Send an alert when
+product.Stock < 15
+```
+
+## Features
+
+* Seamless integration with Go (no need to redefine types)
+* Static typing ([example](https://godoc.org/github.com/antonmedv/expr#example-Env)).
+ ```go
+ out, err := expr.Compile(`name + age`)
+ // err: invalid operation + (mismatched types string and int)
+ // | name + age
+ // | .....^
+ ```
+* User-friendly error messages.
+* Reasonable set of basic operators.
+* Builtins `all`, `none`, `any`, `one`, `filter`, `map`.
+ ```coffeescript
+ all(Tweets, {.Size <= 280})
+ ```
+* Fast ([benchmarks](https://github.com/antonmedv/golang-expression-evaluation-comparison#readme)): uses bytecode virtual machine and optimizing compiler.
+
+## Install
+
+```
+go get github.com/antonmedv/expr
+```
+
+## Documentation
+
+* See [Getting Started](https://expr.medv.io/docs/Getting-Started) page for developer documentation.
+* See [Language Definition](https://expr.medv.io/docs/Language-Definition) page to learn the syntax.
+
+## Expr Code Editor
+
+
+
+
+
+Also, I have an embeddable code editor written in JavaScript that supports editing expressions with syntax highlighting and autocomplete based on your type declarations.
+
+[Learn more →](https://antonmedv.github.io/expr/)
+
+## Examples
+
+[Play Online](https://play.golang.org/p/z7T8ytJ1T1d)
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/antonmedv/expr"
+)
+
+func main() {
+ env := map[string]interface{}{
+ "greet": "Hello, %v!",
+ "names": []string{"world", "you"},
+ "sprintf": fmt.Sprintf,
+ }
+
+ code := `sprintf(greet, names[0])`
+
+ program, err := expr.Compile(code, expr.Env(env))
+ if err != nil {
+ panic(err)
+ }
+
+ output, err := expr.Run(program, env)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(output)
+}
+```
+
+[Play Online](https://play.golang.org/p/4S4brsIvU4i)
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/antonmedv/expr"
+)
+
+type Tweet struct {
+ Len int
+}
+
+type Env struct {
+ Tweets []Tweet
+}
+
+func main() {
+ code := `all(Tweets, {.Len <= 240})`
+
+ program, err := expr.Compile(code, expr.Env(Env{}))
+ if err != nil {
+ panic(err)
+ }
+
+ env := Env{
+ Tweets: []Tweet{{42}, {98}, {69}},
+ }
+ output, err := expr.Run(program, env)
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(output)
+}
+```
+
+## Who uses Expr?
+
+* [Aviasales](https://aviasales.ru) uses Expr as a business rule engine for its flight search engine.
+* [Wish.com](https://www.wish.com) uses Expr for the decision-making rule engine in the Wish Assistant.
+* [Argo](https://argoproj.github.io) uses Expr in Argo Rollouts and Argo Workflows for Kubernetes.
+* [Crowdsec](https://crowdsec.net) uses Expr in a security automation tool.
+* [FACEIT](https://www.faceit.com) uses Expr to allow customization of its eSports matchmaking algorithm.
+* [qiniu](https://www.qiniu.com) uses Expr in trade systems.
+* [Junglee Games](https://www.jungleegames.com/) uses Expr for an in-house marketing retention tool, [Project Audience](https://www.linkedin.com/pulse/meet-project-audience-our-no-code-swiss-army-knife-product-bharti).
+* [OpenTelemetry](https://opentelemetry.io) uses Expr in the OpenTelemetry Collector.
+* [Philips Labs](https://github.com/philips-labs/tabia) uses Expr in Tabia, a tool for collecting insights on the characteristics of their code bases.
+* [CoreDNS](https://coredns.io), a DNS server, uses Expr.
+* [Chaos Mesh](https://chaos-mesh.org), a cloud-native Chaos Engineering platform, uses Expr.
+* [Milvus](https://milvus.io), an open-source vector database, uses Expr.
+* [Visually.io](https://visually.io) uses Expr as a business rule engine for its personalization targeting algorithm.
+* [Akvorado](https://github.com/akvorado/akvorado) uses Expr to classify exporters and interfaces in network flows.
+
+[Add your company too](https://github.com/antonmedv/expr/edit/master/README.md)
+
+## License
+
+[MIT](https://github.com/antonmedv/expr/blob/master/LICENSE)
diff --git a/vendor/github.com/antonmedv/expr/ast/node.go b/vendor/github.com/antonmedv/expr/ast/node.go
new file mode 100644
index 00000000000..018c9565870
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/ast/node.go
@@ -0,0 +1,167 @@
+package ast
+
+import (
+ "reflect"
+ "regexp"
+
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/file"
+)
+
+// Node represents items of abstract syntax tree.
+type Node interface {
+ Location() file.Location
+ SetLocation(file.Location)
+ Type() reflect.Type
+ SetType(reflect.Type)
+}
+
+func Patch(node *Node, newNode Node) {
+ newNode.SetType((*node).Type())
+ newNode.SetLocation((*node).Location())
+ *node = newNode
+}
+
+type base struct {
+ loc file.Location
+ nodeType reflect.Type
+}
+
+func (n *base) Location() file.Location {
+ return n.loc
+}
+
+func (n *base) SetLocation(loc file.Location) {
+ n.loc = loc
+}
+
+func (n *base) Type() reflect.Type {
+ return n.nodeType
+}
+
+func (n *base) SetType(t reflect.Type) {
+ n.nodeType = t
+}
+
+type NilNode struct {
+ base
+}
+
+type IdentifierNode struct {
+ base
+ Value string
+ Deref bool
+ FieldIndex []int
+ Method bool // true if method, false if field
+ MethodIndex int // index of method, set only if Method is true
+}
+
+type IntegerNode struct {
+ base
+ Value int
+}
+
+type FloatNode struct {
+ base
+ Value float64
+}
+
+type BoolNode struct {
+ base
+ Value bool
+}
+
+type StringNode struct {
+ base
+ Value string
+}
+
+type ConstantNode struct {
+ base
+ Value interface{}
+}
+
+type UnaryNode struct {
+ base
+ Operator string
+ Node Node
+}
+
+type BinaryNode struct {
+ base
+ Regexp *regexp.Regexp
+ Operator string
+ Left Node
+ Right Node
+}
+
+type ChainNode struct {
+ base
+ Node Node
+}
+
+type MemberNode struct {
+ base
+ Node Node
+ Property Node
+ Name string
+ Optional bool
+ Deref bool
+ FieldIndex []int
+ Method bool
+ MethodIndex int
+}
+
+type SliceNode struct {
+ base
+ Node Node
+ From Node
+ To Node
+}
+
+type CallNode struct {
+ base
+ Callee Node
+ Arguments []Node
+ Typed int
+ Fast bool
+ Func *builtin.Function
+}
+
+type BuiltinNode struct {
+ base
+ Name string
+ Arguments []Node
+}
+
+type ClosureNode struct {
+ base
+ Node Node
+}
+
+type PointerNode struct {
+ base
+}
+
+type ConditionalNode struct {
+ base
+ Cond Node
+ Exp1 Node
+ Exp2 Node
+}
+
+type ArrayNode struct {
+ base
+ Nodes []Node
+}
+
+type MapNode struct {
+ base
+ Pairs []Node
+}
+
+type PairNode struct {
+ base
+ Key Node
+ Value Node
+}
diff --git a/vendor/github.com/antonmedv/expr/ast/print.go b/vendor/github.com/antonmedv/expr/ast/print.go
new file mode 100644
index 00000000000..56bc7dbe2e3
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/ast/print.go
@@ -0,0 +1,59 @@
+package ast
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+)
+
+func Dump(node Node) string {
+ return dump(reflect.ValueOf(node), "")
+}
+
+func dump(v reflect.Value, ident string) string {
+ if !v.IsValid() {
+ return "nil"
+ }
+ t := v.Type()
+ switch t.Kind() {
+ case reflect.Struct:
+ out := t.Name() + "{\n"
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if isPrivate(f.Name) {
+ continue
+ }
+ s := v.Field(i)
+ out += fmt.Sprintf("%v%v: %v,\n", ident+"\t", f.Name, dump(s, ident+"\t"))
+ }
+ return out + ident + "}"
+ case reflect.Slice:
+ if v.Len() == 0 {
+ return t.String() + "{}"
+ }
+ out := t.String() + "{\n"
+ for i := 0; i < v.Len(); i++ {
+ s := v.Index(i)
+ out += fmt.Sprintf("%v%v,", ident+"\t", dump(s, ident+"\t"))
+ if i+1 < v.Len() {
+ out += "\n"
+ }
+ }
+ return out + "\n" + ident + "}"
+ case reflect.Ptr:
+ return dump(v.Elem(), ident)
+ case reflect.Interface:
+ return dump(reflect.ValueOf(v.Interface()), ident)
+
+ case reflect.String:
+ return fmt.Sprintf("%q", v)
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
+
+var isCapital = regexp.MustCompile("^[A-Z]")
+
+func isPrivate(s string) bool {
+ return !isCapital.Match([]byte(s))
+}
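
A sketch of Dump on a small tree; the output shape is reconstructed from the logic above, so exact fields and spacing may differ:

```go
node := &ast.BinaryNode{
	Operator: "+",
	Left:     &ast.IntegerNode{Value: 1},
	Right:    &ast.IntegerNode{Value: 2},
}
fmt.Println(ast.Dump(node))
// Roughly:
// BinaryNode{
// 	Regexp: nil,
// 	Operator: "+",
// 	Left: IntegerNode{
// 		Value: 1,
// 	},
// 	Right: IntegerNode{
// 		Value: 2,
// 	},
// }
```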
diff --git a/vendor/github.com/antonmedv/expr/ast/visitor.go b/vendor/github.com/antonmedv/expr/ast/visitor.go
new file mode 100644
index 00000000000..351e5d72b23
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/ast/visitor.go
@@ -0,0 +1,68 @@
+package ast
+
+import "fmt"
+
+type Visitor interface {
+ Visit(node *Node)
+}
+
+func Walk(node *Node, v Visitor) {
+ switch n := (*node).(type) {
+ case *NilNode:
+ case *IdentifierNode:
+ case *IntegerNode:
+ case *FloatNode:
+ case *BoolNode:
+ case *StringNode:
+ case *ConstantNode:
+ case *UnaryNode:
+ Walk(&n.Node, v)
+ case *BinaryNode:
+ Walk(&n.Left, v)
+ Walk(&n.Right, v)
+ case *ChainNode:
+ Walk(&n.Node, v)
+ case *MemberNode:
+ Walk(&n.Node, v)
+ Walk(&n.Property, v)
+ case *SliceNode:
+ Walk(&n.Node, v)
+ if n.From != nil {
+ Walk(&n.From, v)
+ }
+ if n.To != nil {
+ Walk(&n.To, v)
+ }
+ case *CallNode:
+ Walk(&n.Callee, v)
+ for i := range n.Arguments {
+ Walk(&n.Arguments[i], v)
+ }
+ case *BuiltinNode:
+ for i := range n.Arguments {
+ Walk(&n.Arguments[i], v)
+ }
+ case *ClosureNode:
+ Walk(&n.Node, v)
+ case *PointerNode:
+ case *ConditionalNode:
+ Walk(&n.Cond, v)
+ Walk(&n.Exp1, v)
+ Walk(&n.Exp2, v)
+ case *ArrayNode:
+ for i := range n.Nodes {
+ Walk(&n.Nodes[i], v)
+ }
+ case *MapNode:
+ for i := range n.Pairs {
+ Walk(&n.Pairs[i], v)
+ }
+ case *PairNode:
+ Walk(&n.Key, v)
+ Walk(&n.Value, v)
+ default:
+ panic(fmt.Sprintf("undefined node type (%T)", node))
+ }
+
+ v.Visit(node)
+}
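
A sketch of a Visitor implementation, paired with the Patch helper from node.go so the rewritten node keeps the original's type and location:

```go
// doubleInts rewrites every integer literal in the tree to twice its value.
type doubleInts struct{}

func (doubleInts) Visit(node *ast.Node) {
	if n, ok := (*node).(*ast.IntegerNode); ok {
		ast.Patch(node, &ast.IntegerNode{Value: n.Value * 2})
	}
}

// Usage, given a parsed tree root `root ast.Node`:
//	ast.Walk(&root, doubleInts{})
```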
diff --git a/vendor/github.com/antonmedv/expr/builtin/builtin.go b/vendor/github.com/antonmedv/expr/builtin/builtin.go
new file mode 100644
index 00000000000..ad9376962ee
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/builtin/builtin.go
@@ -0,0 +1,101 @@
+package builtin
+
+import (
+ "fmt"
+ "reflect"
+)
+
+var (
+ anyType = reflect.TypeOf(new(interface{})).Elem()
+ integerType = reflect.TypeOf(0)
+ floatType = reflect.TypeOf(float64(0))
+)
+
+type Function struct {
+ Name string
+ Func func(args ...interface{}) (interface{}, error)
+ Opcode int
+ Types []reflect.Type
+ Validate func(args []reflect.Type) (reflect.Type, error)
+}
+
+const (
+ Len = iota + 1
+ Abs
+ Int
+ Float
+)
+
+var Builtins = map[int]*Function{
+ Len: {
+ Name: "len",
+ Opcode: Len,
+ Validate: func(args []reflect.Type) (reflect.Type, error) {
+ if len(args) != 1 {
+ return anyType, fmt.Errorf("invalid number of arguments for len (expected 1, got %d)", len(args))
+ }
+ switch kind(args[0]) {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String, reflect.Interface:
+ return integerType, nil
+ }
+ return anyType, fmt.Errorf("invalid argument for len (type %s)", args[0])
+ },
+ },
+ Abs: {
+ Name: "abs",
+ Opcode: Abs,
+ Validate: func(args []reflect.Type) (reflect.Type, error) {
+ if len(args) != 1 {
+ return anyType, fmt.Errorf("invalid number of arguments for abs (expected 1, got %d)", len(args))
+ }
+ switch kind(args[0]) {
+ case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Interface:
+ return args[0], nil
+ }
+ return anyType, fmt.Errorf("invalid argument for abs (type %s)", args[0])
+ },
+ },
+ Int: {
+ Name: "int",
+ Opcode: Int,
+ Validate: func(args []reflect.Type) (reflect.Type, error) {
+ if len(args) != 1 {
+ return anyType, fmt.Errorf("invalid number of arguments for int (expected 1, got %d)", len(args))
+ }
+ switch kind(args[0]) {
+ case reflect.Interface:
+ return integerType, nil
+ case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return integerType, nil
+ case reflect.String:
+ return integerType, nil
+ }
+ return anyType, fmt.Errorf("invalid argument for int (type %s)", args[0])
+ },
+ },
+ Float: {
+ Name: "float",
+ Opcode: Float,
+ Validate: func(args []reflect.Type) (reflect.Type, error) {
+ if len(args) != 1 {
+ return anyType, fmt.Errorf("invalid number of arguments for float (expected 1, got %d)", len(args))
+ }
+ switch kind(args[0]) {
+ case reflect.Interface:
+ return floatType, nil
+ case reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return floatType, nil
+ case reflect.String:
+ return floatType, nil
+ }
+ return anyType, fmt.Errorf("invalid argument for float (type %s)", args[0])
+ },
+ },
+}
+
+func kind(t reflect.Type) reflect.Kind {
+ if t == nil {
+ return reflect.Invalid
+ }
+ return t.Kind()
+}
diff --git a/vendor/github.com/antonmedv/expr/checker/checker.go b/vendor/github.com/antonmedv/expr/checker/checker.go
new file mode 100644
index 00000000000..5ce9b31faca
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/checker/checker.go
@@ -0,0 +1,852 @@
+package checker
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/conf"
+ "github.com/antonmedv/expr/file"
+ "github.com/antonmedv/expr/parser"
+ "github.com/antonmedv/expr/vm"
+)
+
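+// Check type-checks the parsed tree against the config and returns the type of
+// the expression's result, enforcing config.Expect when it is set.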
+func Check(tree *parser.Tree, config *conf.Config) (t reflect.Type, err error) {
+ if config == nil {
+ config = conf.New(nil)
+ }
+
+ v := &visitor{
+ config: config,
+ collections: make([]reflect.Type, 0),
+ parents: make([]ast.Node, 0),
+ }
+
+ t, _ = v.visit(tree.Node)
+
+ if v.err != nil {
+ return t, v.err.Bind(tree.Source)
+ }
+
+ if v.config.Expect != reflect.Invalid {
+ switch v.config.Expect {
+ case reflect.Int, reflect.Int64, reflect.Float64:
+ if !isNumber(t) && !isAny(t) {
+ return nil, fmt.Errorf("expected %v, but got %v", v.config.Expect, t)
+ }
+ default:
+ if t != nil {
+ if t.Kind() == v.config.Expect {
+ return t, nil
+ }
+ }
+ return nil, fmt.Errorf("expected %v, but got %v", v.config.Expect, t)
+ }
+ }
+
+ return t, nil
+}
+
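+// visitor type-checks nodes; collections tracks the enclosing builtin
+// collections for pointer (#) access, and parents is the current node stack.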
+type visitor struct {
+ config *conf.Config
+ collections []reflect.Type
+ parents []ast.Node
+ err *file.Error
+}
+
+type info struct {
+ method bool
+ fn *builtin.Function
+}
+
+func (v *visitor) visit(node ast.Node) (reflect.Type, info) {
+ var t reflect.Type
+ var i info
+ v.parents = append(v.parents, node)
+ switch n := node.(type) {
+ case *ast.NilNode:
+ t, i = v.NilNode(n)
+ case *ast.IdentifierNode:
+ t, i = v.IdentifierNode(n)
+ case *ast.IntegerNode:
+ t, i = v.IntegerNode(n)
+ case *ast.FloatNode:
+ t, i = v.FloatNode(n)
+ case *ast.BoolNode:
+ t, i = v.BoolNode(n)
+ case *ast.StringNode:
+ t, i = v.StringNode(n)
+ case *ast.ConstantNode:
+ t, i = v.ConstantNode(n)
+ case *ast.UnaryNode:
+ t, i = v.UnaryNode(n)
+ case *ast.BinaryNode:
+ t, i = v.BinaryNode(n)
+ case *ast.ChainNode:
+ t, i = v.ChainNode(n)
+ case *ast.MemberNode:
+ t, i = v.MemberNode(n)
+ case *ast.SliceNode:
+ t, i = v.SliceNode(n)
+ case *ast.CallNode:
+ t, i = v.CallNode(n)
+ case *ast.BuiltinNode:
+ t, i = v.BuiltinNode(n)
+ case *ast.ClosureNode:
+ t, i = v.ClosureNode(n)
+ case *ast.PointerNode:
+ t, i = v.PointerNode(n)
+ case *ast.ConditionalNode:
+ t, i = v.ConditionalNode(n)
+ case *ast.ArrayNode:
+ t, i = v.ArrayNode(n)
+ case *ast.MapNode:
+ t, i = v.MapNode(n)
+ case *ast.PairNode:
+ t, i = v.PairNode(n)
+ default:
+ panic(fmt.Sprintf("undefined node type (%T)", node))
+ }
+ v.parents = v.parents[:len(v.parents)-1]
+ node.SetType(t)
+ return t, i
+}
+
+func (v *visitor) error(node ast.Node, format string, args ...interface{}) (reflect.Type, info) {
+ if v.err == nil { // show first error
+ v.err = &file.Error{
+ Location: node.Location(),
+ Message: fmt.Sprintf(format, args...),
+ }
+ }
+ return anyType, info{} // the interface type represents an undefined type
+}
+
+func (v *visitor) NilNode(*ast.NilNode) (reflect.Type, info) {
+ return nilType, info{}
+}
+
+func (v *visitor) IdentifierNode(node *ast.IdentifierNode) (reflect.Type, info) {
+ if fn, ok := v.config.Functions[node.Value]; ok {
+ // Return anyType instead of func type as we don't know the arguments yet.
+ // The func type can be one of the fn.Types. The type will be resolved
+ // when the arguments are known in CallNode.
+ return anyType, info{fn: fn}
+ }
+ if v.config.Types == nil {
+ node.Deref = true
+ } else if t, ok := v.config.Types[node.Value]; ok {
+ if t.Ambiguous {
+ return v.error(node, "ambiguous identifier %v", node.Value)
+ }
+ d, c := deref(t.Type)
+ node.Deref = c
+ node.Method = t.Method
+ node.MethodIndex = t.MethodIndex
+ node.FieldIndex = t.FieldIndex
+ return d, info{method: t.Method}
+ }
+ if v.config.Strict {
+ return v.error(node, "unknown name %v", node.Value)
+ }
+ if v.config.DefaultType != nil {
+ return v.config.DefaultType, info{}
+ }
+ return anyType, info{}
+}
+
+func (v *visitor) IntegerNode(*ast.IntegerNode) (reflect.Type, info) {
+ return integerType, info{}
+}
+
+func (v *visitor) FloatNode(*ast.FloatNode) (reflect.Type, info) {
+ return floatType, info{}
+}
+
+func (v *visitor) BoolNode(*ast.BoolNode) (reflect.Type, info) {
+ return boolType, info{}
+}
+
+func (v *visitor) StringNode(*ast.StringNode) (reflect.Type, info) {
+ return stringType, info{}
+}
+
+func (v *visitor) ConstantNode(node *ast.ConstantNode) (reflect.Type, info) {
+ return reflect.TypeOf(node.Value), info{}
+}
+
+func (v *visitor) UnaryNode(node *ast.UnaryNode) (reflect.Type, info) {
+ t, _ := v.visit(node.Node)
+
+ switch node.Operator {
+
+ case "!", "not":
+ if isBool(t) {
+ return boolType, info{}
+ }
+ if isAny(t) {
+ return boolType, info{}
+ }
+
+ case "+", "-":
+ if isNumber(t) {
+ return t, info{}
+ }
+ if isAny(t) {
+ return anyType, info{}
+ }
+
+ default:
+ return v.error(node, "unknown operator (%v)", node.Operator)
+ }
+
+ return v.error(node, `invalid operation: %v (mismatched type %v)`, node.Operator, t)
+}
+
+func (v *visitor) BinaryNode(node *ast.BinaryNode) (reflect.Type, info) {
+ l, _ := v.visit(node.Left)
+ r, _ := v.visit(node.Right)
+
+ // check operator overloading
+ if fns, ok := v.config.Operators[node.Operator]; ok {
+ t, _, ok := conf.FindSuitableOperatorOverload(fns, v.config.Types, l, r)
+ if ok {
+ return t, info{}
+ }
+ }
+
+ switch node.Operator {
+ case "==", "!=":
+ if isNumber(l) && isNumber(r) {
+ return boolType, info{}
+ }
+ if l == nil || r == nil { // It is possible to compare with nil.
+ return boolType, info{}
+ }
+ if l.Kind() == r.Kind() {
+ return boolType, info{}
+ }
+ if isAny(l) || isAny(r) {
+ return boolType, info{}
+ }
+
+ case "or", "||", "and", "&&":
+ if isBool(l) && isBool(r) {
+ return boolType, info{}
+ }
+ if or(l, r, isBool) {
+ return boolType, info{}
+ }
+
+ case "<", ">", ">=", "<=":
+ if isNumber(l) && isNumber(r) {
+ return boolType, info{}
+ }
+ if isString(l) && isString(r) {
+ return boolType, info{}
+ }
+ if isTime(l) && isTime(r) {
+ return boolType, info{}
+ }
+ if or(l, r, isNumber, isString, isTime) {
+ return boolType, info{}
+ }
+
+ case "-":
+ if isNumber(l) && isNumber(r) {
+ return combined(l, r), info{}
+ }
+ if isTime(l) && isTime(r) {
+ return durationType, info{}
+ }
+ if or(l, r, isNumber, isTime) {
+ return anyType, info{}
+ }
+
+ case "/", "*":
+ if isNumber(l) && isNumber(r) {
+ return combined(l, r), info{}
+ }
+ if or(l, r, isNumber) {
+ return anyType, info{}
+ }
+
+ case "**", "^":
+ if isNumber(l) && isNumber(r) {
+ return floatType, info{}
+ }
+ if or(l, r, isNumber) {
+ return floatType, info{}
+ }
+
+ case "%":
+ if isInteger(l) && isInteger(r) {
+ return combined(l, r), info{}
+ }
+ if or(l, r, isInteger) {
+ return anyType, info{}
+ }
+
+ case "+":
+ if isNumber(l) && isNumber(r) {
+ return combined(l, r), info{}
+ }
+ if isString(l) && isString(r) {
+ return stringType, info{}
+ }
+ if isTime(l) && isDuration(r) {
+ return timeType, info{}
+ }
+ if isDuration(l) && isTime(r) {
+ return timeType, info{}
+ }
+ if or(l, r, isNumber, isString, isTime, isDuration) {
+ return anyType, info{}
+ }
+
+ case "in":
+ if (isString(l) || isAny(l)) && isStruct(r) {
+ return boolType, info{}
+ }
+ if isMap(r) {
+ return boolType, info{}
+ }
+ if isArray(r) {
+ return boolType, info{}
+ }
+ if isAny(l) && anyOf(r, isString, isArray, isMap) {
+ return boolType, info{}
+ }
+ if isAny(r) {
+ return boolType, info{}
+ }
+
+ case "matches":
+ if s, ok := node.Right.(*ast.StringNode); ok {
+ r, err := regexp.Compile(s.Value)
+ if err != nil {
+ return v.error(node, err.Error())
+ }
+ node.Regexp = r
+ }
+ if isString(l) && isString(r) {
+ return boolType, info{}
+ }
+ if or(l, r, isString) {
+ return boolType, info{}
+ }
+
+ case "contains", "startsWith", "endsWith":
+ if isString(l) && isString(r) {
+ return boolType, info{}
+ }
+ if or(l, r, isString) {
+ return boolType, info{}
+ }
+
+ case "..":
+ ret := reflect.SliceOf(integerType)
+ if isInteger(l) && isInteger(r) {
+ return ret, info{}
+ }
+ if or(l, r, isInteger) {
+ return ret, info{}
+ }
+
+ case "??":
+ if l == nil && r != nil {
+ return r, info{}
+ }
+ if l != nil && r == nil {
+ return l, info{}
+ }
+ if l == nil && r == nil {
+ return nilType, info{}
+ }
+ if r.AssignableTo(l) {
+ return l, info{}
+ }
+ return anyType, info{}
+
+ default:
+ return v.error(node, "unknown operator (%v)", node.Operator)
+
+ }
+
+ return v.error(node, `invalid operation: %v (mismatched types %v and %v)`, node.Operator, l, r)
+}
+
+func (v *visitor) ChainNode(node *ast.ChainNode) (reflect.Type, info) {
+ return v.visit(node.Node)
+}
+
+func (v *visitor) MemberNode(node *ast.MemberNode) (reflect.Type, info) {
+ base, _ := v.visit(node.Node)
+ prop, _ := v.visit(node.Property)
+
+ if name, ok := node.Property.(*ast.StringNode); ok {
+ if base == nil {
+ return v.error(node, "type %v has no field %v", base, name.Value)
+ }
+ // First, check methods defined on base type itself,
+ // independent of which type it is. Without dereferencing.
+ if m, ok := base.MethodByName(name.Value); ok {
+ node.Method = true
+ node.MethodIndex = m.Index
+ node.Name = name.Value
+ if base.Kind() == reflect.Interface {
+ // Methods on an interface type have no receiver, so report this
+ // as a non-method (info.method is false) to keep the checker from
+ // discounting the first input argument.
+ return m.Type, info{}
+ } else {
+ return m.Type, info{method: true}
+ }
+ }
+ }
+
+ if base.Kind() == reflect.Ptr {
+ base = base.Elem()
+ }
+
+ switch base.Kind() {
+ case reflect.Interface:
+ node.Deref = true
+ return anyType, info{}
+
+ case reflect.Map:
+ if prop != nil && !prop.AssignableTo(base.Key()) && !isAny(prop) {
+ return v.error(node.Property, "cannot use %v to get an element from %v", prop, base)
+ }
+ t, c := deref(base.Elem())
+ node.Deref = c
+ return t, info{}
+
+ case reflect.Array, reflect.Slice:
+ if !isInteger(prop) && !isAny(prop) {
+ return v.error(node.Property, "array elements can only be selected using an integer (got %v)", prop)
+ }
+ t, c := deref(base.Elem())
+ node.Deref = c
+ return t, info{}
+
+ case reflect.Struct:
+ if name, ok := node.Property.(*ast.StringNode); ok {
+ propertyName := name.Value
+ if field, ok := fetchField(base, propertyName); ok {
+ t, c := deref(field.Type)
+ node.Deref = c
+ node.FieldIndex = field.Index
+ node.Name = propertyName
+ return t, info{}
+ }
+ if len(v.parents) > 1 {
+ if _, ok := v.parents[len(v.parents)-2].(*ast.CallNode); ok {
+ return v.error(node, "type %v has no method %v", base, propertyName)
+ }
+ }
+ return v.error(node, "type %v has no field %v", base, propertyName)
+ }
+ }
+
+ return v.error(node, "type %v[%v] is undefined", base, prop)
+}
+
+func (v *visitor) SliceNode(node *ast.SliceNode) (reflect.Type, info) {
+ t, _ := v.visit(node.Node)
+
+ switch t.Kind() {
+ case reflect.Interface:
+ // ok
+ case reflect.String, reflect.Array, reflect.Slice:
+ // ok
+ default:
+ return v.error(node, "cannot slice %v", t)
+ }
+
+ if node.From != nil {
+ from, _ := v.visit(node.From)
+ if !isInteger(from) && !isAny(from) {
+ return v.error(node.From, "non-integer slice index %v", from)
+ }
+ }
+ if node.To != nil {
+ to, _ := v.visit(node.To)
+ if !isInteger(to) && !isAny(to) {
+ return v.error(node.To, "non-integer slice index %v", to)
+ }
+ }
+ return t, info{}
+}
+
+func (v *visitor) CallNode(node *ast.CallNode) (reflect.Type, info) {
+ fn, fnInfo := v.visit(node.Callee)
+
+ if fnInfo.fn != nil {
+ f := fnInfo.fn
+ node.Func = f
+ if f.Validate != nil {
+ args := make([]reflect.Type, len(node.Arguments))
+ for i, arg := range node.Arguments {
+ args[i], _ = v.visit(arg)
+ }
+ t, err := f.Validate(args)
+ if err != nil {
+ return v.error(node, "%v", err)
+ }
+ return t, info{}
+ }
+ if len(f.Types) == 0 {
+ t, err := v.checkFunc(f.Name, functionType, false, node)
+ if err != nil {
+ if v.err == nil {
+ v.err = err
+ }
+ return anyType, info{}
+ }
+ // No type was specified, so we assume the function returns any.
+ return t, info{}
+ }
+ var lastErr *file.Error
+ for _, t := range f.Types {
+ outType, err := v.checkFunc(f.Name, t, false, node)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ return outType, info{}
+ }
+ if lastErr != nil {
+ if v.err == nil {
+ v.err = lastErr
+ }
+ return anyType, info{}
+ }
+ }
+
+ fnName := "function"
+ if identifier, ok := node.Callee.(*ast.IdentifierNode); ok {
+ fnName = identifier.Value
+ }
+ if member, ok := node.Callee.(*ast.MemberNode); ok {
+ if name, ok := member.Property.(*ast.StringNode); ok {
+ fnName = name.Value
+ }
+ }
+ switch fn.Kind() {
+ case reflect.Interface:
+ return anyType, info{}
+ case reflect.Func:
+ inputParamsCount := 1 // for functions
+ if fnInfo.method {
+ inputParamsCount = 2 // for methods
+ }
+ // TODO: Deprecate OpCallFast and move fn(...any) any to TypedFunc list.
+ // To do this we need to add support for variadic arguments in OpCallTyped.
+ if !isAny(fn) &&
+ fn.IsVariadic() &&
+ fn.NumIn() == inputParamsCount &&
+ fn.NumOut() == 1 &&
+ fn.Out(0).Kind() == reflect.Interface {
+ rest := fn.In(fn.NumIn() - 1) // the last input is the variadic one (functions have one input here, methods two)
+ if rest.Kind() == reflect.Slice && rest.Elem().Kind() == reflect.Interface {
+ node.Fast = true
+ }
+ }
+
+ outType, err := v.checkFunc(fnName, fn, fnInfo.method, node)
+ if err != nil {
+ if v.err == nil {
+ v.err = err
+ }
+ return anyType, info{}
+ }
+
+ v.findTypedFunc(node, fn, fnInfo.method)
+
+ return outType, info{}
+ }
+ return v.error(node, "%v is not callable", fn)
+}
+
+func (v *visitor) checkFunc(name string, fn reflect.Type, method bool, node *ast.CallNode) (reflect.Type, *file.Error) {
+ if isAny(fn) {
+ return anyType, nil
+ }
+
+ if fn.NumOut() == 0 {
+ return anyType, &file.Error{
+ Location: node.Location(),
+ Message: fmt.Sprintf("func %v doesn't return value", name),
+ }
+ }
+ if numOut := fn.NumOut(); numOut > 2 {
+ return anyType, &file.Error{
+ Location: node.Location(),
+ Message: fmt.Sprintf("func %v returns more then two values", name),
+ }
+ }
+
+ // If the func is a method on the env, its first input is the receiver,
+ // so the expression supplies one argument fewer than fn.NumIn().
+ fnNumIn := fn.NumIn()
+ if method {
+ fnNumIn--
+ }
+ // Skip the first input when it is the receiver.
+ fnInOffset := 0
+ if method {
+ fnInOffset = 1
+ }
+
+ if fn.IsVariadic() {
+ if len(node.Arguments) < fnNumIn-1 {
+ return anyType, &file.Error{
+ Location: node.Location(),
+ Message: fmt.Sprintf("not enough arguments to call %v", name),
+ }
+ }
+ } else {
+ if len(node.Arguments) > fnNumIn {
+ return anyType, &file.Error{
+ Location: node.Location(),
+ Message: fmt.Sprintf("too many arguments to call %v", name),
+ }
+ }
+ if len(node.Arguments) < fnNumIn {
+ return anyType, &file.Error{
+ Location: node.Location(),
+ Message: fmt.Sprintf("not enough arguments to call %v", name),
+ }
+ }
+ }
+
+ for i, arg := range node.Arguments {
+ t, _ := v.visit(arg)
+
+ var in reflect.Type
+ if fn.IsVariadic() && i >= fnNumIn-1 {
+ // For a variadic parameter fn(xs ...int), Go reports the type of xs as []int.
+ // As we compare arguments one by one, we need the underlying element type.
+ in = fn.In(fn.NumIn() - 1).Elem()
+ } else {
+ in = fn.In(i + fnInOffset)
+ }
+
+ if isIntegerOrArithmeticOperation(arg) {
+ t = in
+ setTypeForIntegers(arg, t)
+ }
+
+ if t == nil {
+ continue
+ }
+
+ if !t.AssignableTo(in) && t.Kind() != reflect.Interface {
+ return anyType, &file.Error{
+ Location: arg.Location(),
+ Message: fmt.Sprintf("cannot use %v as argument (type %v) to call %v ", t, in, name),
+ }
+ }
+ }
+
+ return fn.Out(0), nil
+}
+
+func (v *visitor) BuiltinNode(node *ast.BuiltinNode) (reflect.Type, info) {
+ switch node.Name {
+ case "all", "none", "any", "one":
+ collection, _ := v.visit(node.Arguments[0])
+ if !isArray(collection) && !isAny(collection) {
+ return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection)
+ }
+
+ v.collections = append(v.collections, collection)
+ closure, _ := v.visit(node.Arguments[1])
+ v.collections = v.collections[:len(v.collections)-1]
+
+ if isFunc(closure) &&
+ closure.NumOut() == 1 &&
+ closure.NumIn() == 1 && isAny(closure.In(0)) {
+
+ if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) {
+ return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String())
+ }
+ return boolType, info{}
+ }
+ return v.error(node.Arguments[1], "closure should has one input and one output param")
+
+ case "filter":
+ collection, _ := v.visit(node.Arguments[0])
+ if !isArray(collection) && !isAny(collection) {
+ return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection)
+ }
+
+ v.collections = append(v.collections, collection)
+ closure, _ := v.visit(node.Arguments[1])
+ v.collections = v.collections[:len(v.collections)-1]
+
+ if isFunc(closure) &&
+ closure.NumOut() == 1 &&
+ closure.NumIn() == 1 && isAny(closure.In(0)) {
+
+ if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) {
+ return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String())
+ }
+ if isAny(collection) {
+ return arrayType, info{}
+ }
+ return reflect.SliceOf(collection.Elem()), info{}
+ }
+ return v.error(node.Arguments[1], "closure should has one input and one output param")
+
+ case "map":
+ collection, _ := v.visit(node.Arguments[0])
+ if !isArray(collection) && !isAny(collection) {
+ return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection)
+ }
+
+ v.collections = append(v.collections, collection)
+ closure, _ := v.visit(node.Arguments[1])
+ v.collections = v.collections[:len(v.collections)-1]
+
+ if isFunc(closure) &&
+ closure.NumOut() == 1 &&
+ closure.NumIn() == 1 && isAny(closure.In(0)) {
+
+ return reflect.SliceOf(closure.Out(0)), info{}
+ }
+ return v.error(node.Arguments[1], "closure should has one input and one output param")
+
+ case "count":
+ collection, _ := v.visit(node.Arguments[0])
+ if !isArray(collection) && !isAny(collection) {
+ return v.error(node.Arguments[0], "builtin %v takes only array (got %v)", node.Name, collection)
+ }
+
+ v.collections = append(v.collections, collection)
+ closure, _ := v.visit(node.Arguments[1])
+ v.collections = v.collections[:len(v.collections)-1]
+
+ if isFunc(closure) &&
+ closure.NumOut() == 1 &&
+ closure.NumIn() == 1 && isAny(closure.In(0)) {
+ if !isBool(closure.Out(0)) && !isAny(closure.Out(0)) {
+ return v.error(node.Arguments[1], "closure should return boolean (got %v)", closure.Out(0).String())
+ }
+
+ return integerType, info{}
+ }
+ return v.error(node.Arguments[1], "closure should has one input and one output param")
+
+ default:
+ return v.error(node, "unknown builtin %v", node.Name)
+ }
+}
+
+func (v *visitor) ClosureNode(node *ast.ClosureNode) (reflect.Type, info) {
+ t, _ := v.visit(node.Node)
+ return reflect.FuncOf([]reflect.Type{anyType}, []reflect.Type{t}, false), info{}
+}
+
+func (v *visitor) PointerNode(node *ast.PointerNode) (reflect.Type, info) {
+ if len(v.collections) == 0 {
+ return v.error(node, "cannot use pointer accessor outside closure")
+ }
+
+ collection := v.collections[len(v.collections)-1]
+ switch collection.Kind() {
+ case reflect.Interface:
+ return anyType, info{}
+ case reflect.Array, reflect.Slice:
+ return collection.Elem(), info{}
+ }
+ return v.error(node, "cannot use %v as array", collection)
+}
+
+func (v *visitor) ConditionalNode(node *ast.ConditionalNode) (reflect.Type, info) {
+ c, _ := v.visit(node.Cond)
+ if !isBool(c) && !isAny(c) {
+ return v.error(node.Cond, "non-bool expression (type %v) used as condition", c)
+ }
+
+ t1, _ := v.visit(node.Exp1)
+ t2, _ := v.visit(node.Exp2)
+
+ if t1 == nil && t2 != nil {
+ return t2, info{}
+ }
+ if t1 != nil && t2 == nil {
+ return t1, info{}
+ }
+ if t1 == nil && t2 == nil {
+ return nilType, info{}
+ }
+ if t1.AssignableTo(t2) {
+ return t1, info{}
+ }
+ return anyType, info{}
+}
+
+func (v *visitor) ArrayNode(node *ast.ArrayNode) (reflect.Type, info) {
+ for _, node := range node.Nodes {
+ v.visit(node)
+ }
+ return arrayType, info{}
+}
+
+func (v *visitor) MapNode(node *ast.MapNode) (reflect.Type, info) {
+ for _, pair := range node.Pairs {
+ v.visit(pair)
+ }
+ return mapType, info{}
+}
+
+func (v *visitor) PairNode(node *ast.PairNode) (reflect.Type, info) {
+ v.visit(node.Key)
+ v.visit(node.Value)
+ return nilType, info{}
+}
+
+func (v *visitor) findTypedFunc(node *ast.CallNode, fn reflect.Type, method bool) {
+ // OpCallTyped doesn't work for functions with variadic arguments,
+ // nor for named function types like `type MyFunc func() int`.
+ // If PkgPath() is an empty string, the type is unnamed.
+ if !fn.IsVariadic() && fn.PkgPath() == "" {
+ fnNumIn := fn.NumIn()
+ fnInOffset := 0
+ if method {
+ fnNumIn--
+ fnInOffset = 1
+ }
+ funcTypes:
+ for i := range vm.FuncTypes {
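+ // Index 0 is skipped so that a zero node.Typed can mean "no typed call".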
+ if i == 0 {
+ continue
+ }
+ typed := reflect.ValueOf(vm.FuncTypes[i]).Elem().Type()
+ if typed.Kind() != reflect.Func {
+ continue
+ }
+ if typed.NumOut() != fn.NumOut() {
+ continue
+ }
+ for j := 0; j < typed.NumOut(); j++ {
+ if typed.Out(j) != fn.Out(j) {
+ continue funcTypes
+ }
+ }
+ if typed.NumIn() != fnNumIn {
+ continue
+ }
+ for j := 0; j < typed.NumIn(); j++ {
+ if typed.In(j) != fn.In(j+fnInOffset) {
+ continue funcTypes
+ }
+ }
+ node.Typed = i
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/checker/types.go b/vendor/github.com/antonmedv/expr/checker/types.go
new file mode 100644
index 00000000000..7ccd8948091
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/checker/types.go
@@ -0,0 +1,262 @@
+package checker
+
+import (
+ "reflect"
+ "time"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/conf"
+)
+
+var (
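+ // Note: reflect.TypeOf(nil) returns nil, so nilType is the nil reflect.Type.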
+ nilType = reflect.TypeOf(nil)
+ boolType = reflect.TypeOf(true)
+ integerType = reflect.TypeOf(0)
+ floatType = reflect.TypeOf(float64(0))
+ stringType = reflect.TypeOf("")
+ arrayType = reflect.TypeOf([]interface{}{})
+ mapType = reflect.TypeOf(map[string]interface{}{})
+ anyType = reflect.TypeOf(new(interface{})).Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ functionType = reflect.TypeOf(new(func(...interface{}) (interface{}, error))).Elem()
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+)
+
+func combined(a, b reflect.Type) reflect.Type {
+ if a.Kind() == b.Kind() {
+ return a
+ }
+ if isFloat(a) || isFloat(b) {
+ return floatType
+ }
+ return integerType
+}
+
+func anyOf(t reflect.Type, fns ...func(reflect.Type) bool) bool {
+ for _, fn := range fns {
+ if fn(t) {
+ return true
+ }
+ }
+ return false
+}
+
+func or(l, r reflect.Type, fns ...func(reflect.Type) bool) bool {
+ if isAny(l) && isAny(r) {
+ return true
+ }
+ if isAny(l) && anyOf(r, fns...) {
+ return true
+ }
+ if isAny(r) && anyOf(l, fns...) {
+ return true
+ }
+ return false
+}
+
+func isAny(t reflect.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case reflect.Interface:
+ return true
+ }
+ }
+ return false
+}
+
+func isInteger(t reflect.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ fallthrough
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return true
+ }
+ }
+ return false
+}
+
+func isFloat(t reflect.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case reflect.Float32, reflect.Float64:
+ return true
+ }
+ }
+ return false
+}
+
+func isNumber(t reflect.Type) bool {
+ return isInteger(t) || isFloat(t)
+}
+
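+// isTime also treats any interface type as a potential time.Time.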
+func isTime(t reflect.Type) bool {
+ if t != nil {
+ switch t {
+ case timeType:
+ return true
+ }
+ }
+ return isAny(t)
+}
+
+func isDuration(t reflect.Type) bool {
+ if t != nil {
+ switch t {
+ case durationType:
+ return true
+ }
+ }
+ return false
+}
+
+func isBool(t reflect.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case reflect.Bool:
+ return true
+ }
+ }
+ return false
+}
+
+func isString(t reflect.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case reflect.String:
+ return true
+ }
+ }
+ return false
+}
+
+func isArray(t reflect.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case reflect.Ptr:
+ return isArray(t.Elem())
+ case reflect.Slice, reflect.Array:
+ return true
+ }
+ }
+ return false
+}
+
+func isMap(t reflect.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case reflect.Ptr:
+ return isMap(t.Elem())
+ case reflect.Map:
+ return true
+ }
+ }
+ return false
+}
+
+func isStruct(t reflect.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case reflect.Ptr:
+ return isStruct(t.Elem())
+ case reflect.Struct:
+ return true
+ }
+ }
+ return false
+}
+
+func isFunc(t reflect.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case reflect.Ptr:
+ return isFunc(t.Elem())
+ case reflect.Func:
+ return true
+ }
+ }
+ return false
+}
+
+func fetchField(t reflect.Type, name string) (reflect.StructField, bool) {
+ if t != nil {
+ // First, check the struct's own fields.
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ // Search all fields, even embedded structs.
+ if conf.FieldName(field) == name {
+ return field, true
+ }
+ }
+
+ // Then check fields promoted from embedded structs.
+ for i := 0; i < t.NumField(); i++ {
+ anon := t.Field(i)
+ if anon.Anonymous {
+ if field, ok := fetchField(anon.Type, name); ok {
+ field.Index = append(anon.Index, field.Index...)
+ return field, true
+ }
+ }
+ }
+ }
+ return reflect.StructField{}, false
+}
+
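+// deref unwraps pointer types, reporting whether a runtime dereference is
+// needed; pointers to structs, maps, arrays, and slices are kept as-is, and
+// interface types always require a dereference.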
+func deref(t reflect.Type) (reflect.Type, bool) {
+ if t == nil {
+ return nil, false
+ }
+ if t.Kind() == reflect.Interface {
+ return t, true
+ }
+ found := false
+ for t != nil && t.Kind() == reflect.Ptr {
+ e := t.Elem()
+ switch e.Kind() {
+ case reflect.Struct, reflect.Map, reflect.Array, reflect.Slice:
+ return t, false
+ default:
+ found = true
+ t = e
+ }
+ }
+ return t, found
+}
+
+func isIntegerOrArithmeticOperation(node ast.Node) bool {
+ switch n := node.(type) {
+ case *ast.IntegerNode:
+ return true
+ case *ast.UnaryNode:
+ switch n.Operator {
+ case "+", "-":
+ return true
+ }
+ case *ast.BinaryNode:
+ switch n.Operator {
+ case "+", "/", "-", "*":
+ return true
+ }
+ }
+ return false
+}
+
+func setTypeForIntegers(node ast.Node, t reflect.Type) {
+ switch n := node.(type) {
+ case *ast.IntegerNode:
+ n.SetType(t)
+ case *ast.UnaryNode:
+ switch n.Operator {
+ case "+", "-":
+ setTypeForIntegers(n.Node, t)
+ }
+ case *ast.BinaryNode:
+ switch n.Operator {
+ case "+", "/", "-", "*":
+ setTypeForIntegers(n.Left, t)
+ setTypeForIntegers(n.Right, t)
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/compiler/compiler.go b/vendor/github.com/antonmedv/expr/compiler/compiler.go
new file mode 100644
index 00000000000..3cd32af0f27
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/compiler/compiler.go
@@ -0,0 +1,739 @@
+package compiler
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/conf"
+ "github.com/antonmedv/expr/file"
+ "github.com/antonmedv/expr/parser"
+ . "github.com/antonmedv/expr/vm"
+ "github.com/antonmedv/expr/vm/runtime"
+)
+
+const (
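+ // placeholder is a dummy jump offset; patchJump rewrites it once the real target is known.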
+ placeholder = 12345
+)
+
+func Compile(tree *parser.Tree, config *conf.Config) (program *Program, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("%v", r)
+ }
+ }()
+
+ c := &compiler{
+ locations: make([]file.Location, 0),
+ constantsIndex: make(map[interface{}]int),
+ functionsIndex: make(map[string]int),
+ }
+
+ if config != nil {
+ c.mapEnv = config.MapEnv
+ c.cast = config.Expect
+ }
+
+ c.compile(tree.Node)
+
+ switch c.cast {
+ case reflect.Int:
+ c.emit(OpCast, 0)
+ case reflect.Int64:
+ c.emit(OpCast, 1)
+ case reflect.Float64:
+ c.emit(OpCast, 2)
+ }
+
+ program = &Program{
+ Node: tree.Node,
+ Source: tree.Source,
+ Locations: c.locations,
+ Constants: c.constants,
+ Bytecode: c.bytecode,
+ Arguments: c.arguments,
+ Functions: c.functions,
+ }
+ return
+}
+
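+// compiler lowers the AST to bytecode. chains holds pending OpJumpIfNil
+// placeholders for each enclosing optional chain (?.), patched when the chain
+// completes.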
+type compiler struct {
+ locations []file.Location
+ bytecode []Opcode
+ constants []interface{}
+ constantsIndex map[interface{}]int
+ functions []Function
+ functionsIndex map[string]int
+ mapEnv bool
+ cast reflect.Kind
+ nodes []ast.Node
+ chains [][]int
+ arguments []int
+}
+
+func (c *compiler) emitLocation(loc file.Location, op Opcode, arg int) int {
+ c.bytecode = append(c.bytecode, op)
+ current := len(c.bytecode)
+ c.arguments = append(c.arguments, arg)
+ c.locations = append(c.locations, loc)
+ return current
+}
+
+func (c *compiler) emit(op Opcode, args ...int) int {
+ arg := 0
+ if len(args) > 1 {
+ panic("too many arguments")
+ }
+ if len(args) == 1 {
+ arg = args[0]
+ }
+ var loc file.Location
+ if len(c.nodes) > 0 {
+ loc = c.nodes[len(c.nodes)-1].Location()
+ }
+ return c.emitLocation(loc, op, arg)
+}
+
+func (c *compiler) emitPush(value interface{}) int {
+ return c.emit(OpPush, c.addConstant(value))
+}
+
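+// addConstant interns a constant in the constants pool, deduplicating hashable
+// values (Field and Method pointers are keyed by their formatted string).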
+func (c *compiler) addConstant(constant interface{}) int {
+ indexable := true
+ hash := constant
+ switch reflect.TypeOf(constant).Kind() {
+ case reflect.Slice, reflect.Map, reflect.Struct:
+ indexable = false
+ }
+ if field, ok := constant.(*runtime.Field); ok {
+ indexable = true
+ hash = fmt.Sprintf("%v", field)
+ }
+ if method, ok := constant.(*runtime.Method); ok {
+ indexable = true
+ hash = fmt.Sprintf("%v", method)
+ }
+ if indexable {
+ if p, ok := c.constantsIndex[hash]; ok {
+ return p
+ }
+ }
+ c.constants = append(c.constants, constant)
+ p := len(c.constants) - 1
+ if indexable {
+ c.constantsIndex[hash] = p
+ }
+ return p
+}
+
+func (c *compiler) addFunction(node *ast.CallNode) int {
+ if node.Func == nil {
+ panic("function is nil")
+ }
+ if p, ok := c.functionsIndex[node.Func.Name]; ok {
+ return p
+ }
+ p := len(c.functions)
+ c.functions = append(c.functions, node.Func.Func)
+ c.functionsIndex[node.Func.Name] = p
+ return p
+}
+
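+// patchJump back-fills the argument of the jump emitted at placeholder with
+// the forward distance from the instruction after the jump to the current end
+// of the bytecode.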
+func (c *compiler) patchJump(placeholder int) {
+ offset := len(c.bytecode) - placeholder
+ c.arguments[placeholder-1] = offset
+}
+
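+// calcBackwardJump computes the distance the upcoming OpJumpBackward must
+// subtract to land on position to.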
+func (c *compiler) calcBackwardJump(to int) int {
+ return len(c.bytecode) + 1 - to
+}
+
+func (c *compiler) compile(node ast.Node) {
+ c.nodes = append(c.nodes, node)
+ defer func() {
+ c.nodes = c.nodes[:len(c.nodes)-1]
+ }()
+
+ switch n := node.(type) {
+ case *ast.NilNode:
+ c.NilNode(n)
+ case *ast.IdentifierNode:
+ c.IdentifierNode(n)
+ case *ast.IntegerNode:
+ c.IntegerNode(n)
+ case *ast.FloatNode:
+ c.FloatNode(n)
+ case *ast.BoolNode:
+ c.BoolNode(n)
+ case *ast.StringNode:
+ c.StringNode(n)
+ case *ast.ConstantNode:
+ c.ConstantNode(n)
+ case *ast.UnaryNode:
+ c.UnaryNode(n)
+ case *ast.BinaryNode:
+ c.BinaryNode(n)
+ case *ast.ChainNode:
+ c.ChainNode(n)
+ case *ast.MemberNode:
+ c.MemberNode(n)
+ case *ast.SliceNode:
+ c.SliceNode(n)
+ case *ast.CallNode:
+ c.CallNode(n)
+ case *ast.BuiltinNode:
+ c.BuiltinNode(n)
+ case *ast.ClosureNode:
+ c.ClosureNode(n)
+ case *ast.PointerNode:
+ c.PointerNode(n)
+ case *ast.ConditionalNode:
+ c.ConditionalNode(n)
+ case *ast.ArrayNode:
+ c.ArrayNode(n)
+ case *ast.MapNode:
+ c.MapNode(n)
+ case *ast.PairNode:
+ c.PairNode(n)
+ default:
+ panic(fmt.Sprintf("undefined node type (%T)", node))
+ }
+}
+
+func (c *compiler) NilNode(_ *ast.NilNode) {
+ c.emit(OpNil)
+}
+
+func (c *compiler) IdentifierNode(node *ast.IdentifierNode) {
+ if c.mapEnv {
+ c.emit(OpLoadFast, c.addConstant(node.Value))
+ } else if len(node.FieldIndex) > 0 {
+ c.emit(OpLoadField, c.addConstant(&runtime.Field{
+ Index: node.FieldIndex,
+ Path: []string{node.Value},
+ }))
+ } else if node.Method {
+ c.emit(OpLoadMethod, c.addConstant(&runtime.Method{
+ Name: node.Value,
+ Index: node.MethodIndex,
+ }))
+ } else {
+ c.emit(OpLoadConst, c.addConstant(node.Value))
+ }
+ if node.Deref {
+ c.emit(OpDeref)
+ } else if node.Type() == nil {
+ c.emit(OpDeref)
+ }
+}
+
+func (c *compiler) IntegerNode(node *ast.IntegerNode) {
+ t := node.Type()
+ if t == nil {
+ c.emitPush(node.Value)
+ return
+ }
+ switch t.Kind() {
+ case reflect.Float32:
+ c.emitPush(float32(node.Value))
+ case reflect.Float64:
+ c.emitPush(float64(node.Value))
+ case reflect.Int:
+ c.emitPush(node.Value)
+ case reflect.Int8:
+ c.emitPush(int8(node.Value))
+ case reflect.Int16:
+ c.emitPush(int16(node.Value))
+ case reflect.Int32:
+ c.emitPush(int32(node.Value))
+ case reflect.Int64:
+ c.emitPush(int64(node.Value))
+ case reflect.Uint:
+ c.emitPush(uint(node.Value))
+ case reflect.Uint8:
+ c.emitPush(uint8(node.Value))
+ case reflect.Uint16:
+ c.emitPush(uint16(node.Value))
+ case reflect.Uint32:
+ c.emitPush(uint32(node.Value))
+ case reflect.Uint64:
+ c.emitPush(uint64(node.Value))
+ default:
+ c.emitPush(node.Value)
+ }
+}
+
+func (c *compiler) FloatNode(node *ast.FloatNode) {
+ c.emitPush(node.Value)
+}
+
+func (c *compiler) BoolNode(node *ast.BoolNode) {
+ if node.Value {
+ c.emit(OpTrue)
+ } else {
+ c.emit(OpFalse)
+ }
+}
+
+func (c *compiler) StringNode(node *ast.StringNode) {
+ c.emitPush(node.Value)
+}
+
+func (c *compiler) ConstantNode(node *ast.ConstantNode) {
+ c.emitPush(node.Value)
+}
+
+func (c *compiler) UnaryNode(node *ast.UnaryNode) {
+ c.compile(node.Node)
+
+ switch node.Operator {
+
+ case "!", "not":
+ c.emit(OpNot)
+
+ case "+":
+ // Do nothing
+
+ case "-":
+ c.emit(OpNegate)
+
+ default:
+ panic(fmt.Sprintf("unknown operator (%v)", node.Operator))
+ }
+}
+
+func (c *compiler) BinaryNode(node *ast.BinaryNode) {
+ l := kind(node.Left)
+ r := kind(node.Right)
+
+ switch node.Operator {
+ case "==":
+ c.compile(node.Left)
+ c.compile(node.Right)
+
+ if l == r && l == reflect.Int {
+ c.emit(OpEqualInt)
+ } else if l == r && l == reflect.String {
+ c.emit(OpEqualString)
+ } else {
+ c.emit(OpEqual)
+ }
+
+ case "!=":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpEqual)
+ c.emit(OpNot)
+
+ case "or", "||":
+ c.compile(node.Left)
+ end := c.emit(OpJumpIfTrue, placeholder)
+ c.emit(OpPop)
+ c.compile(node.Right)
+ c.patchJump(end)
+
+ case "and", "&&":
+ c.compile(node.Left)
+ end := c.emit(OpJumpIfFalse, placeholder)
+ c.emit(OpPop)
+ c.compile(node.Right)
+ c.patchJump(end)
+
+ case "<":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpLess)
+
+ case ">":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpMore)
+
+ case "<=":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpLessOrEqual)
+
+ case ">=":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpMoreOrEqual)
+
+ case "+":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpAdd)
+
+ case "-":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpSubtract)
+
+ case "*":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpMultiply)
+
+ case "/":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpDivide)
+
+ case "%":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpModulo)
+
+ case "**", "^":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpExponent)
+
+ case "in":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpIn)
+
+ case "matches":
+ if node.Regexp != nil {
+ c.compile(node.Left)
+ c.emit(OpMatchesConst, c.addConstant(node.Regexp))
+ } else {
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpMatches)
+ }
+
+ case "contains":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpContains)
+
+ case "startsWith":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpStartsWith)
+
+ case "endsWith":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpEndsWith)
+
+ case "..":
+ c.compile(node.Left)
+ c.compile(node.Right)
+ c.emit(OpRange)
+
+ case "??":
+ c.compile(node.Left)
+ end := c.emit(OpJumpIfNotNil, placeholder)
+ c.emit(OpPop)
+ c.compile(node.Right)
+ c.patchJump(end)
+
+ default:
+ panic(fmt.Sprintf("unknown operator (%v)", node.Operator))
+
+ }
+}
+
+func (c *compiler) ChainNode(node *ast.ChainNode) {
+ c.chains = append(c.chains, []int{})
+ c.compile(node.Node)
+ // End of the chain: patch all pending jumps here (a nil was found somewhere along the chain).
+ for _, ph := range c.chains[len(c.chains)-1] {
+ c.patchJump(ph)
+ }
+ c.chains = c.chains[:len(c.chains)-1]
+}
+
+func (c *compiler) MemberNode(node *ast.MemberNode) {
+ if node.Method {
+ c.compile(node.Node)
+ c.emit(OpMethod, c.addConstant(&runtime.Method{
+ Name: node.Name,
+ Index: node.MethodIndex,
+ }))
+ return
+ }
+ op := OpFetch
+ original := node
+ index := node.FieldIndex
+ path := []string{node.Name}
+ base := node.Node
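+ // Fold an unbroken run of non-optional struct field accesses into a single
+ // fetch with a combined field-index path.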
+ if len(node.FieldIndex) > 0 {
+ op = OpFetchField
+ for !node.Optional {
+ ident, ok := base.(*ast.IdentifierNode)
+ if ok && len(ident.FieldIndex) > 0 {
+ if ident.Deref {
+ panic("IdentifierNode should not be dereferenced")
+ }
+ index = append(ident.FieldIndex, index...)
+ path = append([]string{ident.Value}, path...)
+ c.emitLocation(ident.Location(), OpLoadField, c.addConstant(
+ &runtime.Field{Index: index, Path: path},
+ ))
+ goto deref
+ }
+ member, ok := base.(*ast.MemberNode)
+ if ok && len(member.FieldIndex) > 0 {
+ if member.Deref {
+ panic("MemberNode should not be dereferenced")
+ }
+ index = append(member.FieldIndex, index...)
+ path = append([]string{member.Name}, path...)
+ node = member
+ base = member.Node
+ } else {
+ break
+ }
+ }
+ }
+
+ c.compile(base)
+ if node.Optional {
+ ph := c.emit(OpJumpIfNil, placeholder)
+ c.chains[len(c.chains)-1] = append(c.chains[len(c.chains)-1], ph)
+ }
+
+ if op == OpFetch {
+ c.compile(node.Property)
+ c.emit(OpFetch)
+ } else {
+ c.emitLocation(node.Location(), op, c.addConstant(
+ &runtime.Field{Index: index, Path: path},
+ ))
+ }
+
+deref:
+ if original.Deref {
+ c.emit(OpDeref)
+ } else if original.Type() == nil {
+ c.emit(OpDeref)
+ }
+}
+
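+// SliceNode pushes the upper bound (or the length via OpLen) before the lower
+// bound (or a default of 0) for OpSlice.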
+func (c *compiler) SliceNode(node *ast.SliceNode) {
+ c.compile(node.Node)
+ if node.To != nil {
+ c.compile(node.To)
+ } else {
+ c.emit(OpLen)
+ }
+ if node.From != nil {
+ c.compile(node.From)
+ } else {
+ c.emitPush(0)
+ }
+ c.emit(OpSlice)
+}
+
+func (c *compiler) CallNode(node *ast.CallNode) {
+ for _, arg := range node.Arguments {
+ c.compile(arg)
+ }
+ if node.Func != nil {
+ if node.Func.Opcode > 0 {
+ c.emit(OpBuiltin, node.Func.Opcode)
+ return
+ }
+ switch len(node.Arguments) {
+ case 0:
+ c.emit(OpCall0, c.addFunction(node))
+ case 1:
+ c.emit(OpCall1, c.addFunction(node))
+ case 2:
+ c.emit(OpCall2, c.addFunction(node))
+ case 3:
+ c.emit(OpCall3, c.addFunction(node))
+ default:
+ c.emit(OpLoadFunc, c.addFunction(node))
+ c.emit(OpCallN, len(node.Arguments))
+ }
+ return
+ }
+ c.compile(node.Callee)
+ if node.Typed > 0 {
+ c.emit(OpCallTyped, node.Typed)
+ return
+ } else if node.Fast {
+ c.emit(OpCallFast, len(node.Arguments))
+ } else {
+ c.emit(OpCall, len(node.Arguments))
+ }
+}
+
+func (c *compiler) BuiltinNode(node *ast.BuiltinNode) {
+ switch node.Name {
+ case "all":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ var loopBreak int
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ loopBreak = c.emit(OpJumpIfFalse, placeholder)
+ c.emit(OpPop)
+ })
+ c.emit(OpTrue)
+ c.patchJump(loopBreak)
+ c.emit(OpEnd)
+
+ case "none":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ var loopBreak int
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ c.emit(OpNot)
+ loopBreak = c.emit(OpJumpIfFalse, placeholder)
+ c.emit(OpPop)
+ })
+ c.emit(OpTrue)
+ c.patchJump(loopBreak)
+ c.emit(OpEnd)
+
+ case "any":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ var loopBreak int
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ loopBreak = c.emit(OpJumpIfTrue, placeholder)
+ c.emit(OpPop)
+ })
+ c.emit(OpFalse)
+ c.patchJump(loopBreak)
+ c.emit(OpEnd)
+
+ case "one":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ c.emitCond(func() {
+ c.emit(OpIncrementCount)
+ })
+ })
+ c.emit(OpGetCount)
+ c.emitPush(1)
+ c.emit(OpEqual)
+ c.emit(OpEnd)
+
+ case "filter":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ c.emitCond(func() {
+ c.emit(OpIncrementCount)
+ c.emit(OpPointer)
+ })
+ })
+ c.emit(OpGetCount)
+ c.emit(OpEnd)
+ c.emit(OpArray)
+
+ case "map":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ })
+ c.emit(OpGetLen)
+ c.emit(OpEnd)
+ c.emit(OpArray)
+
+ case "count":
+ c.compile(node.Arguments[0])
+ c.emit(OpBegin)
+ c.emitLoop(func() {
+ c.compile(node.Arguments[1])
+ c.emitCond(func() {
+ c.emit(OpIncrementCount)
+ })
+ })
+ c.emit(OpGetCount)
+ c.emit(OpEnd)
+
+ default:
+ panic(fmt.Sprintf("unknown builtin %v", node.Name))
+ }
+}
+
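+// emitCond runs body only when the value on top of the stack is true; the
+// condition is popped on both paths.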
+func (c *compiler) emitCond(body func()) {
+ noop := c.emit(OpJumpIfFalse, placeholder)
+ c.emit(OpPop)
+
+ body()
+
+ jmp := c.emit(OpJump, placeholder)
+ c.patchJump(noop)
+ c.emit(OpPop)
+ c.patchJump(jmp)
+}
+
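+// emitLoop emits the iteration skeleton: jump out when the iterator is
+// exhausted, run body, advance the iterator, and jump back.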
+func (c *compiler) emitLoop(body func()) {
+ begin := len(c.bytecode)
+ end := c.emit(OpJumpIfEnd, placeholder)
+
+ body()
+
+ c.emit(OpIncrementIt)
+ c.emit(OpJumpBackward, c.calcBackwardJump(begin))
+ c.patchJump(end)
+}
+
+func (c *compiler) ClosureNode(node *ast.ClosureNode) {
+ c.compile(node.Node)
+}
+
+func (c *compiler) PointerNode(node *ast.PointerNode) {
+ c.emit(OpPointer)
+}
+
+func (c *compiler) ConditionalNode(node *ast.ConditionalNode) {
+ c.compile(node.Cond)
+ otherwise := c.emit(OpJumpIfFalse, placeholder)
+
+ c.emit(OpPop)
+ c.compile(node.Exp1)
+ end := c.emit(OpJump, placeholder)
+
+ c.patchJump(otherwise)
+ c.emit(OpPop)
+ c.compile(node.Exp2)
+
+ c.patchJump(end)
+}
+
+func (c *compiler) ArrayNode(node *ast.ArrayNode) {
+ for _, node := range node.Nodes {
+ c.compile(node)
+ }
+
+ c.emitPush(len(node.Nodes))
+ c.emit(OpArray)
+}
+
+func (c *compiler) MapNode(node *ast.MapNode) {
+ for _, pair := range node.Pairs {
+ c.compile(pair)
+ }
+
+ c.emitPush(len(node.Pairs))
+ c.emit(OpMap)
+}
+
+func (c *compiler) PairNode(node *ast.PairNode) {
+ c.compile(node.Key)
+ c.compile(node.Value)
+}
+
+func kind(node ast.Node) reflect.Kind {
+ t := node.Type()
+ if t == nil {
+ return reflect.Invalid
+ }
+ return t.Kind()
+}
diff --git a/vendor/github.com/antonmedv/expr/conf/config.go b/vendor/github.com/antonmedv/expr/conf/config.go
new file mode 100644
index 00000000000..1ac0fa7d291
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/conf/config.go
@@ -0,0 +1,96 @@
+package conf
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/vm/runtime"
+)
+
+type Config struct {
+ Env interface{}
+ Types TypesTable
+ MapEnv bool
+ DefaultType reflect.Type
+ Operators OperatorsTable
+ Expect reflect.Kind
+ Optimize bool
+ Strict bool
+ ConstFns map[string]reflect.Value
+ Visitors []ast.Visitor
+ Functions map[string]*builtin.Function
+}
+
+// CreateNew creates a new config with default values.
+func CreateNew() *Config {
+ c := &Config{
+ Operators: make(map[string][]string),
+ ConstFns: make(map[string]reflect.Value),
+ Functions: make(map[string]*builtin.Function),
+ Optimize: true,
+ }
+ for _, f := range builtin.Builtins {
+ c.Functions[f.Name] = f
+ }
+ return c
+}
+
+// New creates a new config with the given environment.
+func New(env interface{}) *Config {
+ c := CreateNew()
+ c.WithEnv(env)
+ return c
+}
+
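+// WithEnv installs env as the type-checking environment: a map[string]interface{}
+// enables the fast map lookup path, while for other map types the value type
+// becomes the default for unknown names (when undefined variables are allowed).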
+func (c *Config) WithEnv(env interface{}) {
+ var mapEnv bool
+ var mapValueType reflect.Type
+ if _, ok := env.(map[string]interface{}); ok {
+ mapEnv = true
+ } else {
+ if reflect.ValueOf(env).Kind() == reflect.Map {
+ mapValueType = reflect.TypeOf(env).Elem()
+ }
+ }
+
+ c.Env = env
+ c.Types = CreateTypesTable(env)
+ c.MapEnv = mapEnv
+ c.DefaultType = mapValueType
+ c.Strict = true
+}
+
+func (c *Config) Operator(operator string, fns ...string) {
+ c.Operators[operator] = append(c.Operators[operator], fns...)
+}
+
+func (c *Config) ConstExpr(name string) {
+ if c.Env == nil {
+ panic("no environment is specified for ConstExpr()")
+ }
+ fn := reflect.ValueOf(runtime.Fetch(c.Env, name))
+ if fn.Kind() != reflect.Func {
+ panic(fmt.Errorf("const expression %q must be a function", name))
+ }
+ c.ConstFns[name] = fn
+}
+
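+// Check verifies that every operator-overload function exists in the
+// environment and has a binary signature: two inputs (plus the receiver for
+// methods) and one output.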
+func (c *Config) Check() {
+ for operator, fns := range c.Operators {
+ for _, fn := range fns {
+ fnType, ok := c.Types[fn]
+ if !ok || fnType.Type.Kind() != reflect.Func {
+ panic(fmt.Errorf("function %s for %s operator does not exist in the environment", fn, operator))
+ }
+ requiredNumIn := 2
+ if fnType.Method {
+ requiredNumIn = 3 // the first argument of a method is the receiver.
+ }
+ if fnType.Type.NumIn() != requiredNumIn || fnType.Type.NumOut() != 1 {
+ panic(fmt.Errorf("function %s for %s operator does not have a correct signature", fn, operator))
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/conf/functions.go b/vendor/github.com/antonmedv/expr/conf/functions.go
new file mode 100644
index 00000000000..8f52a955753
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/conf/functions.go
@@ -0,0 +1 @@
+package conf
diff --git a/vendor/github.com/antonmedv/expr/conf/operators.go b/vendor/github.com/antonmedv/expr/conf/operators.go
new file mode 100644
index 00000000000..13e069d76ca
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/conf/operators.go
@@ -0,0 +1,59 @@
+package conf
+
+import (
+ "reflect"
+
+ "github.com/antonmedv/expr/ast"
+)
+
+// OperatorsTable maps binary operators to corresponding list of functions.
+// Functions should be provided in the environment to allow operator overloading.
+type OperatorsTable map[string][]string
+
+func FindSuitableOperatorOverload(fns []string, types TypesTable, l, r reflect.Type) (reflect.Type, string, bool) {
+ for _, fn := range fns {
+ fnType := types[fn]
+ firstInIndex := 0
+ if fnType.Method {
+ firstInIndex = 1 // the first argument of a method is the receiver.
+ }
+ firstArgType := fnType.Type.In(firstInIndex)
+ secondArgType := fnType.Type.In(firstInIndex + 1)
+
+ firstArgumentFit := l == firstArgType || (firstArgType.Kind() == reflect.Interface && (l == nil || l.Implements(firstArgType)))
+ secondArgumentFit := r == secondArgType || (secondArgType.Kind() == reflect.Interface && (r == nil || r.Implements(secondArgType)))
+ if firstArgumentFit && secondArgumentFit {
+ return fnType.Type.Out(0), fn, true
+ }
+ }
+ return nil, "", false
+}
+
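+// OperatorPatcher rewrites overloaded binary operators into calls to the
+// matching function from the environment.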
+type OperatorPatcher struct {
+ Operators OperatorsTable
+ Types TypesTable
+}
+
+func (p *OperatorPatcher) Visit(node *ast.Node) {
+ binaryNode, ok := (*node).(*ast.BinaryNode)
+ if !ok {
+ return
+ }
+
+ fns, ok := p.Operators[binaryNode.Operator]
+ if !ok {
+ return
+ }
+
+ leftType := binaryNode.Left.Type()
+ rightType := binaryNode.Right.Type()
+
+ _, fn, ok := FindSuitableOperatorOverload(fns, p.Types, leftType, rightType)
+ if ok {
+ newNode := &ast.CallNode{
+ Callee: &ast.IdentifierNode{Value: fn},
+ Arguments: []ast.Node{binaryNode.Left, binaryNode.Right},
+ }
+ ast.Patch(node, newNode)
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/conf/types_table.go b/vendor/github.com/antonmedv/expr/conf/types_table.go
new file mode 100644
index 00000000000..e917f5fa844
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/conf/types_table.go
@@ -0,0 +1,123 @@
+package conf
+
+import (
+ "reflect"
+)
+
+type Tag struct {
+ Type reflect.Type
+ Ambiguous bool
+ FieldIndex []int
+ Method bool
+ MethodIndex int
+}
+
+type TypesTable map[string]Tag
+
+// CreateTypesTable creates a types table for type checks during parsing.
+// If a struct is passed, all of its fields are treated as variables,
+// as are all fields of embedded structs and the struct itself.
+//
+// If a map is passed, all of its items are treated as variables
+// (key as name, value as type).
+func CreateTypesTable(i interface{}) TypesTable {
+ if i == nil {
+ return nil
+ }
+
+ types := make(TypesTable)
+ v := reflect.ValueOf(i)
+ t := reflect.TypeOf(i)
+
+ d := t
+ if t.Kind() == reflect.Ptr {
+ d = t.Elem()
+ }
+
+ switch d.Kind() {
+ case reflect.Struct:
+ types = FieldsFromStruct(d)
+
+ // Methods should be gathered from the original (possibly pointer) type,
+ // as they may be declared on a pointer receiver. This also picks up the
+ // methods of embedded structs, so no recursion is needed.
+ for i := 0; i < t.NumMethod(); i++ {
+ m := t.Method(i)
+ types[m.Name] = Tag{
+ Type: m.Type,
+ Method: true,
+ MethodIndex: i,
+ }
+ }
+
+ case reflect.Map:
+ for _, key := range v.MapKeys() {
+ value := v.MapIndex(key)
+ if key.Kind() == reflect.String && value.IsValid() && value.CanInterface() {
+ types[key.String()] = Tag{Type: reflect.TypeOf(value.Interface())}
+ }
+ }
+
+ // A map type may have methods too.
+ for i := 0; i < t.NumMethod(); i++ {
+ m := t.Method(i)
+ types[m.Name] = Tag{
+ Type: m.Type,
+ Method: true,
+ MethodIndex: i,
+ }
+ }
+ }
+
+ return types
+}
+
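+// FieldsFromStruct collects a struct's fields, including fields promoted from
+// embedded structs, marking promoted names that collide as Ambiguous.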
+func FieldsFromStruct(t reflect.Type) TypesTable {
+ types := make(TypesTable)
+ t = dereference(t)
+ if t == nil {
+ return types
+ }
+
+ switch t.Kind() {
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if f.Anonymous {
+ for name, typ := range FieldsFromStruct(f.Type) {
+ if _, ok := types[name]; ok {
+ types[name] = Tag{Ambiguous: true}
+ } else {
+ typ.FieldIndex = append(f.Index, typ.FieldIndex...)
+ types[name] = typ
+ }
+ }
+ }
+
+ types[FieldName(f)] = Tag{
+ Type: f.Type,
+ FieldIndex: f.Index,
+ }
+ }
+ }
+
+ return types
+}
+
+func dereference(t reflect.Type) reflect.Type {
+ if t == nil {
+ return nil
+ }
+ if t.Kind() == reflect.Ptr {
+ t = dereference(t.Elem())
+ }
+ return t
+}
+
+func FieldName(field reflect.StructField) string {
+ if taggedName := field.Tag.Get("expr"); taggedName != "" {
+ return taggedName
+ }
+ return field.Name
+}
diff --git a/vendor/github.com/antonmedv/expr/expr.go b/vendor/github.com/antonmedv/expr/expr.go
new file mode 100644
index 00000000000..14f6af285c5
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/expr.go
@@ -0,0 +1,205 @@
+package expr
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/checker"
+ "github.com/antonmedv/expr/compiler"
+ "github.com/antonmedv/expr/conf"
+ "github.com/antonmedv/expr/file"
+ "github.com/antonmedv/expr/optimizer"
+ "github.com/antonmedv/expr/parser"
+ "github.com/antonmedv/expr/vm"
+)
+
+// Option configures a Config.
+type Option func(c *conf.Config)
+
+// Env specifies expected input of env for type checks.
+// If struct is passed, all fields will be treated as variables,
+// as well as all fields of embedded structs and struct itself.
+// If map is passed, all items will be treated as variables.
+// Methods defined on this type will be available as functions.
+func Env(env interface{}) Option {
+ return func(c *conf.Config) {
+ c.WithEnv(env)
+ }
+}
+
+// AllowUndefinedVariables allows the use of undefined variables inside expressions.
+// This can be combined with the expr.Env option to define only some of the variables.
+func AllowUndefinedVariables() Option {
+ return func(c *conf.Config) {
+ c.Strict = false
+ }
+}
+
+// Operator allows a binary operator to be replaced with a function.
+func Operator(operator string, fn ...string) Option {
+ return func(c *conf.Config) {
+ c.Operator(operator, fn...)
+ }
+}
+
+// ConstExpr marks a function as a constant expression. If all arguments to the
+// function are constants, the call is replaced by its result at compile time.
+func ConstExpr(fn string) Option {
+ return func(c *conf.Config) {
+ c.ConstExpr(fn)
+ }
+}
+
+// AsKind tells the compiler to expect the result to be of the given kind.
+func AsKind(kind reflect.Kind) Option {
+ return func(c *conf.Config) {
+ c.Expect = kind
+ }
+}
+
+// AsBool tells the compiler to expect a boolean result.
+func AsBool() Option {
+ return func(c *conf.Config) {
+ c.Expect = reflect.Bool
+ }
+}
+
+// AsInt tells the compiler to expect an int result.
+func AsInt() Option {
+ return func(c *conf.Config) {
+ c.Expect = reflect.Int
+ }
+}
+
+// AsInt64 tells the compiler to expect an int64 result.
+func AsInt64() Option {
+ return func(c *conf.Config) {
+ c.Expect = reflect.Int64
+ }
+}
+
+// AsFloat64 tells the compiler to expect a float64 result.
+func AsFloat64() Option {
+ return func(c *conf.Config) {
+ c.Expect = reflect.Float64
+ }
+}
+
+// Optimize turns optimizations on or off.
+func Optimize(b bool) Option {
+ return func(c *conf.Config) {
+ c.Optimize = b
+ }
+}
+
+// Patch adds a visitor to the list of visitors applied before the AST is compiled to bytecode.
+func Patch(visitor ast.Visitor) Option {
+ return func(c *conf.Config) {
+ c.Visitors = append(c.Visitors, visitor)
+ }
+}
+
+// Function adds a function to the list of functions available in expressions.
+func Function(name string, fn func(params ...interface{}) (interface{}, error), types ...interface{}) Option {
+ return func(c *conf.Config) {
+ ts := make([]reflect.Type, len(types))
+ for i, t := range types {
+ t := reflect.TypeOf(t)
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.Kind() != reflect.Func {
+ panic(fmt.Sprintf("expr: type of %s is not a function", name))
+ }
+ ts[i] = t
+ }
+ c.Functions[name] = &builtin.Function{
+ Name: name,
+ Func: fn,
+ Types: ts,
+ }
+ }
+}
+
+// Compile parses and compiles the given input expression into a bytecode program.
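+//
+// A minimal usage sketch (values are illustrative):
+//
+//	program, err := expr.Compile("x > 0", expr.AsBool())
+//	if err != nil {
+//		// handle err
+//	}
+//	out, err := expr.Run(program, map[string]interface{}{"x": 1})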
+func Compile(input string, ops ...Option) (*vm.Program, error) {
+ config := conf.CreateNew()
+
+ for _, op := range ops {
+ op(config)
+ }
+ config.Check()
+
+ if len(config.Operators) > 0 {
+ config.Visitors = append(config.Visitors, &conf.OperatorPatcher{
+ Operators: config.Operators,
+ Types: config.Types,
+ })
+ }
+
+ tree, err := parser.Parse(input)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(config.Visitors) > 0 {
+ for _, v := range config.Visitors {
+ // We need to run the type checker first, because some visitors rely on
+ // type information attached to the tree.
+ _, _ = checker.Check(tree, config)
+ ast.Walk(&tree.Node, v)
+ }
+ _, err = checker.Check(tree, config)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ _, err = checker.Check(tree, config)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if config.Optimize {
+ err = optimizer.Optimize(&tree.Node, config)
+ if err != nil {
+ if fileError, ok := err.(*file.Error); ok {
+ return nil, fileError.Bind(tree.Source)
+ }
+ return nil, err
+ }
+ }
+
+ program, err := compiler.Compile(tree, config)
+ if err != nil {
+ return nil, err
+ }
+
+ return program, nil
+}
+
+// Run evaluates the given bytecode program.
+func Run(program *vm.Program, env interface{}) (interface{}, error) {
+ return vm.Run(program, env)
+}
+
+// Eval parses, compiles, and runs the given input.
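+//
+// A minimal sketch:
+//
+//	out, err := expr.Eval("a + b", map[string]interface{}{"a": 1, "b": 2})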
+func Eval(input string, env interface{}) (interface{}, error) {
+ if _, ok := env.(Option); ok {
+ return nil, fmt.Errorf("misused expr.Eval: second argument (env) should be passed without expr.Env")
+ }
+
+ program, err := Compile(input)
+ if err != nil {
+ return nil, err
+ }
+
+ output, err := Run(program, env)
+ if err != nil {
+ return nil, err
+ }
+
+ return output, nil
+}
diff --git a/vendor/github.com/antonmedv/expr/file/error.go b/vendor/github.com/antonmedv/expr/file/error.go
new file mode 100644
index 00000000000..1e7e81b947b
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/file/error.go
@@ -0,0 +1,69 @@
+package file
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+type Error struct {
+ Location
+ Message string
+ Snippet string
+ Prev error
+}
+
+func (e *Error) Error() string {
+ return e.format()
+}
+
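+// Bind attaches a source snippet to the error and, when the line is plain
+// ASCII, an indicator line pointing at the error column; wide runes skip the
+// indicator since column alignment would be off.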
+func (e *Error) Bind(source *Source) *Error {
+ if snippet, found := source.Snippet(e.Location.Line); found {
+ snippet := strings.Replace(snippet, "\t", " ", -1)
+ srcLine := "\n | " + snippet
+ var bytes = []byte(snippet)
+ var indLine = "\n | "
+ for i := 0; i < e.Location.Column && len(bytes) > 0; i++ {
+ _, sz := utf8.DecodeRune(bytes)
+ bytes = bytes[sz:]
+ if sz > 1 {
+ goto noind
+ } else {
+ indLine += "."
+ }
+ }
+ if _, sz := utf8.DecodeRune(bytes); sz > 1 {
+ goto noind
+ } else {
+ indLine += "^"
+ }
+ srcLine += indLine
+
+ noind:
+ e.Snippet = srcLine
+ }
+ return e
+}
+
+func (e *Error) Unwrap() error {
+ return e.Prev
+}
+
+func (e *Error) Wrap(err error) {
+ e.Prev = err
+}
+
+func (e *Error) format() string {
+ if e.Location.Empty() {
+ return e.Message
+ }
+ return fmt.Sprintf(
+ "%s (%d:%d)%s",
+ e.Message,
+ e.Line,
+ e.Column+1, // add one to the 0-based column for display
+ e.Snippet,
+ )
+}
diff --git a/vendor/github.com/antonmedv/expr/file/location.go b/vendor/github.com/antonmedv/expr/file/location.go
new file mode 100644
index 00000000000..a92e27f0b1c
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/file/location.go
@@ -0,0 +1,10 @@
+package file
+
+type Location struct {
+ Line int // The 1-based line of the location.
+ Column int // The 0-based column number of the location.
+}
+
+func (l Location) Empty() bool {
+ return l.Column == 0 && l.Line == 0
+}
diff --git a/vendor/github.com/antonmedv/expr/file/source.go b/vendor/github.com/antonmedv/expr/file/source.go
new file mode 100644
index 00000000000..9ee297b5802
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/file/source.go
@@ -0,0 +1,76 @@
+package file
+
+import (
+ "encoding/json"
+ "strings"
+ "unicode/utf8"
+)
+
+type Source struct {
+ contents []rune
+ lineOffsets []int32
+}
+
+func NewSource(contents string) *Source {
+ s := &Source{
+ contents: []rune(contents),
+ }
+ s.updateOffsets()
+ return s
+}
+
+func (s *Source) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.contents)
+}
+
+func (s *Source) UnmarshalJSON(b []byte) error {
+ contents := make([]rune, 0)
+ err := json.Unmarshal(b, &contents)
+ if err != nil {
+ return err
+ }
+
+ s.contents = contents
+ s.updateOffsets()
+ return nil
+}
+
+func (s *Source) Content() string {
+ return string(s.contents)
+}
+
+func (s *Source) Snippet(line int) (string, bool) {
+ charStart, found := s.findLineOffset(line)
+ if !found || len(s.contents) == 0 {
+ return "", false
+ }
+ charEnd, found := s.findLineOffset(line + 1)
+ if found {
+ return string(s.contents[charStart : charEnd-1]), true
+ }
+ return string(s.contents[charStart:]), true
+}
+
+// updateOffsets computes line offsets up front, as they are referred to frequently.
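+// For example, "ab\ncd" yields lineOffsets [3 6]: each entry points one rune
+// past that line's (possibly virtual) trailing newline, so line 2 starts at
+// rune offset 3.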
+func (s *Source) updateOffsets() {
+ lines := strings.Split(string(s.contents), "\n")
+ offsets := make([]int32, len(lines))
+ var offset int32
+ for i, line := range lines {
+ offset = offset + int32(utf8.RuneCountInString(line)) + 1
+ offsets[int32(i)] = offset
+ }
+ s.lineOffsets = offsets
+}
+
+// findLineOffset returns the offset where the (1-indexed) line begins,
+// or false if the line doesn't exist.
+func (s *Source) findLineOffset(line int) (int32, bool) {
+ if line == 1 {
+ return 0, true
+ } else if line > 1 && line <= len(s.lineOffsets) {
+ offset := s.lineOffsets[line-2]
+ return offset, true
+ }
+ return -1, false
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/const_expr.go b/vendor/github.com/antonmedv/expr/optimizer/const_expr.go
new file mode 100644
index 00000000000..7ececb3dbad
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/const_expr.go
@@ -0,0 +1,85 @@
+package optimizer
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ . "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/file"
+)
+
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
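+// constExpr evaluates calls to functions registered as constant
+// (conf.Config.ConstFns) at compile time when every argument is a literal,
+// patching the call node with the computed result.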
+type constExpr struct {
+ applied bool
+ err error
+ fns map[string]reflect.Value
+}
+
+func (c *constExpr) Visit(node *Node) {
+ defer func() {
+ if r := recover(); r != nil {
+ msg := fmt.Sprintf("%v", r)
+ // Reword the message: it is a Go runtime error, but it was raised at compile time.
+ msg = strings.Replace(msg, "runtime error:", "compile error:", 1)
+ c.err = &file.Error{
+ Location: (*node).Location(),
+ Message: msg,
+ }
+ }
+ }()
+
+ patch := func(newNode Node) {
+ c.applied = true
+ Patch(node, newNode)
+ }
+
+ if call, ok := (*node).(*CallNode); ok {
+ if name, ok := call.Callee.(*IdentifierNode); ok {
+ fn, ok := c.fns[name.Value]
+ if ok {
+ in := make([]reflect.Value, len(call.Arguments))
+ for i := 0; i < len(call.Arguments); i++ {
+ arg := call.Arguments[i]
+ var param interface{}
+
+ switch a := arg.(type) {
+ case *NilNode:
+ param = nil
+ case *IntegerNode:
+ param = a.Value
+ case *FloatNode:
+ param = a.Value
+ case *BoolNode:
+ param = a.Value
+ case *StringNode:
+ param = a.Value
+ case *ConstantNode:
+ param = a.Value
+
+ default:
+ return // Const expr optimization not applicable.
+ }
+
+ if param == nil && reflect.TypeOf(param) == nil {
+ // For a nil value with a nil type, take a pointer and dereference it to
+ // get a usable value; otherwise reflect.Call panics on the zero Value.
+ in[i] = reflect.ValueOf(&param).Elem()
+ } else {
+ in[i] = reflect.ValueOf(param)
+ }
+ }
+
+ out := fn.Call(in)
+ value := out[0].Interface()
+ if len(out) == 2 && out[1].Type() == errorType && !out[1].IsNil() {
+ c.err = out[1].Interface().(error)
+ return
+ }
+ constNode := &ConstantNode{Value: value}
+ patch(constNode)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/const_range.go b/vendor/github.com/antonmedv/expr/optimizer/const_range.go
new file mode 100644
index 00000000000..26d6d6f571b
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/const_range.go
@@ -0,0 +1,40 @@
+package optimizer
+
+import (
+ . "github.com/antonmedv/expr/ast"
+)
+
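+// constRange precomputes integer range literals: 1..3 is patched to the
+// constant []int{1, 2, 3}. Empty ranges (max < min) become empty slices, and
+// ranges with more than 1e6 elements are left for the runtime to build.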
+type constRange struct{}
+
+func (*constRange) Visit(node *Node) {
+ switch n := (*node).(type) {
+ case *BinaryNode:
+ if n.Operator == ".." {
+ if min, ok := n.Left.(*IntegerNode); ok {
+ if max, ok := n.Right.(*IntegerNode); ok {
+ size := max.Value - min.Value + 1
+ // If max < min, patch in an empty slice,
+ // as max must be greater than or equal to min.
+ if size < 1 {
+ Patch(node, &ConstantNode{
+ Value: make([]int, 0),
+ })
+ return
+ }
+ // Here the array would be too big. Skip generation
+ // and defer to the runtime's memory budget detection.
+ if size > 1e6 {
+ return
+ }
+ value := make([]int, size)
+ for i := range value {
+ value[i] = min.Value + i
+ }
+ Patch(node, &ConstantNode{
+ Value: value,
+ })
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/fold.go b/vendor/github.com/antonmedv/expr/optimizer/fold.go
new file mode 100644
index 00000000000..b62b2d7ed42
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/fold.go
@@ -0,0 +1,343 @@
+package optimizer
+
+import (
+ "math"
+ "reflect"
+
+ . "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/file"
+)
+
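+// fold performs constant folding: subtrees built purely from literals are
+// replaced by their computed value, e.g. 2 * 3 + 1 folds to 7 and
+// "a" + "b" folds to "ab". The applied flag reports whether a pass changed
+// the tree, so the optimizer can iterate to a fixed point.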
+type fold struct {
+ applied bool
+ err *file.Error
+}
+
+func (fold *fold) Visit(node *Node) {
+ patch := func(newNode Node) {
+ fold.applied = true
+ Patch(node, newNode)
+ }
+ // For IntegerNode the type may have been changed from int to float;
+ // preserve this information by setting the type after the Patch.
+ patchWithType := func(newNode Node, leafType reflect.Type) {
+ patch(newNode)
+ newNode.SetType(leafType)
+ }
+
+ switch n := (*node).(type) {
+ case *UnaryNode:
+ switch n.Operator {
+ case "-":
+ if i, ok := n.Node.(*IntegerNode); ok {
+ patchWithType(&IntegerNode{Value: -i.Value}, n.Node.Type())
+ }
+ if i, ok := n.Node.(*FloatNode); ok {
+ patchWithType(&FloatNode{Value: -i.Value}, n.Node.Type())
+ }
+ case "+":
+ if i, ok := n.Node.(*IntegerNode); ok {
+ patchWithType(&IntegerNode{Value: i.Value}, n.Node.Type())
+ }
+ if i, ok := n.Node.(*FloatNode); ok {
+ patchWithType(&FloatNode{Value: i.Value}, n.Node.Type())
+ }
+ case "!", "not":
+ if a := toBool(n.Node); a != nil {
+ patch(&BoolNode{Value: !a.Value})
+ }
+ }
+
+ case *BinaryNode:
+ switch n.Operator {
+ case "+":
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&IntegerNode{Value: a.Value + b.Value}, a.Type())
+ }
+ }
+ {
+ a := toInteger(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: float64(a.Value) + b.Value}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value + float64(b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value + b.Value}, a.Type())
+ }
+ }
+ {
+ a := toString(n.Left)
+ b := toString(n.Right)
+ if a != nil && b != nil {
+ patch(&StringNode{Value: a.Value + b.Value})
+ }
+ }
+ case "-":
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&IntegerNode{Value: a.Value - b.Value}, a.Type())
+ }
+ }
+ {
+ a := toInteger(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: float64(a.Value) - b.Value}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value - float64(b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value - b.Value}, a.Type())
+ }
+ }
+ case "*":
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&IntegerNode{Value: a.Value * b.Value}, a.Type())
+ }
+ }
+ {
+ a := toInteger(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: float64(a.Value) * b.Value}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value * float64(b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value * b.Value}, a.Type())
+ }
+ }
+ case "/":
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: float64(a.Value) / float64(b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toInteger(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: float64(a.Value) / b.Value}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value / float64(b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: a.Value / b.Value}, a.Type())
+ }
+ }
+ case "%":
+ if a, ok := n.Left.(*IntegerNode); ok {
+ if b, ok := n.Right.(*IntegerNode); ok {
+ if b.Value == 0 {
+ fold.err = &file.Error{
+ Location: (*node).Location(),
+ Message: "integer divide by zero",
+ }
+ return
+ }
+ patch(&IntegerNode{Value: a.Value % b.Value})
+ }
+ }
+ case "**", "^":
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: math.Pow(float64(a.Value), float64(b.Value))}, a.Type())
+ }
+ }
+ {
+ a := toInteger(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: math.Pow(float64(a.Value), b.Value)}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: math.Pow(a.Value, float64(b.Value))}, a.Type())
+ }
+ }
+ {
+ a := toFloat(n.Left)
+ b := toFloat(n.Right)
+ if a != nil && b != nil {
+ patchWithType(&FloatNode{Value: math.Pow(a.Value, b.Value)}, a.Type())
+ }
+ }
+ case "and", "&&":
+ a := toBool(n.Left)
+ b := toBool(n.Right)
+
+ if a != nil && a.Value { // true and x
+ patch(n.Right)
+ } else if b != nil && b.Value { // x and true
+ patch(n.Left)
+ } else if (a != nil && !a.Value) || (b != nil && !b.Value) { // "x and false" or "false and x"
+ patch(&BoolNode{Value: false})
+ }
+ case "or", "||":
+ a := toBool(n.Left)
+ b := toBool(n.Right)
+
+ if a != nil && !a.Value { // false or x
+ patch(n.Right)
+ } else if b != nil && !b.Value { // x or false
+ patch(n.Left)
+ } else if (a != nil && a.Value) || (b != nil && b.Value) { // "x or true" or "true or x"
+ patch(&BoolNode{Value: true})
+ }
+ case "==":
+ {
+ a := toInteger(n.Left)
+ b := toInteger(n.Right)
+ if a != nil && b != nil {
+ patch(&BoolNode{Value: a.Value == b.Value})
+ }
+ }
+ {
+ a := toString(n.Left)
+ b := toString(n.Right)
+ if a != nil && b != nil {
+ patch(&BoolNode{Value: a.Value == b.Value})
+ }
+ }
+ {
+ a := toBool(n.Left)
+ b := toBool(n.Right)
+ if a != nil && b != nil {
+ patch(&BoolNode{Value: a.Value == b.Value})
+ }
+ }
+ }
+
+ case *ArrayNode:
+ if len(n.Nodes) > 0 {
+ for _, a := range n.Nodes {
+ switch a.(type) {
+ case *IntegerNode, *FloatNode, *StringNode, *BoolNode:
+ continue
+ default:
+ return
+ }
+ }
+ value := make([]interface{}, len(n.Nodes))
+ for i, a := range n.Nodes {
+ switch b := a.(type) {
+ case *IntegerNode:
+ value[i] = b.Value
+ case *FloatNode:
+ value[i] = b.Value
+ case *StringNode:
+ value[i] = b.Value
+ case *BoolNode:
+ value[i] = b.Value
+ }
+ }
+ patch(&ConstantNode{Value: value})
+ }
+
+ case *BuiltinNode:
+ switch n.Name {
+ case "filter":
+ if len(n.Arguments) != 2 {
+ return
+ }
+ if base, ok := n.Arguments[0].(*BuiltinNode); ok && base.Name == "filter" {
+ patch(&BuiltinNode{
+ Name: "filter",
+ Arguments: []Node{
+ base.Arguments[0],
+ &BinaryNode{
+ Operator: "&&",
+ Left: base.Arguments[1],
+ Right: n.Arguments[1],
+ },
+ },
+ })
+ }
+ }
+ }
+}
+
+func toString(n Node) *StringNode {
+ switch a := n.(type) {
+ case *StringNode:
+ return a
+ }
+ return nil
+}
+
+func toInteger(n Node) *IntegerNode {
+ switch a := n.(type) {
+ case *IntegerNode:
+ return a
+ }
+ return nil
+}
+
+func toFloat(n Node) *FloatNode {
+ switch a := n.(type) {
+ case *FloatNode:
+ return a
+ }
+ return nil
+}
+
+func toBool(n Node) *BoolNode {
+ switch a := n.(type) {
+ case *BoolNode:
+ return a
+ }
+ return nil
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/in_array.go b/vendor/github.com/antonmedv/expr/optimizer/in_array.go
new file mode 100644
index 00000000000..a51957631c0
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/in_array.go
@@ -0,0 +1,64 @@
+package optimizer
+
+import (
+ "reflect"
+
+ . "github.com/antonmedv/expr/ast"
+)
+
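+// inArray rewrites "in" checks against literal arrays into constant map
+// lookups: x in [1, 2, 3] is patched so the right-hand side becomes
+// map[int]struct{}{1: {}, 2: {}, 3: {}} (likewise for string arrays),
+// turning a linear scan into a hash lookup.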
+type inArray struct{}
+
+func (*inArray) Visit(node *Node) {
+ switch n := (*node).(type) {
+ case *BinaryNode:
+ if n.Operator == "in" {
+ if array, ok := n.Right.(*ArrayNode); ok {
+ if len(array.Nodes) > 0 {
+ t := n.Left.Type()
+ if t == nil || t.Kind() != reflect.Int {
+ // This optimization can only be performed if the left side is of int type,
+ // as the runtime "in" func uses reflect.Map.MapIndex, and the map keys
+ // must have the same type as the checked value.
+ goto string
+ }
+
+ for _, a := range array.Nodes {
+ if _, ok := a.(*IntegerNode); !ok {
+ goto string
+ }
+ }
+ {
+ value := make(map[int]struct{})
+ for _, a := range array.Nodes {
+ value[a.(*IntegerNode).Value] = struct{}{}
+ }
+ Patch(node, &BinaryNode{
+ Operator: n.Operator,
+ Left: n.Left,
+ Right: &ConstantNode{Value: value},
+ })
+ }
+
+ string:
+ for _, a := range array.Nodes {
+ if _, ok := a.(*StringNode); !ok {
+ return
+ }
+ }
+ {
+ value := make(map[string]struct{})
+ for _, a := range array.Nodes {
+ value[a.(*StringNode).Value] = struct{}{}
+ }
+ Patch(node, &BinaryNode{
+ Operator: n.Operator,
+ Left: n.Left,
+ Right: &ConstantNode{Value: value},
+ })
+ }
+
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/in_range.go b/vendor/github.com/antonmedv/expr/optimizer/in_range.go
new file mode 100644
index 00000000000..7895249e0be
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/in_range.go
@@ -0,0 +1,34 @@
+package optimizer
+
+import (
+ . "github.com/antonmedv/expr/ast"
+)
+
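+// inRange rewrites "x in a..b" with integer literal bounds into
+// "x >= a and x <= b", avoiding materializing the range as a slice.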
+type inRange struct{}
+
+func (*inRange) Visit(node *Node) {
+ switch n := (*node).(type) {
+ case *BinaryNode:
+ if n.Operator == "in" {
+ if rng, ok := n.Right.(*BinaryNode); ok && rng.Operator == ".." {
+ if from, ok := rng.Left.(*IntegerNode); ok {
+ if to, ok := rng.Right.(*IntegerNode); ok {
+ Patch(node, &BinaryNode{
+ Operator: "and",
+ Left: &BinaryNode{
+ Operator: ">=",
+ Left: n.Left,
+ Right: from,
+ },
+ Right: &BinaryNode{
+ Operator: "<=",
+ Left: n.Left,
+ Right: to,
+ },
+ })
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/antonmedv/expr/optimizer/optimizer.go b/vendor/github.com/antonmedv/expr/optimizer/optimizer.go
new file mode 100644
index 00000000000..9c97496c8d6
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/optimizer/optimizer.go
@@ -0,0 +1,37 @@
+package optimizer
+
+import (
+ . "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/conf"
+)
+
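+// Optimize walks the AST and applies the optimization passes in order:
+// in-array rewriting, constant folding (repeated until a fixed point or the
+// iteration limit is reached), compile-time evaluation of registered constant
+// functions, then in-range and constant-range rewriting.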
+func Optimize(node *Node, config *conf.Config) error {
+ Walk(node, &inArray{})
+ for limit := 1000; limit >= 0; limit-- {
+ fold := &fold{}
+ Walk(node, fold)
+ if fold.err != nil {
+ return fold.err
+ }
+ if !fold.applied {
+ break
+ }
+ }
+ if config != nil && len(config.ConstFns) > 0 {
+ for limit := 100; limit >= 0; limit-- {
+ constExpr := &constExpr{
+ fns: config.ConstFns,
+ }
+ Walk(node, constExpr)
+ if constExpr.err != nil {
+ return constExpr.err
+ }
+ if !constExpr.applied {
+ break
+ }
+ }
+ }
+ Walk(node, &inRange{})
+ Walk(node, &constRange{})
+ return nil
+}
diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go b/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go
new file mode 100644
index 00000000000..cfb1e8c61b8
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/parser/lexer/lexer.go
@@ -0,0 +1,221 @@
+package lexer
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/antonmedv/expr/file"
+)
+
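+// Lex converts the source into a flat token stream. For example, `foo == 1`
+// lexes to Identifier("foo"), Operator("=="), Number("1"), EOF.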
+func Lex(source *file.Source) ([]Token, error) {
+ l := &lexer{
+ input: source.Content(),
+ tokens: make([]Token, 0),
+ }
+
+ l.loc = file.Location{Line: 1, Column: 0}
+ l.prev = l.loc
+ l.startLoc = l.loc
+
+ for state := root; state != nil; {
+ state = state(l)
+ }
+
+ if l.err != nil {
+ return nil, l.err.Bind(source)
+ }
+
+ return l.tokens, nil
+}
+
+type lexer struct {
+ input string
+ tokens []Token
+ start, end int // current position in input
+ width int // last rune width
+ startLoc file.Location // start location
+ prev, loc file.Location // previous end location, and current end location
+ err *file.Error
+}
+
+const eof rune = -1
+
+func (l *lexer) next() rune {
+ if l.end >= len(l.input) {
+ l.width = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(l.input[l.end:])
+ l.width = w
+ l.end += w
+
+ l.prev = l.loc
+ if r == '\n' {
+ l.loc.Line++
+ l.loc.Column = 0
+ } else {
+ l.loc.Column++
+ }
+
+ return r
+}
+
+func (l *lexer) peek() rune {
+ r := l.next()
+ l.backup()
+ return r
+}
+
+func (l *lexer) backup() {
+ l.end -= l.width
+ l.loc = l.prev
+}
+
+func (l *lexer) emit(t Kind) {
+ l.emitValue(t, l.word())
+}
+
+func (l *lexer) emitValue(t Kind, value string) {
+ l.tokens = append(l.tokens, Token{
+ Location: l.startLoc,
+ Kind: t,
+ Value: value,
+ })
+ l.start = l.end
+ l.startLoc = l.loc
+}
+
+func (l *lexer) emitEOF() {
+ l.tokens = append(l.tokens, Token{
+ Location: l.prev, // Point to previous position for better error messages.
+ Kind: EOF,
+ })
+ l.start = l.end
+ l.startLoc = l.loc
+}
+
+func (l *lexer) skip() {
+ l.start = l.end
+ l.startLoc = l.loc
+}
+
+func (l *lexer) word() string {
+ return l.input[l.start:l.end]
+}
+
+func (l *lexer) ignore() {
+ l.start = l.end
+ l.startLoc = l.loc
+}
+
+func (l *lexer) accept(valid string) bool {
+ if strings.ContainsRune(valid, l.next()) {
+ return true
+ }
+ l.backup()
+ return false
+}
+
+func (l *lexer) acceptRun(valid string) {
+ for strings.ContainsRune(valid, l.next()) {
+ }
+ l.backup()
+}
+
+func (l *lexer) skipSpaces() {
+ r := l.peek()
+ for ; r == ' '; r = l.peek() {
+ l.next()
+ }
+ l.skip()
+}
+
+func (l *lexer) acceptWord(word string) bool {
+ pos, loc, prev := l.end, l.loc, l.prev
+
+ l.skipSpaces()
+
+ for _, ch := range word {
+ if l.next() != ch {
+ l.end, l.loc, l.prev = pos, loc, prev
+ return false
+ }
+ }
+ if r := l.peek(); r != ' ' && r != eof {
+ l.end, l.loc, l.prev = pos, loc, prev
+ return false
+ }
+
+ return true
+}
+
+func (l *lexer) error(format string, args ...interface{}) stateFn {
+ if l.err == nil { // show first error
+ l.err = &file.Error{
+ Location: l.loc,
+ Message: fmt.Sprintf(format, args...),
+ }
+ }
+ return nil
+}
+
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= lower(ch) && lower(ch) <= 'f':
+ return int(lower(ch) - 'a' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
+
+func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter
+
+func (l *lexer) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = l.next()
+ n--
+ }
+ if n > 0 {
+ l.error("invalid char escape")
+ }
+ return ch
+}
+
+func (l *lexer) scanEscape(quote rune) rune {
+ ch := l.next() // read character after '\\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
+ // nothing to do
+ ch = l.next()
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ ch = l.scanDigits(ch, 8, 3)
+ case 'x':
+ ch = l.scanDigits(l.next(), 16, 2)
+ case 'u':
+ ch = l.scanDigits(l.next(), 16, 4)
+ case 'U':
+ ch = l.scanDigits(l.next(), 16, 8)
+ default:
+ l.error("invalid char escape")
+ }
+ return ch
+}
+
+func (l *lexer) scanString(quote rune) (n int) {
+ ch := l.next() // read character after quote
+ for ch != quote {
+ if ch == '\n' || ch == eof {
+ l.error("literal not terminated")
+ return
+ }
+ if ch == '\\' {
+ ch = l.scanEscape(quote)
+ } else {
+ ch = l.next()
+ }
+ n++
+ }
+ return
+}
diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/state.go b/vendor/github.com/antonmedv/expr/parser/lexer/state.go
new file mode 100644
index 00000000000..1212aa3217f
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/parser/lexer/state.go
@@ -0,0 +1,198 @@
+package lexer
+
+import (
+ "strings"
+)
+
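+// stateFn is one state of the lexer's state machine: it consumes input and
+// returns the next state to run, or nil when lexing is complete.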
+type stateFn func(*lexer) stateFn
+
+func root(l *lexer) stateFn {
+ switch r := l.next(); {
+ case r == eof:
+ l.emitEOF()
+ return nil
+ case IsSpace(r):
+ l.ignore()
+ return root
+ case r == '\'' || r == '"':
+ l.scanString(r)
+ str, err := unescape(l.word())
+ if err != nil {
+ l.error("%v", err)
+ }
+ l.emitValue(String, str)
+ case '0' <= r && r <= '9':
+ l.backup()
+ return number
+ case r == '?':
+ return questionMark
+ case r == '/':
+ return slash
+ case strings.ContainsRune("([{", r):
+ l.emit(Bracket)
+ case strings.ContainsRune(")]}", r):
+ l.emit(Bracket)
+ case strings.ContainsRune("#,:%+-^", r): // single rune operator
+ l.emit(Operator)
+ case strings.ContainsRune("&|!=*<>", r): // possible double rune operator
+ l.accept("&|=*")
+ l.emit(Operator)
+ case r == '.':
+ l.backup()
+ return dot
+ case IsAlphaNumeric(r):
+ l.backup()
+ return identifier
+ default:
+ return l.error("unrecognized character: %#U", r)
+ }
+ return root
+}
+
+func number(l *lexer) stateFn {
+ if !l.scanNumber() {
+ return l.error("bad number syntax: %q", l.word())
+ }
+ l.emit(Number)
+ return root
+}
+
+func (l *lexer) scanNumber() bool {
+ digits := "0123456789_"
+ // Is it hex?
+ if l.accept("0") {
+ // Note: Leading 0 does not mean octal in floats.
+ if l.accept("xX") {
+ digits = "0123456789abcdefABCDEF_"
+ } else if l.accept("oO") {
+ digits = "01234567_"
+ } else if l.accept("bB") {
+ digits = "01_"
+ }
+ }
+ l.acceptRun(digits)
+ loc, prev, end := l.loc, l.prev, l.end
+ if l.accept(".") {
+ // Look for the ".." operator: if another dot follows (as in 1..2), it may be a range operator.
+ if l.peek() == '.' {
+ // We can't backup() here, as that would require two backups
+ // and backup() supports only one for now; save and restore
+ // the position manually instead.
+ l.loc, l.prev, l.end = loc, prev, end
+ return true
+ }
+ l.acceptRun(digits)
+ }
+ if l.accept("eE") {
+ l.accept("+-")
+ l.acceptRun(digits)
+ }
+ // Next thing mustn't be alphanumeric.
+ if IsAlphaNumeric(l.peek()) {
+ l.next()
+ return false
+ }
+ return true
+}
+
+func dot(l *lexer) stateFn {
+ l.next()
+ if l.accept("0123456789") {
+ l.backup()
+ return number
+ }
+ l.accept(".")
+ l.emit(Operator)
+ return root
+}
+
+func identifier(l *lexer) stateFn {
+loop:
+ for {
+ switch r := l.next(); {
+ case IsAlphaNumeric(r):
+ // absorb
+ default:
+ l.backup()
+ switch l.word() {
+ case "not":
+ return not
+ case "in", "or", "and", "matches", "contains", "startsWith", "endsWith":
+ l.emit(Operator)
+ default:
+ l.emit(Identifier)
+ }
+ break loop
+ }
+ }
+ return root
+}
+
+func not(l *lexer) stateFn {
+ l.emit(Operator)
+
+ l.skipSpaces()
+
+ pos, loc, prev := l.end, l.loc, l.prev
+
+ // Get the next word.
+ for {
+ r := l.next()
+ if IsAlphaNumeric(r) {
+ // absorb
+ } else {
+ l.backup()
+ break
+ }
+ }
+
+ switch l.word() {
+ case "in", "matches", "contains", "startsWith", "endsWith":
+ l.emit(Operator)
+ default:
+ l.end, l.loc, l.prev = pos, loc, prev
+ }
+ return root
+}
+
+func questionMark(l *lexer) stateFn {
+ l.accept(".?")
+ l.emit(Operator)
+ return root
+}
+
+func slash(l *lexer) stateFn {
+ if l.accept("/") {
+ return singleLineComment
+ }
+ if l.accept("*") {
+ return multiLineComment
+ }
+ l.emit(Operator)
+ return root
+}
+
+func singleLineComment(l *lexer) stateFn {
+ for {
+ r := l.next()
+ if r == eof || r == '\n' {
+ break
+ }
+ }
+ l.ignore()
+ return root
+}
+
+func multiLineComment(l *lexer) stateFn {
+ for {
+ r := l.next()
+ if r == eof {
+ return l.error("unclosed comment")
+ }
+ if r == '*' && l.accept("/") {
+ break
+ }
+ }
+ l.ignore()
+ return root
+}
diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/token.go b/vendor/github.com/antonmedv/expr/parser/lexer/token.go
new file mode 100644
index 00000000000..8917b26dce6
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/parser/lexer/token.go
@@ -0,0 +1,47 @@
+package lexer
+
+import (
+ "fmt"
+
+ "github.com/antonmedv/expr/file"
+)
+
+type Kind string
+
+const (
+ Identifier Kind = "Identifier"
+ Number Kind = "Number"
+ String Kind = "String"
+ Operator Kind = "Operator"
+ Bracket Kind = "Bracket"
+ EOF Kind = "EOF"
+)
+
+type Token struct {
+ file.Location
+ Kind Kind
+ Value string
+}
+
+func (t Token) String() string {
+ if t.Value == "" {
+ return string(t.Kind)
+ }
+ return fmt.Sprintf("%s(%#v)", t.Kind, t.Value)
+}
+
+func (t Token) Is(kind Kind, values ...string) bool {
+ if len(values) == 0 {
+ return kind == t.Kind
+ }
+
+ for _, v := range values {
+ if v == t.Value {
+ goto found
+ }
+ }
+ return false
+
+found:
+ return kind == t.Kind
+}
diff --git a/vendor/github.com/antonmedv/expr/parser/lexer/utils.go b/vendor/github.com/antonmedv/expr/parser/lexer/utils.go
new file mode 100644
index 00000000000..72e3cf20c97
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/parser/lexer/utils.go
@@ -0,0 +1,194 @@
+package lexer
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+func IsSpace(r rune) bool {
+ return unicode.IsSpace(r)
+}
+
+func IsAlphaNumeric(r rune) bool {
+ return IsAlphabetic(r) || unicode.IsDigit(r)
+}
+
+func IsAlphabetic(r rune) bool {
+ return r == '_' || r == '$' || unicode.IsLetter(r)
+}
+
+var (
+ newlineNormalizer = strings.NewReplacer("\r\n", "\n", "\r", "\n")
+)
+
+// unescape takes a quoted string, unquotes, and unescapes it.
+func unescape(value string) (string, error) {
+ // All strings normalize newlines to the \n representation.
+ value = newlineNormalizer.Replace(value)
+ n := len(value)
+
+ // Nothing to unescape / decode.
+ if n < 2 {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+
+ // Quoted string of some form, must have same first and last char.
+ if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') {
+ return value, fmt.Errorf("unable to unescape string")
+ }
+
+ value = value[1 : n-1]
+
+ // The string contains escape characters.
+ // The following logic is adapted from `strconv/quote.go`
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*n/2)
+ for len(value) > 0 {
+ c, multibyte, rest, err := unescapeChar(value)
+ if err != nil {
+ return "", err
+ }
+ value = rest
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ }
+ return string(buf), nil
+}
+
+// unescapeChar takes a string input and returns the following info:
+//
+// value - the escaped unicode rune at the front of the string.
+// multibyte - whether the rune value might require multiple bytes to represent.
+// tail - the remainder of the input string.
+// err - error value, if the character could not be unescaped.
+//
+// When multibyte is true the return value may still fit within a single byte,
+// but a multibyte conversion is attempted which is more expensive than when the
+// value is known to fit within one byte.
+func unescapeChar(s string) (value rune, multibyte bool, tail string, err error) {
+ // 1. Character is not an escape sequence.
+ switch c := s[0]; {
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // 2. Last character is the start of an escape sequence.
+ if len(s) <= 1 {
+ err = fmt.Errorf("unable to unescape string, found '\\' as last character")
+ return
+ }
+
+ c := s[1]
+ s = s[2:]
+ // 3. Common escape sequences shared with Google SQL
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case '\\':
+ value = '\\'
+ case '\'':
+ value = '\''
+ case '"':
+ value = '"'
+ case '`':
+ value = '`'
+ case '?':
+ value = '?'
+
+ // 4. Unicode escape sequences, reproduced from `strconv/quote.go`
+ case 'x', 'X', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if v > utf8.MaxRune {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ value = v
+ multibyte = true
+
+ // 5. Octal escape sequences, must be three digits \[0-3][0-7][0-7]
+ case '0', '1', '2', '3':
+ if len(s) < 2 {
+ err = fmt.Errorf("unable to unescape octal sequence in string")
+ return
+ }
+ v := rune(c - '0')
+ for j := 0; j < 2; j++ {
+ x := s[j]
+ if x < '0' || x > '7' {
+ err = fmt.Errorf("unable to unescape octal sequence in string")
+ return
+ }
+ v = v*8 + rune(x-'0')
+ }
+ if v > utf8.MaxRune {
+ err = fmt.Errorf("unable to unescape string")
+ return
+ }
+ value = v
+ s = s[2:]
+ multibyte = true
+
+ // Unknown escape sequence.
+ default:
+ err = fmt.Errorf("unable to unescape string")
+ }
+
+ tail = s
+ return
+}
+
+func unhex(b byte) (rune, bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return 0, false
+}
diff --git a/vendor/github.com/antonmedv/expr/parser/parser.go b/vendor/github.com/antonmedv/expr/parser/parser.go
new file mode 100644
index 00000000000..fd26fe18bdc
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/parser/parser.go
@@ -0,0 +1,610 @@
+package parser
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ . "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/file"
+ . "github.com/antonmedv/expr/parser/lexer"
+)
+
+type associativity int
+
+const (
+ left associativity = iota + 1
+ right
+)
+
+type operator struct {
+ precedence int
+ associativity associativity
+}
+
+type builtin struct {
+ arity int
+}
+
+var unaryOperators = map[string]operator{
+ "not": {50, left},
+ "!": {50, left},
+ "-": {90, left},
+ "+": {90, left},
+}
+
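+// Higher precedence binds tighter. Only "**" and "^" are right-associative,
+// so "2 ** 3 ** 2" parses as "2 ** (3 ** 2)".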
+var binaryOperators = map[string]operator{
+ "or": {10, left},
+ "||": {10, left},
+ "and": {15, left},
+ "&&": {15, left},
+ "==": {20, left},
+ "!=": {20, left},
+ "<": {20, left},
+ ">": {20, left},
+ ">=": {20, left},
+ "<=": {20, left},
+ "in": {20, left},
+ "matches": {20, left},
+ "contains": {20, left},
+ "startsWith": {20, left},
+ "endsWith": {20, left},
+ "..": {25, left},
+ "+": {30, left},
+ "-": {30, left},
+ "*": {60, left},
+ "/": {60, left},
+ "%": {60, left},
+ "**": {100, right},
+ "^": {100, right},
+ "??": {500, left},
+}
+
+var builtins = map[string]builtin{
+ "all": {2},
+ "none": {2},
+ "any": {2},
+ "one": {2},
+ "filter": {2},
+ "map": {2},
+ "count": {2},
+}
+
+type parser struct {
+ tokens []Token
+ current Token
+ pos int
+ err *file.Error
+ depth int // closure call depth
+}
+
+type Tree struct {
+ Node Node
+ Source *file.Source
+}
+
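+// Parse lexes and parses the input into an AST. For example, "a + b" yields a
+// BinaryNode with Operator "+" whose Left and Right are IdentifierNodes.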
+func Parse(input string) (*Tree, error) {
+ source := file.NewSource(input)
+
+ tokens, err := Lex(source)
+ if err != nil {
+ return nil, err
+ }
+
+ p := &parser{
+ tokens: tokens,
+ current: tokens[0],
+ }
+
+ node := p.parseExpression(0)
+
+ if !p.current.Is(EOF) {
+ p.error("unexpected token %v", p.current)
+ }
+
+ if p.err != nil {
+ return nil, p.err.Bind(source)
+ }
+
+ return &Tree{
+ Node: node,
+ Source: source,
+ }, nil
+}
+
+func (p *parser) error(format string, args ...interface{}) {
+ p.errorAt(p.current, format, args...)
+}
+
+func (p *parser) errorAt(token Token, format string, args ...interface{}) {
+ if p.err == nil { // show first error
+ p.err = &file.Error{
+ Location: token.Location,
+ Message: fmt.Sprintf(format, args...),
+ }
+ }
+}
+
+func (p *parser) next() {
+ p.pos++
+ if p.pos >= len(p.tokens) {
+ p.error("unexpected end of expression")
+ return
+ }
+ p.current = p.tokens[p.pos]
+}
+
+func (p *parser) expect(kind Kind, values ...string) {
+ if p.current.Is(kind, values...) {
+ p.next()
+ return
+ }
+ p.error("unexpected token %v", p.current)
+}
+
+// parse functions
+
+func (p *parser) parseExpression(precedence int) Node {
+ nodeLeft := p.parsePrimary()
+
+ lastOperator := ""
+ opToken := p.current
+ for opToken.Is(Operator) && p.err == nil {
+ negate := false
+ var notToken Token
+
+ if opToken.Is(Operator, "not") {
+ p.next()
+ notToken = p.current
+ negate = true
+ opToken = p.current
+ }
+
+ if op, ok := binaryOperators[opToken.Value]; ok {
+ if op.precedence >= precedence {
+ p.next()
+
+ if lastOperator == "??" && opToken.Value != "??" && !opToken.Is(Bracket, "(") {
+ p.errorAt(opToken, "Operator (%v) and coalesce expressions (??) cannot be mixed. Wrap either by parentheses.", opToken.Value)
+ break
+ }
+
+ var nodeRight Node
+ if op.associativity == left {
+ nodeRight = p.parseExpression(op.precedence + 1)
+ } else {
+ nodeRight = p.parseExpression(op.precedence)
+ }
+
+ nodeLeft = &BinaryNode{
+ Operator: opToken.Value,
+ Left: nodeLeft,
+ Right: nodeRight,
+ }
+ nodeLeft.SetLocation(opToken.Location)
+
+ if negate {
+ nodeLeft = &UnaryNode{
+ Operator: "not",
+ Node: nodeLeft,
+ }
+ nodeLeft.SetLocation(notToken.Location)
+ }
+
+ lastOperator = opToken.Value
+ opToken = p.current
+ continue
+ }
+ }
+ break
+ }
+
+ if precedence == 0 {
+ nodeLeft = p.parseConditionalExpression(nodeLeft)
+ }
+
+ return nodeLeft
+}
+
+func (p *parser) parsePrimary() Node {
+ token := p.current
+
+ if token.Is(Operator) {
+ if op, ok := unaryOperators[token.Value]; ok {
+ p.next()
+ expr := p.parseExpression(op.precedence)
+ node := &UnaryNode{
+ Operator: token.Value,
+ Node: expr,
+ }
+ node.SetLocation(token.Location)
+ return p.parsePostfixExpression(node)
+ }
+ }
+
+ if token.Is(Bracket, "(") {
+ p.next()
+ expr := p.parseExpression(0)
+ p.expect(Bracket, ")") // "an opened parenthesis is not properly closed"
+ return p.parsePostfixExpression(expr)
+ }
+
+ if p.depth > 0 {
+ if token.Is(Operator, "#") || token.Is(Operator, ".") {
+ if token.Is(Operator, "#") {
+ p.next()
+ }
+ node := &PointerNode{}
+ node.SetLocation(token.Location)
+ return p.parsePostfixExpression(node)
+ }
+ } else {
+ if token.Is(Operator, "#") || token.Is(Operator, ".") {
+ p.error("cannot use pointer accessor outside closure")
+ }
+ }
+
+ return p.parsePrimaryExpression()
+}
+
+func (p *parser) parseConditionalExpression(node Node) Node {
+ var expr1, expr2 Node
+ for p.current.Is(Operator, "?") && p.err == nil {
+ p.next()
+
+ if !p.current.Is(Operator, ":") {
+ expr1 = p.parseExpression(0)
+ p.expect(Operator, ":")
+ expr2 = p.parseExpression(0)
+ } else {
+ p.next()
+ expr1 = node
+ expr2 = p.parseExpression(0)
+ }
+
+ node = &ConditionalNode{
+ Cond: node,
+ Exp1: expr1,
+ Exp2: expr2,
+ }
+ }
+ return node
+}
+
+func (p *parser) parsePrimaryExpression() Node {
+ var node Node
+ token := p.current
+
+ switch token.Kind {
+
+ case Identifier:
+ p.next()
+ switch token.Value {
+ case "true":
+ node := &BoolNode{Value: true}
+ node.SetLocation(token.Location)
+ return node
+ case "false":
+ node := &BoolNode{Value: false}
+ node.SetLocation(token.Location)
+ return node
+ case "nil":
+ node := &NilNode{}
+ node.SetLocation(token.Location)
+ return node
+ default:
+ node = p.parseIdentifierExpression(token)
+ }
+
+ case Number:
+ p.next()
+ value := strings.Replace(token.Value, "_", "", -1)
+ if strings.ContainsAny(value, "xX") {
+ number, err := strconv.ParseInt(value, 0, 64)
+ if err != nil {
+ p.error("invalid hex literal: %v", err)
+ }
+ node := &IntegerNode{Value: int(number)}
+ node.SetLocation(token.Location)
+ return node
+ } else if strings.ContainsAny(value, ".eE") {
+ number, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ p.error("invalid float literal: %v", err)
+ }
+ node := &FloatNode{Value: number}
+ node.SetLocation(token.Location)
+ return node
+ } else {
+ number, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ p.error("invalid integer literal: %v", err)
+ }
+ node := &IntegerNode{Value: int(number)}
+ node.SetLocation(token.Location)
+ return node
+ }
+
+ case String:
+ p.next()
+ node := &StringNode{Value: token.Value}
+ node.SetLocation(token.Location)
+ return node
+
+ default:
+ if token.Is(Bracket, "[") {
+ node = p.parseArrayExpression(token)
+ } else if token.Is(Bracket, "{") {
+ node = p.parseMapExpression(token)
+ } else {
+ p.error("unexpected token %v", token)
+ }
+ }
+
+ return p.parsePostfixExpression(node)
+}
+
+func (p *parser) parseIdentifierExpression(token Token) Node {
+ var node Node
+ if p.current.Is(Bracket, "(") {
+ var arguments []Node
+
+ if b, ok := builtins[token.Value]; ok {
+ p.expect(Bracket, "(")
+ // TODO: Add builtins signatures.
+ if b.arity == 1 {
+ arguments = make([]Node, 1)
+ arguments[0] = p.parseExpression(0)
+ } else if b.arity == 2 {
+ arguments = make([]Node, 2)
+ arguments[0] = p.parseExpression(0)
+ p.expect(Operator, ",")
+ arguments[1] = p.parseClosure()
+ }
+ p.expect(Bracket, ")")
+
+ node = &BuiltinNode{
+ Name: token.Value,
+ Arguments: arguments,
+ }
+ node.SetLocation(token.Location)
+ } else {
+ callee := &IdentifierNode{Value: token.Value}
+ callee.SetLocation(token.Location)
+ node = &CallNode{
+ Callee: callee,
+ Arguments: p.parseArguments(),
+ }
+ node.SetLocation(token.Location)
+ }
+ } else {
+ node = &IdentifierNode{Value: token.Value}
+ node.SetLocation(token.Location)
+ }
+ return node
+}
+
+func (p *parser) parseClosure() Node {
+ startToken := p.current
+ expectClosingBracket := false
+ if p.current.Is(Bracket, "{") {
+ p.next()
+ expectClosingBracket = true
+ }
+
+ p.depth++
+ node := p.parseExpression(0)
+ p.depth--
+
+ if expectClosingBracket {
+ p.expect(Bracket, "}")
+ }
+ closure := &ClosureNode{
+ Node: node,
+ }
+ closure.SetLocation(startToken.Location)
+ return closure
+}
+
+func (p *parser) parseArrayExpression(token Token) Node {
+ nodes := make([]Node, 0)
+
+ p.expect(Bracket, "[")
+ for !p.current.Is(Bracket, "]") && p.err == nil {
+ if len(nodes) > 0 {
+ p.expect(Operator, ",")
+ if p.current.Is(Bracket, "]") {
+ goto end
+ }
+ }
+ node := p.parseExpression(0)
+ nodes = append(nodes, node)
+ }
+end:
+ p.expect(Bracket, "]")
+
+ node := &ArrayNode{Nodes: nodes}
+ node.SetLocation(token.Location)
+ return node
+}
+
+func (p *parser) parseMapExpression(token Token) Node {
+ p.expect(Bracket, "{")
+
+ nodes := make([]Node, 0)
+ for !p.current.Is(Bracket, "}") && p.err == nil {
+ if len(nodes) > 0 {
+ p.expect(Operator, ",")
+ if p.current.Is(Bracket, "}") {
+ goto end
+ }
+ if p.current.Is(Operator, ",") {
+ p.error("unexpected token %v", p.current)
+ }
+ }
+
+ var key Node
+ // Map key can be one of:
+ // * number
+ // * string
+ // * identifier, which is equivalent to a string
+ // * expression, which must be enclosed in parentheses -- (1 + 2)
+ if p.current.Is(Number) || p.current.Is(String) || p.current.Is(Identifier) {
+ key = &StringNode{Value: p.current.Value}
+ key.SetLocation(token.Location)
+ p.next()
+ } else if p.current.Is(Bracket, "(") {
+ key = p.parseExpression(0)
+ } else {
+ p.error("a map key must be a quoted string, a number, a identifier, or an expression enclosed in parentheses (unexpected token %v)", p.current)
+ }
+
+ p.expect(Operator, ":")
+
+ node := p.parseExpression(0)
+ pair := &PairNode{Key: key, Value: node}
+ pair.SetLocation(token.Location)
+ nodes = append(nodes, pair)
+ }
+
+end:
+ p.expect(Bracket, "}")
+
+ node := &MapNode{Pairs: nodes}
+ node.SetLocation(token.Location)
+ return node
+}
+
+func (p *parser) parsePostfixExpression(node Node) Node {
+ postfixToken := p.current
+ for (postfixToken.Is(Operator) || postfixToken.Is(Bracket)) && p.err == nil {
+ if postfixToken.Value == "." || postfixToken.Value == "?." {
+ p.next()
+
+ propertyToken := p.current
+ p.next()
+
+ if propertyToken.Kind != Identifier &&
+ // Operators like "not" and "matches" are valid methods or property names.
+ (propertyToken.Kind != Operator || !isValidIdentifier(propertyToken.Value)) {
+ p.error("expected name")
+ }
+
+ property := &StringNode{Value: propertyToken.Value}
+ property.SetLocation(propertyToken.Location)
+
+ chainNode, isChain := node.(*ChainNode)
+ optional := postfixToken.Value == "?."
+
+ if isChain {
+ node = chainNode.Node
+ }
+
+ memberNode := &MemberNode{
+ Node: node,
+ Property: property,
+ Optional: optional,
+ }
+ memberNode.SetLocation(propertyToken.Location)
+
+ if p.current.Is(Bracket, "(") {
+ node = &CallNode{
+ Callee: memberNode,
+ Arguments: p.parseArguments(),
+ }
+ node.SetLocation(propertyToken.Location)
+ } else {
+ node = memberNode
+ }
+
+ if isChain || optional {
+ node = &ChainNode{Node: node}
+ }
+
+ } else if postfixToken.Value == "[" {
+ p.next()
+ var from, to Node
+
+ if p.current.Is(Operator, ":") { // slice without from [:1]
+ p.next()
+
+ if !p.current.Is(Bracket, "]") { // slice without from and to [:]
+ to = p.parseExpression(0)
+ }
+
+ node = &SliceNode{
+ Node: node,
+ To: to,
+ }
+ node.SetLocation(postfixToken.Location)
+ p.expect(Bracket, "]")
+
+ } else {
+
+ from = p.parseExpression(0)
+
+ if p.current.Is(Operator, ":") {
+ p.next()
+
+ if !p.current.Is(Bracket, "]") { // slice without to [1:]
+ to = p.parseExpression(0)
+ }
+
+ node = &SliceNode{
+ Node: node,
+ From: from,
+ To: to,
+ }
+ node.SetLocation(postfixToken.Location)
+ p.expect(Bracket, "]")
+
+ } else {
+ // Slice operator [:] was not found,
+ // it should be just an index node.
+ node = &MemberNode{
+ Node: node,
+ Property: from,
+ }
+ node.SetLocation(postfixToken.Location)
+ p.expect(Bracket, "]")
+ }
+ }
+ } else {
+ break
+ }
+ postfixToken = p.current
+ }
+ return node
+}
+
+func isValidIdentifier(str string) bool {
+ if len(str) == 0 {
+ return false
+ }
+ h, w := utf8.DecodeRuneInString(str)
+ if !IsAlphabetic(h) {
+ return false
+ }
+ for _, r := range str[w:] {
+ if !IsAlphaNumeric(r) {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *parser) parseArguments() []Node {
+ p.expect(Bracket, "(")
+ nodes := make([]Node, 0)
+ for !p.current.Is(Bracket, ")") && p.err == nil {
+ if len(nodes) > 0 {
+ p.expect(Operator, ",")
+ }
+ node := p.parseExpression(0)
+ nodes = append(nodes, node)
+ }
+ p.expect(Bracket, ")")
+
+ return nodes
+}
diff --git a/vendor/github.com/antonmedv/expr/vm/generated.go b/vendor/github.com/antonmedv/expr/vm/generated.go
new file mode 100644
index 00000000000..9fc7883e2df
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/generated.go
@@ -0,0 +1,262 @@
+// Code generated by vm/func_types/main.go. DO NOT EDIT.
+
+package vm
+
+import (
+ "fmt"
+ "time"
+)
+
+var FuncTypes = []interface{}{
+ 1: new(func() time.Duration),
+ 2: new(func() time.Month),
+ 3: new(func() time.Time),
+ 4: new(func() time.Weekday),
+ 5: new(func() []uint8),
+ 6: new(func() []interface{}),
+ 7: new(func() bool),
+ 8: new(func() uint8),
+ 9: new(func() float64),
+ 10: new(func() int),
+ 11: new(func() int64),
+ 12: new(func() interface{}),
+ 13: new(func() map[string]interface{}),
+ 14: new(func() int32),
+ 15: new(func() string),
+ 16: new(func() uint),
+ 17: new(func() uint64),
+ 18: new(func(time.Duration) time.Duration),
+ 19: new(func(time.Duration) time.Time),
+ 20: new(func(time.Time) time.Duration),
+ 21: new(func(time.Time) bool),
+ 22: new(func([]interface{}, string) string),
+ 23: new(func([]string, string) string),
+ 24: new(func(bool) bool),
+ 25: new(func(bool) float64),
+ 26: new(func(bool) int),
+ 27: new(func(bool) string),
+ 28: new(func(float64) bool),
+ 29: new(func(float64) float64),
+ 30: new(func(float64) int),
+ 31: new(func(float64) string),
+ 32: new(func(int) bool),
+ 33: new(func(int) float64),
+ 34: new(func(int) int),
+ 35: new(func(int) string),
+ 36: new(func(int, int) int),
+ 37: new(func(int, int) string),
+ 38: new(func(int64) time.Time),
+ 39: new(func(string) []string),
+ 40: new(func(string) bool),
+ 41: new(func(string) float64),
+ 42: new(func(string) int),
+ 43: new(func(string) string),
+ 44: new(func(string, uint8) int),
+ 45: new(func(string, int) int),
+ 46: new(func(string, int32) int),
+ 47: new(func(string, string) bool),
+ 48: new(func(string, string) string),
+ 49: new(func(interface{}) bool),
+ 50: new(func(interface{}) float64),
+ 51: new(func(interface{}) int),
+ 52: new(func(interface{}) string),
+ 53: new(func(interface{}) interface{}),
+ 54: new(func(interface{}) []interface{}),
+ 55: new(func(interface{}) map[string]interface{}),
+ 56: new(func([]interface{}) interface{}),
+ 57: new(func([]interface{}) []interface{}),
+ 58: new(func([]interface{}) map[string]interface{}),
+ 59: new(func(interface{}, interface{}) bool),
+ 60: new(func(interface{}, interface{}) string),
+ 61: new(func(interface{}, interface{}) interface{}),
+ 62: new(func(interface{}, interface{}) []interface{}),
+}
+
+func (vm *VM) call(fn interface{}, kind int) interface{} {
+ switch kind {
+ case 1:
+ return fn.(func() time.Duration)()
+ case 2:
+ return fn.(func() time.Month)()
+ case 3:
+ return fn.(func() time.Time)()
+ case 4:
+ return fn.(func() time.Weekday)()
+ case 5:
+ return fn.(func() []uint8)()
+ case 6:
+ return fn.(func() []interface{})()
+ case 7:
+ return fn.(func() bool)()
+ case 8:
+ return fn.(func() uint8)()
+ case 9:
+ return fn.(func() float64)()
+ case 10:
+ return fn.(func() int)()
+ case 11:
+ return fn.(func() int64)()
+ case 12:
+ return fn.(func() interface{})()
+ case 13:
+ return fn.(func() map[string]interface{})()
+ case 14:
+ return fn.(func() int32)()
+ case 15:
+ return fn.(func() string)()
+ case 16:
+ return fn.(func() uint)()
+ case 17:
+ return fn.(func() uint64)()
+ case 18:
+ arg1 := vm.pop().(time.Duration)
+ return fn.(func(time.Duration) time.Duration)(arg1)
+ case 19:
+ arg1 := vm.pop().(time.Duration)
+ return fn.(func(time.Duration) time.Time)(arg1)
+ case 20:
+ arg1 := vm.pop().(time.Time)
+ return fn.(func(time.Time) time.Duration)(arg1)
+ case 21:
+ arg1 := vm.pop().(time.Time)
+ return fn.(func(time.Time) bool)(arg1)
+ case 22:
+ arg2 := vm.pop().(string)
+ arg1 := vm.pop().([]interface{})
+ return fn.(func([]interface{}, string) string)(arg1, arg2)
+ case 23:
+ arg2 := vm.pop().(string)
+ arg1 := vm.pop().([]string)
+ return fn.(func([]string, string) string)(arg1, arg2)
+ case 24:
+ arg1 := vm.pop().(bool)
+ return fn.(func(bool) bool)(arg1)
+ case 25:
+ arg1 := vm.pop().(bool)
+ return fn.(func(bool) float64)(arg1)
+ case 26:
+ arg1 := vm.pop().(bool)
+ return fn.(func(bool) int)(arg1)
+ case 27:
+ arg1 := vm.pop().(bool)
+ return fn.(func(bool) string)(arg1)
+ case 28:
+ arg1 := vm.pop().(float64)
+ return fn.(func(float64) bool)(arg1)
+ case 29:
+ arg1 := vm.pop().(float64)
+ return fn.(func(float64) float64)(arg1)
+ case 30:
+ arg1 := vm.pop().(float64)
+ return fn.(func(float64) int)(arg1)
+ case 31:
+ arg1 := vm.pop().(float64)
+ return fn.(func(float64) string)(arg1)
+ case 32:
+ arg1 := vm.pop().(int)
+ return fn.(func(int) bool)(arg1)
+ case 33:
+ arg1 := vm.pop().(int)
+ return fn.(func(int) float64)(arg1)
+ case 34:
+ arg1 := vm.pop().(int)
+ return fn.(func(int) int)(arg1)
+ case 35:
+ arg1 := vm.pop().(int)
+ return fn.(func(int) string)(arg1)
+ case 36:
+ arg2 := vm.pop().(int)
+ arg1 := vm.pop().(int)
+ return fn.(func(int, int) int)(arg1, arg2)
+ case 37:
+ arg2 := vm.pop().(int)
+ arg1 := vm.pop().(int)
+ return fn.(func(int, int) string)(arg1, arg2)
+ case 38:
+ arg1 := vm.pop().(int64)
+ return fn.(func(int64) time.Time)(arg1)
+ case 39:
+ arg1 := vm.pop().(string)
+ return fn.(func(string) []string)(arg1)
+ case 40:
+ arg1 := vm.pop().(string)
+ return fn.(func(string) bool)(arg1)
+ case 41:
+ arg1 := vm.pop().(string)
+ return fn.(func(string) float64)(arg1)
+ case 42:
+ arg1 := vm.pop().(string)
+ return fn.(func(string) int)(arg1)
+ case 43:
+ arg1 := vm.pop().(string)
+ return fn.(func(string) string)(arg1)
+ case 44:
+ arg2 := vm.pop().(uint8)
+ arg1 := vm.pop().(string)
+ return fn.(func(string, uint8) int)(arg1, arg2)
+ case 45:
+ arg2 := vm.pop().(int)
+ arg1 := vm.pop().(string)
+ return fn.(func(string, int) int)(arg1, arg2)
+ case 46:
+ arg2 := vm.pop().(int32)
+ arg1 := vm.pop().(string)
+ return fn.(func(string, int32) int)(arg1, arg2)
+ case 47:
+ arg2 := vm.pop().(string)
+ arg1 := vm.pop().(string)
+ return fn.(func(string, string) bool)(arg1, arg2)
+ case 48:
+ arg2 := vm.pop().(string)
+ arg1 := vm.pop().(string)
+ return fn.(func(string, string) string)(arg1, arg2)
+ case 49:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) bool)(arg1)
+ case 50:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) float64)(arg1)
+ case 51:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) int)(arg1)
+ case 52:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) string)(arg1)
+ case 53:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) interface{})(arg1)
+ case 54:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) []interface{})(arg1)
+ case 55:
+ arg1 := vm.pop()
+ return fn.(func(interface{}) map[string]interface{})(arg1)
+ case 56:
+ arg1 := vm.pop().([]interface{})
+ return fn.(func([]interface{}) interface{})(arg1)
+ case 57:
+ arg1 := vm.pop().([]interface{})
+ return fn.(func([]interface{}) []interface{})(arg1)
+ case 58:
+ arg1 := vm.pop().([]interface{})
+ return fn.(func([]interface{}) map[string]interface{})(arg1)
+ case 59:
+ arg2 := vm.pop()
+ arg1 := vm.pop()
+ return fn.(func(interface{}, interface{}) bool)(arg1, arg2)
+ case 60:
+ arg2 := vm.pop()
+ arg1 := vm.pop()
+ return fn.(func(interface{}, interface{}) string)(arg1, arg2)
+ case 61:
+ arg2 := vm.pop()
+ arg1 := vm.pop()
+ return fn.(func(interface{}, interface{}) interface{})(arg1, arg2)
+ case 62:
+ arg2 := vm.pop()
+ arg1 := vm.pop()
+ return fn.(func(interface{}, interface{}) []interface{})(arg1, arg2)
+
+ }
+ panic(fmt.Sprintf("unknown function kind (%v)", kind))
+}
diff --git a/vendor/github.com/antonmedv/expr/vm/opcodes.go b/vendor/github.com/antonmedv/expr/vm/opcodes.go
new file mode 100644
index 00000000000..b3117e73c2b
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/opcodes.go
@@ -0,0 +1,71 @@
+package vm
+
+type Opcode byte
+
+const (
+ OpPush Opcode = iota
+ OpPushInt
+ OpPop
+ OpLoadConst
+ OpLoadField
+ OpLoadFast
+ OpLoadMethod
+ OpLoadFunc
+ OpFetch
+ OpFetchField
+ OpMethod
+ OpTrue
+ OpFalse
+ OpNil
+ OpNegate
+ OpNot
+ OpEqual
+ OpEqualInt
+ OpEqualString
+ OpJump
+ OpJumpIfTrue
+ OpJumpIfFalse
+ OpJumpIfNil
+ OpJumpIfNotNil
+ OpJumpIfEnd
+ OpJumpBackward
+ OpIn
+ OpLess
+ OpMore
+ OpLessOrEqual
+ OpMoreOrEqual
+ OpAdd
+ OpSubtract
+ OpMultiply
+ OpDivide
+ OpModulo
+ OpExponent
+ OpRange
+ OpMatches
+ OpMatchesConst
+ OpContains
+ OpStartsWith
+ OpEndsWith
+ OpSlice
+ OpCall
+ OpCall0
+ OpCall1
+ OpCall2
+ OpCall3
+ OpCallN
+ OpCallFast
+ OpCallTyped
+ OpBuiltin
+ OpArray
+ OpMap
+ OpLen
+ OpCast
+ OpDeref
+ OpIncrementIt
+ OpIncrementCount
+ OpGetCount
+ OpGetLen
+ OpPointer
+ OpBegin
+ OpEnd // This opcode must be at the end of this list.
+)
diff --git a/vendor/github.com/antonmedv/expr/vm/program.go b/vendor/github.com/antonmedv/expr/vm/program.go
new file mode 100644
index 00000000000..7a417903c3b
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/program.go
@@ -0,0 +1,272 @@
+package vm
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/antonmedv/expr/ast"
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/file"
+ "github.com/antonmedv/expr/vm/runtime"
+)
+
+type Program struct {
+ Node ast.Node
+ Source *file.Source
+ Locations []file.Location
+ Constants []interface{}
+ Bytecode []Opcode
+ Arguments []int
+ Functions []Function
+}
+
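+// Disassemble renders the program's bytecode as a human-readable listing,
+// one instruction per line: its position, opcode name and, where relevant,
+// the argument and the resolved constant or jump target.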
+func (program *Program) Disassemble() string {
+ out := ""
+ ip := 0
+ for ip < len(program.Bytecode) {
+ pp := ip
+ op := program.Bytecode[ip]
+ arg := program.Arguments[ip]
+ ip += 1
+
+ code := func(label string) {
+ out += fmt.Sprintf("%v\t%v\n", pp, label)
+ }
+ jump := func(label string) {
+ out += fmt.Sprintf("%v\t%v\t%v\t(%v)\n", pp, label, arg, ip+arg)
+ }
+ jumpBack := func(label string) {
+ out += fmt.Sprintf("%v\t%v\t%v\t(%v)\n", pp, label, arg, ip-arg)
+ }
+ argument := func(label string) {
+ out += fmt.Sprintf("%v\t%v\t%v\n", pp, label, arg)
+ }
+ constant := func(label string) {
+ var c interface{}
+ if arg < len(program.Constants) {
+ c = program.Constants[arg]
+ } else {
+ c = "out of range"
+ }
+ if r, ok := c.(*regexp.Regexp); ok {
+ c = r.String()
+ }
+ if field, ok := c.(*runtime.Field); ok {
+ c = fmt.Sprintf("{%v %v}", strings.Join(field.Path, "."), field.Index)
+ }
+ if method, ok := c.(*runtime.Method); ok {
+ c = fmt.Sprintf("{%v %v}", method.Name, method.Index)
+ }
+ out += fmt.Sprintf("%v\t%v\t%v\t%v\n", pp, label, arg, c)
+ }
+ builtIn := func(label string) {
+ f, ok := builtin.Builtins[arg]
+ if !ok {
+ panic(fmt.Sprintf("unknown builtin %v", arg))
+ }
+ out += fmt.Sprintf("%v\t%v\t%v\n", pp, "OpBuiltin", f.Name)
+ }
+
+ switch op {
+ case OpPush:
+ constant("OpPush")
+
+ case OpPushInt:
+ argument("OpPushInt")
+
+ case OpPop:
+ code("OpPop")
+
+ case OpLoadConst:
+ constant("OpLoadConst")
+
+ case OpLoadField:
+ constant("OpLoadField")
+
+ case OpLoadFast:
+ constant("OpLoadFast")
+
+ case OpLoadMethod:
+ constant("OpLoadMethod")
+
+ case OpLoadFunc:
+ argument("OpLoadFunc")
+
+ case OpFetch:
+ code("OpFetch")
+
+ case OpFetchField:
+ constant("OpFetchField")
+
+ case OpMethod:
+ constant("OpMethod")
+
+ case OpTrue:
+ code("OpTrue")
+
+ case OpFalse:
+ code("OpFalse")
+
+ case OpNil:
+ code("OpNil")
+
+ case OpNegate:
+ code("OpNegate")
+
+ case OpNot:
+ code("OpNot")
+
+ case OpEqual:
+ code("OpEqual")
+
+ case OpEqualInt:
+ code("OpEqualInt")
+
+ case OpEqualString:
+ code("OpEqualString")
+
+ case OpJump:
+ jump("OpJump")
+
+ case OpJumpIfTrue:
+ jump("OpJumpIfTrue")
+
+ case OpJumpIfFalse:
+ jump("OpJumpIfFalse")
+
+ case OpJumpIfNil:
+ jump("OpJumpIfNil")
+
+ case OpJumpIfNotNil:
+ jump("OpJumpIfNotNil")
+
+ case OpJumpIfEnd:
+ jump("OpJumpIfEnd")
+
+ case OpJumpBackward:
+ jumpBack("OpJumpBackward")
+
+ case OpIn:
+ code("OpIn")
+
+ case OpLess:
+ code("OpLess")
+
+ case OpMore:
+ code("OpMore")
+
+ case OpLessOrEqual:
+ code("OpLessOrEqual")
+
+ case OpMoreOrEqual:
+ code("OpMoreOrEqual")
+
+ case OpAdd:
+ code("OpAdd")
+
+ case OpSubtract:
+ code("OpSubtract")
+
+ case OpMultiply:
+ code("OpMultiply")
+
+ case OpDivide:
+ code("OpDivide")
+
+ case OpModulo:
+ code("OpModulo")
+
+ case OpExponent:
+ code("OpExponent")
+
+ case OpRange:
+ code("OpRange")
+
+ case OpMatches:
+ code("OpMatches")
+
+ case OpMatchesConst:
+ constant("OpMatchesConst")
+
+ case OpContains:
+ code("OpContains")
+
+ case OpStartsWith:
+ code("OpStartsWith")
+
+ case OpEndsWith:
+ code("OpEndsWith")
+
+ case OpSlice:
+ code("OpSlice")
+
+ case OpCall:
+ argument("OpCall")
+
+ case OpCall0:
+ argument("OpCall0")
+
+ case OpCall1:
+ argument("OpCall1")
+
+ case OpCall2:
+ argument("OpCall2")
+
+ case OpCall3:
+ argument("OpCall3")
+
+ case OpCallN:
+ argument("OpCallN")
+
+ case OpCallFast:
+ argument("OpCallFast")
+
+ case OpCallTyped:
+ argument("OpCallTyped")
+
+ case OpBuiltin:
+ builtIn("OpBuiltin")
+
+ case OpArray:
+ code("OpArray")
+
+ case OpMap:
+ code("OpMap")
+
+ case OpLen:
+ code("OpLen")
+
+ case OpCast:
+ argument("OpCast")
+
+ case OpDeref:
+ code("OpDeref")
+
+ case OpIncrementIt:
+ code("OpIncrementIt")
+
+ case OpIncrementCount:
+ code("OpIncrementCount")
+
+ case OpGetCount:
+ code("OpGetCount")
+
+ case OpGetLen:
+ code("OpGetLen")
+
+ case OpPointer:
+ code("OpPointer")
+
+ case OpBegin:
+ code("OpBegin")
+
+ case OpEnd:
+ code("OpEnd")
+
+ default:
+ out += fmt.Sprintf("%v\t%#x\n", ip, op)
+ }
+ }
+ return out
+}
diff --git a/vendor/github.com/antonmedv/expr/vm/runtime/generated.go b/vendor/github.com/antonmedv/expr/vm/runtime/generated.go
new file mode 100644
index 00000000000..09a4a200ed2
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/runtime/generated.go
@@ -0,0 +1,3288 @@
+// Code generated by vm/runtime/helpers/main.go. DO NOT EDIT.
+
+package runtime
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+)
+
+func Equal(a, b interface{}) bool {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) == int(y)
+ case uint8:
+ return int(x) == int(y)
+ case uint16:
+ return int(x) == int(y)
+ case uint32:
+ return int(x) == int(y)
+ case uint64:
+ return int(x) == int(y)
+ case int:
+ return int(x) == int(y)
+ case int8:
+ return int(x) == int(y)
+ case int16:
+ return int(x) == int(y)
+ case int32:
+ return int(x) == int(y)
+ case int64:
+ return int(x) == int(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) == float64(y)
+ case uint8:
+ return float64(x) == float64(y)
+ case uint16:
+ return float64(x) == float64(y)
+ case uint32:
+ return float64(x) == float64(y)
+ case uint64:
+ return float64(x) == float64(y)
+ case int:
+ return float64(x) == float64(y)
+ case int8:
+ return float64(x) == float64(y)
+ case int16:
+ return float64(x) == float64(y)
+ case int32:
+ return float64(x) == float64(y)
+ case int64:
+ return float64(x) == float64(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) == float64(y)
+ case uint8:
+ return float64(x) == float64(y)
+ case uint16:
+ return float64(x) == float64(y)
+ case uint32:
+ return float64(x) == float64(y)
+ case uint64:
+ return float64(x) == float64(y)
+ case int:
+ return float64(x) == float64(y)
+ case int8:
+ return float64(x) == float64(y)
+ case int16:
+ return float64(x) == float64(y)
+ case int32:
+ return float64(x) == float64(y)
+ case int64:
+ return float64(x) == float64(y)
+ case float32:
+ return float64(x) == float64(y)
+ case float64:
+ return float64(x) == float64(y)
+ }
+ case string:
+ switch y := b.(type) {
+ case string:
+ return x == y
+ }
+ case time.Time:
+ switch y := b.(type) {
+ case time.Time:
+ return x.Equal(y)
+ }
+ }
+ if IsNil(a) && IsNil(b) {
+ return true
+ }
+ return reflect.DeepEqual(a, b)
+}
+
+func Less(a, b interface{}) bool {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) < int(y)
+ case uint8:
+ return int(x) < int(y)
+ case uint16:
+ return int(x) < int(y)
+ case uint32:
+ return int(x) < int(y)
+ case uint64:
+ return int(x) < int(y)
+ case int:
+ return int(x) < int(y)
+ case int8:
+ return int(x) < int(y)
+ case int16:
+ return int(x) < int(y)
+ case int32:
+ return int(x) < int(y)
+ case int64:
+ return int(x) < int(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) < float64(y)
+ case uint8:
+ return float64(x) < float64(y)
+ case uint16:
+ return float64(x) < float64(y)
+ case uint32:
+ return float64(x) < float64(y)
+ case uint64:
+ return float64(x) < float64(y)
+ case int:
+ return float64(x) < float64(y)
+ case int8:
+ return float64(x) < float64(y)
+ case int16:
+ return float64(x) < float64(y)
+ case int32:
+ return float64(x) < float64(y)
+ case int64:
+ return float64(x) < float64(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) < float64(y)
+ case uint8:
+ return float64(x) < float64(y)
+ case uint16:
+ return float64(x) < float64(y)
+ case uint32:
+ return float64(x) < float64(y)
+ case uint64:
+ return float64(x) < float64(y)
+ case int:
+ return float64(x) < float64(y)
+ case int8:
+ return float64(x) < float64(y)
+ case int16:
+ return float64(x) < float64(y)
+ case int32:
+ return float64(x) < float64(y)
+ case int64:
+ return float64(x) < float64(y)
+ case float32:
+ return float64(x) < float64(y)
+ case float64:
+ return float64(x) < float64(y)
+ }
+ case string:
+ switch y := b.(type) {
+ case string:
+ return x < y
+ }
+ case time.Time:
+ switch y := b.(type) {
+ case time.Time:
+ return x.Before(y)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T < %T", a, b))
+}
+
+func More(a, b interface{}) bool {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) > int(y)
+ case uint8:
+ return int(x) > int(y)
+ case uint16:
+ return int(x) > int(y)
+ case uint32:
+ return int(x) > int(y)
+ case uint64:
+ return int(x) > int(y)
+ case int:
+ return int(x) > int(y)
+ case int8:
+ return int(x) > int(y)
+ case int16:
+ return int(x) > int(y)
+ case int32:
+ return int(x) > int(y)
+ case int64:
+ return int(x) > int(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) > int(y)
+ case uint8:
+ return int(x) > int(y)
+ case uint16:
+ return int(x) > int(y)
+ case uint32:
+ return int(x) > int(y)
+ case uint64:
+ return int(x) > int(y)
+ case int:
+ return int(x) > int(y)
+ case int8:
+ return int(x) > int(y)
+ case int16:
+ return int(x) > int(y)
+ case int32:
+ return int(x) > int(y)
+ case int64:
+ return int(x) > int(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) > int(y)
+ case uint8:
+ return int(x) > int(y)
+ case uint16:
+ return int(x) > int(y)
+ case uint32:
+ return int(x) > int(y)
+ case uint64:
+ return int(x) > int(y)
+ case int:
+ return int(x) > int(y)
+ case int8:
+ return int(x) > int(y)
+ case int16:
+ return int(x) > int(y)
+ case int32:
+ return int(x) > int(y)
+ case int64:
+ return int(x) > int(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) > int(y)
+ case uint8:
+ return int(x) > int(y)
+ case uint16:
+ return int(x) > int(y)
+ case uint32:
+ return int(x) > int(y)
+ case uint64:
+ return int(x) > int(y)
+ case int:
+ return int(x) > int(y)
+ case int8:
+ return int(x) > int(y)
+ case int16:
+ return int(x) > int(y)
+ case int32:
+ return int(x) > int(y)
+ case int64:
+ return int(x) > int(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) > int(y)
+ case uint8:
+ return int(x) > int(y)
+ case uint16:
+ return int(x) > int(y)
+ case uint32:
+ return int(x) > int(y)
+ case uint64:
+ return int(x) > int(y)
+ case int:
+ return int(x) > int(y)
+ case int8:
+ return int(x) > int(y)
+ case int16:
+ return int(x) > int(y)
+ case int32:
+ return int(x) > int(y)
+ case int64:
+ return int(x) > int(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) > int(y)
+ case uint8:
+ return int(x) > int(y)
+ case uint16:
+ return int(x) > int(y)
+ case uint32:
+ return int(x) > int(y)
+ case uint64:
+ return int(x) > int(y)
+ case int:
+ return int(x) > int(y)
+ case int8:
+ return int(x) > int(y)
+ case int16:
+ return int(x) > int(y)
+ case int32:
+ return int(x) > int(y)
+ case int64:
+ return int(x) > int(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) > int(y)
+ case uint8:
+ return int(x) > int(y)
+ case uint16:
+ return int(x) > int(y)
+ case uint32:
+ return int(x) > int(y)
+ case uint64:
+ return int(x) > int(y)
+ case int:
+ return int(x) > int(y)
+ case int8:
+ return int(x) > int(y)
+ case int16:
+ return int(x) > int(y)
+ case int32:
+ return int(x) > int(y)
+ case int64:
+ return int(x) > int(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) > int(y)
+ case uint8:
+ return int(x) > int(y)
+ case uint16:
+ return int(x) > int(y)
+ case uint32:
+ return int(x) > int(y)
+ case uint64:
+ return int(x) > int(y)
+ case int:
+ return int(x) > int(y)
+ case int8:
+ return int(x) > int(y)
+ case int16:
+ return int(x) > int(y)
+ case int32:
+ return int(x) > int(y)
+ case int64:
+ return int(x) > int(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) > int(y)
+ case uint8:
+ return int(x) > int(y)
+ case uint16:
+ return int(x) > int(y)
+ case uint32:
+ return int(x) > int(y)
+ case uint64:
+ return int(x) > int(y)
+ case int:
+ return int(x) > int(y)
+ case int8:
+ return int(x) > int(y)
+ case int16:
+ return int(x) > int(y)
+ case int32:
+ return int(x) > int(y)
+ case int64:
+ return int(x) > int(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) > int(y)
+ case uint8:
+ return int(x) > int(y)
+ case uint16:
+ return int(x) > int(y)
+ case uint32:
+ return int(x) > int(y)
+ case uint64:
+ return int(x) > int(y)
+ case int:
+ return int(x) > int(y)
+ case int8:
+ return int(x) > int(y)
+ case int16:
+ return int(x) > int(y)
+ case int32:
+ return int(x) > int(y)
+ case int64:
+ return int(x) > int(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) > float64(y)
+ case uint8:
+ return float64(x) > float64(y)
+ case uint16:
+ return float64(x) > float64(y)
+ case uint32:
+ return float64(x) > float64(y)
+ case uint64:
+ return float64(x) > float64(y)
+ case int:
+ return float64(x) > float64(y)
+ case int8:
+ return float64(x) > float64(y)
+ case int16:
+ return float64(x) > float64(y)
+ case int32:
+ return float64(x) > float64(y)
+ case int64:
+ return float64(x) > float64(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) > float64(y)
+ case uint8:
+ return float64(x) > float64(y)
+ case uint16:
+ return float64(x) > float64(y)
+ case uint32:
+ return float64(x) > float64(y)
+ case uint64:
+ return float64(x) > float64(y)
+ case int:
+ return float64(x) > float64(y)
+ case int8:
+ return float64(x) > float64(y)
+ case int16:
+ return float64(x) > float64(y)
+ case int32:
+ return float64(x) > float64(y)
+ case int64:
+ return float64(x) > float64(y)
+ case float32:
+ return float64(x) > float64(y)
+ case float64:
+ return float64(x) > float64(y)
+ }
+ case string:
+ switch y := b.(type) {
+ case string:
+ return x > y
+ }
+ case time.Time:
+ switch y := b.(type) {
+ case time.Time:
+ return x.After(y)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T > %T", a, b))
+}
+
+func LessOrEqual(a, b interface{}) bool {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) <= int(y)
+ case uint8:
+ return int(x) <= int(y)
+ case uint16:
+ return int(x) <= int(y)
+ case uint32:
+ return int(x) <= int(y)
+ case uint64:
+ return int(x) <= int(y)
+ case int:
+ return int(x) <= int(y)
+ case int8:
+ return int(x) <= int(y)
+ case int16:
+ return int(x) <= int(y)
+ case int32:
+ return int(x) <= int(y)
+ case int64:
+ return int(x) <= int(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) <= int(y)
+ case uint8:
+ return int(x) <= int(y)
+ case uint16:
+ return int(x) <= int(y)
+ case uint32:
+ return int(x) <= int(y)
+ case uint64:
+ return int(x) <= int(y)
+ case int:
+ return int(x) <= int(y)
+ case int8:
+ return int(x) <= int(y)
+ case int16:
+ return int(x) <= int(y)
+ case int32:
+ return int(x) <= int(y)
+ case int64:
+ return int(x) <= int(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) <= int(y)
+ case uint8:
+ return int(x) <= int(y)
+ case uint16:
+ return int(x) <= int(y)
+ case uint32:
+ return int(x) <= int(y)
+ case uint64:
+ return int(x) <= int(y)
+ case int:
+ return int(x) <= int(y)
+ case int8:
+ return int(x) <= int(y)
+ case int16:
+ return int(x) <= int(y)
+ case int32:
+ return int(x) <= int(y)
+ case int64:
+ return int(x) <= int(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) <= int(y)
+ case uint8:
+ return int(x) <= int(y)
+ case uint16:
+ return int(x) <= int(y)
+ case uint32:
+ return int(x) <= int(y)
+ case uint64:
+ return int(x) <= int(y)
+ case int:
+ return int(x) <= int(y)
+ case int8:
+ return int(x) <= int(y)
+ case int16:
+ return int(x) <= int(y)
+ case int32:
+ return int(x) <= int(y)
+ case int64:
+ return int(x) <= int(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) <= int(y)
+ case uint8:
+ return int(x) <= int(y)
+ case uint16:
+ return int(x) <= int(y)
+ case uint32:
+ return int(x) <= int(y)
+ case uint64:
+ return int(x) <= int(y)
+ case int:
+ return int(x) <= int(y)
+ case int8:
+ return int(x) <= int(y)
+ case int16:
+ return int(x) <= int(y)
+ case int32:
+ return int(x) <= int(y)
+ case int64:
+ return int(x) <= int(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) <= int(y)
+ case uint8:
+ return int(x) <= int(y)
+ case uint16:
+ return int(x) <= int(y)
+ case uint32:
+ return int(x) <= int(y)
+ case uint64:
+ return int(x) <= int(y)
+ case int:
+ return int(x) <= int(y)
+ case int8:
+ return int(x) <= int(y)
+ case int16:
+ return int(x) <= int(y)
+ case int32:
+ return int(x) <= int(y)
+ case int64:
+ return int(x) <= int(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) <= int(y)
+ case uint8:
+ return int(x) <= int(y)
+ case uint16:
+ return int(x) <= int(y)
+ case uint32:
+ return int(x) <= int(y)
+ case uint64:
+ return int(x) <= int(y)
+ case int:
+ return int(x) <= int(y)
+ case int8:
+ return int(x) <= int(y)
+ case int16:
+ return int(x) <= int(y)
+ case int32:
+ return int(x) <= int(y)
+ case int64:
+ return int(x) <= int(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) <= int(y)
+ case uint8:
+ return int(x) <= int(y)
+ case uint16:
+ return int(x) <= int(y)
+ case uint32:
+ return int(x) <= int(y)
+ case uint64:
+ return int(x) <= int(y)
+ case int:
+ return int(x) <= int(y)
+ case int8:
+ return int(x) <= int(y)
+ case int16:
+ return int(x) <= int(y)
+ case int32:
+ return int(x) <= int(y)
+ case int64:
+ return int(x) <= int(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) <= int(y)
+ case uint8:
+ return int(x) <= int(y)
+ case uint16:
+ return int(x) <= int(y)
+ case uint32:
+ return int(x) <= int(y)
+ case uint64:
+ return int(x) <= int(y)
+ case int:
+ return int(x) <= int(y)
+ case int8:
+ return int(x) <= int(y)
+ case int16:
+ return int(x) <= int(y)
+ case int32:
+ return int(x) <= int(y)
+ case int64:
+ return int(x) <= int(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) <= int(y)
+ case uint8:
+ return int(x) <= int(y)
+ case uint16:
+ return int(x) <= int(y)
+ case uint32:
+ return int(x) <= int(y)
+ case uint64:
+ return int(x) <= int(y)
+ case int:
+ return int(x) <= int(y)
+ case int8:
+ return int(x) <= int(y)
+ case int16:
+ return int(x) <= int(y)
+ case int32:
+ return int(x) <= int(y)
+ case int64:
+ return int(x) <= int(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) <= float64(y)
+ case uint8:
+ return float64(x) <= float64(y)
+ case uint16:
+ return float64(x) <= float64(y)
+ case uint32:
+ return float64(x) <= float64(y)
+ case uint64:
+ return float64(x) <= float64(y)
+ case int:
+ return float64(x) <= float64(y)
+ case int8:
+ return float64(x) <= float64(y)
+ case int16:
+ return float64(x) <= float64(y)
+ case int32:
+ return float64(x) <= float64(y)
+ case int64:
+ return float64(x) <= float64(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) <= float64(y)
+ case uint8:
+ return float64(x) <= float64(y)
+ case uint16:
+ return float64(x) <= float64(y)
+ case uint32:
+ return float64(x) <= float64(y)
+ case uint64:
+ return float64(x) <= float64(y)
+ case int:
+ return float64(x) <= float64(y)
+ case int8:
+ return float64(x) <= float64(y)
+ case int16:
+ return float64(x) <= float64(y)
+ case int32:
+ return float64(x) <= float64(y)
+ case int64:
+ return float64(x) <= float64(y)
+ case float32:
+ return float64(x) <= float64(y)
+ case float64:
+ return float64(x) <= float64(y)
+ }
+ case string:
+ switch y := b.(type) {
+ case string:
+ return x <= y
+ }
+ case time.Time:
+ switch y := b.(type) {
+ case time.Time:
+ return x.Before(y) || x.Equal(y)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T <= %T", a, b))
+}
+
+func MoreOrEqual(a, b interface{}) bool {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) >= int(y)
+ case uint8:
+ return int(x) >= int(y)
+ case uint16:
+ return int(x) >= int(y)
+ case uint32:
+ return int(x) >= int(y)
+ case uint64:
+ return int(x) >= int(y)
+ case int:
+ return int(x) >= int(y)
+ case int8:
+ return int(x) >= int(y)
+ case int16:
+ return int(x) >= int(y)
+ case int32:
+ return int(x) >= int(y)
+ case int64:
+ return int(x) >= int(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) >= int(y)
+ case uint8:
+ return int(x) >= int(y)
+ case uint16:
+ return int(x) >= int(y)
+ case uint32:
+ return int(x) >= int(y)
+ case uint64:
+ return int(x) >= int(y)
+ case int:
+ return int(x) >= int(y)
+ case int8:
+ return int(x) >= int(y)
+ case int16:
+ return int(x) >= int(y)
+ case int32:
+ return int(x) >= int(y)
+ case int64:
+ return int(x) >= int(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) >= int(y)
+ case uint8:
+ return int(x) >= int(y)
+ case uint16:
+ return int(x) >= int(y)
+ case uint32:
+ return int(x) >= int(y)
+ case uint64:
+ return int(x) >= int(y)
+ case int:
+ return int(x) >= int(y)
+ case int8:
+ return int(x) >= int(y)
+ case int16:
+ return int(x) >= int(y)
+ case int32:
+ return int(x) >= int(y)
+ case int64:
+ return int(x) >= int(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) >= int(y)
+ case uint8:
+ return int(x) >= int(y)
+ case uint16:
+ return int(x) >= int(y)
+ case uint32:
+ return int(x) >= int(y)
+ case uint64:
+ return int(x) >= int(y)
+ case int:
+ return int(x) >= int(y)
+ case int8:
+ return int(x) >= int(y)
+ case int16:
+ return int(x) >= int(y)
+ case int32:
+ return int(x) >= int(y)
+ case int64:
+ return int(x) >= int(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) >= int(y)
+ case uint8:
+ return int(x) >= int(y)
+ case uint16:
+ return int(x) >= int(y)
+ case uint32:
+ return int(x) >= int(y)
+ case uint64:
+ return int(x) >= int(y)
+ case int:
+ return int(x) >= int(y)
+ case int8:
+ return int(x) >= int(y)
+ case int16:
+ return int(x) >= int(y)
+ case int32:
+ return int(x) >= int(y)
+ case int64:
+ return int(x) >= int(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) >= int(y)
+ case uint8:
+ return int(x) >= int(y)
+ case uint16:
+ return int(x) >= int(y)
+ case uint32:
+ return int(x) >= int(y)
+ case uint64:
+ return int(x) >= int(y)
+ case int:
+ return int(x) >= int(y)
+ case int8:
+ return int(x) >= int(y)
+ case int16:
+ return int(x) >= int(y)
+ case int32:
+ return int(x) >= int(y)
+ case int64:
+ return int(x) >= int(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) >= int(y)
+ case uint8:
+ return int(x) >= int(y)
+ case uint16:
+ return int(x) >= int(y)
+ case uint32:
+ return int(x) >= int(y)
+ case uint64:
+ return int(x) >= int(y)
+ case int:
+ return int(x) >= int(y)
+ case int8:
+ return int(x) >= int(y)
+ case int16:
+ return int(x) >= int(y)
+ case int32:
+ return int(x) >= int(y)
+ case int64:
+ return int(x) >= int(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) >= int(y)
+ case uint8:
+ return int(x) >= int(y)
+ case uint16:
+ return int(x) >= int(y)
+ case uint32:
+ return int(x) >= int(y)
+ case uint64:
+ return int(x) >= int(y)
+ case int:
+ return int(x) >= int(y)
+ case int8:
+ return int(x) >= int(y)
+ case int16:
+ return int(x) >= int(y)
+ case int32:
+ return int(x) >= int(y)
+ case int64:
+ return int(x) >= int(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) >= int(y)
+ case uint8:
+ return int(x) >= int(y)
+ case uint16:
+ return int(x) >= int(y)
+ case uint32:
+ return int(x) >= int(y)
+ case uint64:
+ return int(x) >= int(y)
+ case int:
+ return int(x) >= int(y)
+ case int8:
+ return int(x) >= int(y)
+ case int16:
+ return int(x) >= int(y)
+ case int32:
+ return int(x) >= int(y)
+ case int64:
+ return int(x) >= int(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) >= int(y)
+ case uint8:
+ return int(x) >= int(y)
+ case uint16:
+ return int(x) >= int(y)
+ case uint32:
+ return int(x) >= int(y)
+ case uint64:
+ return int(x) >= int(y)
+ case int:
+ return int(x) >= int(y)
+ case int8:
+ return int(x) >= int(y)
+ case int16:
+ return int(x) >= int(y)
+ case int32:
+ return int(x) >= int(y)
+ case int64:
+ return int(x) >= int(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) >= float64(y)
+ case uint8:
+ return float64(x) >= float64(y)
+ case uint16:
+ return float64(x) >= float64(y)
+ case uint32:
+ return float64(x) >= float64(y)
+ case uint64:
+ return float64(x) >= float64(y)
+ case int:
+ return float64(x) >= float64(y)
+ case int8:
+ return float64(x) >= float64(y)
+ case int16:
+ return float64(x) >= float64(y)
+ case int32:
+ return float64(x) >= float64(y)
+ case int64:
+ return float64(x) >= float64(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) >= float64(y)
+ case uint8:
+ return float64(x) >= float64(y)
+ case uint16:
+ return float64(x) >= float64(y)
+ case uint32:
+ return float64(x) >= float64(y)
+ case uint64:
+ return float64(x) >= float64(y)
+ case int:
+ return float64(x) >= float64(y)
+ case int8:
+ return float64(x) >= float64(y)
+ case int16:
+ return float64(x) >= float64(y)
+ case int32:
+ return float64(x) >= float64(y)
+ case int64:
+ return float64(x) >= float64(y)
+ case float32:
+ return float64(x) >= float64(y)
+ case float64:
+ return float64(x) >= float64(y)
+ }
+ case string:
+ switch y := b.(type) {
+ case string:
+ return x >= y
+ }
+ case time.Time:
+ switch y := b.(type) {
+ case time.Time:
+ return x.After(y) || x.Equal(y)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T >= %T", a, b))
+}
+
+func Add(a, b interface{}) interface{} {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) + int(y)
+ case uint8:
+ return int(x) + int(y)
+ case uint16:
+ return int(x) + int(y)
+ case uint32:
+ return int(x) + int(y)
+ case uint64:
+ return int(x) + int(y)
+ case int:
+ return int(x) + int(y)
+ case int8:
+ return int(x) + int(y)
+ case int16:
+ return int(x) + int(y)
+ case int32:
+ return int(x) + int(y)
+ case int64:
+ return int(x) + int(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) + float64(y)
+ case uint8:
+ return float64(x) + float64(y)
+ case uint16:
+ return float64(x) + float64(y)
+ case uint32:
+ return float64(x) + float64(y)
+ case uint64:
+ return float64(x) + float64(y)
+ case int:
+ return float64(x) + float64(y)
+ case int8:
+ return float64(x) + float64(y)
+ case int16:
+ return float64(x) + float64(y)
+ case int32:
+ return float64(x) + float64(y)
+ case int64:
+ return float64(x) + float64(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) + float64(y)
+ case uint8:
+ return float64(x) + float64(y)
+ case uint16:
+ return float64(x) + float64(y)
+ case uint32:
+ return float64(x) + float64(y)
+ case uint64:
+ return float64(x) + float64(y)
+ case int:
+ return float64(x) + float64(y)
+ case int8:
+ return float64(x) + float64(y)
+ case int16:
+ return float64(x) + float64(y)
+ case int32:
+ return float64(x) + float64(y)
+ case int64:
+ return float64(x) + float64(y)
+ case float32:
+ return float64(x) + float64(y)
+ case float64:
+ return float64(x) + float64(y)
+ }
+ case string:
+ switch y := b.(type) {
+ case string:
+ return x + y
+ }
+ case time.Time:
+ switch y := b.(type) {
+ case time.Duration:
+ return x.Add(y)
+ }
+ case time.Duration:
+ switch y := b.(type) {
+ case time.Time:
+ return y.Add(x)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T + %T", a, b))
+}
+
+func Subtract(a, b interface{}) interface{} {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) - int(y)
+ case uint8:
+ return int(x) - int(y)
+ case uint16:
+ return int(x) - int(y)
+ case uint32:
+ return int(x) - int(y)
+ case uint64:
+ return int(x) - int(y)
+ case int:
+ return int(x) - int(y)
+ case int8:
+ return int(x) - int(y)
+ case int16:
+ return int(x) - int(y)
+ case int32:
+ return int(x) - int(y)
+ case int64:
+ return int(x) - int(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) - int(y)
+ case uint8:
+ return int(x) - int(y)
+ case uint16:
+ return int(x) - int(y)
+ case uint32:
+ return int(x) - int(y)
+ case uint64:
+ return int(x) - int(y)
+ case int:
+ return int(x) - int(y)
+ case int8:
+ return int(x) - int(y)
+ case int16:
+ return int(x) - int(y)
+ case int32:
+ return int(x) - int(y)
+ case int64:
+ return int(x) - int(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) - int(y)
+ case uint8:
+ return int(x) - int(y)
+ case uint16:
+ return int(x) - int(y)
+ case uint32:
+ return int(x) - int(y)
+ case uint64:
+ return int(x) - int(y)
+ case int:
+ return int(x) - int(y)
+ case int8:
+ return int(x) - int(y)
+ case int16:
+ return int(x) - int(y)
+ case int32:
+ return int(x) - int(y)
+ case int64:
+ return int(x) - int(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) - int(y)
+ case uint8:
+ return int(x) - int(y)
+ case uint16:
+ return int(x) - int(y)
+ case uint32:
+ return int(x) - int(y)
+ case uint64:
+ return int(x) - int(y)
+ case int:
+ return int(x) - int(y)
+ case int8:
+ return int(x) - int(y)
+ case int16:
+ return int(x) - int(y)
+ case int32:
+ return int(x) - int(y)
+ case int64:
+ return int(x) - int(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) - int(y)
+ case uint8:
+ return int(x) - int(y)
+ case uint16:
+ return int(x) - int(y)
+ case uint32:
+ return int(x) - int(y)
+ case uint64:
+ return int(x) - int(y)
+ case int:
+ return int(x) - int(y)
+ case int8:
+ return int(x) - int(y)
+ case int16:
+ return int(x) - int(y)
+ case int32:
+ return int(x) - int(y)
+ case int64:
+ return int(x) - int(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) - int(y)
+ case uint8:
+ return int(x) - int(y)
+ case uint16:
+ return int(x) - int(y)
+ case uint32:
+ return int(x) - int(y)
+ case uint64:
+ return int(x) - int(y)
+ case int:
+ return int(x) - int(y)
+ case int8:
+ return int(x) - int(y)
+ case int16:
+ return int(x) - int(y)
+ case int32:
+ return int(x) - int(y)
+ case int64:
+ return int(x) - int(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) - int(y)
+ case uint8:
+ return int(x) - int(y)
+ case uint16:
+ return int(x) - int(y)
+ case uint32:
+ return int(x) - int(y)
+ case uint64:
+ return int(x) - int(y)
+ case int:
+ return int(x) - int(y)
+ case int8:
+ return int(x) - int(y)
+ case int16:
+ return int(x) - int(y)
+ case int32:
+ return int(x) - int(y)
+ case int64:
+ return int(x) - int(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) - int(y)
+ case uint8:
+ return int(x) - int(y)
+ case uint16:
+ return int(x) - int(y)
+ case uint32:
+ return int(x) - int(y)
+ case uint64:
+ return int(x) - int(y)
+ case int:
+ return int(x) - int(y)
+ case int8:
+ return int(x) - int(y)
+ case int16:
+ return int(x) - int(y)
+ case int32:
+ return int(x) - int(y)
+ case int64:
+ return int(x) - int(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) - int(y)
+ case uint8:
+ return int(x) - int(y)
+ case uint16:
+ return int(x) - int(y)
+ case uint32:
+ return int(x) - int(y)
+ case uint64:
+ return int(x) - int(y)
+ case int:
+ return int(x) - int(y)
+ case int8:
+ return int(x) - int(y)
+ case int16:
+ return int(x) - int(y)
+ case int32:
+ return int(x) - int(y)
+ case int64:
+ return int(x) - int(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) - int(y)
+ case uint8:
+ return int(x) - int(y)
+ case uint16:
+ return int(x) - int(y)
+ case uint32:
+ return int(x) - int(y)
+ case uint64:
+ return int(x) - int(y)
+ case int:
+ return int(x) - int(y)
+ case int8:
+ return int(x) - int(y)
+ case int16:
+ return int(x) - int(y)
+ case int32:
+ return int(x) - int(y)
+ case int64:
+ return int(x) - int(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) - float64(y)
+ case uint8:
+ return float64(x) - float64(y)
+ case uint16:
+ return float64(x) - float64(y)
+ case uint32:
+ return float64(x) - float64(y)
+ case uint64:
+ return float64(x) - float64(y)
+ case int:
+ return float64(x) - float64(y)
+ case int8:
+ return float64(x) - float64(y)
+ case int16:
+ return float64(x) - float64(y)
+ case int32:
+ return float64(x) - float64(y)
+ case int64:
+ return float64(x) - float64(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) - float64(y)
+ case uint8:
+ return float64(x) - float64(y)
+ case uint16:
+ return float64(x) - float64(y)
+ case uint32:
+ return float64(x) - float64(y)
+ case uint64:
+ return float64(x) - float64(y)
+ case int:
+ return float64(x) - float64(y)
+ case int8:
+ return float64(x) - float64(y)
+ case int16:
+ return float64(x) - float64(y)
+ case int32:
+ return float64(x) - float64(y)
+ case int64:
+ return float64(x) - float64(y)
+ case float32:
+ return float64(x) - float64(y)
+ case float64:
+ return float64(x) - float64(y)
+ }
+ case time.Time:
+ switch y := b.(type) {
+ case time.Time:
+ return x.Sub(y)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T - %T", a, b))
+}
+
+func Multiply(a, b interface{}) interface{} {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) * int(y)
+ case uint8:
+ return int(x) * int(y)
+ case uint16:
+ return int(x) * int(y)
+ case uint32:
+ return int(x) * int(y)
+ case uint64:
+ return int(x) * int(y)
+ case int:
+ return int(x) * int(y)
+ case int8:
+ return int(x) * int(y)
+ case int16:
+ return int(x) * int(y)
+ case int32:
+ return int(x) * int(y)
+ case int64:
+ return int(x) * int(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) * int(y)
+ case uint8:
+ return int(x) * int(y)
+ case uint16:
+ return int(x) * int(y)
+ case uint32:
+ return int(x) * int(y)
+ case uint64:
+ return int(x) * int(y)
+ case int:
+ return int(x) * int(y)
+ case int8:
+ return int(x) * int(y)
+ case int16:
+ return int(x) * int(y)
+ case int32:
+ return int(x) * int(y)
+ case int64:
+ return int(x) * int(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) * int(y)
+ case uint8:
+ return int(x) * int(y)
+ case uint16:
+ return int(x) * int(y)
+ case uint32:
+ return int(x) * int(y)
+ case uint64:
+ return int(x) * int(y)
+ case int:
+ return int(x) * int(y)
+ case int8:
+ return int(x) * int(y)
+ case int16:
+ return int(x) * int(y)
+ case int32:
+ return int(x) * int(y)
+ case int64:
+ return int(x) * int(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) * int(y)
+ case uint8:
+ return int(x) * int(y)
+ case uint16:
+ return int(x) * int(y)
+ case uint32:
+ return int(x) * int(y)
+ case uint64:
+ return int(x) * int(y)
+ case int:
+ return int(x) * int(y)
+ case int8:
+ return int(x) * int(y)
+ case int16:
+ return int(x) * int(y)
+ case int32:
+ return int(x) * int(y)
+ case int64:
+ return int(x) * int(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) * int(y)
+ case uint8:
+ return int(x) * int(y)
+ case uint16:
+ return int(x) * int(y)
+ case uint32:
+ return int(x) * int(y)
+ case uint64:
+ return int(x) * int(y)
+ case int:
+ return int(x) * int(y)
+ case int8:
+ return int(x) * int(y)
+ case int16:
+ return int(x) * int(y)
+ case int32:
+ return int(x) * int(y)
+ case int64:
+ return int(x) * int(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) * int(y)
+ case uint8:
+ return int(x) * int(y)
+ case uint16:
+ return int(x) * int(y)
+ case uint32:
+ return int(x) * int(y)
+ case uint64:
+ return int(x) * int(y)
+ case int:
+ return int(x) * int(y)
+ case int8:
+ return int(x) * int(y)
+ case int16:
+ return int(x) * int(y)
+ case int32:
+ return int(x) * int(y)
+ case int64:
+ return int(x) * int(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) * int(y)
+ case uint8:
+ return int(x) * int(y)
+ case uint16:
+ return int(x) * int(y)
+ case uint32:
+ return int(x) * int(y)
+ case uint64:
+ return int(x) * int(y)
+ case int:
+ return int(x) * int(y)
+ case int8:
+ return int(x) * int(y)
+ case int16:
+ return int(x) * int(y)
+ case int32:
+ return int(x) * int(y)
+ case int64:
+ return int(x) * int(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) * int(y)
+ case uint8:
+ return int(x) * int(y)
+ case uint16:
+ return int(x) * int(y)
+ case uint32:
+ return int(x) * int(y)
+ case uint64:
+ return int(x) * int(y)
+ case int:
+ return int(x) * int(y)
+ case int8:
+ return int(x) * int(y)
+ case int16:
+ return int(x) * int(y)
+ case int32:
+ return int(x) * int(y)
+ case int64:
+ return int(x) * int(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) * int(y)
+ case uint8:
+ return int(x) * int(y)
+ case uint16:
+ return int(x) * int(y)
+ case uint32:
+ return int(x) * int(y)
+ case uint64:
+ return int(x) * int(y)
+ case int:
+ return int(x) * int(y)
+ case int8:
+ return int(x) * int(y)
+ case int16:
+ return int(x) * int(y)
+ case int32:
+ return int(x) * int(y)
+ case int64:
+ return int(x) * int(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) * int(y)
+ case uint8:
+ return int(x) * int(y)
+ case uint16:
+ return int(x) * int(y)
+ case uint32:
+ return int(x) * int(y)
+ case uint64:
+ return int(x) * int(y)
+ case int:
+ return int(x) * int(y)
+ case int8:
+ return int(x) * int(y)
+ case int16:
+ return int(x) * int(y)
+ case int32:
+ return int(x) * int(y)
+ case int64:
+ return int(x) * int(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) * float64(y)
+ case uint8:
+ return float64(x) * float64(y)
+ case uint16:
+ return float64(x) * float64(y)
+ case uint32:
+ return float64(x) * float64(y)
+ case uint64:
+ return float64(x) * float64(y)
+ case int:
+ return float64(x) * float64(y)
+ case int8:
+ return float64(x) * float64(y)
+ case int16:
+ return float64(x) * float64(y)
+ case int32:
+ return float64(x) * float64(y)
+ case int64:
+ return float64(x) * float64(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) * float64(y)
+ case uint8:
+ return float64(x) * float64(y)
+ case uint16:
+ return float64(x) * float64(y)
+ case uint32:
+ return float64(x) * float64(y)
+ case uint64:
+ return float64(x) * float64(y)
+ case int:
+ return float64(x) * float64(y)
+ case int8:
+ return float64(x) * float64(y)
+ case int16:
+ return float64(x) * float64(y)
+ case int32:
+ return float64(x) * float64(y)
+ case int64:
+ return float64(x) * float64(y)
+ case float32:
+ return float64(x) * float64(y)
+ case float64:
+ return float64(x) * float64(y)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T * %T", a, b))
+}
+
+func Divide(a, b interface{}) float64 {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case float32:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ case float64:
+ switch y := b.(type) {
+ case uint:
+ return float64(x) / float64(y)
+ case uint8:
+ return float64(x) / float64(y)
+ case uint16:
+ return float64(x) / float64(y)
+ case uint32:
+ return float64(x) / float64(y)
+ case uint64:
+ return float64(x) / float64(y)
+ case int:
+ return float64(x) / float64(y)
+ case int8:
+ return float64(x) / float64(y)
+ case int16:
+ return float64(x) / float64(y)
+ case int32:
+ return float64(x) / float64(y)
+ case int64:
+ return float64(x) / float64(y)
+ case float32:
+ return float64(x) / float64(y)
+ case float64:
+ return float64(x) / float64(y)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T / %T", a, b))
+}
+
+func Modulo(a, b interface{}) int {
+ switch x := a.(type) {
+ case uint:
+ switch y := b.(type) {
+ case uint:
+ return int(x) % int(y)
+ case uint8:
+ return int(x) % int(y)
+ case uint16:
+ return int(x) % int(y)
+ case uint32:
+ return int(x) % int(y)
+ case uint64:
+ return int(x) % int(y)
+ case int:
+ return int(x) % int(y)
+ case int8:
+ return int(x) % int(y)
+ case int16:
+ return int(x) % int(y)
+ case int32:
+ return int(x) % int(y)
+ case int64:
+ return int(x) % int(y)
+ }
+ case uint8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) % int(y)
+ case uint8:
+ return int(x) % int(y)
+ case uint16:
+ return int(x) % int(y)
+ case uint32:
+ return int(x) % int(y)
+ case uint64:
+ return int(x) % int(y)
+ case int:
+ return int(x) % int(y)
+ case int8:
+ return int(x) % int(y)
+ case int16:
+ return int(x) % int(y)
+ case int32:
+ return int(x) % int(y)
+ case int64:
+ return int(x) % int(y)
+ }
+ case uint16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) % int(y)
+ case uint8:
+ return int(x) % int(y)
+ case uint16:
+ return int(x) % int(y)
+ case uint32:
+ return int(x) % int(y)
+ case uint64:
+ return int(x) % int(y)
+ case int:
+ return int(x) % int(y)
+ case int8:
+ return int(x) % int(y)
+ case int16:
+ return int(x) % int(y)
+ case int32:
+ return int(x) % int(y)
+ case int64:
+ return int(x) % int(y)
+ }
+ case uint32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) % int(y)
+ case uint8:
+ return int(x) % int(y)
+ case uint16:
+ return int(x) % int(y)
+ case uint32:
+ return int(x) % int(y)
+ case uint64:
+ return int(x) % int(y)
+ case int:
+ return int(x) % int(y)
+ case int8:
+ return int(x) % int(y)
+ case int16:
+ return int(x) % int(y)
+ case int32:
+ return int(x) % int(y)
+ case int64:
+ return int(x) % int(y)
+ }
+ case uint64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) % int(y)
+ case uint8:
+ return int(x) % int(y)
+ case uint16:
+ return int(x) % int(y)
+ case uint32:
+ return int(x) % int(y)
+ case uint64:
+ return int(x) % int(y)
+ case int:
+ return int(x) % int(y)
+ case int8:
+ return int(x) % int(y)
+ case int16:
+ return int(x) % int(y)
+ case int32:
+ return int(x) % int(y)
+ case int64:
+ return int(x) % int(y)
+ }
+ case int:
+ switch y := b.(type) {
+ case uint:
+ return int(x) % int(y)
+ case uint8:
+ return int(x) % int(y)
+ case uint16:
+ return int(x) % int(y)
+ case uint32:
+ return int(x) % int(y)
+ case uint64:
+ return int(x) % int(y)
+ case int:
+ return int(x) % int(y)
+ case int8:
+ return int(x) % int(y)
+ case int16:
+ return int(x) % int(y)
+ case int32:
+ return int(x) % int(y)
+ case int64:
+ return int(x) % int(y)
+ }
+ case int8:
+ switch y := b.(type) {
+ case uint:
+ return int(x) % int(y)
+ case uint8:
+ return int(x) % int(y)
+ case uint16:
+ return int(x) % int(y)
+ case uint32:
+ return int(x) % int(y)
+ case uint64:
+ return int(x) % int(y)
+ case int:
+ return int(x) % int(y)
+ case int8:
+ return int(x) % int(y)
+ case int16:
+ return int(x) % int(y)
+ case int32:
+ return int(x) % int(y)
+ case int64:
+ return int(x) % int(y)
+ }
+ case int16:
+ switch y := b.(type) {
+ case uint:
+ return int(x) % int(y)
+ case uint8:
+ return int(x) % int(y)
+ case uint16:
+ return int(x) % int(y)
+ case uint32:
+ return int(x) % int(y)
+ case uint64:
+ return int(x) % int(y)
+ case int:
+ return int(x) % int(y)
+ case int8:
+ return int(x) % int(y)
+ case int16:
+ return int(x) % int(y)
+ case int32:
+ return int(x) % int(y)
+ case int64:
+ return int(x) % int(y)
+ }
+ case int32:
+ switch y := b.(type) {
+ case uint:
+ return int(x) % int(y)
+ case uint8:
+ return int(x) % int(y)
+ case uint16:
+ return int(x) % int(y)
+ case uint32:
+ return int(x) % int(y)
+ case uint64:
+ return int(x) % int(y)
+ case int:
+ return int(x) % int(y)
+ case int8:
+ return int(x) % int(y)
+ case int16:
+ return int(x) % int(y)
+ case int32:
+ return int(x) % int(y)
+ case int64:
+ return int(x) % int(y)
+ }
+ case int64:
+ switch y := b.(type) {
+ case uint:
+ return int(x) % int(y)
+ case uint8:
+ return int(x) % int(y)
+ case uint16:
+ return int(x) % int(y)
+ case uint32:
+ return int(x) % int(y)
+ case uint64:
+ return int(x) % int(y)
+ case int:
+ return int(x) % int(y)
+ case int8:
+ return int(x) % int(y)
+ case int16:
+ return int(x) % int(y)
+ case int32:
+ return int(x) % int(y)
+ case int64:
+ return int(x) % int(y)
+ }
+ }
+ panic(fmt.Sprintf("invalid operation: %T %% %T", a, b))
+}
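
For reference, these generated helpers normalize mixed numeric types: `Multiply` widens integer operands to `int` and floats to `float64`, `Divide` always computes in `float64`, and `Modulo` is integer-only. A minimal sketch of the behavior, assuming the vendored import path resolves from this module:

```go
package main

import (
	"fmt"

	"github.com/antonmedv/expr/vm/runtime"
)

func main() {
	// Mixed integer types are widened to int before multiplying.
	fmt.Println(runtime.Multiply(int8(3), uint16(4))) // 12 (int)

	// Division always happens in float64, even for two ints.
	fmt.Println(runtime.Divide(7, 2)) // 3.5

	// Modulo accepts only integer operands; anything else panics.
	fmt.Println(runtime.Modulo(int64(7), 2)) // 1
}
```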
diff --git a/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go b/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go
new file mode 100644
index 00000000000..b2eeb65d83c
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/runtime/runtime.go
@@ -0,0 +1,517 @@
+package runtime
+
+//go:generate sh -c "go run ./helpers > ./generated.go"
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+)
+
+func Fetch(from, i interface{}) interface{} {
+ v := reflect.ValueOf(from)
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ panic(fmt.Sprintf("cannot fetch %v from %T", i, from))
+ }
+
+ // Methods can be defined on any type.
+ if v.NumMethod() > 0 {
+ if methodName, ok := i.(string); ok {
+ method := v.MethodByName(methodName)
+ if method.IsValid() {
+ return method.Interface()
+ }
+ }
+ }
+
+ // Structs, maps, and slices can be accessed through a pointer or through
+ // a value; when they are accessed through a pointer we don't want to
+ // copy them to a value.
+ if kind == reflect.Ptr {
+ v = reflect.Indirect(v)
+ kind = v.Kind()
+ }
+
+ // TODO: We can create separate opcodes for each of the cases below to make
+ // this a little bit faster.
+ switch kind {
+ case reflect.Array, reflect.Slice, reflect.String:
+ index := ToInt(i)
+ if index < 0 {
+ index = v.Len() + index
+ }
+ value := v.Index(index)
+ if value.IsValid() {
+ return value.Interface()
+ }
+
+ case reflect.Map:
+ var value reflect.Value
+ if i == nil {
+ value = v.MapIndex(reflect.Zero(v.Type().Key()))
+ } else {
+ value = v.MapIndex(reflect.ValueOf(i))
+ }
+ if value.IsValid() {
+ return value.Interface()
+ } else {
+ elem := reflect.TypeOf(from).Elem()
+ return reflect.Zero(elem).Interface()
+ }
+
+ case reflect.Struct:
+ fieldName := i.(string)
+ value := v.FieldByNameFunc(func(name string) bool {
+ field, _ := v.Type().FieldByName(name)
+ if field.Tag.Get("expr") == fieldName {
+ return true
+ }
+ return name == fieldName
+ })
+ if value.IsValid() {
+ return value.Interface()
+ }
+ }
+ panic(fmt.Sprintf("cannot fetch %v from %T", i, from))
+}
+
+type Field struct {
+ Index []int
+ Path []string
+}
+
+func FetchField(from interface{}, field *Field) interface{} {
+ v := reflect.ValueOf(from)
+ kind := v.Kind()
+ if kind != reflect.Invalid {
+ if kind == reflect.Ptr {
+ v = reflect.Indirect(v)
+ }
+ // We could use v.FieldByIndex here, but it panics if the field
+ // does not exist, and we would need to recover() to generate a more
+ // user-friendly error message.
+ // Also, our fieldByIndex() function is slightly faster than
+ // v.FieldByIndex() because we don't need to verify that a field
+ // is a struct; that was already checked at compilation time.
+ value := fieldByIndex(v, field)
+ if value.IsValid() {
+ return value.Interface()
+ }
+ }
+ panic(fmt.Sprintf("cannot get %v from %T", field.Path[0], from))
+}
+
+func fieldByIndex(v reflect.Value, field *Field) reflect.Value {
+ if len(field.Index) == 1 {
+ return v.Field(field.Index[0])
+ }
+ for i, x := range field.Index {
+ if i > 0 {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ panic(fmt.Sprintf("cannot get %v from %v", field.Path[i], field.Path[i-1]))
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v
+}
+
+type Method struct {
+ Index int
+ Name string
+}
+
+func FetchMethod(from interface{}, method *Method) interface{} {
+ v := reflect.ValueOf(from)
+ kind := v.Kind()
+ if kind != reflect.Invalid {
+ // Methods can be defined on any type, no need to dereference.
+ method := v.Method(method.Index)
+ if method.IsValid() {
+ return method.Interface()
+ }
+ }
+ panic(fmt.Sprintf("cannot fetch %v from %T", method.Name, from))
+}
+
+func Deref(i interface{}) interface{} {
+ if i == nil {
+ return nil
+ }
+
+ v := reflect.ValueOf(i)
+
+ if v.Kind() == reflect.Interface {
+ if v.IsNil() {
+ return i
+ }
+ v = v.Elem()
+ }
+
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return i
+ }
+ indirect := reflect.Indirect(v)
+ switch indirect.Kind() {
+ case reflect.Struct, reflect.Map, reflect.Array, reflect.Slice:
+ default:
+ v = v.Elem()
+ }
+ }
+
+ if v.IsValid() {
+ return v.Interface()
+ }
+
+ panic(fmt.Sprintf("cannot dereference %v", i))
+}
+
+func Slice(array, from, to interface{}) interface{} {
+ v := reflect.ValueOf(array)
+
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.String:
+ length := v.Len()
+ a, b := ToInt(from), ToInt(to)
+ if a < 0 {
+ a = length + a
+ }
+ if b < 0 {
+ b = length + b
+ }
+ if b > length {
+ b = length
+ }
+ if a > b {
+ a = b
+ }
+ value := v.Slice(a, b)
+ if value.IsValid() {
+ return value.Interface()
+ }
+
+ case reflect.Ptr:
+ value := v.Elem()
+ if value.IsValid() {
+ return Slice(value.Interface(), from, to)
+ }
+
+ }
+ panic(fmt.Sprintf("cannot slice %v", from))
+}
+
+func In(needle interface{}, array interface{}) bool {
+ if array == nil {
+ return false
+ }
+ v := reflect.ValueOf(array)
+
+ switch v.Kind() {
+
+ case reflect.Array, reflect.Slice:
+ for i := 0; i < v.Len(); i++ {
+ value := v.Index(i)
+ if value.IsValid() {
+ if Equal(value.Interface(), needle) {
+ return true
+ }
+ }
+ }
+ return false
+
+ case reflect.Map:
+ var value reflect.Value
+ if needle == nil {
+ value = v.MapIndex(reflect.Zero(v.Type().Key()))
+ } else {
+ n := reflect.ValueOf(needle)
+ if !n.IsValid() {
+ panic(fmt.Sprintf("cannot use %T as index to %T", needle, array))
+ }
+ value = v.MapIndex(n)
+ }
+ if value.IsValid() {
+ return true
+ }
+ return false
+
+ case reflect.Struct:
+ n := reflect.ValueOf(needle)
+ if !n.IsValid() || n.Kind() != reflect.String {
+ panic(fmt.Sprintf("cannot use %T as field name of %T", needle, array))
+ }
+ value := v.FieldByName(n.String())
+ if value.IsValid() {
+ return true
+ }
+ return false
+
+ case reflect.Ptr:
+ value := v.Elem()
+ if value.IsValid() {
+ return In(needle, value.Interface())
+ }
+ return false
+ }
+
+ panic(fmt.Sprintf(`operator "in" not defined on %T`, array))
+}
+
+func Len(a interface{}) interface{} {
+ v := reflect.ValueOf(a)
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return v.Len()
+ default:
+ panic(fmt.Sprintf("invalid argument for len (type %T)", a))
+ }
+}
+
+func Negate(i interface{}) interface{} {
+ switch v := i.(type) {
+ case float32:
+ return -v
+ case float64:
+ return -v
+ case int:
+ return -v
+ case int8:
+ return -v
+ case int16:
+ return -v
+ case int32:
+ return -v
+ case int64:
+ return -v
+ case uint:
+ return -v
+ case uint8:
+ return -v
+ case uint16:
+ return -v
+ case uint32:
+ return -v
+ case uint64:
+ return -v
+ default:
+ panic(fmt.Sprintf("invalid operation: - %T", v))
+ }
+}
+
+func Exponent(a, b interface{}) float64 {
+ return math.Pow(ToFloat64(a), ToFloat64(b))
+}
+
+func MakeRange(min, max int) []int {
+ size := max - min + 1
+ if size <= 0 {
+ return []int{}
+ }
+ rng := make([]int, size)
+ for i := range rng {
+ rng[i] = min + i
+ }
+ return rng
+}
+
+func ToInt(a interface{}) int {
+ switch x := a.(type) {
+ case float32:
+ return int(x)
+ case float64:
+ return int(x)
+ case int:
+ return x
+ case int8:
+ return int(x)
+ case int16:
+ return int(x)
+ case int32:
+ return int(x)
+ case int64:
+ return int(x)
+ case uint:
+ return int(x)
+ case uint8:
+ return int(x)
+ case uint16:
+ return int(x)
+ case uint32:
+ return int(x)
+ case uint64:
+ return int(x)
+ case string:
+ i, err := strconv.Atoi(x)
+ if err != nil {
+ panic(fmt.Sprintf("invalid operation: int(%s)", x))
+ }
+ return i
+ default:
+ panic(fmt.Sprintf("invalid operation: int(%T)", x))
+ }
+}
+
+func ToInt64(a interface{}) int64 {
+ switch x := a.(type) {
+ case float32:
+ return int64(x)
+ case float64:
+ return int64(x)
+ case int:
+ return int64(x)
+ case int8:
+ return int64(x)
+ case int16:
+ return int64(x)
+ case int32:
+ return int64(x)
+ case int64:
+ return x
+ case uint:
+ return int64(x)
+ case uint8:
+ return int64(x)
+ case uint16:
+ return int64(x)
+ case uint32:
+ return int64(x)
+ case uint64:
+ return int64(x)
+ default:
+ panic(fmt.Sprintf("invalid operation: int64(%T)", x))
+ }
+}
+
+func ToFloat64(a interface{}) float64 {
+ switch x := a.(type) {
+ case float32:
+ return float64(x)
+ case float64:
+ return x
+ case int:
+ return float64(x)
+ case int8:
+ return float64(x)
+ case int16:
+ return float64(x)
+ case int32:
+ return float64(x)
+ case int64:
+ return float64(x)
+ case uint:
+ return float64(x)
+ case uint8:
+ return float64(x)
+ case uint16:
+ return float64(x)
+ case uint32:
+ return float64(x)
+ case uint64:
+ return float64(x)
+ case string:
+ f, err := strconv.ParseFloat(x, 64)
+ if err != nil {
+ panic(fmt.Sprintf("invalid operation: float(%s)", x))
+ }
+ return f
+ default:
+ panic(fmt.Sprintf("invalid operation: float(%T)", x))
+ }
+}
+
+func IsNil(v interface{}) bool {
+ if v == nil {
+ return true
+ }
+ r := reflect.ValueOf(v)
+ switch r.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
+ return r.IsNil()
+ default:
+ return false
+ }
+}
+
+func Abs(x interface{}) interface{} {
+ switch x.(type) {
+ case float32:
+ if x.(float32) < 0 {
+ return -x.(float32)
+ } else {
+ return x
+ }
+ case float64:
+ if x.(float64) < 0 {
+ return -x.(float64)
+ } else {
+ return x
+ }
+ case int:
+ if x.(int) < 0 {
+ return -x.(int)
+ } else {
+ return x
+ }
+ case int8:
+ if x.(int8) < 0 {
+ return -x.(int8)
+ } else {
+ return x
+ }
+ case int16:
+ if x.(int16) < 0 {
+ return -x.(int16)
+ } else {
+ return x
+ }
+ case int32:
+ if x.(int32) < 0 {
+ return -x.(int32)
+ } else {
+ return x
+ }
+ case int64:
+ if x.(int64) < 0 {
+ return -x.(int64)
+ } else {
+ return x
+ }
+ case uint:
+ if x.(uint) < 0 {
+ return -x.(uint)
+ } else {
+ return x
+ }
+ case uint8:
+ if x.(uint8) < 0 {
+ return -x.(uint8)
+ } else {
+ return x
+ }
+ case uint16:
+ if x.(uint16) < 0 {
+ return -x.(uint16)
+ } else {
+ return x
+ }
+ case uint32:
+ if x.(uint32) < 0 {
+ return -x.(uint32)
+ } else {
+ return x
+ }
+ case uint64:
+ if x.(uint64) < 0 {
+ return -x.(uint64)
+ } else {
+ return x
+ }
+ }
+ panic(fmt.Sprintf("invalid argument for abs (type %T)", x))
+}
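
runtime.go gathers the reflection-based access helpers the VM leans on: `Fetch` supports negative indexes and falls back to the element's zero value for missing map keys, `Slice` wraps and clamps its bounds, and `In` compares via the generated `Equal`. A short sketch of these semantics, assuming the vendored import path resolves:

```go
package main

import (
	"fmt"

	"github.com/antonmedv/expr/vm/runtime"
)

func main() {
	s := []int{10, 20, 30}

	// Negative indexes count from the end of the sequence.
	fmt.Println(runtime.Fetch(s, -1)) // 30

	// A missing map key yields the zero value of the element type.
	fmt.Println(runtime.Fetch(map[string]int{"a": 1}, "b")) // 0

	// Slice bounds are wrapped/clamped instead of panicking.
	fmt.Println(runtime.Slice("hello", 1, -1)) // ell

	// In reports membership using the generated Equal helper.
	fmt.Println(runtime.In(20, s)) // true
}
```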
diff --git a/vendor/github.com/antonmedv/expr/vm/vm.go b/vendor/github.com/antonmedv/expr/vm/vm.go
new file mode 100644
index 00000000000..af4fc5bf755
--- /dev/null
+++ b/vendor/github.com/antonmedv/expr/vm/vm.go
@@ -0,0 +1,523 @@
+package vm
+
+//go:generate sh -c "go run ./func_types > ./generated.go"
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/antonmedv/expr/builtin"
+ "github.com/antonmedv/expr/file"
+ "github.com/antonmedv/expr/vm/runtime"
+)
+
+var MemoryBudget int = 1e6
+var errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+type Function = func(params ...interface{}) (interface{}, error)
+
+func Run(program *Program, env interface{}) (interface{}, error) {
+ if program == nil {
+ return nil, fmt.Errorf("program is nil")
+ }
+
+ vm := VM{}
+ return vm.Run(program, env)
+}
+
+type VM struct {
+ stack []interface{}
+ ip int
+ scopes []*Scope
+ debug bool
+ step chan struct{}
+ curr chan int
+ memory int
+ memoryBudget int
+}
+
+type Scope struct {
+ Array reflect.Value
+ It int
+ Len int
+ Count int
+}
+
+func Debug() *VM {
+ vm := &VM{
+ debug: true,
+ step: make(chan struct{}, 0),
+ curr: make(chan int, 0),
+ }
+ return vm
+}
+
+func (vm *VM) Run(program *Program, env interface{}) (_ interface{}, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ f := &file.Error{
+ Location: program.Locations[vm.ip-1],
+ Message: fmt.Sprintf("%v", r),
+ }
+ if err, ok := r.(error); ok {
+ f.Wrap(err)
+ }
+ err = f.Bind(program.Source)
+ }
+ }()
+
+ if vm.stack == nil {
+ vm.stack = make([]interface{}, 0, 2)
+ } else {
+ vm.stack = vm.stack[0:0]
+ }
+
+ if vm.scopes != nil {
+ vm.scopes = vm.scopes[0:0]
+ }
+
+ vm.memoryBudget = MemoryBudget
+ vm.memory = 0
+ vm.ip = 0
+
+ for vm.ip < len(program.Bytecode) {
+ if vm.debug {
+ <-vm.step
+ }
+
+ op := program.Bytecode[vm.ip]
+ arg := program.Arguments[vm.ip]
+ vm.ip += 1
+
+ switch op {
+
+ case OpPush:
+ vm.push(program.Constants[arg])
+
+ case OpPop:
+ vm.pop()
+
+ case OpLoadConst:
+ vm.push(runtime.Fetch(env, program.Constants[arg]))
+
+ case OpLoadField:
+ vm.push(runtime.FetchField(env, program.Constants[arg].(*runtime.Field)))
+
+ case OpLoadFast:
+ vm.push(env.(map[string]interface{})[program.Constants[arg].(string)])
+
+ case OpLoadMethod:
+ vm.push(runtime.FetchMethod(env, program.Constants[arg].(*runtime.Method)))
+
+ case OpLoadFunc:
+ vm.push(program.Functions[arg])
+
+ case OpFetch:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Fetch(a, b))
+
+ case OpFetchField:
+ a := vm.pop()
+ vm.push(runtime.FetchField(a, program.Constants[arg].(*runtime.Field)))
+
+ case OpMethod:
+ a := vm.pop()
+ vm.push(runtime.FetchMethod(a, program.Constants[arg].(*runtime.Method)))
+
+ case OpTrue:
+ vm.push(true)
+
+ case OpFalse:
+ vm.push(false)
+
+ case OpNil:
+ vm.push(nil)
+
+ case OpNegate:
+ v := runtime.Negate(vm.pop())
+ vm.push(v)
+
+ case OpNot:
+ v := vm.pop().(bool)
+ vm.push(!v)
+
+ case OpEqual:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Equal(a, b))
+
+ case OpEqualInt:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(a.(int) == b.(int))
+
+ case OpEqualString:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(a.(string) == b.(string))
+
+ case OpJump:
+ vm.ip += arg
+
+ case OpJumpIfTrue:
+ if vm.current().(bool) {
+ vm.ip += arg
+ }
+
+ case OpJumpIfFalse:
+ if !vm.current().(bool) {
+ vm.ip += arg
+ }
+
+ case OpJumpIfNil:
+ if runtime.IsNil(vm.current()) {
+ vm.ip += arg
+ }
+
+ case OpJumpIfNotNil:
+ if !runtime.IsNil(vm.current()) {
+ vm.ip += arg
+ }
+
+ case OpJumpIfEnd:
+ scope := vm.Scope()
+ if scope.It >= scope.Len {
+ vm.ip += arg
+ }
+
+ case OpJumpBackward:
+ vm.ip -= arg
+
+ case OpIn:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.In(a, b))
+
+ case OpLess:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Less(a, b))
+
+ case OpMore:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.More(a, b))
+
+ case OpLessOrEqual:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.LessOrEqual(a, b))
+
+ case OpMoreOrEqual:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.MoreOrEqual(a, b))
+
+ case OpAdd:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Add(a, b))
+
+ case OpSubtract:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Subtract(a, b))
+
+ case OpMultiply:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Multiply(a, b))
+
+ case OpDivide:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Divide(a, b))
+
+ case OpModulo:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Modulo(a, b))
+
+ case OpExponent:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(runtime.Exponent(a, b))
+
+ case OpRange:
+ b := vm.pop()
+ a := vm.pop()
+ min := runtime.ToInt(a)
+ max := runtime.ToInt(b)
+ size := max - min + 1
+ if vm.memory+size >= vm.memoryBudget {
+ panic("memory budget exceeded")
+ }
+ vm.push(runtime.MakeRange(min, max))
+ vm.memory += size
+
+ case OpMatches:
+ b := vm.pop()
+ a := vm.pop()
+ match, err := regexp.MatchString(b.(string), a.(string))
+ if err != nil {
+ panic(err)
+ }
+
+ vm.push(match)
+
+ case OpMatchesConst:
+ a := vm.pop()
+ r := program.Constants[arg].(*regexp.Regexp)
+ vm.push(r.MatchString(a.(string)))
+
+ case OpContains:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(strings.Contains(a.(string), b.(string)))
+
+ case OpStartsWith:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(strings.HasPrefix(a.(string), b.(string)))
+
+ case OpEndsWith:
+ b := vm.pop()
+ a := vm.pop()
+ vm.push(strings.HasSuffix(a.(string), b.(string)))
+
+ case OpSlice:
+ from := vm.pop()
+ to := vm.pop()
+ node := vm.pop()
+ vm.push(runtime.Slice(node, from, to))
+
+ case OpCall:
+ fn := reflect.ValueOf(vm.pop())
+ size := arg
+ in := make([]reflect.Value, size)
+ for i := int(size) - 1; i >= 0; i-- {
+ param := vm.pop()
+ if param == nil && reflect.TypeOf(param) == nil {
+ // In case of a nil value and nil type, use this hack;
+ // otherwise reflect's Call will panic on a zero value.
+ in[i] = reflect.ValueOf(&param).Elem()
+ } else {
+ in[i] = reflect.ValueOf(param)
+ }
+ }
+ out := fn.Call(in)
+ if len(out) == 2 && out[1].Type() == errorType && !out[1].IsNil() {
+ panic(out[1].Interface().(error))
+ }
+ vm.push(out[0].Interface())
+
+ case OpCall0:
+ out, err := program.Functions[arg]()
+ if err != nil {
+ panic(err)
+ }
+ vm.push(out)
+
+ case OpCall1:
+ a := vm.pop()
+ out, err := program.Functions[arg](a)
+ if err != nil {
+ panic(err)
+ }
+ vm.push(out)
+
+ case OpCall2:
+ b := vm.pop()
+ a := vm.pop()
+ out, err := program.Functions[arg](a, b)
+ if err != nil {
+ panic(err)
+ }
+ vm.push(out)
+
+ case OpCall3:
+ c := vm.pop()
+ b := vm.pop()
+ a := vm.pop()
+ out, err := program.Functions[arg](a, b, c)
+ if err != nil {
+ panic(err)
+ }
+ vm.push(out)
+
+ case OpCallN:
+ fn := vm.pop().(Function)
+ size := arg
+ in := make([]interface{}, size)
+ for i := int(size) - 1; i >= 0; i-- {
+ in[i] = vm.pop()
+ }
+ out, err := fn(in...)
+ if err != nil {
+ panic(err)
+ }
+ vm.push(out)
+
+ case OpCallFast:
+ fn := vm.pop().(func(...interface{}) interface{})
+ size := arg
+ in := make([]interface{}, size)
+ for i := int(size) - 1; i >= 0; i-- {
+ in[i] = vm.pop()
+ }
+ vm.push(fn(in...))
+
+ case OpCallTyped:
+ fn := vm.pop()
+ out := vm.call(fn, arg)
+ vm.push(out)
+
+ case OpArray:
+ size := vm.pop().(int)
+ array := make([]interface{}, size)
+ for i := size - 1; i >= 0; i-- {
+ array[i] = vm.pop()
+ }
+ vm.push(array)
+ vm.memory += size
+ if vm.memory >= vm.memoryBudget {
+ panic("memory budget exceeded")
+ }
+
+ case OpMap:
+ size := vm.pop().(int)
+ m := make(map[string]interface{})
+ for i := size - 1; i >= 0; i-- {
+ value := vm.pop()
+ key := vm.pop()
+ m[key.(string)] = value
+ }
+ vm.push(m)
+ vm.memory += size
+ if vm.memory >= vm.memoryBudget {
+ panic("memory budget exceeded")
+ }
+
+ case OpLen:
+ vm.push(runtime.Len(vm.current()))
+
+ case OpCast:
+ t := arg
+ switch t {
+ case 0:
+ vm.push(runtime.ToInt(vm.pop()))
+ case 1:
+ vm.push(runtime.ToInt64(vm.pop()))
+ case 2:
+ vm.push(runtime.ToFloat64(vm.pop()))
+ }
+
+ case OpDeref:
+ a := vm.pop()
+ vm.push(runtime.Deref(a))
+
+ case OpIncrementIt:
+ scope := vm.Scope()
+ scope.It++
+
+ case OpIncrementCount:
+ scope := vm.Scope()
+ scope.Count++
+
+ case OpGetCount:
+ scope := vm.Scope()
+ vm.push(scope.Count)
+
+ case OpGetLen:
+ scope := vm.Scope()
+ vm.push(scope.Len)
+
+ case OpPointer:
+ scope := vm.Scope()
+ vm.push(scope.Array.Index(scope.It).Interface())
+
+ case OpBegin:
+ a := vm.pop()
+ array := reflect.ValueOf(a)
+ vm.scopes = append(vm.scopes, &Scope{
+ Array: array,
+ Len: array.Len(),
+ })
+
+ case OpEnd:
+ vm.scopes = vm.scopes[:len(vm.scopes)-1]
+
+ case OpBuiltin:
+ switch arg {
+ case builtin.Len:
+ vm.push(runtime.Len(vm.pop()))
+
+ case builtin.Abs:
+ vm.push(runtime.Abs(vm.pop()))
+
+ case builtin.Int:
+ vm.push(runtime.ToInt(vm.pop()))
+
+ case builtin.Float:
+ vm.push(runtime.ToFloat64(vm.pop()))
+
+ default:
+ panic(fmt.Sprintf("unknown builtin %v", arg))
+ }
+
+ default:
+ panic(fmt.Sprintf("unknown bytecode %#x", op))
+ }
+
+ if vm.debug {
+ vm.curr <- vm.ip
+ }
+ }
+
+ if vm.debug {
+ close(vm.curr)
+ close(vm.step)
+ }
+
+ if len(vm.stack) > 0 {
+ return vm.pop(), nil
+ }
+
+ return nil, nil
+}
+
+func (vm *VM) push(value interface{}) {
+ vm.stack = append(vm.stack, value)
+}
+
+func (vm *VM) current() interface{} {
+ return vm.stack[len(vm.stack)-1]
+}
+
+func (vm *VM) pop() interface{} {
+ value := vm.stack[len(vm.stack)-1]
+ vm.stack = vm.stack[:len(vm.stack)-1]
+ return value
+}
+
+func (vm *VM) Stack() []interface{} {
+ return vm.stack
+}
+
+func (vm *VM) Scope() *Scope {
+ if len(vm.scopes) > 0 {
+ return vm.scopes[len(vm.scopes)-1]
+ }
+ return nil
+}
+
+func (vm *VM) Step() {
+ vm.step <- struct{}{}
+}
+
+func (vm *VM) Position() chan int {
+ return vm.curr
+}
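
The VM itself is a plain stack machine: every opcode pops its operands, delegates to a `runtime` helper, and pushes the result, while the deferred recover in `Run` converts panics into errors located at the failing instruction. A minimal end-to-end sketch; it assumes the top-level `expr` package (vendored elsewhere in this change) is used to compile the program:

```go
package main

import (
	"fmt"

	"github.com/antonmedv/expr"
	"github.com/antonmedv/expr/vm"
)

func main() {
	env := map[string]interface{}{
		"name":  "tempo",
		"spans": []int{1, 2, 3},
	}

	// expr.Compile produces a *vm.Program; vm.Run interprets its bytecode.
	program, err := expr.Compile(`len(spans) > 2 && name startsWith "te"`, expr.Env(env))
	if err != nil {
		panic(err)
	}

	out, err := vm.Run(program, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```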
diff --git a/vendor/github.com/gobwas/glob/.gitignore b/vendor/github.com/gobwas/glob/.gitignore
new file mode 100644
index 00000000000..b4ae623be55
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/.gitignore
@@ -0,0 +1,8 @@
+glob.iml
+.idea
+*.cpu
+*.mem
+*.test
+*.dot
+*.png
+*.svg
diff --git a/vendor/github.com/gobwas/glob/.travis.yml b/vendor/github.com/gobwas/glob/.travis.yml
new file mode 100644
index 00000000000..e8a276826cf
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/.travis.yml
@@ -0,0 +1,9 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.5.3
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/gobwas/glob/LICENSE b/vendor/github.com/gobwas/glob/LICENSE
new file mode 100644
index 00000000000..9d4735cad9f
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Sergey Kamardin
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/gobwas/glob/bench.sh b/vendor/github.com/gobwas/glob/bench.sh
new file mode 100644
index 00000000000..804cf22e646
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/bench.sh
@@ -0,0 +1,26 @@
+#! /bin/bash
+
+bench() {
+ filename="/tmp/$1-$2.bench"
+ if test -e "${filename}";
+ then
+ echo "Already exists ${filename}"
+ else
+ backup=`git rev-parse --abbrev-ref HEAD`
+ git checkout $1
+ echo -n "Creating ${filename}... "
+ go test ./... -run=NONE -bench=$2 > "${filename}" -benchmem
+ echo "OK"
+ git checkout ${backup}
+ sleep 5
+ fi
+}
+
+
+to=$1
+current=`git rev-parse --abbrev-ref HEAD`
+
+bench ${to} $2
+bench ${current} $2
+
+benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench"
diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go
new file mode 100644
index 00000000000..02e7de80a0b
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/compiler/compiler.go
@@ -0,0 +1,525 @@
+package compiler
+
+// TODO: use a constructor with all matchers, and make their structs private
+// TODO glue multiple Text nodes (like after QuoteMeta)
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/gobwas/glob/match"
+ "github.com/gobwas/glob/syntax/ast"
+ "github.com/gobwas/glob/util/runes"
+)
+
+func optimizeMatcher(matcher match.Matcher) match.Matcher {
+ switch m := matcher.(type) {
+
+ case match.Any:
+ if len(m.Separators) == 0 {
+ return match.NewSuper()
+ }
+
+ case match.AnyOf:
+ if len(m.Matchers) == 1 {
+ return m.Matchers[0]
+ }
+
+ return m
+
+ case match.List:
+ if m.Not == false && len(m.List) == 1 {
+ return match.NewText(string(m.List))
+ }
+
+ return m
+
+ case match.BTree:
+ m.Left = optimizeMatcher(m.Left)
+ m.Right = optimizeMatcher(m.Right)
+
+ r, ok := m.Value.(match.Text)
+ if !ok {
+ return m
+ }
+
+ var (
+ leftNil = m.Left == nil
+ rightNil = m.Right == nil
+ )
+ if leftNil && rightNil {
+ return match.NewText(r.Str)
+ }
+
+ _, leftSuper := m.Left.(match.Super)
+ lp, leftPrefix := m.Left.(match.Prefix)
+ la, leftAny := m.Left.(match.Any)
+
+ _, rightSuper := m.Right.(match.Super)
+ rs, rightSuffix := m.Right.(match.Suffix)
+ ra, rightAny := m.Right.(match.Any)
+
+ switch {
+ case leftSuper && rightSuper:
+ return match.NewContains(r.Str, false)
+
+ case leftSuper && rightNil:
+ return match.NewSuffix(r.Str)
+
+ case rightSuper && leftNil:
+ return match.NewPrefix(r.Str)
+
+ case leftNil && rightSuffix:
+ return match.NewPrefixSuffix(r.Str, rs.Suffix)
+
+ case rightNil && leftPrefix:
+ return match.NewPrefixSuffix(lp.Prefix, r.Str)
+
+ case rightNil && leftAny:
+ return match.NewSuffixAny(r.Str, la.Separators)
+
+ case leftNil && rightAny:
+ return match.NewPrefixAny(r.Str, ra.Separators)
+ }
+
+ return m
+ }
+
+ return matcher
+}
+
+func compileMatchers(matchers []match.Matcher) (match.Matcher, error) {
+ if len(matchers) == 0 {
+ return nil, fmt.Errorf("compile error: need at least one matcher")
+ }
+ if len(matchers) == 1 {
+ return matchers[0], nil
+ }
+ if m := glueMatchers(matchers); m != nil {
+ return m, nil
+ }
+
+ idx := -1
+ maxLen := -1
+ var val match.Matcher
+ for i, matcher := range matchers {
+ if l := matcher.Len(); l != -1 && l >= maxLen {
+ maxLen = l
+ idx = i
+ val = matcher
+ }
+ }
+
+ if val == nil { // no matcher with a static length was found
+ r, err := compileMatchers(matchers[1:])
+ if err != nil {
+ return nil, err
+ }
+ return match.NewBTree(matchers[0], nil, r), nil
+ }
+
+ left := matchers[:idx]
+ var right []match.Matcher
+ if len(matchers) > idx+1 {
+ right = matchers[idx+1:]
+ }
+
+ var l, r match.Matcher
+ var err error
+ if len(left) > 0 {
+ l, err = compileMatchers(left)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(right) > 0 {
+ r, err = compileMatchers(right)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return match.NewBTree(val, l, r), nil
+}
+
+func glueMatchers(matchers []match.Matcher) match.Matcher {
+ if m := glueMatchersAsEvery(matchers); m != nil {
+ return m
+ }
+ if m := glueMatchersAsRow(matchers); m != nil {
+ return m
+ }
+ return nil
+}
+
+func glueMatchersAsRow(matchers []match.Matcher) match.Matcher {
+ if len(matchers) <= 1 {
+ return nil
+ }
+
+ var (
+ c []match.Matcher
+ l int
+ )
+ for _, matcher := range matchers {
+ if ml := matcher.Len(); ml == -1 {
+ return nil
+ } else {
+ c = append(c, matcher)
+ l += ml
+ }
+ }
+ return match.NewRow(l, c...)
+}
+
+func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher {
+ if len(matchers) <= 1 {
+ return nil
+ }
+
+ var (
+ hasAny bool
+ hasSuper bool
+ hasSingle bool
+ min int
+ separator []rune
+ )
+
+ for i, matcher := range matchers {
+ var sep []rune
+
+ switch m := matcher.(type) {
+ case match.Super:
+ sep = []rune{}
+ hasSuper = true
+
+ case match.Any:
+ sep = m.Separators
+ hasAny = true
+
+ case match.Single:
+ sep = m.Separators
+ hasSingle = true
+ min++
+
+ case match.List:
+ if !m.Not {
+ return nil
+ }
+ sep = m.List
+ hasSingle = true
+ min++
+
+ default:
+ return nil
+ }
+
+ // initialize
+ if i == 0 {
+ separator = sep
+ }
+
+ if runes.Equal(sep, separator) {
+ continue
+ }
+
+ return nil
+ }
+
+ if hasSuper && !hasAny && !hasSingle {
+ return match.NewSuper()
+ }
+
+ if hasAny && !hasSuper && !hasSingle {
+ return match.NewAny(separator)
+ }
+
+ if (hasAny || hasSuper) && min > 0 && len(separator) == 0 {
+ return match.NewMin(min)
+ }
+
+ every := match.NewEveryOf()
+
+ if min > 0 {
+ every.Add(match.NewMin(min))
+
+ if !hasAny && !hasSuper {
+ every.Add(match.NewMax(min))
+ }
+ }
+
+ if len(separator) > 0 {
+ every.Add(match.NewContains(string(separator), true))
+ }
+
+ return every
+}
+
+func minimizeMatchers(matchers []match.Matcher) []match.Matcher {
+ var done match.Matcher
+ var left, right, count int
+
+ for l := 0; l < len(matchers); l++ {
+ for r := len(matchers); r > l; r-- {
+ if glued := glueMatchers(matchers[l:r]); glued != nil {
+ var swap bool
+
+ if done == nil {
+ swap = true
+ } else {
+ cl, gl := done.Len(), glued.Len()
+ swap = cl > -1 && gl > -1 && gl > cl
+ swap = swap || count < r-l
+ }
+
+ if swap {
+ done = glued
+ left = l
+ right = r
+ count = r - l
+ }
+ }
+ }
+ }
+
+ if done == nil {
+ return matchers
+ }
+
+ next := append(append([]match.Matcher{}, matchers[:left]...), done)
+ if right < len(matchers) {
+ next = append(next, matchers[right:]...)
+ }
+
+ if len(next) == len(matchers) {
+ return next
+ }
+
+ return minimizeMatchers(next)
+}
+
+// minimizeTree tries to apply some heuristics to minimize the number of nodes in the given tree
+func minimizeTree(tree *ast.Node) *ast.Node {
+ switch tree.Kind {
+ case ast.KindAnyOf:
+ return minimizeTreeAnyOf(tree)
+ default:
+ return nil
+ }
+}
+
+// minimizeTreeAnyOf tries to find common children of the given AnyOf node.
+// It searches for common children from the left and from the right;
+// if any common children are found, it returns a new, optimized AST tree,
+// otherwise it returns nil.
+func minimizeTreeAnyOf(tree *ast.Node) *ast.Node {
+ if !areOfSameKind(tree.Children, ast.KindPattern) {
+ return nil
+ }
+
+ commonLeft, commonRight := commonChildren(tree.Children)
+ commonLeftCount, commonRightCount := len(commonLeft), len(commonRight)
+ if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts
+ return nil
+ }
+
+ var result []*ast.Node
+ if commonLeftCount > 0 {
+ result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...))
+ }
+
+ var anyOf []*ast.Node
+ for _, child := range tree.Children {
+ reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount]
+ var node *ast.Node
+ if len(reuse) == 0 {
+ // this pattern is completely reduced by the commonLeft and commonRight patterns,
+ // so it becomes nothing
+ node = ast.NewNode(ast.KindNothing, nil)
+ } else {
+ node = ast.NewNode(ast.KindPattern, nil, reuse...)
+ }
+ anyOf = appendIfUnique(anyOf, node)
+ }
+ switch {
+ case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing:
+ result = append(result, anyOf[0])
+ case len(anyOf) > 1:
+ result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...))
+ }
+
+ if commonRightCount > 0 {
+ result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...))
+ }
+
+ return ast.NewNode(ast.KindPattern, nil, result...)
+}
+
+func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) {
+ if len(nodes) <= 1 {
+ return
+ }
+
+ // find the node that has the least number of children
+ idx := leastChildren(nodes)
+ if idx == -1 {
+ return
+ }
+ tree := nodes[idx]
+ treeLength := len(tree.Children)
+
+ // Allocate the maximum possible size for the commonRight slice, so that
+ // elements can be inserted in reverse order (from end to start)
+ // without sorting.
+ commonRight = make([]*ast.Node, treeLength)
+ lastRight := treeLength // will use this to get results as commonRight[lastRight:]
+
+ var (
+ breakLeft bool
+ breakRight bool
+ commonTotal int
+ )
+ for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 {
+ treeLeft := tree.Children[i]
+ treeRight := tree.Children[j]
+
+ for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ {
+ // skip least children node
+ if k == idx {
+ continue
+ }
+
+ restLeft := nodes[k].Children[i]
+ restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength]
+
+ breakLeft = breakLeft || !treeLeft.Equal(restLeft)
+
+ // disable searching for right common parts, if left part is already overlapping
+ breakRight = breakRight || (!breakLeft && j <= i)
+ breakRight = breakRight || !treeRight.Equal(restRight)
+ }
+
+ if !breakLeft {
+ commonTotal++
+ commonLeft = append(commonLeft, treeLeft)
+ }
+ if !breakRight {
+ commonTotal++
+ lastRight = j
+ commonRight[j] = treeRight
+ }
+ }
+
+ commonRight = commonRight[lastRight:]
+
+ return
+}
+
+func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node {
+ for _, n := range target {
+ if reflect.DeepEqual(n, val) {
+ return target
+ }
+ }
+ return append(target, val)
+}
+
+func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool {
+ for _, n := range nodes {
+ if n.Kind != kind {
+ return false
+ }
+ }
+ return true
+}
+
+func leastChildren(nodes []*ast.Node) int {
+ min := -1
+ idx := -1
+ for i, n := range nodes {
+ if idx == -1 || (len(n.Children) < min) {
+ min = len(n.Children)
+ idx = i
+ }
+ }
+ return idx
+}
+
+func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) {
+ var matchers []match.Matcher
+ for _, desc := range tree.Children {
+ m, err := compile(desc, sep)
+ if err != nil {
+ return nil, err
+ }
+ matchers = append(matchers, optimizeMatcher(m))
+ }
+ return matchers, nil
+}
+
+func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) {
+ switch tree.Kind {
+ case ast.KindAnyOf:
+ // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go)
+ if n := minimizeTree(tree); n != nil {
+ return compile(n, sep)
+ }
+ matchers, err := compileTreeChildren(tree, sep)
+ if err != nil {
+ return nil, err
+ }
+ return match.NewAnyOf(matchers...), nil
+
+ case ast.KindPattern:
+ if len(tree.Children) == 0 {
+ return match.NewNothing(), nil
+ }
+ matchers, err := compileTreeChildren(tree, sep)
+ if err != nil {
+ return nil, err
+ }
+ m, err = compileMatchers(minimizeMatchers(matchers))
+ if err != nil {
+ return nil, err
+ }
+
+ case ast.KindAny:
+ m = match.NewAny(sep)
+
+ case ast.KindSuper:
+ m = match.NewSuper()
+
+ case ast.KindSingle:
+ m = match.NewSingle(sep)
+
+ case ast.KindNothing:
+ m = match.NewNothing()
+
+ case ast.KindList:
+ l := tree.Value.(ast.List)
+ m = match.NewList([]rune(l.Chars), l.Not)
+
+ case ast.KindRange:
+ r := tree.Value.(ast.Range)
+ m = match.NewRange(r.Lo, r.Hi, r.Not)
+
+ case ast.KindText:
+ t := tree.Value.(ast.Text)
+ m = match.NewText(t.Text)
+
+ default:
+ return nil, fmt.Errorf("could not compile tree: unknown node type")
+ }
+
+ return optimizeMatcher(m), nil
+}
+
+func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) {
+ m, err := compile(tree, sep)
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
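
Most of this compiler is about avoiding the generic `BTree` fallback: `optimizeMatcher` and `minimizeMatchers` rewrite common shapes into specialized matchers. A sketch of the effect (the printed form comes from the matchers' `String` methods):

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// "*.go" with no separators parses as Super + Text; the optimizer
	// collapses that BTree into a single suffix matcher.
	g, err := glob.Compile("*.go")
	if err != nil {
		panic(err)
	}
	fmt.Println(g)                  // <suffix:.go>
	fmt.Println(g.Match("main.go")) // true
}
```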
diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go
new file mode 100644
index 00000000000..2afde343af8
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/glob.go
@@ -0,0 +1,80 @@
+package glob
+
+import (
+ "github.com/gobwas/glob/compiler"
+ "github.com/gobwas/glob/syntax"
+)
+
+// Glob represents compiled glob pattern.
+type Glob interface {
+ Match(string) bool
+}
+
+// Compile creates a Glob for the given pattern, treating any runes passed after the pattern as separators.
+// The pattern syntax is:
+//
+// pattern:
+// { term }
+//
+// term:
+// `*` matches any sequence of non-separator characters
+// `**` matches any sequence of characters
+// `?` matches any single non-separator character
+// `[` [ `!` ] { character-range } `]`
+// character class (must be non-empty)
+// `{` pattern-list `}`
+// pattern alternatives
+// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
+// `\` c matches character c
+//
+// character-range:
+// c matches character c (c != `\\`, `-`, `]`)
+// `\` c matches character c
+// lo `-` hi matches character c for lo <= c <= hi
+//
+// pattern-list:
+// pattern { `,` pattern }
+// comma-separated (without spaces) patterns
+//
+func Compile(pattern string, separators ...rune) (Glob, error) {
+ ast, err := syntax.Parse(pattern)
+ if err != nil {
+ return nil, err
+ }
+
+ matcher, err := compiler.Compile(ast, separators)
+ if err != nil {
+ return nil, err
+ }
+
+ return matcher, nil
+}
+
+// MustCompile is the same as Compile, except that if Compile returns error, this will panic
+func MustCompile(pattern string, separators ...rune) Glob {
+ g, err := Compile(pattern, separators...)
+ if err != nil {
+ panic(err)
+ }
+
+ return g
+}
+
+// QuoteMeta returns a string that quotes all glob pattern meta characters
+// inside the argument text; for example, QuoteMeta(`{foo*}`) returns `\{foo\*\}`.
+func QuoteMeta(s string) string {
+ b := make([]byte, 2*len(s))
+
+ // a byte loop is correct because all meta characters are ASCII
+ j := 0
+ for i := 0; i < len(s); i++ {
+ if syntax.Special(s[i]) {
+ b[j] = '\\'
+ j++
+ }
+ b[j] = s[i]
+ j++
+ }
+
+ return string(b[0:j])
+}
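
Separators are what make `*` and `**` differ: `*` never crosses a separator, while `**` matches across them. A usage sketch of the public API:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// With '/' as a separator, '*' stays within one path segment.
	g := glob.MustCompile("src/*.go", '/')
	fmt.Println(g.Match("src/main.go"))     // true
	fmt.Println(g.Match("src/sub/main.go")) // false

	// '**' matches across separators.
	deep := glob.MustCompile("src/**.go", '/')
	fmt.Println(deep.Match("src/sub/main.go")) // true

	// QuoteMeta escapes metacharacters so a literal pattern matches itself.
	fmt.Println(glob.QuoteMeta("{foo*}")) // \{foo\*\}
}
```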
diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go
new file mode 100644
index 00000000000..514a9a5c450
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/any.go
@@ -0,0 +1,45 @@
+package match
+
+import (
+ "fmt"
+ "github.com/gobwas/glob/util/strings"
+)
+
+type Any struct {
+ Separators []rune
+}
+
+func NewAny(s []rune) Any {
+ return Any{s}
+}
+
+func (self Any) Match(s string) bool {
+ return strings.IndexAnyRunes(s, self.Separators) == -1
+}
+
+func (self Any) Index(s string) (int, []int) {
+ found := strings.IndexAnyRunes(s, self.Separators)
+ switch found {
+ case -1:
+ case 0:
+ return 0, segments0
+ default:
+ s = s[:found]
+ }
+
+ segments := acquireSegments(len(s))
+ for i := range s {
+ segments = append(segments, i)
+ }
+ segments = append(segments, len(s))
+
+ return 0, segments
+}
+
+func (self Any) Len() int {
+ return lenNo
+}
+
+func (self Any) String() string {
+ return fmt.Sprintf("", string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go
new file mode 100644
index 00000000000..8e65356cdc9
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/any_of.go
@@ -0,0 +1,82 @@
+package match
+
+import "fmt"
+
+type AnyOf struct {
+ Matchers Matchers
+}
+
+func NewAnyOf(m ...Matcher) AnyOf {
+ return AnyOf{Matchers(m)}
+}
+
+func (self *AnyOf) Add(m Matcher) error {
+ self.Matchers = append(self.Matchers, m)
+ return nil
+}
+
+func (self AnyOf) Match(s string) bool {
+ for _, m := range self.Matchers {
+ if m.Match(s) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (self AnyOf) Index(s string) (int, []int) {
+ index := -1
+
+ segments := acquireSegments(len(s))
+ for _, m := range self.Matchers {
+ idx, seg := m.Index(s)
+ if idx == -1 {
+ continue
+ }
+
+ if index == -1 || idx < index {
+ index = idx
+ segments = append(segments[:0], seg...)
+ continue
+ }
+
+ if idx > index {
+ continue
+ }
+
+ // here idx == index
+ segments = appendMerge(segments, seg)
+ }
+
+ if index == -1 {
+ releaseSegments(segments)
+ return -1, nil
+ }
+
+ return index, segments
+}
+
+func (self AnyOf) Len() (l int) {
+ l = -1
+ for _, m := range self.Matchers {
+ ml := m.Len()
+ switch {
+ case l == -1:
+ l = ml
+ continue
+
+ case ml == -1:
+ return -1
+
+ case l != ml:
+ return -1
+ }
+ }
+
+ return
+}
+
+func (self AnyOf) String() string {
+ return fmt.Sprintf("", self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go
new file mode 100644
index 00000000000..a8130e93eae
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/btree.go
@@ -0,0 +1,146 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+type BTree struct {
+ Value Matcher
+ Left Matcher
+ Right Matcher
+ ValueLengthRunes int
+ LeftLengthRunes int
+ RightLengthRunes int
+ LengthRunes int
+}
+
+func NewBTree(Value, Left, Right Matcher) (tree BTree) {
+ tree.Value = Value
+ tree.Left = Left
+ tree.Right = Right
+
+ lenOk := true
+ if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 {
+ lenOk = false
+ }
+
+ if Left != nil {
+ if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 {
+ lenOk = false
+ }
+ }
+
+ if Right != nil {
+ if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 {
+ lenOk = false
+ }
+ }
+
+ if lenOk {
+ tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes
+ } else {
+ tree.LengthRunes = -1
+ }
+
+ return tree
+}
+
+func (self BTree) Len() int {
+ return self.LengthRunes
+}
+
+// todo?
+func (self BTree) Index(s string) (int, []int) {
+ return -1, nil
+}
+
+func (self BTree) Match(s string) bool {
+ inputLen := len(s)
+
+ // self.LengthRunes, self.LeftLengthRunes and self.RightLengthRunes hold the length in runes of each part.
+ // Here we manipulate byte lengths for better optimization,
+ // but the checks still work, because the minimum byte length of a 1-rune string is 1 byte.
+ if self.LengthRunes != -1 && self.LengthRunes > inputLen {
+ return false
+ }
+
+ // Try to cut off unnecessary parts
+ // using the known lengths of the left and right parts.
+ var offset, limit int
+ if self.LeftLengthRunes >= 0 {
+ offset = self.LeftLengthRunes
+ }
+ if self.RightLengthRunes >= 0 {
+ limit = inputLen - self.RightLengthRunes
+ } else {
+ limit = inputLen
+ }
+
+ for offset < limit {
+ // search for matching part in substring
+ index, segments := self.Value.Index(s[offset:limit])
+ if index == -1 {
+ releaseSegments(segments)
+ return false
+ }
+
+ l := s[:offset+index]
+ var left bool
+ if self.Left != nil {
+ left = self.Left.Match(l)
+ } else {
+ left = l == ""
+ }
+
+ if left {
+ for i := len(segments) - 1; i >= 0; i-- {
+ length := segments[i]
+
+ var right bool
+ var r string
+ // if there is no string for the right branch
+ if inputLen <= offset+index+length {
+ r = ""
+ } else {
+ r = s[offset+index+length:]
+ }
+
+ if self.Right != nil {
+ right = self.Right.Match(r)
+ } else {
+ right = r == ""
+ }
+
+ if right {
+ releaseSegments(segments)
+ return true
+ }
+ }
+ }
+
+ _, step := utf8.DecodeRuneInString(s[offset+index:])
+ offset += index + step
+
+ releaseSegments(segments)
+ }
+
+ return false
+}
+
+func (self BTree) String() string {
+ const n string = ""
+ var l, r string
+ if self.Left == nil {
+ l = n
+ } else {
+ l = self.Left.String()
+ }
+ if self.Right == nil {
+ r = n
+ } else {
+ r = self.Right.String()
+ }
+
+ return fmt.Sprintf("%s]>", l, self.Value, r)
+}
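
To make the Match loop above concrete: the tree anchors on its fixed-length `Value` matcher, checks `Left` against the text before the anchor, and tries `Right` against each candidate remainder. A small sketch (`match.NewText` is defined alongside these matchers in this package):

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob/match"
)

func main() {
	// Anchor on the fixed "-", then require "a" to the left of the
	// anchor and "b" to the right.
	tree := match.NewBTree(match.NewText("-"), match.NewText("a"), match.NewText("b"))
	fmt.Println(tree.Match("a-b")) // true
	fmt.Println(tree.Match("a-c")) // false: the right side fails
}
```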
diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go
new file mode 100644
index 00000000000..0998e95b0ea
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/contains.go
@@ -0,0 +1,58 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+)
+
+type Contains struct {
+ Needle string
+ Not bool
+}
+
+func NewContains(needle string, not bool) Contains {
+ return Contains{needle, not}
+}
+
+func (self Contains) Match(s string) bool {
+ return strings.Contains(s, self.Needle) != self.Not
+}
+
+func (self Contains) Index(s string) (int, []int) {
+ var offset int
+
+ idx := strings.Index(s, self.Needle)
+
+ if !self.Not {
+ if idx == -1 {
+ return -1, nil
+ }
+
+ offset = idx + len(self.Needle)
+ if len(s) <= offset {
+ return 0, []int{offset}
+ }
+ s = s[offset:]
+ } else if idx != -1 {
+ s = s[:idx]
+ }
+
+ segments := acquireSegments(len(s) + 1)
+ for i := range s {
+ segments = append(segments, offset+i)
+ }
+
+ return 0, append(segments, offset+len(s))
+}
+
+func (self Contains) Len() int {
+ return lenNo
+}
+
+func (self Contains) String() string {
+ var not string
+ if self.Not {
+ not = "!"
+ }
+ return fmt.Sprintf("", not, self.Needle)
+}
diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go
new file mode 100644
index 00000000000..7c968ee368b
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/every_of.go
@@ -0,0 +1,99 @@
+package match
+
+import (
+ "fmt"
+)
+
+type EveryOf struct {
+ Matchers Matchers
+}
+
+func NewEveryOf(m ...Matcher) EveryOf {
+ return EveryOf{Matchers(m)}
+}
+
+func (self *EveryOf) Add(m Matcher) error {
+ self.Matchers = append(self.Matchers, m)
+ return nil
+}
+
+func (self EveryOf) Len() (l int) {
+ for _, m := range self.Matchers {
+ if ml := m.Len(); l > 0 {
+ l += ml
+ } else {
+ return -1
+ }
+ }
+
+ return
+}
+
+func (self EveryOf) Index(s string) (int, []int) {
+ var index int
+ var offset int
+
+ // Allocate segment buffers with capacity len(s),
+ // since that is the maximum number of output segment values.
+ next := acquireSegments(len(s))
+ current := acquireSegments(len(s))
+
+ sub := s
+ for i, m := range self.Matchers {
+ idx, seg := m.Index(sub)
+ if idx == -1 {
+ releaseSegments(next)
+ releaseSegments(current)
+ return -1, nil
+ }
+
+ if i == 0 {
+ // We copy here instead of assigning `current = seg`,
+ // because seg is a slice of a reusable buffer
+ // and could be overwritten in the next iteration.
+ current = append(current, seg...)
+ } else {
+ // clear the next
+ next = next[:0]
+
+ delta := index - (idx + offset)
+ for _, ex := range current {
+ for _, n := range seg {
+ if ex+delta == n {
+ next = append(next, n)
+ }
+ }
+ }
+
+ if len(next) == 0 {
+ releaseSegments(next)
+ releaseSegments(current)
+ return -1, nil
+ }
+
+ current = append(current[:0], next...)
+ }
+
+ index = idx + offset
+ sub = s[index:]
+ offset += idx
+ }
+
+ releaseSegments(next)
+
+ return index, current
+}
+
+func (self EveryOf) Match(s string) bool {
+ for _, m := range self.Matchers {
+ if !m.Match(s) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (self EveryOf) String() string {
+ return fmt.Sprintf("", self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go
new file mode 100644
index 00000000000..7fd763ecd8e
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/list.go
@@ -0,0 +1,49 @@
+package match
+
+import (
+ "fmt"
+ "github.com/gobwas/glob/util/runes"
+ "unicode/utf8"
+)
+
+type List struct {
+ List []rune
+ Not bool
+}
+
+func NewList(list []rune, not bool) List {
+ return List{list, not}
+}
+
+func (self List) Match(s string) bool {
+ r, w := utf8.DecodeRuneInString(s)
+ if len(s) > w {
+ return false
+ }
+
+ inList := runes.IndexRune(self.List, r) != -1
+ return inList == !self.Not
+}
+
+func (self List) Len() int {
+ return lenOne
+}
+
+func (self List) Index(s string) (int, []int) {
+ for i, r := range s {
+ if self.Not == (runes.IndexRune(self.List, r) == -1) {
+ return i, segmentsByRuneLength[utf8.RuneLen(r)]
+ }
+ }
+
+ return -1, nil
+}
+
+func (self List) String() string {
+ var not string
+ if self.Not {
+ not = "!"
+ }
+
+ return fmt.Sprintf("", not, string(self.List))
+}
diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go
new file mode 100644
index 00000000000..f80e007fb83
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/match.go
@@ -0,0 +1,81 @@
+package match
+
+// TODO: common table of rune lengths
+
+import (
+ "fmt"
+ "strings"
+)
+
+const lenOne = 1
+const lenZero = 0
+const lenNo = -1
+
+type Matcher interface {
+ Match(string) bool
+ Index(string) (int, []int)
+ Len() int
+ String() string
+}
+
+type Matchers []Matcher
+
+func (m Matchers) String() string {
+ var s []string
+ for _, matcher := range m {
+ s = append(s, fmt.Sprint(matcher))
+ }
+
+ return fmt.Sprintf("%s", strings.Join(s, ","))
+}
+
+// appendMerge merges and sorts given already SORTED and UNIQUE segments.
+func appendMerge(target, sub []int) []int {
+ lt, ls := len(target), len(sub)
+ out := make([]int, 0, lt+ls)
+
+ for x, y := 0, 0; x < lt || y < ls; {
+ if x >= lt {
+ out = append(out, sub[y:]...)
+ break
+ }
+
+ if y >= ls {
+ out = append(out, target[x:]...)
+ break
+ }
+
+ xValue := target[x]
+ yValue := sub[y]
+
+ switch {
+
+ case xValue == yValue:
+ out = append(out, xValue)
+ x++
+ y++
+
+ case xValue < yValue:
+ out = append(out, xValue)
+ x++
+
+ case yValue < xValue:
+ out = append(out, yValue)
+ y++
+
+ }
+ }
+
+ target = append(target[:0], out...)
+
+ return target
+}
+
+func reverseSegments(input []int) {
+ l := len(input)
+ m := l / 2
+
+ for i := 0; i < m; i++ {
+ input[i], input[l-i-1] = input[l-i-1], input[i]
+ }
+}
diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go
new file mode 100644
index 00000000000..d72f69efff7
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/max.go
@@ -0,0 +1,49 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
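+// Max matches any string of at most Limit runes.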
+type Max struct {
+ Limit int
+}
+
+func NewMax(l int) Max {
+ return Max{l}
+}
+
+func (self Max) Match(s string) bool {
+ var l int
+ for range s {
+ l += 1
+ if l > self.Limit {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (self Max) Index(s string) (int, []int) {
+ segments := acquireSegments(self.Limit + 1)
+ segments = append(segments, 0)
+ var count int
+ for i, r := range s {
+ count++
+ if count > self.Limit {
+ break
+ }
+ segments = append(segments, i+utf8.RuneLen(r))
+ }
+
+ return 0, segments
+}
+
+func (self Max) Len() int {
+ return lenNo
+}
+
+func (self Max) String() string {
+ return fmt.Sprintf("", self.Limit)
+}
diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go
new file mode 100644
index 00000000000..db57ac8eb49
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/min.go
@@ -0,0 +1,57 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
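+// Min matches any string of at least Limit runes.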
+type Min struct {
+ Limit int
+}
+
+func NewMin(l int) Min {
+ return Min{l}
+}
+
+func (self Min) Match(s string) bool {
+ var l int
+ for range s {
+ l += 1
+ if l >= self.Limit {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (self Min) Index(s string) (int, []int) {
+ var count int
+
+ c := len(s) - self.Limit + 1
+ if c <= 0 {
+ return -1, nil
+ }
+
+ segments := acquireSegments(c)
+ for i, r := range s {
+ count++
+ if count >= self.Limit {
+ segments = append(segments, i+utf8.RuneLen(r))
+ }
+ }
+
+ if len(segments) == 0 {
+ return -1, nil
+ }
+
+ return 0, segments
+}
+
+func (self Min) Len() int {
+ return lenNo
+}
+
+func (self Min) String() string {
+ return fmt.Sprintf("", self.Limit)
+}
diff --git a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go
new file mode 100644
index 00000000000..0d4ecd36b80
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/nothing.go
@@ -0,0 +1,27 @@
+package match
+
+import (
+ "fmt"
+)
+
+type Nothing struct{}
+
+func NewNothing() Nothing {
+ return Nothing{}
+}
+
+func (self Nothing) Match(s string) bool {
+ return len(s) == 0
+}
+
+func (self Nothing) Index(s string) (int, []int) {
+ return 0, segments0
+}
+
+func (self Nothing) Len() int {
+ return lenZero
+}
+
+func (self Nothing) String() string {
+ return fmt.Sprintf("")
+}
diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go
new file mode 100644
index 00000000000..a7347250e8d
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/prefix.go
@@ -0,0 +1,50 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+type Prefix struct {
+ Prefix string
+}
+
+func NewPrefix(p string) Prefix {
+ return Prefix{p}
+}
+
+func (self Prefix) Index(s string) (int, []int) {
+ idx := strings.Index(s, self.Prefix)
+ if idx == -1 {
+ return -1, nil
+ }
+
+ length := len(self.Prefix)
+ var sub string
+ if len(s) > idx+length {
+ sub = s[idx+length:]
+ } else {
+ sub = ""
+ }
+
+ segments := acquireSegments(len(sub) + 1)
+ segments = append(segments, length)
+ for i, r := range sub {
+ segments = append(segments, length+i+utf8.RuneLen(r))
+ }
+
+ return idx, segments
+}
+
+func (self Prefix) Len() int {
+ return lenNo
+}
+
+func (self Prefix) Match(s string) bool {
+ return strings.HasPrefix(s, self.Prefix)
+}
+
+func (self Prefix) String() string {
+ return fmt.Sprintf("", self.Prefix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go
new file mode 100644
index 00000000000..8ee58fe1b3c
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/prefix_any.go
@@ -0,0 +1,55 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ sutil "github.com/gobwas/glob/util/strings"
+)
+
+type PrefixAny struct {
+ Prefix string
+ Separators []rune
+}
+
+func NewPrefixAny(s string, sep []rune) PrefixAny {
+ return PrefixAny{s, sep}
+}
+
+func (self PrefixAny) Index(s string) (int, []int) {
+ idx := strings.Index(s, self.Prefix)
+ if idx == -1 {
+ return -1, nil
+ }
+
+ n := len(self.Prefix)
+ sub := s[idx+n:]
+ i := sutil.IndexAnyRunes(sub, self.Separators)
+ if i > -1 {
+ sub = sub[:i]
+ }
+
+ seg := acquireSegments(len(sub) + 1)
+ seg = append(seg, n)
+ for i, r := range sub {
+ seg = append(seg, n+i+utf8.RuneLen(r))
+ }
+
+ return idx, seg
+}
+
+func (self PrefixAny) Len() int {
+ return lenNo
+}
+
+func (self PrefixAny) Match(s string) bool {
+ if !strings.HasPrefix(s, self.Prefix) {
+ return false
+ }
+ return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1
+}
+
+func (self PrefixAny) String() string {
+ return fmt.Sprintf("", self.Prefix, string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go
new file mode 100644
index 00000000000..8208085a199
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/prefix_suffix.go
@@ -0,0 +1,62 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+)
+
+type PrefixSuffix struct {
+ Prefix, Suffix string
+}
+
+func NewPrefixSuffix(p, s string) PrefixSuffix {
+ return PrefixSuffix{p, s}
+}
+
+func (self PrefixSuffix) Index(s string) (int, []int) {
+ prefixIdx := strings.Index(s, self.Prefix)
+ if prefixIdx == -1 {
+ return -1, nil
+ }
+
+ suffixLen := len(self.Suffix)
+ if suffixLen <= 0 {
+ return prefixIdx, []int{len(s) - prefixIdx}
+ }
+
+ if (len(s) - prefixIdx) <= 0 {
+ return -1, nil
+ }
+
+ segments := acquireSegments(len(s) - prefixIdx)
+ for sub := s[prefixIdx:]; ; {
+ suffixIdx := strings.LastIndex(sub, self.Suffix)
+ if suffixIdx == -1 {
+ break
+ }
+
+ segments = append(segments, suffixIdx+suffixLen)
+ sub = sub[:suffixIdx]
+ }
+
+ if len(segments) == 0 {
+ releaseSegments(segments)
+ return -1, nil
+ }
+
+ reverseSegments(segments)
+
+ return prefixIdx, segments
+}
+
+func (self PrefixSuffix) Len() int {
+ return lenNo
+}
+
+func (self PrefixSuffix) Match(s string) bool {
+ return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix)
+}
+
+func (self PrefixSuffix) String() string {
+ return fmt.Sprintf("", self.Prefix, self.Suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go
new file mode 100644
index 00000000000..ce30245a40b
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/range.go
@@ -0,0 +1,48 @@
+package match
+
+import (
+ "fmt"
+ "unicode/utf8"
+)
+
+type Range struct {
+ Lo, Hi rune
+ Not bool
+}
+
+func NewRange(lo, hi rune, not bool) Range {
+ return Range{lo, hi, not}
+}
+
+func (self Range) Len() int {
+ return lenOne
+}
+
+func (self Range) Match(s string) bool {
+ r, w := utf8.DecodeRuneInString(s)
+ if len(s) > w {
+ return false
+ }
+
+ inRange := r >= self.Lo && r <= self.Hi
+
+ return inRange == !self.Not
+}
+
+func (self Range) Index(s string) (int, []int) {
+ for i, r := range s {
+ if self.Not != (r >= self.Lo && r <= self.Hi) {
+ return i, segmentsByRuneLength[utf8.RuneLen(r)]
+ }
+ }
+
+ return -1, nil
+}
+
+func (self Range) String() string {
+ var not string
+ if self.Not {
+ not = "!"
+ }
+ return fmt.Sprintf("", not, string(self.Lo), string(self.Hi))
+}
diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go
new file mode 100644
index 00000000000..4379042e42f
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/row.go
@@ -0,0 +1,77 @@
+package match
+
+import (
+ "fmt"
+)
+
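+// Row is a contiguous sequence of fixed-length matchers whose total
+// length in runes is known up front.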
+type Row struct {
+ Matchers Matchers
+ RunesLength int
+ Segments []int
+}
+
+func NewRow(len int, m ...Matcher) Row {
+ return Row{
+ Matchers: Matchers(m),
+ RunesLength: len,
+ Segments: []int{len},
+ }
+}
+
+func (self Row) matchAll(s string) bool {
+ var idx int
+ for _, m := range self.Matchers {
+ length := m.Len()
+
+ var next, i int
+ for next = range s[idx:] {
+ i++
+ if i == length {
+ break
+ }
+ }
+
+ if i < length || !m.Match(s[idx:idx+next+1]) {
+ return false
+ }
+
+ idx += next + 1
+ }
+
+ return true
+}
+
+func (self Row) lenOk(s string) bool {
+ var i int
+ for range s {
+ i++
+ if i > self.RunesLength {
+ return false
+ }
+ }
+ return self.RunesLength == i
+}
+
+func (self Row) Match(s string) bool {
+ return self.lenOk(s) && self.matchAll(s)
+}
+
+func (self Row) Len() (l int) {
+ return self.RunesLength
+}
+
+func (self Row) Index(s string) (int, []int) {
+ for i := range s {
+ if len(s[i:]) < self.RunesLength {
+ break
+ }
+ if self.matchAll(s[i:]) {
+ return i, self.Segments
+ }
+ }
+ return -1, nil
+}
+
+func (self Row) String() string {
+ return fmt.Sprintf("", self.RunesLength, self.Matchers)
+}
diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go
new file mode 100644
index 00000000000..9ea6f309439
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/segments.go
@@ -0,0 +1,91 @@
+package match
+
+import (
+ "sync"
+)
+
+type SomePool interface {
+ Get() []int
+ Put([]int)
+}
+
+var segmentsPools [1024]sync.Pool
+
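+// toPowerOfTwo rounds v up to the nearest power of two using bit smearing.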
+func toPowerOfTwo(v int) int {
+ v--
+ v |= v >> 1
+ v |= v >> 2
+ v |= v >> 4
+ v |= v >> 8
+ v |= v >> 16
+ v++
+
+ return v
+}
+
+const (
+ cacheFrom = 16
+ cacheToAndHigher = 1024
+ cacheFromIndex = 15
+ cacheToAndHigherIndex = 1023
+)
+
+var (
+ segments0 = []int{0}
+ segments1 = []int{1}
+ segments2 = []int{2}
+ segments3 = []int{3}
+ segments4 = []int{4}
+)
+
+var segmentsByRuneLength [5][]int = [5][]int{
+ 0: segments0,
+ 1: segments1,
+ 2: segments2,
+ 3: segments3,
+ 4: segments4,
+}
+
+func init() {
+ for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 {
+ func(i int) {
+ segmentsPools[i-1] = sync.Pool{New: func() interface{} {
+ return make([]int, 0, i)
+ }}
+ }(i)
+ }
+}
+
+func getTableIndex(c int) int {
+ p := toPowerOfTwo(c)
+ switch {
+ case p >= cacheToAndHigher:
+ return cacheToAndHigherIndex
+ case p <= cacheFrom:
+ return cacheFromIndex
+ default:
+ return p - 1
+ }
+}
+
+func acquireSegments(c int) []int {
+	// allocating a []int with capacity below cacheFrom is cheaper
+	// than acquiring one from the pool
+ if c < cacheFrom {
+ return make([]int, 0, c)
+ }
+
+ return segmentsPools[getTableIndex(c)].Get().([]int)[:0]
+}
+
+func releaseSegments(s []int) {
+ c := cap(s)
+
+	// slices with capacity below cacheFrom are cheaper to reallocate
+	// than to pool, so don't put them back
+ if c < cacheFrom {
+ return
+ }
+
+ segmentsPools[getTableIndex(c)].Put(s)
+}
diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go
new file mode 100644
index 00000000000..ee6e3954c1f
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/single.go
@@ -0,0 +1,43 @@
+package match
+
+import (
+ "fmt"
+ "github.com/gobwas/glob/util/runes"
+ "unicode/utf8"
+)
+
+// Single represents the '?' wildcard: any single rune that is not a separator.
+type Single struct {
+ Separators []rune
+}
+
+func NewSingle(s []rune) Single {
+ return Single{s}
+}
+
+func (self Single) Match(s string) bool {
+ r, w := utf8.DecodeRuneInString(s)
+ if len(s) > w {
+ return false
+ }
+
+ return runes.IndexRune(self.Separators, r) == -1
+}
+
+func (self Single) Len() int {
+ return lenOne
+}
+
+func (self Single) Index(s string) (int, []int) {
+ for i, r := range s {
+ if runes.IndexRune(self.Separators, r) == -1 {
+ return i, segmentsByRuneLength[utf8.RuneLen(r)]
+ }
+ }
+
+ return -1, nil
+}
+
+func (self Single) String() string {
+ return fmt.Sprintf("", string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go
new file mode 100644
index 00000000000..85bea8c68ec
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/suffix.go
@@ -0,0 +1,35 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+)
+
+type Suffix struct {
+ Suffix string
+}
+
+func NewSuffix(s string) Suffix {
+ return Suffix{s}
+}
+
+func (self Suffix) Len() int {
+ return lenNo
+}
+
+func (self Suffix) Match(s string) bool {
+ return strings.HasSuffix(s, self.Suffix)
+}
+
+func (self Suffix) Index(s string) (int, []int) {
+ idx := strings.Index(s, self.Suffix)
+ if idx == -1 {
+ return -1, nil
+ }
+
+ return 0, []int{idx + len(self.Suffix)}
+}
+
+func (self Suffix) String() string {
+ return fmt.Sprintf("", self.Suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go
new file mode 100644
index 00000000000..c5106f8196c
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/suffix_any.go
@@ -0,0 +1,43 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+
+ sutil "github.com/gobwas/glob/util/strings"
+)
+
+type SuffixAny struct {
+ Suffix string
+ Separators []rune
+}
+
+func NewSuffixAny(s string, sep []rune) SuffixAny {
+ return SuffixAny{s, sep}
+}
+
+func (self SuffixAny) Index(s string) (int, []int) {
+ idx := strings.Index(s, self.Suffix)
+ if idx == -1 {
+ return -1, nil
+ }
+
+ i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1
+
+ return i, []int{idx + len(self.Suffix) - i}
+}
+
+func (self SuffixAny) Len() int {
+ return lenNo
+}
+
+func (self SuffixAny) Match(s string) bool {
+ if !strings.HasSuffix(s, self.Suffix) {
+ return false
+ }
+ return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1
+}
+
+func (self SuffixAny) String() string {
+ return fmt.Sprintf("", string(self.Separators), self.Suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go
new file mode 100644
index 00000000000..3875950bb8c
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/super.go
@@ -0,0 +1,33 @@
+package match
+
+import (
+ "fmt"
+)
+
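+// Super represents the ** wildcard: it matches any string, including the
+// empty one, regardless of separators.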
+type Super struct{}
+
+func NewSuper() Super {
+ return Super{}
+}
+
+func (self Super) Match(s string) bool {
+ return true
+}
+
+func (self Super) Len() int {
+ return lenNo
+}
+
+func (self Super) Index(s string) (int, []int) {
+ segments := acquireSegments(len(s) + 1)
+ for i := range s {
+ segments = append(segments, i)
+ }
+ segments = append(segments, len(s))
+
+ return 0, segments
+}
+
+func (self Super) String() string {
+ return fmt.Sprintf("")
+}
diff --git a/vendor/github.com/gobwas/glob/match/text.go b/vendor/github.com/gobwas/glob/match/text.go
new file mode 100644
index 00000000000..0a17616d3cb
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/text.go
@@ -0,0 +1,45 @@
+package match
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+)
+
+// Text represents a literal string to match.
+type Text struct {
+ Str string
+ RunesLength int
+ BytesLength int
+ Segments []int
+}
+
+func NewText(s string) Text {
+ return Text{
+ Str: s,
+ RunesLength: utf8.RuneCountInString(s),
+ BytesLength: len(s),
+ Segments: []int{len(s)},
+ }
+}
+
+func (self Text) Match(s string) bool {
+ return self.Str == s
+}
+
+func (self Text) Len() int {
+ return self.RunesLength
+}
+
+func (self Text) Index(s string) (int, []int) {
+ index := strings.Index(s, self.Str)
+ if index == -1 {
+ return -1, nil
+ }
+
+ return index, self.Segments
+}
+
+func (self Text) String() string {
+ return fmt.Sprintf("", self.Str)
+}
diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md
new file mode 100644
index 00000000000..f58144e733e
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/readme.md
@@ -0,0 +1,148 @@
+# glob.[go](https://golang.org)
+
+[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url]
+
+> Go Globbing Library.
+
+## Install
+
+```shell
+ go get github.com/gobwas/glob
+```
+
+## Example
+
+```go
+
+package main
+
+import "github.com/gobwas/glob"
+
+func main() {
+ var g glob.Glob
+
+ // create simple glob
+ g = glob.MustCompile("*.github.com")
+ g.Match("api.github.com") // true
+
+ // quote meta characters and then create simple glob
+ g = glob.MustCompile(glob.QuoteMeta("*.github.com"))
+ g.Match("*.github.com") // true
+
+ // create new glob with set of delimiters as ["."]
+ g = glob.MustCompile("api.*.com", '.')
+ g.Match("api.github.com") // true
+ g.Match("api.gi.hub.com") // false
+
+ // create new glob with set of delimiters as ["."]
+ // but now with super wildcard
+ g = glob.MustCompile("api.**.com", '.')
+ g.Match("api.github.com") // true
+ g.Match("api.gi.hub.com") // true
+
+ // create glob with single symbol wildcard
+ g = glob.MustCompile("?at")
+ g.Match("cat") // true
+ g.Match("fat") // true
+ g.Match("at") // false
+
+ // create glob with single symbol wildcard and delimiters ['f']
+ g = glob.MustCompile("?at", 'f')
+ g.Match("cat") // true
+ g.Match("fat") // false
+ g.Match("at") // false
+
+ // create glob with character-list matchers
+ g = glob.MustCompile("[abc]at")
+ g.Match("cat") // true
+ g.Match("bat") // true
+ g.Match("fat") // false
+ g.Match("at") // false
+
+ // create glob with character-list matchers
+ g = glob.MustCompile("[!abc]at")
+ g.Match("cat") // false
+ g.Match("bat") // false
+ g.Match("fat") // true
+ g.Match("at") // false
+
+ // create glob with character-range matchers
+ g = glob.MustCompile("[a-c]at")
+ g.Match("cat") // true
+ g.Match("bat") // true
+ g.Match("fat") // false
+ g.Match("at") // false
+
+ // create glob with character-range matchers
+ g = glob.MustCompile("[!a-c]at")
+ g.Match("cat") // false
+ g.Match("bat") // false
+ g.Match("fat") // true
+ g.Match("at") // false
+
+ // create glob with pattern-alternatives list
+ g = glob.MustCompile("{cat,bat,[fr]at}")
+ g.Match("cat") // true
+ g.Match("bat") // true
+ g.Match("fat") // true
+ g.Match("rat") // true
+ g.Match("at") // false
+ g.Match("zat") // false
+}
+
+```
+
+## Performance
+
+This library is designed for compile-once patterns: compilation may take some
+time, but matching against a compiled pattern is much faster than re-parsing
+the pattern on every call.
+
+If you do not reuse the compiled `glob.Glob` object and instead run `g := glob.MustCompile(pattern); g.Match(...)` every time, your code will be much slower.
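+
+For example, a minimal sketch of the intended usage (the helper name is
+illustrative) — compile the pattern once, then reuse it on the hot path:
+
+```go
+package main
+
+import "github.com/gobwas/glob"
+
+// compiled once, reused for every call
+var g = glob.MustCompile("*.github.com")
+
+func isGithubHost(host string) bool {
+	return g.Match(host)
+}
+```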
+
+Run `go test -bench=.` from source root to see the benchmarks:
+
+Pattern | Fixture | Match | Speed (ns/op)
+--------|---------|-------|--------------
+`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432
+`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199
+`https://*.google.*` | `https://account.google.com` | `true` | 96
+`https://*.google.*` | `https://google.com` | `false` | 66
+`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163
+`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197
+`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22
+`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24
+`abc*` | `abcdef` | `true` | 8.15
+`abc*` | `af` | `false` | 5.68
+`*def` | `abcdef` | `true` | 8.84
+`*def` | `af` | `false` | 5.74
+`ab*ef` | `abcdef` | `true` | 15.2
+`ab*ef` | `af` | `false` | 10.4
+
+The same things with `regexp` package:
+
+Pattern | Fixture | Match | Speed (ns/op)
+--------|---------|-------|--------------
+`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553
+`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383
+`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205
+`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767
+`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435
+`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674
+`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039
+`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272
+`^abc.*$` | `abcdef` | `true` | 237
+`^abc.*$` | `af` | `false` | 100
+`^.*def$` | `abcdef` | `true` | 464
+`^.*def$` | `af` | `false` | 265
+`^ab.*ef$` | `abcdef` | `true` | 375
+`^ab.*ef$` | `af` | `false` | 145
+
+[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg
+[godoc-url]: https://godoc.org/github.com/gobwas/glob
+[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master
+[travis-url]: https://travis-ci.org/gobwas/glob
+
+## Syntax
+
+Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm),
+except that `**` (the "super-asterisk") also matches across separators.
\ No newline at end of file
diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go
new file mode 100644
index 00000000000..3220a694a9c
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/ast/ast.go
@@ -0,0 +1,122 @@
+package ast
+
+import (
+ "bytes"
+ "fmt"
+)
+
+type Node struct {
+ Parent *Node
+ Children []*Node
+ Value interface{}
+ Kind Kind
+}
+
+func NewNode(k Kind, v interface{}, ch ...*Node) *Node {
+ n := &Node{
+ Kind: k,
+ Value: v,
+ }
+ for _, c := range ch {
+ Insert(n, c)
+ }
+ return n
+}
+
+func (a *Node) Equal(b *Node) bool {
+ if a.Kind != b.Kind {
+ return false
+ }
+ if a.Value != b.Value {
+ return false
+ }
+ if len(a.Children) != len(b.Children) {
+ return false
+ }
+ for i, c := range a.Children {
+ if !c.Equal(b.Children[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *Node) String() string {
+ var buf bytes.Buffer
+ buf.WriteString(a.Kind.String())
+ if a.Value != nil {
+ buf.WriteString(" =")
+ buf.WriteString(fmt.Sprintf("%v", a.Value))
+ }
+ if len(a.Children) > 0 {
+ buf.WriteString(" [")
+ for i, c := range a.Children {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(c.String())
+ }
+ buf.WriteString("]")
+ }
+ return buf.String()
+}
+
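+// Insert appends the children to parent and points each child back at parent.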
+func Insert(parent *Node, children ...*Node) {
+ parent.Children = append(parent.Children, children...)
+ for _, ch := range children {
+ ch.Parent = parent
+ }
+}
+
+type List struct {
+ Not bool
+ Chars string
+}
+
+type Range struct {
+ Not bool
+ Lo, Hi rune
+}
+
+type Text struct {
+ Text string
+}
+
+type Kind int
+
+const (
+ KindNothing Kind = iota
+ KindPattern
+ KindList
+ KindRange
+ KindText
+ KindAny
+ KindSuper
+ KindSingle
+ KindAnyOf
+)
+
+func (k Kind) String() string {
+ switch k {
+ case KindNothing:
+ return "Nothing"
+ case KindPattern:
+ return "Pattern"
+ case KindList:
+ return "List"
+ case KindRange:
+ return "Range"
+ case KindText:
+ return "Text"
+ case KindAny:
+ return "Any"
+ case KindSuper:
+ return "Super"
+ case KindSingle:
+ return "Single"
+ case KindAnyOf:
+ return "AnyOf"
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go
new file mode 100644
index 00000000000..429b4094303
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go
@@ -0,0 +1,157 @@
+package ast
+
+import (
+ "errors"
+ "fmt"
+ "github.com/gobwas/glob/syntax/lexer"
+ "unicode/utf8"
+)
+
+type Lexer interface {
+ Next() lexer.Token
+}
+
+type parseFn func(*Node, Lexer) (parseFn, *Node, error)
+
+func Parse(lexer Lexer) (*Node, error) {
+ var parser parseFn
+
+ root := NewNode(KindPattern, nil)
+
+ var (
+ tree *Node
+ err error
+ )
+ for parser, tree = parserMain, root; parser != nil; {
+ parser, tree, err = parser(tree, lexer)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return root, nil
+}
+
+func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) {
+ for {
+ token := lex.Next()
+ switch token.Type {
+ case lexer.EOF:
+ return nil, tree, nil
+
+ case lexer.Error:
+ return nil, tree, errors.New(token.Raw)
+
+ case lexer.Text:
+ Insert(tree, NewNode(KindText, Text{token.Raw}))
+ return parserMain, tree, nil
+
+ case lexer.Any:
+ Insert(tree, NewNode(KindAny, nil))
+ return parserMain, tree, nil
+
+ case lexer.Super:
+ Insert(tree, NewNode(KindSuper, nil))
+ return parserMain, tree, nil
+
+ case lexer.Single:
+ Insert(tree, NewNode(KindSingle, nil))
+ return parserMain, tree, nil
+
+ case lexer.RangeOpen:
+ return parserRange, tree, nil
+
+ case lexer.TermsOpen:
+ a := NewNode(KindAnyOf, nil)
+ Insert(tree, a)
+
+ p := NewNode(KindPattern, nil)
+ Insert(a, p)
+
+ return parserMain, p, nil
+
+ case lexer.Separator:
+ p := NewNode(KindPattern, nil)
+ Insert(tree.Parent, p)
+
+ return parserMain, p, nil
+
+ case lexer.TermsClose:
+ return parserMain, tree.Parent.Parent, nil
+
+ default:
+ return nil, tree, fmt.Errorf("unexpected token: %s", token)
+ }
+ }
+ return nil, tree, fmt.Errorf("unknown error")
+}
+
+func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) {
+ var (
+ not bool
+ lo rune
+ hi rune
+ chars string
+ )
+ for {
+ token := lex.Next()
+ switch token.Type {
+ case lexer.EOF:
+ return nil, tree, errors.New("unexpected end")
+
+ case lexer.Error:
+ return nil, tree, errors.New(token.Raw)
+
+ case lexer.Not:
+ not = true
+
+ case lexer.RangeLo:
+ r, w := utf8.DecodeRuneInString(token.Raw)
+ if len(token.Raw) > w {
+ return nil, tree, fmt.Errorf("unexpected length of lo character")
+ }
+ lo = r
+
+ case lexer.RangeBetween:
+ //
+
+ case lexer.RangeHi:
+ r, w := utf8.DecodeRuneInString(token.Raw)
+ if len(token.Raw) > w {
+ return nil, tree, fmt.Errorf("unexpected length of lo character")
+ }
+
+ hi = r
+
+ if hi < lo {
+ return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo))
+ }
+
+ case lexer.Text:
+ chars = token.Raw
+
+ case lexer.RangeClose:
+ isRange := lo != 0 && hi != 0
+ isChars := chars != ""
+
+ if isChars == isRange {
+ return nil, tree, fmt.Errorf("could not parse range")
+ }
+
+ if isRange {
+ Insert(tree, NewNode(KindRange, Range{
+ Lo: lo,
+ Hi: hi,
+ Not: not,
+ }))
+ } else {
+ Insert(tree, NewNode(KindList, List{
+ Chars: chars,
+ Not: not,
+ }))
+ }
+
+ return parserMain, tree, nil
+ }
+ }
+}
diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
new file mode 100644
index 00000000000..a1c8d1962a0
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go
@@ -0,0 +1,273 @@
+package lexer
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/gobwas/glob/util/runes"
+ "unicode/utf8"
+)
+
+const (
+ char_any = '*'
+ char_comma = ','
+ char_single = '?'
+ char_escape = '\\'
+ char_range_open = '['
+ char_range_close = ']'
+ char_terms_open = '{'
+ char_terms_close = '}'
+ char_range_not = '!'
+ char_range_between = '-'
+)
+
+var specials = []byte{
+ char_any,
+ char_single,
+ char_escape,
+ char_range_open,
+ char_range_close,
+ char_terms_open,
+ char_terms_close,
+}
+
+func Special(c byte) bool {
+ return bytes.IndexByte(specials, c) != -1
+}
+
+type tokens []Token
+
+func (i *tokens) shift() (ret Token) {
+ ret = (*i)[0]
+ copy(*i, (*i)[1:])
+ *i = (*i)[:len(*i)-1]
+ return
+}
+
+func (i *tokens) push(v Token) {
+ *i = append(*i, v)
+}
+
+func (i *tokens) empty() bool {
+ return len(*i) == 0
+}
+
+var eof rune = 0
+
+type lexer struct {
+ data string
+ pos int
+ err error
+
+ tokens tokens
+ termsLevel int
+
+ lastRune rune
+ lastRuneSize int
+ hasRune bool
+}
+
+func NewLexer(source string) *lexer {
+ l := &lexer{
+ data: source,
+ tokens: tokens(make([]Token, 0, 4)),
+ }
+ return l
+}
+
+func (l *lexer) Next() Token {
+ if l.err != nil {
+ return Token{Error, l.err.Error()}
+ }
+ if !l.tokens.empty() {
+ return l.tokens.shift()
+ }
+
+ l.fetchItem()
+ return l.Next()
+}
+
+func (l *lexer) peek() (r rune, w int) {
+ if l.pos == len(l.data) {
+ return eof, 0
+ }
+
+ r, w = utf8.DecodeRuneInString(l.data[l.pos:])
+ if r == utf8.RuneError {
+ l.errorf("could not read rune")
+ r = eof
+ w = 0
+ }
+
+ return
+}
+
+func (l *lexer) read() rune {
+ if l.hasRune {
+ l.hasRune = false
+ l.seek(l.lastRuneSize)
+ return l.lastRune
+ }
+
+ r, s := l.peek()
+ l.seek(s)
+
+ l.lastRune = r
+ l.lastRuneSize = s
+
+ return r
+}
+
+func (l *lexer) seek(w int) {
+ l.pos += w
+}
+
+func (l *lexer) unread() {
+ if l.hasRune {
+ l.errorf("could not unread rune")
+ return
+ }
+ l.seek(-l.lastRuneSize)
+ l.hasRune = true
+}
+
+func (l *lexer) errorf(f string, v ...interface{}) {
+ l.err = fmt.Errorf(f, v...)
+}
+
+func (l *lexer) inTerms() bool {
+ return l.termsLevel > 0
+}
+
+func (l *lexer) termsEnter() {
+ l.termsLevel++
+}
+
+func (l *lexer) termsLeave() {
+ l.termsLevel--
+}
+
+var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open}
+var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma)
+
+func (l *lexer) fetchItem() {
+ r := l.read()
+ switch {
+ case r == eof:
+ l.tokens.push(Token{EOF, ""})
+
+ case r == char_terms_open:
+ l.termsEnter()
+ l.tokens.push(Token{TermsOpen, string(r)})
+
+ case r == char_comma && l.inTerms():
+ l.tokens.push(Token{Separator, string(r)})
+
+ case r == char_terms_close && l.inTerms():
+ l.tokens.push(Token{TermsClose, string(r)})
+ l.termsLeave()
+
+ case r == char_range_open:
+ l.tokens.push(Token{RangeOpen, string(r)})
+ l.fetchRange()
+
+ case r == char_single:
+ l.tokens.push(Token{Single, string(r)})
+
+ case r == char_any:
+ if l.read() == char_any {
+ l.tokens.push(Token{Super, string(r) + string(r)})
+ } else {
+ l.unread()
+ l.tokens.push(Token{Any, string(r)})
+ }
+
+ default:
+ l.unread()
+
+ var breakers []rune
+ if l.inTerms() {
+ breakers = inTermsBreakers
+ } else {
+ breakers = inTextBreakers
+ }
+ l.fetchText(breakers)
+ }
+}
+
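+// fetchRange lexes the body of a [...] group: an optional leading '!',
+// then either a lo-hi range (e.g. a-z) or a plain character list, and
+// finally the closing bracket.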
+func (l *lexer) fetchRange() {
+ var wantHi bool
+ var wantClose bool
+ var seenNot bool
+ for {
+ r := l.read()
+ if r == eof {
+ l.errorf("unexpected end of input")
+ return
+ }
+
+ if wantClose {
+ if r != char_range_close {
+ l.errorf("expected close range character")
+ } else {
+ l.tokens.push(Token{RangeClose, string(r)})
+ }
+ return
+ }
+
+ if wantHi {
+ l.tokens.push(Token{RangeHi, string(r)})
+ wantClose = true
+ continue
+ }
+
+ if !seenNot && r == char_range_not {
+ l.tokens.push(Token{Not, string(r)})
+ seenNot = true
+ continue
+ }
+
+ if n, w := l.peek(); n == char_range_between {
+ l.seek(w)
+ l.tokens.push(Token{RangeLo, string(r)})
+ l.tokens.push(Token{RangeBetween, string(n)})
+ wantHi = true
+ continue
+ }
+
+ l.unread() // unread first peek and fetch as text
+ l.fetchText([]rune{char_range_close})
+ wantClose = true
+ }
+}
+
+func (l *lexer) fetchText(breakers []rune) {
+ var data []rune
+ var escaped bool
+
+reading:
+ for {
+ r := l.read()
+ if r == eof {
+ break
+ }
+
+ if !escaped {
+ if r == char_escape {
+ escaped = true
+ continue
+ }
+
+ if runes.IndexRune(breakers, r) != -1 {
+ l.unread()
+ break reading
+ }
+ }
+
+ escaped = false
+ data = append(data, r)
+ }
+
+ if len(data) > 0 {
+ l.tokens.push(Token{Text, string(data)})
+ }
+}
diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go
new file mode 100644
index 00000000000..2797c4e83a4
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/lexer/token.go
@@ -0,0 +1,88 @@
+package lexer
+
+import "fmt"
+
+type TokenType int
+
+const (
+ EOF TokenType = iota
+ Error
+ Text
+ Char
+ Any
+ Super
+ Single
+ Not
+ Separator
+ RangeOpen
+ RangeClose
+ RangeLo
+ RangeHi
+ RangeBetween
+ TermsOpen
+ TermsClose
+)
+
+func (tt TokenType) String() string {
+ switch tt {
+ case EOF:
+ return "eof"
+
+ case Error:
+ return "error"
+
+ case Text:
+ return "text"
+
+ case Char:
+ return "char"
+
+ case Any:
+ return "any"
+
+ case Super:
+ return "super"
+
+ case Single:
+ return "single"
+
+ case Not:
+ return "not"
+
+ case Separator:
+ return "separator"
+
+ case RangeOpen:
+ return "range_open"
+
+ case RangeClose:
+ return "range_close"
+
+ case RangeLo:
+ return "range_lo"
+
+ case RangeHi:
+ return "range_hi"
+
+ case RangeBetween:
+ return "range_between"
+
+ case TermsOpen:
+ return "terms_open"
+
+ case TermsClose:
+ return "terms_close"
+
+ default:
+ return "undef"
+ }
+}
+
+type Token struct {
+ Type TokenType
+ Raw string
+}
+
+func (t Token) String() string {
+ return fmt.Sprintf("%v<%q>", t.Type, t.Raw)
+}
diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go
new file mode 100644
index 00000000000..1d168b14829
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/syntax/syntax.go
@@ -0,0 +1,14 @@
+package syntax
+
+import (
+ "github.com/gobwas/glob/syntax/ast"
+ "github.com/gobwas/glob/syntax/lexer"
+)
+
+func Parse(s string) (*ast.Node, error) {
+ return ast.Parse(lexer.NewLexer(s))
+}
+
+func Special(b byte) bool {
+ return lexer.Special(b)
+}
diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go
new file mode 100644
index 00000000000..a7235564107
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/util/runes/runes.go
@@ -0,0 +1,154 @@
+package runes
+
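+// Index returns the index of the first occurrence of needle in s,
+// or -1 if needle is not present.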
+func Index(s, needle []rune) int {
+ ls, ln := len(s), len(needle)
+
+ switch {
+ case ln == 0:
+ return 0
+ case ln == 1:
+ return IndexRune(s, needle[0])
+ case ln == ls:
+ if Equal(s, needle) {
+ return 0
+ }
+ return -1
+ case ln > ls:
+ return -1
+ }
+
+head:
+ for i := 0; i < ls && ls-i >= ln; i++ {
+ for y := 0; y < ln; y++ {
+ if s[i+y] != needle[y] {
+ continue head
+ }
+ }
+
+ return i
+ }
+
+ return -1
+}
+
+func LastIndex(s, needle []rune) int {
+ ls, ln := len(s), len(needle)
+
+ switch {
+ case ln == 0:
+ if ls == 0 {
+ return 0
+ }
+ return ls
+ case ln == 1:
+ return IndexLastRune(s, needle[0])
+ case ln == ls:
+ if Equal(s, needle) {
+ return 0
+ }
+ return -1
+ case ln > ls:
+ return -1
+ }
+
+head:
+ for i := ls - 1; i >= 0 && i >= ln; i-- {
+ for y := ln - 1; y >= 0; y-- {
+ if s[i-(ln-y-1)] != needle[y] {
+ continue head
+ }
+ }
+
+ return i - ln + 1
+ }
+
+ return -1
+}
+
+// IndexAny returns the index of the first instance of any Unicode code point
+// from chars in s, or -1 if no Unicode code point from chars is present in s.
+func IndexAny(s, chars []rune) int {
+ if len(chars) > 0 {
+ for i, c := range s {
+ for _, m := range chars {
+ if c == m {
+ return i
+ }
+ }
+ }
+ }
+ return -1
+}
+
+func Contains(s, needle []rune) bool {
+ return Index(s, needle) >= 0
+}
+
+func Max(s []rune) (max rune) {
+ for _, r := range s {
+ if r > max {
+ max = r
+ }
+ }
+
+ return
+}
+
+func Min(s []rune) rune {
+ min := rune(-1)
+ for _, r := range s {
+ if min == -1 {
+ min = r
+ continue
+ }
+
+ if r < min {
+ min = r
+ }
+ }
+
+ return min
+}
+
+func IndexRune(s []rune, r rune) int {
+ for i, c := range s {
+ if c == r {
+ return i
+ }
+ }
+ return -1
+}
+
+func IndexLastRune(s []rune, r rune) int {
+ for i := len(s) - 1; i >= 0; i-- {
+ if s[i] == r {
+ return i
+ }
+ }
+
+ return -1
+}
+
+func Equal(a, b []rune) bool {
+ if len(a) == len(b) {
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+
+ return true
+ }
+
+ return false
+}
+
+// HasPrefix tests whether the string s begins with prefix.
+func HasPrefix(s, prefix []rune) bool {
+ return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix)
+}
+
+// HasSuffix tests whether the string s ends with suffix.
+func HasSuffix(s, suffix []rune) bool {
+ return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix)
+}
diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go
new file mode 100644
index 00000000000..e8ee1920b17
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/util/strings/strings.go
@@ -0,0 +1,39 @@
+package strings
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+func IndexAnyRunes(s string, rs []rune) int {
+ for _, r := range rs {
+ if i := strings.IndexRune(s, r); i != -1 {
+ return i
+ }
+ }
+
+ return -1
+}
+
+func LastIndexAnyRunes(s string, rs []rune) int {
+ for _, r := range rs {
+ i := -1
+ if 0 <= r && r < utf8.RuneSelf {
+ i = strings.LastIndexByte(s, byte(r))
+ } else {
+			sub := s
+			offset := 0
+			for len(sub) > 0 {
+				// search the remaining suffix, remembering the last hit in s
+				j := strings.IndexRune(sub, r)
+				if j == -1 {
+					break
+				}
+				i = offset + j
+				offset += j + 1
+				sub = sub[j+1:]
+			}
+ }
+ if i != -1 {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/vendor/github.com/iancoleman/strcase/.travis.yml b/vendor/github.com/iancoleman/strcase/.travis.yml
new file mode 100644
index 00000000000..79ebc5693be
--- /dev/null
+++ b/vendor/github.com/iancoleman/strcase/.travis.yml
@@ -0,0 +1,10 @@
+sudo: false
+language: go
+go:
+ - 1.10.x
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - 1.14.x
+ - 1.15.x
+ - master
diff --git a/vendor/github.com/iancoleman/strcase/LICENSE b/vendor/github.com/iancoleman/strcase/LICENSE
new file mode 100644
index 00000000000..3e87ff70e77
--- /dev/null
+++ b/vendor/github.com/iancoleman/strcase/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Ian Coleman
+Copyright (c) 2018 Ma_124,
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, Subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or Substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/iancoleman/strcase/README.md b/vendor/github.com/iancoleman/strcase/README.md
new file mode 100644
index 00000000000..e72d9e97c62
--- /dev/null
+++ b/vendor/github.com/iancoleman/strcase/README.md
@@ -0,0 +1,59 @@
+# strcase
+[![Godoc Reference](https://godoc.org/github.com/iancoleman/strcase?status.svg)](http://godoc.org/github.com/iancoleman/strcase)
+[![Build Status](https://travis-ci.com/iancoleman/strcase.svg)](https://travis-ci.com/iancoleman/strcase)
+[![Coverage](http://gocover.io/_badge/github.com/iancoleman/strcase?0)](http://gocover.io/github.com/iancoleman/strcase)
+[![Go Report Card](https://goreportcard.com/badge/github.com/iancoleman/strcase)](https://goreportcard.com/report/github.com/iancoleman/strcase)
+
+strcase is a Go package for converting strings to various cases (e.g. [snake case](https://en.wikipedia.org/wiki/Snake_case) or [camel case](https://en.wikipedia.org/wiki/CamelCase)); see the full conversion table below.
+
+## Example
+
+```go
+s := "AnyKind of_string"
+```
+
+| Function | Result |
+|-------------------------------------------|----------------------|
+| `ToSnake(s)` | `any_kind_of_string` |
+| `ToSnakeWithIgnore(s, '.')` | `any_kind.of_string` |
+| `ToScreamingSnake(s)` | `ANY_KIND_OF_STRING` |
+| `ToKebab(s)` | `any-kind-of-string` |
+| `ToScreamingKebab(s)` | `ANY-KIND-OF-STRING` |
+| `ToDelimited(s, '.')` | `any.kind.of.string` |
+| `ToScreamingDelimited(s, '.', "", true)`  | `ANY.KIND.OF.STRING` |
+| `ToScreamingDelimited(s, '.', " ", true)` | `ANY.KIND OF.STRING` |
+| `ToCamel(s)` | `AnyKindOfString` |
+| `ToLowerCamel(s)` | `anyKindOfString` |
+
+
+## Install
+
+```bash
+go get -u github.com/iancoleman/strcase
+```
+
+## Custom Acronyms for ToCamel && ToLowerCamel
+
+Text often contains acronyms that you need handled in a specific way.
+Out of the box `strcase` converts the string "ID" to "Id" or "id", but there is
+no way for it to cater for every acronym in the wild.
+
+To configure a custom acronym globally, run the following before any conversion:
+
+```go
+import (
+ "github.com/iancoleman/strcase"
+)
+
+func init() {
+ // results in "Api" using ToCamel("API")
+ // results in "api" using ToLowerCamel("API")
+ strcase.ConfigureAcronym("API", "api")
+
+ // results in "PostgreSQL" using ToCamel("PostgreSQL")
+ // results in "postgreSQL" using ToLowerCamel("PostgreSQL")
+ strcase.ConfigureAcronym("PostgreSQL", "PostgreSQL")
+
+}
+
+```
diff --git a/vendor/github.com/iancoleman/strcase/acronyms.go b/vendor/github.com/iancoleman/strcase/acronyms.go
new file mode 100644
index 00000000000..9c3110dfffc
--- /dev/null
+++ b/vendor/github.com/iancoleman/strcase/acronyms.go
@@ -0,0 +1,10 @@
+package strcase
+
+var uppercaseAcronym = map[string]string{
+ "ID": "id",
+}
+
+// ConfigureAcronym allows you to add additional words which will be considered acronyms
+func ConfigureAcronym(key, val string) {
+ uppercaseAcronym[key] = val
+}
diff --git a/vendor/github.com/iancoleman/strcase/camel.go b/vendor/github.com/iancoleman/strcase/camel.go
new file mode 100644
index 00000000000..cd5a260392a
--- /dev/null
+++ b/vendor/github.com/iancoleman/strcase/camel.go
@@ -0,0 +1,80 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Ian Coleman
+ * Copyright (c) 2018 Ma_124,
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, Subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or Substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package strcase
+
+import (
+ "strings"
+)
+
+// Converts a string to CamelCase
+func toCamelInitCase(s string, initCase bool) string {
+ s = strings.TrimSpace(s)
+ if s == "" {
+ return s
+ }
+ if a, ok := uppercaseAcronym[s]; ok {
+ s = a
+ }
+
+ n := strings.Builder{}
+ n.Grow(len(s))
+ capNext := initCase
+ for i, v := range []byte(s) {
+ vIsCap := v >= 'A' && v <= 'Z'
+ vIsLow := v >= 'a' && v <= 'z'
+ if capNext {
+ if vIsLow {
+ v += 'A'
+ v -= 'a'
+ }
+ } else if i == 0 {
+ if vIsCap {
+ v += 'a'
+ v -= 'A'
+ }
+ }
+ if vIsCap || vIsLow {
+ n.WriteByte(v)
+ capNext = false
+ } else if vIsNum := v >= '0' && v <= '9'; vIsNum {
+ n.WriteByte(v)
+ capNext = true
+ } else {
+ capNext = v == '_' || v == ' ' || v == '-' || v == '.'
+ }
+ }
+ return n.String()
+}
+
+// ToCamel converts a string to CamelCase
+func ToCamel(s string) string {
+ return toCamelInitCase(s, true)
+}
+
+// ToLowerCamel converts a string to lowerCamelCase
+func ToLowerCamel(s string) string {
+ return toCamelInitCase(s, false)
+}
diff --git a/vendor/github.com/iancoleman/strcase/doc.go b/vendor/github.com/iancoleman/strcase/doc.go
new file mode 100644
index 00000000000..5e1825b9e04
--- /dev/null
+++ b/vendor/github.com/iancoleman/strcase/doc.go
@@ -0,0 +1,12 @@
+// Package strcase converts strings to various cases. See the conversion table below:
+// | Function | Result |
+// |---------------------------------|--------------------|
+// | ToSnake(s) | any_kind_of_string |
+// | ToScreamingSnake(s) | ANY_KIND_OF_STRING |
+// | ToKebab(s) | any-kind-of-string |
+// | ToScreamingKebab(s) | ANY-KIND-OF-STRING |
+// | ToDelimited(s, '.') | any.kind.of.string |
+// | ToScreamingDelimited(s, '.', "", true) | ANY.KIND.OF.STRING |
+// | ToCamel(s) | AnyKindOfString |
+// | ToLowerCamel(s) | anyKindOfString |
+package strcase
diff --git a/vendor/github.com/iancoleman/strcase/snake.go b/vendor/github.com/iancoleman/strcase/snake.go
new file mode 100644
index 00000000000..df018bc7a2d
--- /dev/null
+++ b/vendor/github.com/iancoleman/strcase/snake.go
@@ -0,0 +1,115 @@
+/*
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2015 Ian Coleman
+ * Copyright (c) 2018 Ma_124,
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, Subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or Substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package strcase
+
+import (
+ "strings"
+)
+
+// ToSnake converts a string to snake_case
+func ToSnake(s string) string {
+ return ToDelimited(s, '_')
+}
+
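+// ToSnakeWithIgnore converts a string to snake_case while preserving the
+// characters listed in ignore verbatim.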
+func ToSnakeWithIgnore(s string, ignore string) string {
+ return ToScreamingDelimited(s, '_', ignore, false)
+}
+
+// ToScreamingSnake converts a string to SCREAMING_SNAKE_CASE
+func ToScreamingSnake(s string) string {
+ return ToScreamingDelimited(s, '_', "", true)
+}
+
+// ToKebab converts a string to kebab-case
+func ToKebab(s string) string {
+ return ToDelimited(s, '-')
+}
+
+// ToScreamingKebab converts a string to SCREAMING-KEBAB-CASE
+func ToScreamingKebab(s string) string {
+ return ToScreamingDelimited(s, '-', "", true)
+}
+
+// ToDelimited converts a string to delimited.snake.case
+// (in this case `delimiter = '.'`)
+func ToDelimited(s string, delimiter uint8) string {
+ return ToScreamingDelimited(s, delimiter, "", false)
+}
+
+// ToScreamingDelimited converts a string to SCREAMING.DELIMITED.SNAKE.CASE
+// (in this case `delimiter = '.'; screaming = true`)
+// or delimited.snake.case
+// (in this case `delimiter = '.'; screaming = false`)
+func ToScreamingDelimited(s string, delimiter uint8, ignore string, screaming bool) string {
+ s = strings.TrimSpace(s)
+ n := strings.Builder{}
+ n.Grow(len(s) + 2) // nominal 2 bytes of extra space for inserted delimiters
+ for i, v := range []byte(s) {
+ vIsCap := v >= 'A' && v <= 'Z'
+ vIsLow := v >= 'a' && v <= 'z'
+ if vIsLow && screaming {
+ v += 'A'
+ v -= 'a'
+ } else if vIsCap && !screaming {
+ v += 'a'
+ v -= 'A'
+ }
+
+		// treat acronyms as words: e.g. in "JSONData", "JSON" is a whole word
+ if i+1 < len(s) {
+ next := s[i+1]
+ vIsNum := v >= '0' && v <= '9'
+ nextIsCap := next >= 'A' && next <= 'Z'
+ nextIsLow := next >= 'a' && next <= 'z'
+ nextIsNum := next >= '0' && next <= '9'
+			// insert the delimiter where the case type changes between neighbors
+ if (vIsCap && (nextIsLow || nextIsNum)) || (vIsLow && (nextIsCap || nextIsNum)) || (vIsNum && (nextIsCap || nextIsLow)) {
+ prevIgnore := ignore != "" && i > 0 && strings.ContainsAny(string(s[i-1]), ignore)
+ if !prevIgnore {
+ if vIsCap && nextIsLow {
+ if prevIsCap := i > 0 && s[i-1] >= 'A' && s[i-1] <= 'Z'; prevIsCap {
+ n.WriteByte(delimiter)
+ }
+ }
+ n.WriteByte(v)
+ if vIsLow || vIsNum || nextIsNum {
+ n.WriteByte(delimiter)
+ }
+ continue
+ }
+ }
+ }
+
+ if (v == ' ' || v == '_' || v == '-' || v == '.') && !strings.ContainsAny(string(v), ignore) {
+ // replace space/underscore/hyphen/dot with delimiter
+ n.WriteByte(delimiter)
+ } else {
+ n.WriteByte(v)
+ }
+ }
+
+ return n.String()
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go
new file mode 100644
index 00000000000..560f3542e4a
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr/matcher.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expr // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr"
+
+import (
+ "context"
+)
+
+// BoolExpr is an interface that allows matching a context K against a configuration of a match.
+type BoolExpr[K any] interface {
+ Eval(ctx context.Context, tCtx K) (bool, error)
+}
+
+type notMatcher[K any] struct {
+ matcher BoolExpr[K]
+}
+
+func (nm notMatcher[K]) Eval(ctx context.Context, tCtx K) (bool, error) {
+ ret, err := nm.matcher.Eval(ctx, tCtx)
+ return !ret, err
+}
+
+func Not[K any](matcher BoolExpr[K]) BoolExpr[K] {
+ return notMatcher[K]{matcher: matcher}
+}
+
+type orMatcher[K any] struct {
+ matchers []BoolExpr[K]
+}
+
+func (om orMatcher[K]) Eval(ctx context.Context, tCtx K) (bool, error) {
+ for i := range om.matchers {
+ ret, err := om.matchers[i].Eval(ctx, tCtx)
+ if err != nil {
+ return false, err
+ }
+ if ret {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func Or[K any](matchers ...BoolExpr[K]) BoolExpr[K] {
+ switch len(matchers) {
+ case 0:
+ return nil
+ case 1:
+ return matchers[0]
+ default:
+ return orMatcher[K]{matchers: matchers}
+ }
+}
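+
+// Usage sketch (editorial example, not part of the upstream file): Not and Or
+// compose BoolExprs into larger expressions. Assuming two hypothetical
+// matchers `include` and `exclude` of type BoolExpr[K], a typical "skip"
+// expression is built and evaluated as:
+//
+//    skip := Or(Not(include), exclude)
+//    shouldSkip, err := skip.Eval(ctx, tCtx)
+//
+// Note that Or returns nil for an empty matcher list, so callers must be
+// prepared to handle a nil BoolExpr.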
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig/config.go
new file mode 100644
index 00000000000..494a92e0ac4
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig/config.go
@@ -0,0 +1,237 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterconfig // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig"
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+
+ "go.opentelemetry.io/collector/pdata/plog"
+ "go.opentelemetry.io/collector/pdata/ptrace"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/traceutil"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+)
+
+// MatchConfig has two optional MatchProperties: one, captured under 'include',
+// to define what is processed by the processor, and a second, 'exclude', to
+// define what is excluded from the processor.
+type MatchConfig struct {
+ // Include specifies the set of input data properties that must be present in order
+ // for this processor to apply to it.
+ // Note: If `exclude` is specified, the input data is compared against those
+ // properties after the `include` properties.
+ // This is an optional field. If neither `include` nor `exclude` is set, all input data
+ // is processed. If `include` is set and `exclude` isn't, then all
+ // input data matching the properties in this structure is processed.
+ Include *MatchProperties `mapstructure:"include"`
+
+ // Exclude specifies when this processor will not be applied to the input data
+ // which match the specified properties.
+ // Note: The `exclude` properties are checked after the `include` properties,
+ // if they exist, are checked.
+ // If `include` isn't specified, the `exclude` properties are checked against
+ // all input data.
+ // This is an optional field. If neither `include` nor `exclude` is set, all input data
+ // is processed. If `exclude` is set and `include` isn't, then all the
+ // input data that does not match the properties in this structure is processed.
+ Exclude *MatchProperties `mapstructure:"exclude"`
+}
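+
+// Editorial sketch (not part of the upstream file): a MatchConfig that keeps
+// only spans from a hypothetical "checkout" service while excluding its
+// health-check spans could be built programmatically as:
+//
+//    cfg := &MatchConfig{
+//        Include: &MatchProperties{
+//            Config:   filterset.Config{MatchType: filterset.Strict},
+//            Services: []string{"checkout"},
+//        },
+//        Exclude: &MatchProperties{
+//            Config:    filterset.Config{MatchType: filterset.Strict},
+//            SpanNames: []string{"/healthz"},
+//        },
+//    }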
+
+// MatchProperties specifies the set of properties in a span/log/metric to match
+// against and whether the input data should be included in or excluded from the
+// processor. At least one of services (spans only), names, or
+// attributes must be specified. All of them may be specified, but
+// then every specified property must match for the inclusion/exclusion to
+// occur.
+// The following are examples of invalid configurations:
+//
+// attributes/bad1:
+// # This is invalid because include is specified with neither services nor
+// # attributes.
+// include:
+// actions: ...
+//
+// span/bad2:
+// exclude:
+// # This is invalid because services, span_names and attributes have empty values.
+// services:
+// span_names:
+// attributes:
+// actions: ...
+//
+// Please refer to processor/attributesprocessor/testdata/config.yaml and
+// processor/spanprocessor/testdata/config.yaml for valid configurations.
+type MatchProperties struct {
+ // Config configures the matching patterns used when matching span properties.
+ filterset.Config `mapstructure:",squash"`
+
+ // Note: For spans, one of Services, SpanNames, Attributes, Resources or Libraries must be specified with a
+ // non-empty value for a valid configuration.
+
+ // For logs, one of LogNames, Attributes, Resources or Libraries must be specified with a
+ // non-empty value for a valid configuration.
+
+ // For metrics, one of MetricNames, Expressions, or ResourceAttributes must be specified with a
+ // non-empty value for a valid configuration.
+
+ // Services specify the list of items to match service name against.
+ // A match occurs if the span's service name matches at least one item in this list.
+ // This is an optional field.
+ Services []string `mapstructure:"services"`
+
+ // SpanNames specify the list of items to match span name against.
+ // A match occurs if the span name matches at least one item in this list.
+ // This is an optional field.
+ SpanNames []string `mapstructure:"span_names"`
+
+ // LogBodies is a list of strings that the LogRecord's body field must match
+ // against.
+ LogBodies []string `mapstructure:"log_bodies"`
+
+ // LogSeverityTexts is a list of strings that the LogRecord's severity text field must match
+ // against.
+ LogSeverityTexts []string `mapstructure:"log_severity_texts"`
+
+ // LogSeverityNumber defines how to match against a log record's SeverityNumber, if defined.
+ LogSeverityNumber *LogSeverityNumberMatchProperties `mapstructure:"log_severity_number"`
+
+ // MetricNames is a list of strings to match metric name against.
+ // A match occurs if metric name matches at least one item in the list.
+ // This field is optional.
+ MetricNames []string `mapstructure:"metric_names"`
+
+ // Attributes specifies the list of attributes to match against.
+ // All of these attributes must match exactly for a match to occur.
+ // Only match_type=strict is allowed if "attributes" are specified.
+ // This is an optional field.
+ Attributes []Attribute `mapstructure:"attributes"`
+
+ // Resources specify the list of items to match the resources against.
+ // A match occurs if the data's resources match at least one item in this list.
+ // This is an optional field.
+ Resources []Attribute `mapstructure:"resources"`
+
+ // Libraries specify the list of items to match the implementation library against.
+ // A match occurs if the span's implementation library matches at least one item in this list.
+ // This is an optional field.
+ Libraries []InstrumentationLibrary `mapstructure:"libraries"`
+
+ // SpanKinds specify the list of items to match the span kind against.
+ // A match occurs if the span's span kind matches at least one item in this list.
+ // This is an optional field.
+ SpanKinds []string `mapstructure:"span_kinds"`
+}
+
+var (
+ ErrMissingRequiredField = errors.New(`at least one of "services", "span_names", "attributes", "libraries", "resources" or "span_kinds" field must be specified`)
+ ErrInvalidLogField = errors.New("services, span_names, and span_kinds are not valid for log records")
+ ErrMissingRequiredLogField = errors.New(`at least one of "attributes", "libraries", "resources", "log_bodies", "log_severity_texts" or "log_severity_number" field must be specified`)
+
+ spanKinds = map[string]bool{
+ traceutil.SpanKindStr(ptrace.SpanKindInternal): true,
+ traceutil.SpanKindStr(ptrace.SpanKindClient): true,
+ traceutil.SpanKindStr(ptrace.SpanKindServer): true,
+ traceutil.SpanKindStr(ptrace.SpanKindConsumer): true,
+ traceutil.SpanKindStr(ptrace.SpanKindProducer): true,
+ }
+)
+
+// ValidateForSpans validates properties for spans.
+func (mp *MatchProperties) ValidateForSpans() error {
+ if len(mp.LogBodies) > 0 {
+ return errors.New("log_bodies should not be specified for trace spans")
+ }
+
+ if len(mp.LogSeverityTexts) > 0 {
+ return errors.New("log_severity_texts should not be specified for trace spans")
+ }
+
+ if mp.LogSeverityNumber != nil {
+ return errors.New("log_severity_number should not be specified for trace spans")
+ }
+
+ if len(mp.Services) == 0 && len(mp.SpanNames) == 0 && len(mp.Attributes) == 0 &&
+ len(mp.Libraries) == 0 && len(mp.Resources) == 0 && len(mp.SpanKinds) == 0 {
+ return ErrMissingRequiredField
+ }
+
+ if len(mp.SpanKinds) > 0 && mp.MatchType == "strict" {
+ for _, kind := range mp.SpanKinds {
+ if !spanKinds[kind] {
+ validSpanKinds := make([]string, 0, len(spanKinds))
+ for k := range spanKinds {
+ validSpanKinds = append(validSpanKinds, k)
+ }
+ sort.Strings(validSpanKinds)
+ return fmt.Errorf("span_kinds string must match one of the standard span kinds when match_type=strict: %v", validSpanKinds)
+ }
+ }
+ }
+
+ return nil
+}
+
+// ValidateForLogs validates properties for logs.
+func (mp *MatchProperties) ValidateForLogs() error {
+ if len(mp.SpanNames) > 0 || len(mp.Services) > 0 || len(mp.SpanKinds) > 0 {
+ return ErrInvalidLogField
+ }
+
+ if len(mp.Attributes) == 0 && len(mp.Libraries) == 0 &&
+ len(mp.Resources) == 0 && len(mp.LogBodies) == 0 &&
+ len(mp.LogSeverityTexts) == 0 && mp.LogSeverityNumber == nil {
+ return ErrMissingRequiredLogField
+ }
+
+ return nil
+}
+
+// Attribute specifies the attribute key and optional value to match against.
+type Attribute struct {
+ // Key specifies the attribute key.
+ Key string `mapstructure:"key"`
+
+ // Value specifies the value to match against.
+ // If it is not set, any value will match.
+ Value interface{} `mapstructure:"value"`
+}
+
+// InstrumentationLibrary specifies the instrumentation library and optional version to match against.
+type InstrumentationLibrary struct {
+ Name string `mapstructure:"name"`
+ // version match:
+ //  expected   actual    match
+ //  nil        <blank>   yes
+ //  nil        1         yes
+ //  <blank>    <blank>   yes
+ //  <blank>    1         no
+ //  1          <blank>   no
+ //  1          1         yes
+ Version *string `mapstructure:"version"`
+}
+
+// LogSeverityNumberMatchProperties defines how to match based on a log record's SeverityNumber field.
+type LogSeverityNumberMatchProperties struct {
+ // Min is the lowest severity that may be matched.
+ // e.g. if this is plog.SeverityNumberInfo, INFO, WARN, ERROR, and FATAL logs will match.
+ Min plog.SeverityNumber `mapstructure:"min"`
+
+ // MatchUndefined controls whether logs with "undefined" severity match.
+ // If this is true, entries with undefined severity will match.
+ MatchUndefined bool `mapstructure:"match_undefined"`
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterexpr/matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterexpr/matcher.go
new file mode 100644
index 00000000000..3fb3fa325eb
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterexpr/matcher.go
@@ -0,0 +1,172 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterexpr // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterexpr"
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/antonmedv/expr"
+ "github.com/antonmedv/expr/vm"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+)
+
+var vmPool = sync.Pool{
+ New: func() interface{} {
+ return &vm.VM{}
+ },
+}
+
+type Matcher struct {
+ program *vm.Program
+}
+
+type env struct {
+ MetricName string
+ MetricType string
+ attributes pcommon.Map
+}
+
+func (e *env) HasLabel(key string) bool {
+ _, ok := e.attributes.Get(key)
+ return ok
+}
+
+func (e *env) Label(key string) string {
+ v, _ := e.attributes.Get(key)
+ return v.Str()
+}
+
+func NewMatcher(expression string) (*Matcher, error) {
+ program, err := expr.Compile(expression)
+ if err != nil {
+ return nil, err
+ }
+ return &Matcher{program: program}, nil
+}
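+
+// Usage sketch (editorial example, not part of the upstream file): expressions
+// are evaluated against the env type above, so MetricName, MetricType,
+// HasLabel, and Label are addressable by name. For instance:
+//
+//    m, err := NewMatcher(`MetricName == 'cpu.usage' && HasLabel('host')`)
+//    if err != nil { /* handle invalid expression */ }
+//    matched, err := m.MatchMetric(metric) // metric is a pmetric.Metric
+//
+// MatchMetric reports a match if any single data point of the metric satisfies
+// the expression.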
+
+func (m *Matcher) MatchMetric(metric pmetric.Metric) (bool, error) {
+ metricName := metric.Name()
+ vm := vmPool.Get().(*vm.VM)
+ defer vmPool.Put(vm)
+ switch metric.Type() {
+ case pmetric.MetricTypeGauge:
+ return m.matchGauge(metricName, metric.Gauge(), vm)
+ case pmetric.MetricTypeSum:
+ return m.matchSum(metricName, metric.Sum(), vm)
+ case pmetric.MetricTypeHistogram:
+ return m.matchHistogram(metricName, metric.Histogram(), vm)
+ case pmetric.MetricTypeExponentialHistogram:
+ return m.matchExponentialHistogram(metricName, metric.ExponentialHistogram(), vm)
+ case pmetric.MetricTypeSummary:
+ return m.matchSummary(metricName, metric.Summary(), vm)
+ default:
+ return false, nil
+ }
+}
+
+func (m *Matcher) matchGauge(metricName string, gauge pmetric.Gauge, vm *vm.VM) (bool, error) {
+ pts := gauge.DataPoints()
+ for i := 0; i < pts.Len(); i++ {
+ matched, err := m.matchEnv(metricName, pmetric.MetricTypeGauge, pts.At(i).Attributes(), vm)
+ if err != nil {
+ return false, err
+ }
+ if matched {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func (m *Matcher) matchSum(metricName string, sum pmetric.Sum, vm *vm.VM) (bool, error) {
+ pts := sum.DataPoints()
+ for i := 0; i < pts.Len(); i++ {
+ matched, err := m.matchEnv(metricName, pmetric.MetricTypeSum, pts.At(i).Attributes(), vm)
+ if err != nil {
+ return false, err
+ }
+ if matched {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func (m *Matcher) matchHistogram(metricName string, histogram pmetric.Histogram, vm *vm.VM) (bool, error) {
+ pts := histogram.DataPoints()
+ for i := 0; i < pts.Len(); i++ {
+ matched, err := m.matchEnv(metricName, pmetric.MetricTypeHistogram, pts.At(i).Attributes(), vm)
+ if err != nil {
+ return false, err
+ }
+ if matched {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func (m *Matcher) matchExponentialHistogram(metricName string, eh pmetric.ExponentialHistogram, vm *vm.VM) (bool, error) {
+ pts := eh.DataPoints()
+ for i := 0; i < pts.Len(); i++ {
+ matched, err := m.matchEnv(metricName, pmetric.MetricTypeExponentialHistogram, pts.At(i).Attributes(), vm)
+ if err != nil {
+ return false, err
+ }
+ if matched {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func (m *Matcher) matchSummary(metricName string, summary pmetric.Summary, vm *vm.VM) (bool, error) {
+ pts := summary.DataPoints()
+ for i := 0; i < pts.Len(); i++ {
+ matched, err := m.matchEnv(metricName, pmetric.MetricTypeSummary, pts.At(i).Attributes(), vm)
+ if err != nil {
+ return false, err
+ }
+ if matched {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func (m *Matcher) matchEnv(metricName string, metricType pmetric.MetricType, attributes pcommon.Map, vm *vm.VM) (bool, error) {
+ return m.match(env{
+ MetricName: metricName,
+ MetricType: metricType.String(),
+ attributes: attributes,
+ }, vm)
+}
+
+func (m *Matcher) match(env env, vm *vm.VM) (bool, error) {
+ result, err := vm.Run(m.program, &env)
+ if err != nil {
+ return false, err
+ }
+
+ v, ok := result.(bool)
+ if !ok {
+ return false, fmt.Errorf("filter returned non-boolean value type=%T result=%v metric=%s, attributes=%v",
+ result, result, env.MetricName, env.attributes.AsRaw())
+ }
+
+ return v, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go
new file mode 100644
index 00000000000..27874ed8308
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/filterlog.go
@@ -0,0 +1,130 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterlog // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
+)
+
+// NewSkipExpr creates a BoolExpr that on evaluation returns true if a log should NOT be processed, i.e. should be skipped.
+// The logic determining if a log should be processed is based on include and exclude settings.
+// Include properties are checked before exclude settings are checked.
+func NewSkipExpr(mp *filterconfig.MatchConfig) (expr.BoolExpr[ottllog.TransformContext], error) {
+ var matchers []expr.BoolExpr[ottllog.TransformContext]
+ inclExpr, err := newExpr(mp.Include)
+ if err != nil {
+ return nil, err
+ }
+ if inclExpr != nil {
+ matchers = append(matchers, expr.Not(inclExpr))
+ }
+ exclExpr, err := newExpr(mp.Exclude)
+ if err != nil {
+ return nil, err
+ }
+ if exclExpr != nil {
+ matchers = append(matchers, exclExpr)
+ }
+ return expr.Or(matchers...), nil
+}
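+
+// Editorial sketch (not part of the upstream file): given a
+// *filterconfig.MatchConfig named cfg, the resulting expression is evaluated
+// once per log record:
+//
+//    skip, err := NewSkipExpr(cfg)
+//    if err != nil { /* invalid match properties */ }
+//    if skip != nil {
+//        drop, err := skip.Eval(ctx, tCtx) // tCtx is an ottllog.TransformContext
+//        ...
+//    }
+//
+// NewSkipExpr returns a nil expression when neither include nor exclude is
+// configured, so callers must handle the nil case.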
+
+// propertiesMatcher allows matching a log record against various log record properties.
+type propertiesMatcher struct {
+ filtermatcher.PropertiesMatcher
+
+ // log bodies to compare to.
+ bodyFilters filterset.FilterSet
+
+ // log severity texts to compare to
+ severityTextFilters filterset.FilterSet
+
+ // matcher for severity number
+ severityNumberMatcher *severityNumberMatcher
+}
+
+// newExpr creates a log record BoolExpr that matches based on the given MatchProperties.
+func newExpr(mp *filterconfig.MatchProperties) (expr.BoolExpr[ottllog.TransformContext], error) {
+ if mp == nil {
+ return nil, nil
+ }
+
+ if err := mp.ValidateForLogs(); err != nil {
+ return nil, err
+ }
+
+ rm, err := filtermatcher.NewMatcher(mp)
+ if err != nil {
+ return nil, err
+ }
+
+ var bodyFS filterset.FilterSet
+ if len(mp.LogBodies) > 0 {
+ bodyFS, err = filterset.CreateFilterSet(mp.LogBodies, &mp.Config)
+ if err != nil {
+ return nil, fmt.Errorf("error creating log record body filters: %w", err)
+ }
+ }
+ var severityTextFS filterset.FilterSet
+ if len(mp.LogSeverityTexts) > 0 {
+ severityTextFS, err = filterset.CreateFilterSet(mp.LogSeverityTexts, &mp.Config)
+ if err != nil {
+ return nil, fmt.Errorf("error creating log record severity text filters: %w", err)
+ }
+ }
+
+ pm := &propertiesMatcher{
+ PropertiesMatcher: rm,
+ bodyFilters: bodyFS,
+ severityTextFilters: severityTextFS,
+ }
+
+ if mp.LogSeverityNumber != nil {
+ pm.severityNumberMatcher = newSeverityNumberMatcher(mp.LogSeverityNumber.Min, mp.LogSeverityNumber.MatchUndefined)
+ }
+
+ return pm, nil
+}
+
+// Eval matches a log record to a set of properties.
+// The log record body, severity text, and severity number are matched, if specified.
+// The attributes are then checked, if specified.
+// More than one of these may be specified, and every specified property must
+// evaluate to true for a match to occur.
+func (mp *propertiesMatcher) Eval(_ context.Context, tCtx ottllog.TransformContext) (bool, error) {
+ lr := tCtx.GetLogRecord()
+ if lr.Body().Type() == pcommon.ValueTypeStr && mp.bodyFilters != nil && !mp.bodyFilters.Matches(lr.Body().Str()) {
+ return false, nil
+ }
+ if mp.severityTextFilters != nil && !mp.severityTextFilters.Matches(lr.SeverityText()) {
+ return false, nil
+ }
+ if mp.severityNumberMatcher != nil && !mp.severityNumberMatcher.match(lr) {
+ return false, nil
+ }
+
+ return mp.PropertiesMatcher.Match(lr.Attributes(), tCtx.GetResource(), tCtx.GetInstrumentationScope()), nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/severity_matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/severity_matcher.go
new file mode 100644
index 00000000000..4216381af61
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog/severity_matcher.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterlog // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog"
+
+import (
+ "go.opentelemetry.io/collector/pdata/plog"
+)
+
+// severityNumberMatcher is a Matcher that matches if the input log record has a
+// severity number greater than or equal to minSeverityNumber.
+type severityNumberMatcher struct {
+ matchUndefined bool
+ minSeverityNumber plog.SeverityNumber
+}
+
+func newSeverityNumberMatcher(minSeverity plog.SeverityNumber, matchUndefined bool) *severityNumberMatcher {
+ return &severityNumberMatcher{
+ minSeverityNumber: minSeverity,
+ matchUndefined: matchUndefined,
+ }
+}
+
+func (snm severityNumberMatcher) match(lr plog.LogRecord) bool {
+ // behavior on SeverityNumberUNDEFINED is explicitly defined by matchUndefined
+ if lr.SeverityNumber() == plog.SeverityNumberUnspecified {
+ return snm.matchUndefined
+ }
+
+ // If the log record's severity is greater than or equal to the desired severity, it matches.
+ if lr.SeverityNumber() >= snm.minSeverityNumber {
+ return true
+ }
+
+ return false
+}
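+
+// Editorial example (not part of the upstream file): with a minimum severity
+// of INFO and matchUndefined=false, INFO and ERROR records match while DEBUG
+// and unspecified-severity records do not:
+//
+//    snm := newSeverityNumberMatcher(plog.SeverityNumberInfo, false)
+//    snm.match(lr) // true iff lr.SeverityNumber() >= plog.SeverityNumberInfo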
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go
new file mode 100644
index 00000000000..849f9dd1c3d
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/attributematcher.go
@@ -0,0 +1,161 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filtermatcher // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher"
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
+)
+
+type AttributesMatcher []AttributeMatcher
+
+type valueIdentifier struct {
+ value pcommon.Value
+ valueHash [16]byte
+}
+
+// AttributeMatcher is an attribute key/value pair to match against.
+type AttributeMatcher struct {
+ Key string
+ // If both AttributeValue and StringFilter are nil, only check for key existence.
+ AttributeValue *valueIdentifier
+ // StringFilter is needed to match against a regular expression
+ StringFilter filterset.FilterSet
+}
+
+var errUnexpectedAttributeType = errors.New("unexpected attribute type")
+
+func NewAttributesMatcher(config filterset.Config, attributes []filterconfig.Attribute) (AttributesMatcher, error) {
+ // Convert attribute values from mp representation to in-memory representation.
+ var rawAttributes []AttributeMatcher
+ for _, attribute := range attributes {
+ if attribute.Key == "" {
+ return nil, errors.New("can't have empty key in the list of attributes")
+ }
+
+ entry := AttributeMatcher{
+ Key: attribute.Key,
+ }
+ if attribute.Value != nil {
+ val := pcommon.NewValueEmpty()
+ err := val.FromRaw(attribute.Value)
+ if err != nil {
+ return nil, err
+ }
+
+ switch config.MatchType {
+ case filterset.Regexp:
+ if val.Type() != pcommon.ValueTypeStr {
+ return nil, fmt.Errorf(
+ "%s=%s for %q only supports Str, but found %s",
+ filterset.MatchTypeFieldName, filterset.Regexp, attribute.Key, val.Type(),
+ )
+ }
+
+ filter, err := filterset.CreateFilterSet([]string{val.Str()}, &config)
+ if err != nil {
+ return nil, err
+ }
+ entry.StringFilter = filter
+ case filterset.Strict:
+ entry.AttributeValue = &valueIdentifier{
+ value: val,
+ valueHash: pdatautil.ValueHash(val),
+ }
+ default:
+ return nil, filterset.NewUnrecognizedMatchTypeError(config.MatchType)
+ }
+ }
+
+ rawAttributes = append(rawAttributes, entry)
+ }
+ return rawAttributes, nil
+}
+
+// Match matches the attribute specification against the attributes of a span/log.
+func (ma AttributesMatcher) Match(attrs pcommon.Map) bool {
+ // If there are no attributes to match against, the span/log matches.
+ if len(ma) == 0 {
+ return true
+ }
+
+ // At this point the span/log is expected to have attributes because
+ // len(ma) != 0; spans/logs with no attributes therefore do not match.
+ if attrs.Len() == 0 {
+ return false
+ }
+
+ // Check that all expected properties are set.
+ for _, property := range ma {
+ attr, exist := attrs.Get(property.Key)
+ if !exist {
+ return false
+ }
+
+ if property.StringFilter != nil {
+ value, err := attributeStringValue(attr)
+ if err != nil || !property.StringFilter.Matches(value) {
+ return false
+ }
+ } else if property.AttributeValue != nil {
+ if !attributeValueMatch(property.AttributeValue, attr) {
+ return false
+ }
+ }
+ }
+ return true
+}
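+
+// Editorial sketch (not part of the upstream file): a strict matcher requiring
+// attribute "env" to equal "prod" and attribute "region" to merely exist:
+//
+//    am, err := NewAttributesMatcher(
+//        filterset.Config{MatchType: filterset.Strict},
+//        []filterconfig.Attribute{
+//            {Key: "env", Value: "prod"},
+//            {Key: "region"}, // no Value: key existence is enough
+//        },
+//    )
+//    ok := am.Match(attrs) // attrs is a pcommon.Map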
+
+func attributeStringValue(attr pcommon.Value) (string, error) {
+ switch attr.Type() {
+ case pcommon.ValueTypeStr:
+ return attr.Str(), nil
+ case pcommon.ValueTypeBool:
+ return strconv.FormatBool(attr.Bool()), nil
+ case pcommon.ValueTypeDouble:
+ return strconv.FormatFloat(attr.Double(), 'f', -1, 64), nil
+ case pcommon.ValueTypeInt:
+ return strconv.FormatInt(attr.Int(), 10), nil
+ default:
+ return "", errUnexpectedAttributeType
+ }
+}
+
+func attributeValueMatch(vi *valueIdentifier, val pcommon.Value) bool {
+ if vi.value.Type() != val.Type() {
+ return false
+ }
+ switch val.Type() {
+ case pcommon.ValueTypeStr:
+ return vi.value.Str() == val.Str()
+ case pcommon.ValueTypeBool:
+ return vi.value.Bool() == val.Bool()
+ case pcommon.ValueTypeDouble:
+ return vi.value.Double() == val.Double()
+ case pcommon.ValueTypeInt:
+ return vi.value.Int() == val.Int()
+ }
+ // Use hash for other complex data types.
+ return vi.valueHash == pdatautil.ValueHash(val)
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/filtermatcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/filtermatcher.go
new file mode 100644
index 00000000000..84935ca3c70
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher/filtermatcher.go
@@ -0,0 +1,104 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filtermatcher // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher"
+
+import (
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+)
+
+type instrumentationLibraryMatcher struct {
+ Name filterset.FilterSet
+ Version filterset.FilterSet
+}
+
+// PropertiesMatcher allows matching a span or log record against various properties.
+type PropertiesMatcher struct {
+ // Instrumentation libraries to compare against
+ libraries []instrumentationLibraryMatcher
+
+ // The attribute values are stored in the internal format.
+ attributes AttributesMatcher
+
+ // The resource attribute values are stored in the internal format.
+ resources AttributesMatcher
+}
+
+// NewMatcher creates a span Matcher that matches based on the given MatchProperties.
+func NewMatcher(mp *filterconfig.MatchProperties) (PropertiesMatcher, error) {
+ var lm []instrumentationLibraryMatcher
+ for _, library := range mp.Libraries {
+ name, err := filterset.CreateFilterSet([]string{library.Name}, &mp.Config)
+ if err != nil {
+ return PropertiesMatcher{}, fmt.Errorf("error creating library name filters: %w", err)
+ }
+
+ var version filterset.FilterSet
+ if library.Version != nil {
+ filter, err := filterset.CreateFilterSet([]string{*library.Version}, &mp.Config)
+ if err != nil {
+ return PropertiesMatcher{}, fmt.Errorf("error creating library version filters: %w", err)
+ }
+ version = filter
+ }
+
+ lm = append(lm, instrumentationLibraryMatcher{Name: name, Version: version})
+ }
+
+ var err error
+ var am AttributesMatcher
+ if len(mp.Attributes) > 0 {
+ am, err = NewAttributesMatcher(mp.Config, mp.Attributes)
+ if err != nil {
+ return PropertiesMatcher{}, fmt.Errorf("error creating attribute filters: %w", err)
+ }
+ }
+
+ var rm AttributesMatcher
+ if len(mp.Resources) > 0 {
+ rm, err = NewAttributesMatcher(mp.Config, mp.Resources)
+ if err != nil {
+ return PropertiesMatcher{}, fmt.Errorf("error creating resource filters: %w", err)
+ }
+ }
+
+ return PropertiesMatcher{
+ libraries: lm,
+ attributes: am,
+ resources: rm,
+ }, nil
+}
+
+// Match matches a span or log to a set of properties.
+func (mp *PropertiesMatcher) Match(attributes pcommon.Map, resource pcommon.Resource, library pcommon.InstrumentationScope) bool {
+ for _, matcher := range mp.libraries {
+ if !matcher.Name.Matches(library.Name()) {
+ return false
+ }
+ if matcher.Version != nil && !matcher.Version.Matches(library.Version()) {
+ return false
+ }
+ }
+
+ if mp.resources != nil && !mp.resources.Match(resource.Attributes()) {
+ return false
+ }
+
+ return mp.attributes.Match(attributes)
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/config.go
new file mode 100644
index 00000000000..5a0a4d861fa
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/config.go
@@ -0,0 +1,70 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filtermetric // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric"
+
+import (
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp"
+)
+
+// MatchType specifies the strategy for matching against `pmetric.Metric`s. This
+// is distinct from filterset.MatchType which matches against metric (and
+// tracing) names only. To support matching against metric names and
+// `pmetric.Metric`s, filtermetric.MatchType is effectively a superset of
+// filterset.MatchType.
+type MatchType string
+
+// These are the MatchTypes that users can specify for filtering
+// `pmetric.Metric`s.
+const (
+ Regexp = MatchType(filterset.Regexp)
+ Strict = MatchType(filterset.Strict)
+ Expr MatchType = "expr"
+)
+
+// MatchProperties specifies the set of properties in a metric to match against and the
+// type of string pattern matching to use.
+type MatchProperties struct {
+ // MatchType specifies the type of matching desired
+ MatchType MatchType `mapstructure:"match_type"`
+ // RegexpConfig specifies options for the Regexp match type
+ RegexpConfig *regexp.Config `mapstructure:"regexp"`
+
+ // MetricNames specifies the list of string patterns to match metric names against.
+ // A match occurs if the metric name matches at least one string pattern in this list.
+ MetricNames []string `mapstructure:"metric_names"`
+
+ // Expressions specifies the list of expr expressions to match metrics against.
+ // A match occurs if any datapoint in a metric matches at least one expression in this list.
+ Expressions []string `mapstructure:"expressions"`
+
+ // ResourceAttributes defines a list of possible resource attributes to match metrics against.
+ // A match occurs if the metric's resource attributes match every entry in this list.
+ ResourceAttributes []filterconfig.Attribute `mapstructure:"resource_attributes"`
+}
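+
+// Editorial sketch (not part of the upstream file): an expr-based property set
+// that matches any metric with a data point carrying a "host" label:
+//
+//    mp := &MatchProperties{
+//        MatchType:   Expr,
+//        Expressions: []string{`HasLabel("host")`},
+//    }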
+
+func CreateMatchPropertiesFromDefault(properties *filterconfig.MatchProperties) *MatchProperties {
+ if properties == nil {
+ return nil
+ }
+
+ return &MatchProperties{
+ MatchType: MatchType(properties.Config.MatchType),
+ RegexpConfig: properties.Config.RegexpConfig,
+ MetricNames: properties.MetricNames,
+ ResourceAttributes: properties.Resources,
+ }
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/doc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/doc.go
new file mode 100644
index 00000000000..5fe9cde8781
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package filtermetric is a helper package for processing metrics.
+package filtermetric // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/expr_matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/expr_matcher.go
new file mode 100644
index 00000000000..9558043b2db
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/expr_matcher.go
@@ -0,0 +1,51 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filtermetric // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric"
+
+import (
+ "context"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterexpr"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
+)
+
+type exprMatcher struct {
+ matchers []*filterexpr.Matcher
+}
+
+func newExprMatcher(expressions []string) (*exprMatcher, error) {
+ m := &exprMatcher{}
+ for _, expression := range expressions {
+ matcher, err := filterexpr.NewMatcher(expression)
+ if err != nil {
+ return nil, err
+ }
+ m.matchers = append(m.matchers, matcher)
+ }
+ return m, nil
+}
+
+func (m *exprMatcher) Eval(_ context.Context, tCtx ottlmetric.TransformContext) (bool, error) {
+ for _, matcher := range m.matchers {
+ matched, err := matcher.MatchMetric(tCtx.GetMetric())
+ if err != nil {
+ return false, err
+ }
+ if matched {
+ return true, nil
+ }
+ }
+ return false, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/filtermetric.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/filtermetric.go
new file mode 100644
index 00000000000..d1fae224e31
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/filtermetric.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filtermetric // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric"
+
+import (
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
+)
+
+// NewSkipExpr creates a BoolExpr that on evaluation returns true if a metric should NOT be processed, i.e. should be skipped.
+// The logic determining if a metric should be processed is based on include and exclude settings.
+// Include properties are checked before exclude settings are checked.
+func NewSkipExpr(include *MatchProperties, exclude *MatchProperties) (expr.BoolExpr[ottlmetric.TransformContext], error) {
+ var matchers []expr.BoolExpr[ottlmetric.TransformContext]
+ inclExpr, err := newExpr(include)
+ if err != nil {
+ return nil, err
+ }
+ if inclExpr != nil {
+ matchers = append(matchers, expr.Not(inclExpr))
+ }
+ exclExpr, err := newExpr(exclude)
+ if err != nil {
+ return nil, err
+ }
+ if exclExpr != nil {
+ matchers = append(matchers, exclExpr)
+ }
+ return expr.Or(matchers...), nil
+}
+
+// newExpr constructs a metric BoolExpr. If the 'expr' match type is specified,
+// it returns an expr matcher, otherwise a name matcher.
+func newExpr(mp *MatchProperties) (expr.BoolExpr[ottlmetric.TransformContext], error) {
+ if mp == nil {
+ return nil, nil
+ }
+
+ if mp.MatchType == Expr {
+ if len(mp.Expressions) == 0 {
+ return nil, nil
+ }
+ return newExprMatcher(mp.Expressions)
+ }
+ if len(mp.MetricNames) == 0 {
+ return nil, nil
+ }
+ return newNameMatcher(mp)
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/name_matcher.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/name_matcher.go
new file mode 100644
index 00000000000..2b1c9c0da1c
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric/name_matcher.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filtermetric // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric"
+
+import (
+ "context"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
+)
+
+// nameMatcher matches metrics by name against a set of prespecified filters.
+type nameMatcher struct {
+ nameFilters filterset.FilterSet
+}
+
+func newNameMatcher(mp *MatchProperties) (*nameMatcher, error) {
+ nameFS, err := filterset.CreateFilterSet(
+ mp.MetricNames,
+ &filterset.Config{
+ MatchType: filterset.MatchType(mp.MatchType),
+ RegexpConfig: mp.RegexpConfig,
+ },
+ )
+ if err != nil {
+ return nil, err
+ }
+ return &nameMatcher{nameFilters: nameFS}, nil
+}
+
+// Eval matches the metric name in the given context against the name filters
+// configured on the nameMatcher.
+func (m *nameMatcher) Eval(_ context.Context, tCtx ottlmetric.TransformContext) (bool, error) {
+ return m.nameFilters.Matches(tCtx.GetMetric().Name()), nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go
new file mode 100644
index 00000000000..7f30dca0ddd
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl/filter.go
@@ -0,0 +1,178 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/collector/component"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+)
+
+// NewBoolExprForSpan creates a BoolExpr[ottlspan.TransformContext] that will return true if any of the given OTTL conditions evaluate to true.
+// The passed in functions should use the ottlspan.TransformContext.
+// If a function named `drop` is not present in the function map, it will be added automatically so that parsing works as expected.
+func NewBoolExprForSpan(conditions []string, functions map[string]interface{}, errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlspan.TransformContext], error) {
+ if _, ok := functions["drop"]; !ok {
+ functions["drop"] = drop[ottlspan.TransformContext]
+ }
+ statementsStr := conditionsToStatements(conditions)
+ parser, err := ottlspan.NewParser(functions, set)
+ if err != nil {
+ return nil, err
+ }
+ statements, err := parser.ParseStatements(statementsStr)
+ if err != nil {
+ return nil, err
+ }
+ s := ottlspan.NewStatements(statements, set, ottlspan.WithErrorMode(errorMode))
+ return &s, nil
+}
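+
+// Editorial sketch (not part of the upstream file): all NewBoolExprFor* helpers
+// follow the same pattern. For spans, a caller might write (with `set` being
+// the caller's component.TelemetrySettings):
+//
+//    conds := []string{`attributes["http.status_code"] == 404`}
+//    e, err := NewBoolExprForSpan(conds, StandardSpanFuncs(), ottl.IgnoreError, set)
+//    if err != nil { /* invalid condition */ }
+//    matched, err := e.Eval(ctx, tCtx) // tCtx is an ottlspan.TransformContext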
+
+// NewBoolExprForSpanEvent creates a BoolExpr[ottlspanevent.TransformContext] that will return true if any of the given OTTL conditions evaluate to true.
+// The passed in functions should use the ottlspanevent.TransformContext.
+// If a function named `drop` is not present in the function map, it will be added automatically so that parsing works as expected.
+func NewBoolExprForSpanEvent(conditions []string, functions map[string]interface{}, errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlspanevent.TransformContext], error) {
+ if _, ok := functions["drop"]; !ok {
+ functions["drop"] = drop[ottlspanevent.TransformContext]
+ }
+ statementsStr := conditionsToStatements(conditions)
+ parser, err := ottlspanevent.NewParser(functions, set)
+ if err != nil {
+ return nil, err
+ }
+ statements, err := parser.ParseStatements(statementsStr)
+ if err != nil {
+ return nil, err
+ }
+ s := ottlspanevent.NewStatements(statements, set, ottlspanevent.WithErrorMode(errorMode))
+ return &s, nil
+}
+
+// NewBoolExprForMetric creates a BoolExpr[ottlmetric.TransformContext] that will return true if any of the given OTTL conditions evaluate to true.
+// The passed in functions should use the ottlmetric.TransformContext.
+// If a function named `drop` is not present in the function map, it will be added automatically so that parsing works as expected.
+func NewBoolExprForMetric(conditions []string, functions map[string]interface{}, errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlmetric.TransformContext], error) {
+ if _, ok := functions["drop"]; !ok {
+ functions["drop"] = drop[ottlmetric.TransformContext]
+ }
+ statementsStr := conditionsToStatements(conditions)
+ parser, err := ottlmetric.NewParser(functions, set)
+ if err != nil {
+ return nil, err
+ }
+ statements, err := parser.ParseStatements(statementsStr)
+ if err != nil {
+ return nil, err
+ }
+ s := ottlmetric.NewStatements(statements, set, ottlmetric.WithErrorMode(errorMode))
+ return &s, nil
+}
+
+// NewBoolExprForDataPoint creates a BoolExpr[ottldatapoint.TransformContext] that will return true if any of the given OTTL conditions evaluate to true.
+// The passed in functions should use the ottldatapoint.TransformContext.
+// If a function named `drop` is not present in the function map, it will be added automatically so that parsing works as expected.
+func NewBoolExprForDataPoint(conditions []string, functions map[string]interface{}, errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottldatapoint.TransformContext], error) {
+ if _, ok := functions["drop"]; !ok {
+ functions["drop"] = drop[ottldatapoint.TransformContext]
+ }
+ statementsStr := conditionsToStatements(conditions)
+ parser, err := ottldatapoint.NewParser(functions, set)
+ if err != nil {
+ return nil, err
+ }
+ statements, err := parser.ParseStatements(statementsStr)
+ if err != nil {
+ return nil, err
+ }
+ s := ottldatapoint.NewStatements(statements, set, ottldatapoint.WithErrorMode(errorMode))
+ return &s, nil
+}
+
+// NewBoolExprForLog creates a BoolExpr[ottllog.TransformContext] that will return true if any of the given OTTL conditions evaluate to true.
+// The passed in functions should use the ottllog.TransformContext.
+// If a function named `drop` is not present in the function map, it will be added automatically so that parsing works as expected.
+func NewBoolExprForLog(conditions []string, functions map[string]interface{}, errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottllog.TransformContext], error) {
+ if _, ok := functions["drop"]; !ok {
+ functions["drop"] = drop[ottllog.TransformContext]
+ }
+ statementsStr := conditionsToStatements(conditions)
+ parser, err := ottllog.NewParser(functions, set)
+ if err != nil {
+ return nil, err
+ }
+ statements, err := parser.ParseStatements(statementsStr)
+ if err != nil {
+ return nil, err
+ }
+ s := ottllog.NewStatements(statements, set, ottllog.WithErrorMode(errorMode))
+ return &s, nil
+}
+
+func conditionsToStatements(conditions []string) []string {
+ statements := make([]string, len(conditions))
+ for i, condition := range conditions {
+ statements[i] = "drop() where " + condition
+ }
+ return statements
+}
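+
+// Editorial note (not part of the upstream file): for example, the condition
+//
+//    attributes["env"] == "dev"
+//
+// becomes the statement
+//
+//    drop() where attributes["env"] == "dev"
+//
+// The drop() function defined below always returns true, so evaluating the
+// statement amounts to evaluating the condition itself.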
+
+func StandardSpanFuncs() map[string]interface{} {
+ return standardFuncs[ottlspan.TransformContext]()
+}
+
+func StandardSpanEventFuncs() map[string]interface{} {
+ return standardFuncs[ottlspanevent.TransformContext]()
+}
+
+func StandardMetricFuncs() map[string]interface{} {
+ return standardFuncs[ottlmetric.TransformContext]()
+}
+
+func StandardDataPointFuncs() map[string]interface{} {
+ return standardFuncs[ottldatapoint.TransformContext]()
+}
+
+func StandardLogFuncs() map[string]interface{} {
+ return standardFuncs[ottllog.TransformContext]()
+}
+
+func standardFuncs[K any]() map[string]interface{} {
+ return map[string]interface{}{
+ "TraceID": ottlfuncs.TraceID[K],
+ "SpanID": ottlfuncs.SpanID[K],
+ "IsMatch": ottlfuncs.IsMatch[K],
+ "Concat": ottlfuncs.Concat[K],
+ "Split": ottlfuncs.Split[K],
+ "Int": ottlfuncs.Int[K],
+ "ConvertCase": ottlfuncs.ConvertCase[K],
+ "Substring": ottlfuncs.Substring[K],
+ "drop": drop[K],
+ }
+}
+
+func drop[K any]() (ottl.ExprFunc[K], error) {
+ return func(context.Context, K) (interface{}, error) {
+ return true, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go
new file mode 100644
index 00000000000..37dba0294cc
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/config.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterset // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+
+import (
+ "fmt"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict"
+)
+
+// MatchType describes the type of pattern matching a FilterSet uses to filter strings.
+type MatchType string
+
+const (
+ // Regexp is the MatchType for filtering by regexp string matches.
+ Regexp MatchType = "regexp"
+ // Strict is the MatchType for filtering by exact string matches.
+ Strict MatchType = "strict"
+ // MatchTypeFieldName is the mapstructure field name for MatchType field.
+ MatchTypeFieldName = "match_type"
+)
+
+var (
+ validMatchTypes = []MatchType{Regexp, Strict}
+)
+
+// Config configures the matching behavior of a FilterSet.
+type Config struct {
+ MatchType MatchType `mapstructure:"match_type"`
+ RegexpConfig *regexp.Config `mapstructure:"regexp"`
+}
+
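+// NewUnrecognizedMatchTypeError returns an error indicating that the given match type is not one of the valid MatchTypes.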
+func NewUnrecognizedMatchTypeError(matchType MatchType) error {
+ return fmt.Errorf("unrecognized %v: '%v', valid types are: %v", MatchTypeFieldName, matchType, validMatchTypes)
+}
+
+// CreateFilterSet creates a FilterSet from yaml config.
+func CreateFilterSet(filters []string, cfg *Config) (FilterSet, error) {
+ switch cfg.MatchType {
+ case Regexp:
+ return regexp.NewFilterSet(filters, cfg.RegexpConfig)
+ case Strict:
+ // Strict FilterSets do not have any extra configuration options, so call the constructor directly.
+ return strict.NewFilterSet(filters), nil
+ default:
+ return nil, NewUnrecognizedMatchTypeError(cfg.MatchType)
+ }
+}
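A short usage sketch of the factory above; the filter strings and output comments are illustrative only:

```go
fs, err := filterset.CreateFilterSet(
	[]string{"^auth-.*$", "^billing$"},
	&filterset.Config{MatchType: filterset.Regexp},
)
if err != nil {
	return err
}
fmt.Println(fs.Matches("auth-service")) // true: matched by the first regexp
fmt.Println(fs.Matches("checkout"))     // false: no filter matches
```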
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/doc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/doc.go
new file mode 100644
index 00000000000..41c45d71503
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package filterset provides an interface for matching strings against a set of string filters.
+package filterset // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/filterset.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/filterset.go
new file mode 100644
index 00000000000..f7fe37cd969
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/filterset.go
@@ -0,0 +1,22 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterset // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+
+// FilterSet is an interface for matching strings against a set of filters.
+type FilterSet interface {
+ // Matches returns true if the given string matches at least one
+ // of the filters encapsulated by the FilterSet.
+ Matches(string) bool
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp/config.go
new file mode 100644
index 00000000000..3eac860ba7e
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp/config.go
@@ -0,0 +1,25 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package regexp // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp"
+
+// Config represents the options for NewFilterSet.
+type Config struct {
+ // CacheEnabled determines whether match results are LRU cached to make subsequent matches faster.
+ // Cache size is unlimited unless CacheMaxNumEntries is also specified.
+ CacheEnabled bool `mapstructure:"cacheenabled"`
+ // CacheMaxNumEntries is the max number of entries of the LRU cache that stores match results.
+ // CacheMaxNumEntries is ignored if CacheEnabled is false.
+ CacheMaxNumEntries int `mapstructure:"cachemaxnumentries"`
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp/doc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp/doc.go
new file mode 100644
index 00000000000..e201fe0127e
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package regexp provides an implementation to match strings against a set of regexp string filters.
+package regexp // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp/regexpfilterset.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp/regexpfilterset.go
new file mode 100644
index 00000000000..47fbe25c824
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp/regexpfilterset.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package regexp // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp"
+
+import (
+ "regexp"
+
+ "github.com/golang/groupcache/lru"
+)
+
+// FilterSet encapsulates a set of filters and caches match results.
+// Filters are re2 regex strings.
+// FilterSet is exported for convenience, but has unexported fields and should be constructed through NewFilterSet.
+//
+// FilterSet satisfies the FilterSet interface from
+// "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset".
+type FilterSet struct {
+ regexes []*regexp.Regexp
+ cacheEnabled bool
+ cache *lru.Cache
+}
+
+// NewFilterSet constructs a FilterSet of re2 regex strings.
+// If any of the given filters fail to compile into re2, an error is returned.
+func NewFilterSet(filters []string, cfg *Config) (*FilterSet, error) {
+ fs := &FilterSet{
+ regexes: make([]*regexp.Regexp, 0, len(filters)),
+ }
+
+ if cfg != nil && cfg.CacheEnabled {
+ fs.cacheEnabled = true
+ fs.cache = lru.New(cfg.CacheMaxNumEntries)
+ }
+
+ if err := fs.addFilters(filters); err != nil {
+ return nil, err
+ }
+
+ return fs, nil
+}
+
+// Matches returns true if the given string matches any of the FilterSet's filters,
+// i.e. if at least one filter's re2 regex finds a match within the string.
+func (rfs *FilterSet) Matches(toMatch string) bool {
+ if rfs.cacheEnabled {
+ if v, ok := rfs.cache.Get(toMatch); ok {
+ return v.(bool)
+ }
+ }
+
+ for _, r := range rfs.regexes {
+ if r.MatchString(toMatch) {
+ if rfs.cacheEnabled {
+ rfs.cache.Add(toMatch, true)
+ }
+ return true
+ }
+ }
+
+ if rfs.cacheEnabled {
+ rfs.cache.Add(toMatch, false)
+ }
+ return false
+}
+
+// addFilters compiles all the given filters and stores them as regexes,
+// skipping duplicates. Note that the regexes are not implicitly anchored;
+// include `^` and `$` in a filter to enforce a full-string match.
+func (rfs *FilterSet) addFilters(filters []string) error {
+ dedup := make(map[string]struct{}, len(filters))
+ for _, f := range filters {
+ if _, ok := dedup[f]; ok {
+ continue
+ }
+
+ re, err := regexp.Compile(f)
+ if err != nil {
+ return err
+ }
+ rfs.regexes = append(rfs.regexes, re)
+ dedup[f] = struct{}{}
+ }
+
+ return nil
+}
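A sketch of constructing this FilterSet with the LRU cache enabled; the pattern and cache size are illustrative:

```go
fs, err := regexp.NewFilterSet(
	[]string{"^kube-.*$"}, // anchored explicitly, since compilation does not anchor
	&regexp.Config{CacheEnabled: true, CacheMaxNumEntries: 1024},
)
if err != nil {
	return err
}
_ = fs.Matches("kube-proxy") // evaluates the regex and caches the result
_ = fs.Matches("kube-proxy") // second call is served from the LRU cache
```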
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict/doc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict/doc.go
new file mode 100644
index 00000000000..2f7ac81634f
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package strict provides an implementation to match strings against a set of exact match string filters.
+package strict // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict/strictfilterset.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict/strictfilterset.go
new file mode 100644
index 00000000000..26d59be400c
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict/strictfilterset.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strict // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict"
+
+// FilterSet encapsulates a set of exact string match filters.
+// FilterSet is exported for convenience, but has unexported fields and should be constructed through NewFilterSet.
+//
+// FilterSet satisfies the FilterSet interface from
+// "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset".
+type FilterSet struct {
+ filters map[string]struct{}
+}
+
+// NewFilterSet constructs a FilterSet of exact string matches.
+func NewFilterSet(filters []string) *FilterSet {
+ fs := &FilterSet{
+ filters: make(map[string]struct{}, len(filters)),
+ }
+
+ for _, f := range filters {
+ fs.filters[f] = struct{}{}
+ }
+
+ return fs
+}
+
+// Matches returns true if the given string matches any of the FilterSet's filters.
+func (sfs *FilterSet) Matches(toMatch string) bool {
+ _, ok := sfs.filters[toMatch]
+ return ok
+}
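For comparison, a minimal sketch of the strict variant, which needs no error handling because nothing is compiled:

```go
fs := strict.NewFilterSet([]string{"GET /health", "GET /ready"})
fmt.Println(fs.Matches("GET /health"))  // true: exact match
fmt.Println(fs.Matches("GET /healthz")) // false: strict matching has no wildcards
```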
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go
new file mode 100644
index 00000000000..2dbb8b70c93
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan/filterspan.go
@@ -0,0 +1,146 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterspan // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ conventions "go.opentelemetry.io/collector/semconv/v1.6.1"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/traceutil"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan"
+)
+
+// NewSkipExpr creates a BoolExpr that on evaluation returns true if a span should NOT be processed or kept.
+// The logic determining if a span should be processed is based on include and exclude settings.
+// Include properties are checked before exclude settings are checked.
+func NewSkipExpr(mp *filterconfig.MatchConfig) (expr.BoolExpr[ottlspan.TransformContext], error) {
+ var matchers []expr.BoolExpr[ottlspan.TransformContext]
+ inclExpr, err := newExpr(mp.Include)
+ if err != nil {
+ return nil, err
+ }
+ if inclExpr != nil {
+ matchers = append(matchers, expr.Not(inclExpr))
+ }
+ exclExpr, err := newExpr(mp.Exclude)
+ if err != nil {
+ return nil, err
+ }
+ if exclExpr != nil {
+ matchers = append(matchers, exclExpr)
+ }
+ return expr.Or(matchers...), nil
+}
+
+// propertiesMatcher allows matching a span against various span properties.
+type propertiesMatcher struct {
+ filtermatcher.PropertiesMatcher
+
+ // Service names to compare to.
+ serviceFilters filterset.FilterSet
+
+ // Span names to compare to.
+ nameFilters filterset.FilterSet
+
+ // Span kinds to compare to.
+ kindFilters filterset.FilterSet
+}
+
+// newExpr creates a BoolExpr that matches based on the given MatchProperties.
+func newExpr(mp *filterconfig.MatchProperties) (expr.BoolExpr[ottlspan.TransformContext], error) {
+ if mp == nil {
+ return nil, nil
+ }
+
+ if err := mp.ValidateForSpans(); err != nil {
+ return nil, err
+ }
+
+ rm, err := filtermatcher.NewMatcher(mp)
+ if err != nil {
+ return nil, err
+ }
+
+ var serviceFS filterset.FilterSet
+ if len(mp.Services) > 0 {
+ serviceFS, err = filterset.CreateFilterSet(mp.Services, &mp.Config)
+ if err != nil {
+ return nil, fmt.Errorf("error creating service name filters: %w", err)
+ }
+ }
+
+ var nameFS filterset.FilterSet
+ if len(mp.SpanNames) > 0 {
+ nameFS, err = filterset.CreateFilterSet(mp.SpanNames, &mp.Config)
+ if err != nil {
+ return nil, fmt.Errorf("error creating span name filters: %w", err)
+ }
+ }
+
+ var kindFS filterset.FilterSet
+ if len(mp.SpanKinds) > 0 {
+ kindFS, err = filterset.CreateFilterSet(mp.SpanKinds, &mp.Config)
+ if err != nil {
+ return nil, fmt.Errorf("error creating span kind filters: %w", err)
+ }
+ }
+
+ return &propertiesMatcher{
+ PropertiesMatcher: rm,
+ serviceFilters: serviceFS,
+ nameFilters: nameFS,
+ kindFilters: kindFS,
+ }, nil
+}
+
+// Eval matches a span and its service against the configured set of properties.
+// See filterconfig.MatchProperties for more details.
+func (mp *propertiesMatcher) Eval(_ context.Context, tCtx ottlspan.TransformContext) (bool, error) {
+ // If a set of properties was not specified in the match properties, all spans are considered to match on that property.
+ if mp.serviceFilters != nil {
+ // Check resource and spans for service.name
+ serviceName := serviceNameForResource(tCtx.GetResource())
+
+ if !mp.serviceFilters.Matches(serviceName) {
+ return false, nil
+ }
+ }
+
+ if mp.nameFilters != nil && !mp.nameFilters.Matches(tCtx.GetSpan().Name()) {
+ return false, nil
+ }
+
+ if mp.kindFilters != nil && !mp.kindFilters.Matches(traceutil.SpanKindStr(tCtx.GetSpan().Kind())) {
+ return false, nil
+ }
+
+ return mp.PropertiesMatcher.Match(tCtx.GetSpan().Attributes(), tCtx.GetResource(), tCtx.GetInstrumentationScope()), nil
+}
+
+// serviceNameForResource gets the service name for a specified Resource.
+func serviceNameForResource(resource pcommon.Resource) string {
+ service, found := resource.Attributes().Get(conventions.AttributeServiceName)
+ if !found {
+ return ""
+ }
+ return service.AsString()
+}
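A sketch of wiring the above together. The `Config`, `Services`, and `SpanNames` fields follow the accesses in this file; the surrounding `ctx` and `tCtx` are assumed to come from the caller:

```go
skip, err := filterspan.NewSkipExpr(&filterconfig.MatchConfig{
	Exclude: &filterconfig.MatchProperties{
		Config:    filterset.Config{MatchType: filterset.Strict},
		Services:  []string{"healthcheck"},
		SpanNames: []string{"GET /health"},
	},
})
if err != nil {
	return err
}
// Per span: true means the span should NOT be kept.
shouldSkip, err := skip.Eval(ctx, tCtx) // tCtx is an ottlspan.TransformContext
```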
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md
new file mode 100644
index 00000000000..cb7c9f21612
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/CONTRIBUTING.md
@@ -0,0 +1,25 @@
+# Contributing
+
+This guide is specific to the OpenTelemetry Transformation Language. All guidelines in [Collector Contrib's CONTRIBUTING.MD](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md) must also be followed.
+
+## General Guidelines
+
+- Changes to the OpenTelemetry Transformation Language should be made independently of any components that depend on the package. Whenever possible, try not to submit PRs that change both the OTTL and a dependent component. Instead, submit a PR that updates the OTTL and then, once merged, update the component as needed.
+
+## New Values
+
+When adding new values to the grammar you must:
+
+1. Update the `Value` struct with the new value. This may also mean adding new token(s) to the lexer.
+2. Update `NewFunctionCall` to be able to handle calling functions with this new value.
+3. Update `NewGetter` to be able to handle the new value.
+4. Add new unit tests.
+
+## New Functions
+
+All new functions must be added via a new file. Function files must start with `func_`. Functions must be placed in `ottlfuncs`.
+
+Unit tests must be added for all new functions. Unit test files must start with `func_` and end in `_test`. Unit tests must be placed in the same directory as the function. Functions that are not specific to a pipeline should be tested independently of any specific pipeline. Functions that are specific to a pipeline should be tested against that pipeline.
+
+Function names should follow the [Function Syntax Guidelines](ottlfuncs/README.md#function-syntax).
+
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md
new file mode 100644
index 00000000000..118c06b4731
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/README.md
@@ -0,0 +1,329 @@
+# OpenTelemetry Transformation Language
+
+The OpenTelemetry Transformation Language is a language for transforming OpenTelemetry data based on the [OpenTelemetry Collector Processing Exploration](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/processing.md).
+
+This package reads in OTTL statements and converts them to invokable Booleans and functions based on the OTTL's grammar.
+
+The OTTL is signal agnostic; it is not aware of the type of telemetry on which it will operate. Instead, the Booleans and functions returned by the package must be passed a `TransformContext`, which provides access to the signal's telemetry. Telemetry data can be accessed and updated through [Getters and Setters](#getters-and-setters).
+
+## Grammar
+
+The OTTL grammar includes Invocations, Values and Boolean Expressions.
+
+### Invocations
+
+Invocations represent function calls that transform the underlying telemetry payload. Invocations are made up of two parts:
+
+- a string identifier. The string identifier must start with a lowercase letter.
+- zero or more Values (comma separated) surrounded by parentheses (`()`).
+
+**The OTTL does not define any function implementations.**
+Users must supply a map between string identifiers and the actual function implementation.
+The OTTL will use this map and reflection to generate Invocations that can then be invoked by the user.
+See [ottlfuncs](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs) for pre-made, usable functions.
+
+Example Invocations
+- `route()`
+- `set(field, 1)`
+
+#### Invocation parameters
+
+The OTTL will use reflection to determine parameter types when parsing an invocation within a statement; a sketch of a user-defined function follows the lists below.
+
+When developing functions that represent invocations, the following types are supported for single parameter values:
+- `Setter`
+- `GetSetter`
+- `Getter`
+- `Enum`
+- `string`
+- `float64`
+- `int64`
+- `bool`
+
+For slice parameters, the following types are supported:
+- `string`
+- `float64`
+- `int64`
+- `uint8`. Byte slice literals are parsed as byte slices by the OTTL.
+- `Getter`
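As a sketch (not part of the OTTL itself), a user-defined function matching these rules might look like the following; the name `hasPrefix` and its registration key are hypothetical:

```go
// The OTTL matches the parameter types below (Getter, string) against the
// parsed invocation's arguments via reflection.
func hasPrefix[K any](target ottl.Getter[K], prefix string) (ottl.ExprFunc[K], error) {
	return func(ctx context.Context, tCtx K) (interface{}, error) {
		val, err := target.Get(ctx, tCtx)
		if err != nil {
			return nil, err
		}
		if s, ok := val.(string); ok {
			return strings.HasPrefix(s, prefix), nil
		}
		return false, nil
	}, nil
}

// Registered under a lowercase identifier so that it parses as an Invocation:
// functions["has_prefix"] = hasPrefix[ottlspan.TransformContext]
```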
+
+### Values
+
+Values are passed as input to an Invocation or are used in a Boolean Expression. Values can take the form of:
+- [Paths](#paths)
+- [Lists](#lists)
+- [Literals](#literals)
+- [Enums](#enums)
+- [Converters](#converters)
+- [Math Expressions](#math_expressions)
+
+#### Paths
+
+A Path Value is a reference to a telemetry field. Paths are made up of lowercase identifiers, dots (`.`), and square brackets combined with a string key (`["key"]`). **The interpretation of a Path is NOT implemented by the OTTL.** Instead, the user must provide a `PathExpressionParser` that the OTTL can use to interpret paths. As a result, how the Path parts are used is up to the user. However, it is recommended that the parts be used like so:
+
+- Identifiers are used to map to a telemetry field.
+- Dots (`.`) are used to separate nested fields.
+- Square brackets and keys (`["key"]`) are used to access values within maps.
+
+When accessing a map's value, if the given key does not exist, `nil` will be returned.
+This can be used to check for the presence of a key within a map within a [Boolean Expression](#boolean_expressions).
+
+Example Paths
+- `name`
+- `value_double`
+- `resource.name`
+- `resource.attributes["key"]`
+
+#### Lists
+
+A List Value comprises a sequence of Values.
+Currently, lists can only be created by the grammar to be used in functions or conditions;
+the grammar does not provide an accessor to individual list entries.
+
+Example List Values:
+- `[]`
+- `[1]`
+- `["1", "2", "3"]`
+- `["a", attributes["key"], Concat(["a", "b"], "-")]`
+
+#### Literals
+
+Literals are literal interpretations of the Value into a Go value. Accepted literals are:
+
+- Strings. Strings are represented as literals by surrounding the string in double quotes (`""`).
+- Ints. Ints are represented by one or more digits, optionally prefixed by plus (`+`) or minus (`-`). Internally, the OTTL represents all ints as `int64`.
+- Floats. Floats are represented by digits separated by a dot (`.`), optionally prefixed by plus (`+`) or minus (`-`). The leading digit is optional. Internally, the OTTL represents all floats as `float64`.
+- Bools. Bools are represented by the exact strings `true` and `false`.
+- Nil. Nil is represented by the exact string `nil`.
+- Byte slices. Byte slices are represented via a hex string prefaced with `0x`
+
+Example Literals
+- `"a string"`
+- `1`, `-1`
+- `1.5`, `-.5`
+- `true`, `false`
+- `nil`
+- `0x0001`
+
+#### Enums
+
+Enums are uppercase identifiers that get interpreted during parsing and converted to an `int64`. **The interpretation of an Enum is NOT implemented by the OTTL.** Instead, the user must provide an `EnumParser` that the OTTL can use to interpret the Enum. The `EnumParser` returns an `int64` instead of a function, which means that the Enum's numeric value is retrieved during parsing instead of during execution.
+
+Within the grammar Enums are always used as `int64`. As a result, the Enum's symbol can be used as if it is an Int value.
+
+When defining a function that will be used as an Invocation by the OTTL, if the function needs to take an Enum then the function must use the `Enum` type for that argument, not an `int64`.
+
+#### Converters
+
+Converters are special functions that convert data to a new format before being passed to an Invocation or Boolean Expression.
+Like Invocations, Converters are made up of two parts:
+
+- a string identifier. The string identifier must start with an uppercase letter.
+- zero or more Values (comma separated) surrounded by parentheses (`()`).
+
+**The OTTL does not define any converter implementations.**
+Users must include Converters in the same map in which Invocations are supplied.
+The OTTL will use this map and reflection to generate Converters that can then be invoked by the user.
+See [ottlfuncs](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#converters) for pre-made, usable Converters.
+
+Example Converters
+- `Int()`
+- `IsMatch(field, ".*")`
+
+
+#### Math Expressions
+
+Math Expressions represent arithmetic calculations. They support `+`, `-`, `*`, and `/`, along with `()` for grouping.
+
+Math Expressions currently only support `int64` and `float64`.
+Math Expressions support `Paths` and `Invocations` that return supported types.
+Note that `*` and `/` take precedence over `+` and `-`.
+Operations that share the same level of precedence will be executed in the order that they appear in the Math Expression.
+Math Expressions can be grouped with parentheses to override evaluation precedence.
+Math Expressions that mix `int64` and `float64` will result in an error.
+It is up to the function using the Math Expression to determine what to do with that error and the default return value of `nil`.
+Division by zero is gracefully handled with an error, but other arithmetic operations that would result in a panic will still result in a panic.
+Division of integers results in an integer and follows Go's rules for division of integers.
+
+Since Math Expressions support `Path` and `Invocation`, they are evaluated during data processing.
+__As a result, in order for a function to accept a Math Expression as a parameter, it must use a `Getter`.__
+
+Example Math Expressions
+- `1 + 1`
+- `end_time_unix_nano - end_time_unix_nano`
+- `sum([1, 2, 3, 4]) + (10 / 1) - 1`
+
+
+### Boolean Expressions
+
+Boolean Expressions allow a decision to be made about whether an Invocation should be called. Boolean Expressions are optional. When used, the parsed statement will include a `Condition`, which can be used to evaluate the result of the statement's Boolean Expression. Boolean Expressions always evaluate to a boolean value (true or false).
+
+Boolean Expressions consist of the literal string `where` followed by one or more Booleans (see below).
+Booleans can be joined with the literal strings `and` and `or`.
+Booleans can be negated with the literal string `not`.
+Note that `not` has the highest precedence and `and` Boolean Expressions have higher precedence than `or`.
+Boolean Expressions can be grouped with parentheses to override evaluation precedence.
+
+### Booleans
+
+Booleans can be either:
+- A literal boolean value (`true` or `false`).
+- A Comparison, made up of a left Value, an operator, and a right Value. See [Values](#values) for details on what a Value can be.
+
+Operators determine how the two Values are compared.
+
+The valid operators are:
+
+- Equal (`==`). Tests if the left and right Values are equal (see the Comparison Rules below).
+- Not Equal (`!=`). Tests if the left and right Values are not equal.
+- Less Than (`<`). Tests if left is less than right.
+- Greater Than (`>`). Tests if left is greater than right.
+- Less Than or Equal To (`<=`). Tests if left is less than or equal to right.
+- Greater Than or Equal to (`>=`). Tests if left is greater than or equal to right.
+
+Booleans can be negated with the `not` keyword such as
+- `not true`
+- `not name == "foo"`
+- `not (IsMatch(name, "http_.*") == true and kind > 0)`
+
+### Comparison Rules
+
+The table below describes what happens when two Values are compared. Value types are provided by the user of OTTL. All of the value types supported by OTTL are listed in this table.
+
+If numeric values are of different types, they are compared as `float64`.
+
+For numeric values and strings, the comparison rules are those implemented by Go. Numeric comparisons are signed. For boolean values, `false` is considered to be less than `true`.
+
+For values that are not one of the basic primitive types, the only valid comparisons are Equal and Not Equal, which are implemented using Go's standard `==` and `!=` operators.
+
+A `not equal` notation in the table below means that the `!=` operator returns true, but any other operator returns false. Note that a nil byte array is considered equivalent to nil.
+
+
+| base type | bool | int64 | float64 | string | Bytes | nil |
+|-----------|-------------|---------------------|---------------------|---------------------------------|--------------------------|------------------------|
+| bool | normal, T>F | not equal | not equal | not equal | not equal | not equal |
+| int64 | not equal | compared as int64 | compared as float64 | not equal | not equal | not equal |
+| float64 | not equal | compared as float64 | compared as float64 | not equal | not equal | not equal |
+| string | not equal | not equal | not equal | normal (compared as Go strings) | not equal | not equal |
+| Bytes | not equal | not equal | not equal | not equal | byte-for-byte comparison | []byte(nil) == nil |
+| nil | not equal | not equal | not equal | not equal | []byte(nil) == nil | true for equality only |
+
+Examples:
+- `name == "a name"`
+- `1 < 2`
+- `attributes["custom-attr"] != nil`
+- `IsMatch(resource.attributes["host.name"], "pod-*") == true`
+
+## Accessing signal telemetry
+
+Access to signal telemetry is provided to OTTL functions through a `TransformContext` that is created by the user and passed during statement evaluation. To allow functions to operate on the `TransformContext`, the OTTL provides `Getter`, `Setter`, and `GetSetter` interfaces.
+
+### Getters and Setters
+
+Getters allow for reading the following types of data. See the respective section of each Value type for how they are interpreted.
+- [Paths](#paths).
+- [Enums](#enums).
+- [Literals](#literals).
+- [Invocations](#invocations).
+
+It is possible to update the Value in a telemetry field using a Setter. For read and write access, the `GetSetter` interface combines both.
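As used throughout this package, the interface shapes are roughly as follows (a sketch; see the package source for the authoritative definitions):

```go
type Getter[K any] interface {
	// Get reads a value from the TransformContext.
	Get(ctx context.Context, tCtx K) (interface{}, error)
}

type Setter[K any] interface {
	// Set writes a value into the TransformContext.
	Set(ctx context.Context, tCtx K, val interface{}) error
}

// GetSetter provides read and write access to the same field.
type GetSetter[K any] interface {
	Getter[K]
	Setter[K]
}
```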
+
+## Logging inside an OTTL function
+
+To emit logs inside an OTTL function, add a parameter of type [`component.TelemetrySettings`](https://pkg.go.dev/go.opentelemetry.io/collector/component#TelemetrySettings) to the function signature. The OTTL will then inject the TelemetrySettings that were passed to `NewParser` into the function. TelemetrySettings can be used to emit logs.
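A hypothetical function using this mechanism; the name and behavior are illustrative only:

```go
// The OTTL injects the TelemetrySettings passed to NewParser when it sees
// a component.TelemetrySettings parameter in the function signature.
func noisyDrop[K any](set component.TelemetrySettings) (ottl.ExprFunc[K], error) {
	return func(ctx context.Context, tCtx K) (interface{}, error) {
		set.Logger.Debug("dropping telemetry item")
		return true, nil
	}, nil
}
```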
+
+## Examples
+
+These examples are written in a SQL-like declarative language. Applied statements interact with only one signal, but statements can be declared across multiple signals. The functions used in the examples are indicative of what could be useful, but are not implemented by the OTTL itself.
+
+### Remove a forbidden attribute
+
+```
+traces:
+ delete(attributes["http.request.header.authorization"])
+metrics:
+ delete(attributes["http.request.header.authorization"])
+logs:
+ delete(attributes["http.request.header.authorization"])
+```
+
+### Remove all attributes except for some
+
+```
+traces:
+ keep_keys(attributes, ["http.method", "http.status_code"])
+metrics:
+ keep_keys(attributes, ["http.method", "http.status_code"])
+logs:
+ keep_keys(attributes, ["http.method", "http.status_code"])
+```
+
+### Reduce cardinality of an attribute
+
+```
+traces:
+ replace_match(attributes["http.target"], "/user/*/list/*", "/user/{userId}/list/{listId}")
+```
+
+### Reduce cardinality of a span name
+
+```
+traces:
+ replace_match(name, "GET /user/*/list/*", "GET /user/{userId}/list/{listId}")
+```
+
+### Reduce cardinality of any matching attribute
+
+```
+traces:
+ replace_all_matches(attributes, "/user/*/list/*", "/user/{userId}/list/{listId}")
+```
+
+### Decrease the size of the telemetry payload
+
+```
+traces:
+ delete(resource.attributes["process.command_line"])
+metrics:
+ delete(resource.attributes["process.command_line"])
+logs:
+ delete(resource.attributes["process.command_line"])
+```
+
+### Attach information from resource into telemetry
+
+```
+metrics:
+ set(attributes["k8s_pod"], resource.attributes["k8s.pod.name"])
+```
+
+### Decorate error spans with additional information
+
+```
+traces:
+ set(attributes["whose_fault"], "theirs") where attributes["http.status"] == 400 or attributes["http.status"] == 404
+ set(attributes["whose_fault"], "ours") where attributes["http.status"] == 500
+```
+
+### Update a span's ID
+
+```
+logs:
+ set(span_id, SpanID(0x0000000000000000))
+traces:
+ set(span_id, SpanID(0x0000000000000000))
+```
+
+### Convert metric name to snake case
+
+```
+metrics:
+ set(metric.name, ConvertCase(metric.name, "snake"))
+```
+
+### Check if an attribute exists
+
+```
+traces:
+ set(attributes["test-passed"], true) where attributes["target-attribute"] != nil
+```
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go
new file mode 100644
index 00000000000..9b4fba326d5
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/boolean_value.go
@@ -0,0 +1,183 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+
+import (
+ "context"
+ "fmt"
+)
+
+// boolExpressionEvaluator is a function that evaluates a boolean expression against the given TransformContext and returns the result.
+type boolExpressionEvaluator[K any] func(ctx context.Context, tCtx K) (bool, error)
+
+type BoolExpr[K any] struct {
+ boolExpressionEvaluator[K]
+}
+
+func (e BoolExpr[K]) Eval(ctx context.Context, tCtx K) (bool, error) {
+ return e.boolExpressionEvaluator(ctx, tCtx)
+}
+
+//nolint:unparam
+func not[K any](original BoolExpr[K]) (BoolExpr[K], error) {
+ return BoolExpr[K]{func(ctx context.Context, tCtx K) (bool, error) {
+ result, err := original.Eval(ctx, tCtx)
+ return !result, err
+ }}, nil
+}
+
+func alwaysTrue[K any](context.Context, K) (bool, error) {
+ return true, nil
+}
+
+func alwaysFalse[K any](context.Context, K) (bool, error) {
+ return false, nil
+}
+
+// andFuncs builds a function that returns the short-circuited result of
+// ANDing the given boolExpressionEvaluator funcs.
+func andFuncs[K any](funcs []BoolExpr[K]) BoolExpr[K] {
+ return BoolExpr[K]{func(ctx context.Context, tCtx K) (bool, error) {
+ for _, f := range funcs {
+ result, err := f.Eval(ctx, tCtx)
+ if err != nil {
+ return false, err
+ }
+ if !result {
+ return false, nil
+ }
+ }
+ return true, nil
+ }}
+}
+
+// orFuncs builds a function that returns the short-circuited result of
+// ORing the given boolExpressionEvaluator funcs.
+func orFuncs[K any](funcs []BoolExpr[K]) BoolExpr[K] {
+ return BoolExpr[K]{func(ctx context.Context, tCtx K) (bool, error) {
+ for _, f := range funcs {
+ result, err := f.Eval(ctx, tCtx)
+ if err != nil {
+ return false, err
+ }
+ if result {
+ return true, nil
+ }
+ }
+ return false, nil
+ }}
+}
+
+func (p *Parser[K]) newComparisonEvaluator(comparison *comparison) (BoolExpr[K], error) {
+ if comparison == nil {
+ return BoolExpr[K]{alwaysTrue[K]}, nil
+ }
+ left, err := p.newGetter(comparison.Left)
+ if err != nil {
+ return BoolExpr[K]{}, err
+ }
+ right, err := p.newGetter(comparison.Right)
+ if err != nil {
+ return BoolExpr[K]{}, err
+ }
+
+ // The parser ensures that we'll never get an invalid comparison.Op, so we don't have to check that case.
+ return BoolExpr[K]{func(ctx context.Context, tCtx K) (bool, error) {
+ a, leftErr := left.Get(ctx, tCtx)
+ if leftErr != nil {
+ return false, leftErr
+ }
+ b, rightErr := right.Get(ctx, tCtx)
+ if rightErr != nil {
+ return false, rightErr
+ }
+ return p.compare(a, b, comparison.Op), nil
+ }}, nil
+}
+
+func (p *Parser[K]) newBoolExpr(expr *booleanExpression) (BoolExpr[K], error) {
+ if expr == nil {
+ return BoolExpr[K]{alwaysTrue[K]}, nil
+ }
+ f, err := p.newBooleanTermEvaluator(expr.Left)
+ if err != nil {
+ return BoolExpr[K]{}, err
+ }
+ funcs := []BoolExpr[K]{f}
+ for _, rhs := range expr.Right {
+ f, err := p.newBooleanTermEvaluator(rhs.Term)
+ if err != nil {
+ return BoolExpr[K]{}, err
+ }
+ funcs = append(funcs, f)
+ }
+
+ return orFuncs(funcs), nil
+}
+
+func (p *Parser[K]) newBooleanTermEvaluator(term *term) (BoolExpr[K], error) {
+ if term == nil {
+ return BoolExpr[K]{alwaysTrue[K]}, nil
+ }
+ f, err := p.newBooleanValueEvaluator(term.Left)
+ if err != nil {
+ return BoolExpr[K]{}, err
+ }
+ funcs := []BoolExpr[K]{f}
+ for _, rhs := range term.Right {
+ f, err := p.newBooleanValueEvaluator(rhs.Value)
+ if err != nil {
+ return BoolExpr[K]{}, err
+ }
+ funcs = append(funcs, f)
+ }
+
+ return andFuncs(funcs), nil
+}
+
+func (p *Parser[K]) newBooleanValueEvaluator(value *booleanValue) (BoolExpr[K], error) {
+ if value == nil {
+ return BoolExpr[K]{alwaysTrue[K]}, nil
+ }
+
+ var boolExpr BoolExpr[K]
+ var err error
+ switch {
+ case value.Comparison != nil:
+ boolExpr, err = p.newComparisonEvaluator(value.Comparison)
+ if err != nil {
+ return BoolExpr[K]{}, err
+ }
+ case value.ConstExpr != nil:
+ if *value.ConstExpr {
+ boolExpr = BoolExpr[K]{alwaysTrue[K]}
+ } else {
+ boolExpr = BoolExpr[K]{alwaysFalse[K]}
+ }
+ case value.SubExpr != nil:
+ boolExpr, err = p.newBoolExpr(value.SubExpr)
+ if err != nil {
+ return BoolExpr[K]{}, err
+ }
+ default:
+ return BoolExpr[K]{}, fmt.Errorf("unhandled boolean operation %v", value)
+ }
+
+ if value.Negation != nil {
+ return not(boolExpr)
+ }
+ return boolExpr, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go
new file mode 100644
index 00000000000..ee6a9e0d5d6
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/compare.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+
+import (
+ "bytes"
+
+ "go.uber.org/zap"
+ "golang.org/x/exp/constraints"
+)
+
+// The functions in this file implement a general-purpose comparison of two
+// values of type any, which for the purposes of OTTL mean values that are one of
+// int, float, string, bool, or pointers to those, or []byte, or nil.
+
+// invalidComparison returns false for everything except NE (where it returns true to indicate that the
+// objects were definitely not equivalent).
+// It also gives us an opportunity to log something.
+func (p *Parser[K]) invalidComparison(msg string, op compareOp) bool {
+ p.telemetrySettings.Logger.Debug(msg, zap.Any("op", op))
+ return op == NE
+}
+
+// comparePrimitives implements a generic comparison helper for all Ordered types (derived from Float, Int, or string).
+// According to benchmarks, it's faster than explicit comparison functions for these types.
+func comparePrimitives[T constraints.Ordered](a T, b T, op compareOp) bool {
+ switch op {
+ case EQ:
+ return a == b
+ case NE:
+ return a != b
+ case LT:
+ return a < b
+ case LTE:
+ return a <= b
+ case GTE:
+ return a >= b
+ case GT:
+ return a > b
+ default:
+ return false
+ }
+}
+
+func compareBools(a bool, b bool, op compareOp) bool {
+ switch op {
+ case EQ:
+ return a == b
+ case NE:
+ return a != b
+ case LT:
+ return !a && b
+ case LTE:
+ return !a || b
+ case GTE:
+ return a || !b
+ case GT:
+ return a && !b
+ default:
+ return false
+ }
+}
+
+func compareBytes(a []byte, b []byte, op compareOp) bool {
+ switch op {
+ case EQ:
+ return bytes.Equal(a, b)
+ case NE:
+ return !bytes.Equal(a, b)
+ case LT:
+ return bytes.Compare(a, b) < 0
+ case LTE:
+ return bytes.Compare(a, b) <= 0
+ case GTE:
+ return bytes.Compare(a, b) >= 0
+ case GT:
+ return bytes.Compare(a, b) > 0
+ default:
+ return false
+ }
+}
+
+func (p *Parser[K]) compareBool(a bool, b any, op compareOp) bool {
+ switch v := b.(type) {
+ case bool:
+ return compareBools(a, v, op)
+ default:
+ return p.invalidComparison("bool to non-bool", op)
+ }
+}
+
+func (p *Parser[K]) compareString(a string, b any, op compareOp) bool {
+ switch v := b.(type) {
+ case string:
+ return comparePrimitives(a, v, op)
+ default:
+ return p.invalidComparison("string to non-string", op)
+ }
+}
+
+func (p *Parser[K]) compareByte(a []byte, b any, op compareOp) bool {
+ switch v := b.(type) {
+ case nil:
+ return op == NE
+ case []byte:
+ if v == nil {
+ return op == NE
+ }
+ return compareBytes(a, v, op)
+ default:
+ return p.invalidComparison("Bytes to non-Bytes", op)
+ }
+}
+
+func (p *Parser[K]) compareInt64(a int64, b any, op compareOp) bool {
+ switch v := b.(type) {
+ case int64:
+ return comparePrimitives(a, v, op)
+ case float64:
+ return comparePrimitives(float64(a), v, op)
+ default:
+ return p.invalidComparison("int to non-numeric value", op)
+ }
+}
+
+func (p *Parser[K]) compareFloat64(a float64, b any, op compareOp) bool {
+ switch v := b.(type) {
+ case int64:
+ return comparePrimitives(a, float64(v), op)
+ case float64:
+ return comparePrimitives(a, v, op)
+ default:
+ return p.invalidComparison("float to non-numeric value", op)
+ }
+}
+
+// a and b are the return values from a Getter; we try to compare them
+// according to the given operator.
+func (p *Parser[K]) compare(a any, b any, op compareOp) bool {
+ // nils are equal to each other and never equal to anything else,
+ // so if they're both nil, report equality.
+ if a == nil && b == nil {
+ return op == EQ || op == LTE || op == GTE
+ }
+ // Anything else, we switch on the left side first.
+ switch v := a.(type) {
+ case nil:
+ // If a was nil, it means b wasn't and inequalities don't apply,
+ // so let's swap and give it the chance to get evaluated.
+ return p.compare(b, nil, op)
+ case bool:
+ return p.compareBool(v, b, op)
+ case int64:
+ return p.compareInt64(v, b, op)
+ case float64:
+ return p.compareFloat64(v, b, op)
+ case string:
+ return p.compareString(v, b, op)
+ case []byte:
+ if v == nil {
+ return p.compare(b, nil, op)
+ }
+ return p.compareByte(v, b, op)
+ default:
+		// If we don't know what type it is, we can't evaluate inequalities,
+		// so fall back to Go's standard equality for EQ and NE.
+ switch op {
+ case EQ:
+ return a == b
+ case NE:
+ return a != b
+ default:
+ return p.invalidComparison("unsupported type for inequality on left", op)
+ }
+ }
+}
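
Reviewer note: the mixed-type rules above (int64/float64 promotion, cross-type comparisons collapsing to "not equal") are easiest to see in isolation. A minimal standalone sketch with a trimmed operator set, not the vendored unexported API:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

type compareOp int

const (
	eq compareOp = iota
	ne
	lt
)

// comparePrimitives mirrors the vendored helper for a reduced operator set:
// one generic body covers ints, floats, and strings.
func comparePrimitives[T constraints.Ordered](a, b T, op compareOp) bool {
	switch op {
	case eq:
		return a == b
	case ne:
		return a != b
	case lt:
		return a < b
	default:
		return false
	}
}

func main() {
	// int64 vs float64: the int operand is promoted to float64, so 2 < 2.5 holds.
	fmt.Println(comparePrimitives(float64(int64(2)), 2.5, lt)) // true
	// Strings compare lexicographically.
	fmt.Println(comparePrimitives("alpha", "beta", lt)) // true
}
```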
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/ids.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/ids.go
new file mode 100644
index 00000000000..c2a9a5871e1
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/ids.go
@@ -0,0 +1,46 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlcommon // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+
+import (
+ "encoding/hex"
+ "errors"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+func ParseSpanID(spanIDStr string) (pcommon.SpanID, error) {
+ var id pcommon.SpanID
+ if hex.DecodedLen(len(spanIDStr)) != len(id) {
+ return pcommon.SpanID{}, errors.New("span ids must be 16 hex characters")
+ }
+ _, err := hex.Decode(id[:], []byte(spanIDStr))
+ if err != nil {
+ return pcommon.SpanID{}, err
+ }
+ return id, nil
+}
+
+func ParseTraceID(traceIDStr string) (pcommon.TraceID, error) {
+ var id pcommon.TraceID
+ if hex.DecodedLen(len(traceIDStr)) != len(id) {
+ return pcommon.TraceID{}, errors.New("trace ids must be 32 hex characters")
+ }
+ _, err := hex.Decode(id[:], []byte(traceIDStr))
+ if err != nil {
+ return pcommon.TraceID{}, err
+ }
+ return id, nil
+}
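
Note that the length check runs before decoding, so a wrong-length string fails with the fixed message even when it is valid hex. A standalone sketch mirroring the span-ID helper, using a plain [8]byte instead of pcommon.SpanID:

```go
package main

import (
	"encoding/hex"
	"errors"
	"fmt"
)

// parseSpanID mirrors the vendored helper: a span ID is 8 raw bytes,
// so its hex form must be exactly 16 characters.
func parseSpanID(s string) ([8]byte, error) {
	var id [8]byte
	if hex.DecodedLen(len(s)) != len(id) {
		return [8]byte{}, errors.New("span ids must be 16 hex characters")
	}
	if _, err := hex.Decode(id[:], []byte(s)); err != nil {
		return [8]byte{}, err
	}
	return id, nil
}

func main() {
	id, err := parseSpanID("00f067aa0ba902b7")
	fmt.Println(id, err) // decoded 8-byte array, <nil>

	// 8 characters decode to 4 bytes, so the length check fires first.
	_, err = parseSpanID("tooshort")
	fmt.Println(err) // span ids must be 16 hex characters
}
```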
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/map.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/map.go
new file mode 100644
index 00000000000..d4aef5e88f2
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/map.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlcommon // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+
+import (
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+func GetMapValue(attrs pcommon.Map, mapKey string) interface{} {
+ val, ok := attrs.Get(mapKey)
+ if !ok {
+ return nil
+ }
+ return GetValue(val)
+}
+
+func SetMapValue(attrs pcommon.Map, mapKey string, val interface{}) {
+ var value pcommon.Value
+ switch val.(type) {
+ case []string, []bool, []int64, []float64, [][]byte, []any:
+ value = pcommon.NewValueSlice()
+ default:
+ value = pcommon.NewValueEmpty()
+ }
+
+ SetValue(value, val)
+ value.CopyTo(attrs.PutEmpty(mapKey))
+}
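
GetMapValue flattens pcommon's (Value, bool) lookup into a single value-or-nil, and SetMapValue picks a slice or scalar container before writing. A sketch of the equivalent direct pcommon calls (the helpers above live in an internal package and cannot be imported from outside the module):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	attrs := pcommon.NewMap()

	// For a scalar, SetMapValue(attrs, "service.name", "tempo") reduces to:
	attrs.PutEmpty("service.name").SetStr("tempo")

	// GetMapValue returns nil for a missing key instead of (Value, false):
	if v, ok := attrs.Get("service.name"); ok {
		fmt.Println(v.Str()) // tempo
	}

	// For a slice, it allocates a pcommon.Slice and copies element by element:
	s := attrs.PutEmpty("ports").SetEmptySlice()
	for _, p := range []int64{4317, 4318} {
		s.AppendEmpty().SetInt(p)
	}
	fmt.Println(attrs.AsRaw()) // map[ports:[4317 4318] service.name:tempo]
}
```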
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/metric.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/metric.go
new file mode 100644
index 00000000000..ac609b2c8c8
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/metric.go
@@ -0,0 +1,232 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlcommon // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pmetric"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type MetricContext interface {
+ GetMetric() pmetric.Metric
+}
+
+var MetricSymbolTable = map[ottl.EnumSymbol]ottl.Enum{
+ "AGGREGATION_TEMPORALITY_UNSPECIFIED": ottl.Enum(pmetric.AggregationTemporalityUnspecified),
+ "AGGREGATION_TEMPORALITY_DELTA": ottl.Enum(pmetric.AggregationTemporalityDelta),
+ "AGGREGATION_TEMPORALITY_CUMULATIVE": ottl.Enum(pmetric.AggregationTemporalityCumulative),
+ "METRIC_DATA_TYPE_NONE": ottl.Enum(pmetric.MetricTypeEmpty),
+ "METRIC_DATA_TYPE_GAUGE": ottl.Enum(pmetric.MetricTypeGauge),
+ "METRIC_DATA_TYPE_SUM": ottl.Enum(pmetric.MetricTypeSum),
+ "METRIC_DATA_TYPE_HISTOGRAM": ottl.Enum(pmetric.MetricTypeHistogram),
+ "METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM": ottl.Enum(pmetric.MetricTypeExponentialHistogram),
+ "METRIC_DATA_TYPE_SUMMARY": ottl.Enum(pmetric.MetricTypeSummary),
+}
+
+func MetricPathGetSetter[K MetricContext](path []ottl.Field) (ottl.GetSetter[K], error) {
+ if len(path) == 0 {
+ return accessMetric[K](), nil
+ }
+ switch path[0].Name {
+ case "name":
+ return accessName[K](), nil
+ case "description":
+ return accessDescription[K](), nil
+ case "unit":
+ return accessUnit[K](), nil
+ case "type":
+ return accessType[K](), nil
+ case "aggregation_temporality":
+ return accessAggTemporality[K](), nil
+ case "is_monotonic":
+ return accessIsMonotonic[K](), nil
+ case "data_points":
+ return accessDataPoints[K](), nil
+ }
+
+ return nil, fmt.Errorf("invalid metric path expression %v", path)
+}
+
+func accessMetric[K MetricContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetMetric(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if newMetric, ok := val.(pmetric.Metric); ok {
+ newMetric.CopyTo(tCtx.GetMetric())
+ }
+ return nil
+ },
+ }
+}
+
+func accessName[K MetricContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetMetric().Name(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ tCtx.GetMetric().SetName(str)
+ }
+ return nil
+ },
+ }
+}
+
+func accessDescription[K MetricContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetMetric().Description(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ tCtx.GetMetric().SetDescription(str)
+ }
+ return nil
+ },
+ }
+}
+
+func accessUnit[K MetricContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetMetric().Unit(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ tCtx.GetMetric().SetUnit(str)
+ }
+ return nil
+ },
+ }
+}
+
+func accessType[K MetricContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return int64(tCtx.GetMetric().Type()), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+			// TODO Implement methods to correctly convert data types.
+ // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/10130
+ return nil
+ },
+ }
+}
+
+func accessAggTemporality[K MetricContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ metric := tCtx.GetMetric()
+ switch metric.Type() {
+ case pmetric.MetricTypeSum:
+ return int64(metric.Sum().AggregationTemporality()), nil
+ case pmetric.MetricTypeHistogram:
+ return int64(metric.Histogram().AggregationTemporality()), nil
+ case pmetric.MetricTypeExponentialHistogram:
+ return int64(metric.ExponentialHistogram().AggregationTemporality()), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if newAggTemporality, ok := val.(int64); ok {
+ metric := tCtx.GetMetric()
+ switch metric.Type() {
+ case pmetric.MetricTypeSum:
+ metric.Sum().SetAggregationTemporality(pmetric.AggregationTemporality(newAggTemporality))
+ case pmetric.MetricTypeHistogram:
+ metric.Histogram().SetAggregationTemporality(pmetric.AggregationTemporality(newAggTemporality))
+ case pmetric.MetricTypeExponentialHistogram:
+ metric.ExponentialHistogram().SetAggregationTemporality(pmetric.AggregationTemporality(newAggTemporality))
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessIsMonotonic[K MetricContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ metric := tCtx.GetMetric()
+ if metric.Type() == pmetric.MetricTypeSum {
+ return metric.Sum().IsMonotonic(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if newIsMonotonic, ok := val.(bool); ok {
+ metric := tCtx.GetMetric()
+ if metric.Type() == pmetric.MetricTypeSum {
+ metric.Sum().SetIsMonotonic(newIsMonotonic)
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessDataPoints[K MetricContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ metric := tCtx.GetMetric()
+ switch metric.Type() {
+ case pmetric.MetricTypeSum:
+ return metric.Sum().DataPoints(), nil
+ case pmetric.MetricTypeGauge:
+ return metric.Gauge().DataPoints(), nil
+ case pmetric.MetricTypeHistogram:
+ return metric.Histogram().DataPoints(), nil
+ case pmetric.MetricTypeExponentialHistogram:
+ return metric.ExponentialHistogram().DataPoints(), nil
+ case pmetric.MetricTypeSummary:
+ return metric.Summary().DataPoints(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ metric := tCtx.GetMetric()
+ switch metric.Type() {
+ case pmetric.MetricTypeSum:
+ if newDataPoints, ok := val.(pmetric.NumberDataPointSlice); ok {
+ newDataPoints.CopyTo(metric.Sum().DataPoints())
+ }
+ case pmetric.MetricTypeGauge:
+ if newDataPoints, ok := val.(pmetric.NumberDataPointSlice); ok {
+ newDataPoints.CopyTo(metric.Gauge().DataPoints())
+ }
+ case pmetric.MetricTypeHistogram:
+ if newDataPoints, ok := val.(pmetric.HistogramDataPointSlice); ok {
+ newDataPoints.CopyTo(metric.Histogram().DataPoints())
+ }
+ case pmetric.MetricTypeExponentialHistogram:
+ if newDataPoints, ok := val.(pmetric.ExponentialHistogramDataPointSlice); ok {
+ newDataPoints.CopyTo(metric.ExponentialHistogram().DataPoints())
+ }
+ case pmetric.MetricTypeSummary:
+ if newDataPoints, ok := val.(pmetric.SummaryDataPointSlice); ok {
+ newDataPoints.CopyTo(metric.Summary().DataPoints())
+ }
+ }
+ return nil
+ },
+ }
+}
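
The aggregation_temporality and is_monotonic accessors only apply to the metric types that carry those fields (Sum, Histogram, ExponentialHistogram; is_monotonic is Sum-only). A sketch of what they read back for a Sum, built directly with pdata:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric()
	m.SetName("http.requests")
	sum := m.SetEmptySum()
	sum.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
	sum.SetIsMonotonic(true)

	// What the "aggregation_temporality" getter returns for a Sum,
	// as an int64 enum value (DELTA == 1):
	fmt.Println(int64(m.Sum().AggregationTemporality())) // 1
	// And "is_monotonic":
	fmt.Println(m.Sum().IsMonotonic()) // true
}
```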
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/resource.go
new file mode 100644
index 00000000000..976fb52f216
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/resource.go
@@ -0,0 +1,100 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlcommon // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type ResourceContext interface {
+ GetResource() pcommon.Resource
+}
+
+func ResourcePathGetSetter[K ResourceContext](path []ottl.Field) (ottl.GetSetter[K], error) {
+ if len(path) == 0 {
+ return accessResource[K](), nil
+ }
+ switch path[0].Name {
+ case "attributes":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessResourceAttributes[K](), nil
+ }
+ return accessResourceAttributesKey[K](mapKey), nil
+ case "dropped_attributes_count":
+ return accessResourceDroppedAttributesCount[K](), nil
+ }
+
+ return nil, fmt.Errorf("invalid resource path expression %v", path)
+}
+
+func accessResource[K ResourceContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetResource(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if newRes, ok := val.(pcommon.Resource); ok {
+ newRes.CopyTo(tCtx.GetResource())
+ }
+ return nil
+ },
+ }
+}
+
+func accessResourceAttributes[K ResourceContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetResource().Attributes(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.CopyTo(tCtx.GetResource().Attributes())
+ }
+ return nil
+ },
+ }
+}
+
+func accessResourceAttributesKey[K ResourceContext](mapKey *string) ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return GetMapValue(tCtx.GetResource().Attributes(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ SetMapValue(tCtx.GetResource().Attributes(), *mapKey, val)
+ return nil
+ },
+ }
+}
+
+func accessResourceDroppedAttributesCount[K ResourceContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return int64(tCtx.GetResource().DroppedAttributesCount()), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetResource().SetDroppedAttributesCount(uint32(i))
+ }
+ return nil
+ },
+ }
+}
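
Note the uint32/int64 round-trip on dropped_attributes_count: OTTL exposes all integers as int64, while pdata stores the count as uint32, hence the uint32(i) conversion in the setter above. A sketch of the underlying pdata calls:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	res := pcommon.NewResource()
	res.Attributes().PutStr("service.name", "tempo")

	// resource.attributes["service.name"] resolves through GetMapValue:
	if v, ok := res.Attributes().Get("service.name"); ok {
		fmt.Println(v.Str()) // tempo
	}

	// Stored as uint32, surfaced to OTTL as int64.
	res.SetDroppedAttributesCount(uint32(int64(3)))
	fmt.Println(int64(res.DroppedAttributesCount())) // 3
}
```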
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/scope.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/scope.go
new file mode 100644
index 00000000000..6d81f2fa747
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/scope.go
@@ -0,0 +1,133 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlcommon // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type InstrumentationScopeContext interface {
+ GetInstrumentationScope() pcommon.InstrumentationScope
+}
+
+func ScopePathGetSetter[K InstrumentationScopeContext](path []ottl.Field) (ottl.GetSetter[K], error) {
+ if len(path) == 0 {
+ return accessInstrumentationScope[K](), nil
+ }
+
+ switch path[0].Name {
+ case "name":
+ return accessInstrumentationScopeName[K](), nil
+ case "version":
+ return accessInstrumentationScopeVersion[K](), nil
+ case "attributes":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessInstrumentationScopeAttributes[K](), nil
+ }
+ return accessInstrumentationScopeAttributesKey[K](mapKey), nil
+ case "dropped_attributes_count":
+ return accessInstrumentationScopeDroppedAttributesCount[K](), nil
+ }
+
+ return nil, fmt.Errorf("invalid scope path expression %v", path)
+}
+
+func accessInstrumentationScope[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetInstrumentationScope(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if newIl, ok := val.(pcommon.InstrumentationScope); ok {
+ newIl.CopyTo(tCtx.GetInstrumentationScope())
+ }
+ return nil
+ },
+ }
+}
+
+func accessInstrumentationScopeAttributes[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetInstrumentationScope().Attributes(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.CopyTo(tCtx.GetInstrumentationScope().Attributes())
+ }
+ return nil
+ },
+ }
+}
+
+func accessInstrumentationScopeAttributesKey[K InstrumentationScopeContext](mapKey *string) ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return GetMapValue(tCtx.GetInstrumentationScope().Attributes(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ SetMapValue(tCtx.GetInstrumentationScope().Attributes(), *mapKey, val)
+ return nil
+ },
+ }
+}
+
+func accessInstrumentationScopeName[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetInstrumentationScope().Name(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ tCtx.GetInstrumentationScope().SetName(str)
+ }
+ return nil
+ },
+ }
+}
+
+func accessInstrumentationScopeVersion[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetInstrumentationScope().Version(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ tCtx.GetInstrumentationScope().SetVersion(str)
+ }
+ return nil
+ },
+ }
+}
+
+func accessInstrumentationScopeDroppedAttributesCount[K InstrumentationScopeContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return int64(tCtx.GetInstrumentationScope().DroppedAttributesCount()), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetInstrumentationScope().SetDroppedAttributesCount(uint32(i))
+ }
+ return nil
+ },
+ }
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/span.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/span.go
new file mode 100644
index 00000000000..871d4656f54
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/span.go
@@ -0,0 +1,464 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlcommon // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "time"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/ptrace"
+ "go.opentelemetry.io/otel/trace"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type SpanContext interface {
+ GetSpan() ptrace.Span
+}
+
+var SpanSymbolTable = map[ottl.EnumSymbol]ottl.Enum{
+ "SPAN_KIND_UNSPECIFIED": ottl.Enum(ptrace.SpanKindUnspecified),
+ "SPAN_KIND_INTERNAL": ottl.Enum(ptrace.SpanKindInternal),
+ "SPAN_KIND_SERVER": ottl.Enum(ptrace.SpanKindServer),
+ "SPAN_KIND_CLIENT": ottl.Enum(ptrace.SpanKindClient),
+ "SPAN_KIND_PRODUCER": ottl.Enum(ptrace.SpanKindProducer),
+ "SPAN_KIND_CONSUMER": ottl.Enum(ptrace.SpanKindConsumer),
+ "STATUS_CODE_UNSET": ottl.Enum(ptrace.StatusCodeUnset),
+ "STATUS_CODE_OK": ottl.Enum(ptrace.StatusCodeOk),
+ "STATUS_CODE_ERROR": ottl.Enum(ptrace.StatusCodeError),
+}
+
+func SpanPathGetSetter[K SpanContext](path []ottl.Field) (ottl.GetSetter[K], error) {
+ if len(path) == 0 {
+ return accessSpan[K](), nil
+ }
+
+ switch path[0].Name {
+ case "trace_id":
+ if len(path) == 1 {
+ return accessTraceID[K](), nil
+ }
+ if path[1].Name == "string" {
+ return accessStringTraceID[K](), nil
+ }
+ case "span_id":
+ if len(path) == 1 {
+ return accessSpanID[K](), nil
+ }
+ if path[1].Name == "string" {
+ return accessStringSpanID[K](), nil
+ }
+ case "trace_state":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessTraceState[K](), nil
+ }
+ return accessTraceStateKey[K](mapKey), nil
+ case "parent_span_id":
+ if len(path) == 1 {
+ return accessParentSpanID[K](), nil
+ }
+ if path[1].Name == "string" {
+ return accessStringParentSpanID[K](), nil
+ }
+ case "name":
+ return accessSpanName[K](), nil
+ case "kind":
+ return accessKind[K](), nil
+ case "start_time_unix_nano":
+ return accessStartTimeUnixNano[K](), nil
+ case "end_time_unix_nano":
+ return accessEndTimeUnixNano[K](), nil
+ case "attributes":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessAttributes[K](), nil
+ }
+ return accessAttributesKey[K](mapKey), nil
+ case "dropped_attributes_count":
+ return accessSpanDroppedAttributesCount[K](), nil
+ case "events":
+ return accessEvents[K](), nil
+ case "dropped_events_count":
+ return accessDroppedEventsCount[K](), nil
+ case "links":
+ return accessLinks[K](), nil
+ case "dropped_links_count":
+ return accessDroppedLinksCount[K](), nil
+ case "status":
+ if len(path) == 1 {
+ return accessStatus[K](), nil
+ }
+ switch path[1].Name {
+ case "code":
+ return accessStatusCode[K](), nil
+ case "message":
+ return accessStatusMessage[K](), nil
+ }
+ }
+
+ return nil, fmt.Errorf("invalid span path expression %v", path)
+}
+
+func accessSpan[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if newSpan, ok := val.(ptrace.Span); ok {
+ newSpan.CopyTo(tCtx.GetSpan())
+ }
+ return nil
+ },
+ }
+}
+
+func accessTraceID[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().TraceID(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if newTraceID, ok := val.(pcommon.TraceID); ok {
+ tCtx.GetSpan().SetTraceID(newTraceID)
+ }
+ return nil
+ },
+ }
+}
+
+func accessStringTraceID[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ id := tCtx.GetSpan().TraceID()
+ return hex.EncodeToString(id[:]), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ id, err := ParseTraceID(str)
+ if err != nil {
+ return err
+ }
+ tCtx.GetSpan().SetTraceID(id)
+ }
+ return nil
+ },
+ }
+}
+
+func accessSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().SpanID(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if newSpanID, ok := val.(pcommon.SpanID); ok {
+ tCtx.GetSpan().SetSpanID(newSpanID)
+ }
+ return nil
+ },
+ }
+}
+
+func accessStringSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ id := tCtx.GetSpan().SpanID()
+ return hex.EncodeToString(id[:]), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ id, err := ParseSpanID(str)
+ if err != nil {
+ return err
+ }
+ tCtx.GetSpan().SetSpanID(id)
+ }
+ return nil
+ },
+ }
+}
+
+func accessTraceState[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().TraceState().AsRaw(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ tCtx.GetSpan().TraceState().FromRaw(str)
+ }
+ return nil
+ },
+ }
+}
+
+func accessTraceStateKey[K SpanContext](mapKey *string) ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ if ts, err := trace.ParseTraceState(tCtx.GetSpan().TraceState().AsRaw()); err == nil {
+ return ts.Get(*mapKey), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ if ts, err := trace.ParseTraceState(tCtx.GetSpan().TraceState().AsRaw()); err == nil {
+ if updated, err := ts.Insert(*mapKey, str); err == nil {
+ tCtx.GetSpan().TraceState().FromRaw(updated.String())
+ }
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().ParentSpanID(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if newParentSpanID, ok := val.(pcommon.SpanID); ok {
+ tCtx.GetSpan().SetParentSpanID(newParentSpanID)
+ }
+ return nil
+ },
+ }
+}
+
+func accessStringParentSpanID[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ id := tCtx.GetSpan().ParentSpanID()
+ return hex.EncodeToString(id[:]), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ id, err := ParseSpanID(str)
+ if err != nil {
+ return err
+ }
+ tCtx.GetSpan().SetParentSpanID(id)
+ }
+ return nil
+ },
+ }
+}
+
+func accessSpanName[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().Name(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ tCtx.GetSpan().SetName(str)
+ }
+ return nil
+ },
+ }
+}
+
+func accessKind[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return int64(tCtx.GetSpan().Kind()), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetSpan().SetKind(ptrace.SpanKind(i))
+ }
+ return nil
+ },
+ }
+}
+
+func accessStartTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().StartTimestamp().AsTime().UnixNano(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetSpan().SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i)))
+ }
+ return nil
+ },
+ }
+}
+
+func accessEndTimeUnixNano[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().EndTimestamp().AsTime().UnixNano(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetSpan().SetEndTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i)))
+ }
+ return nil
+ },
+ }
+}
+
+func accessAttributes[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().Attributes(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.CopyTo(tCtx.GetSpan().Attributes())
+ }
+ return nil
+ },
+ }
+}
+
+func accessAttributesKey[K SpanContext](mapKey *string) ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return GetMapValue(tCtx.GetSpan().Attributes(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ SetMapValue(tCtx.GetSpan().Attributes(), *mapKey, val)
+ return nil
+ },
+ }
+}
+
+func accessSpanDroppedAttributesCount[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return int64(tCtx.GetSpan().DroppedAttributesCount()), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetSpan().SetDroppedAttributesCount(uint32(i))
+ }
+ return nil
+ },
+ }
+}
+
+func accessEvents[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().Events(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if slc, ok := val.(ptrace.SpanEventSlice); ok {
+ tCtx.GetSpan().Events().RemoveIf(func(event ptrace.SpanEvent) bool {
+ return true
+ })
+ slc.CopyTo(tCtx.GetSpan().Events())
+ }
+ return nil
+ },
+ }
+}
+
+func accessDroppedEventsCount[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return int64(tCtx.GetSpan().DroppedEventsCount()), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetSpan().SetDroppedEventsCount(uint32(i))
+ }
+ return nil
+ },
+ }
+}
+
+func accessLinks[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().Links(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if slc, ok := val.(ptrace.SpanLinkSlice); ok {
+ tCtx.GetSpan().Links().RemoveIf(func(event ptrace.SpanLink) bool {
+ return true
+ })
+ slc.CopyTo(tCtx.GetSpan().Links())
+ }
+ return nil
+ },
+ }
+}
+
+func accessDroppedLinksCount[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return int64(tCtx.GetSpan().DroppedLinksCount()), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetSpan().SetDroppedLinksCount(uint32(i))
+ }
+ return nil
+ },
+ }
+}
+
+func accessStatus[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().Status(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if status, ok := val.(ptrace.Status); ok {
+ status.CopyTo(tCtx.GetSpan().Status())
+ }
+ return nil
+ },
+ }
+}
+
+func accessStatusCode[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return int64(tCtx.GetSpan().Status().Code()), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetSpan().Status().SetCode(ptrace.StatusCode(i))
+ }
+ return nil
+ },
+ }
+}
+
+func accessStatusMessage[K SpanContext]() ottl.StandardGetSetter[K] {
+ return ottl.StandardGetSetter[K]{
+ Getter: func(ctx context.Context, tCtx K) (interface{}, error) {
+ return tCtx.GetSpan().Status().Message(), nil
+ },
+ Setter: func(ctx context.Context, tCtx K, val interface{}) error {
+ if str, ok := val.(string); ok {
+ tCtx.GetSpan().Status().SetMessage(str)
+ }
+ return nil
+ },
+ }
+}
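
trace_id.string, kind, and status.code are all derived views: a hex encoding of the raw ID bytes and int64 casts of pdata enums. A sketch of those conversions against a span built with pdata:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewSpan()
	span.SetName("GET /api/traces")
	span.SetKind(ptrace.SpanKindServer)
	span.SetTraceID(pcommon.TraceID([16]byte{1, 2, 3, 4}))
	span.Status().SetCode(ptrace.StatusCodeOk)

	// "trace_id.string" hex-encodes the raw 16-byte ID, as in the getter above.
	id := span.TraceID()
	fmt.Println(hex.EncodeToString(id[:])) // 01020304000000000000000000000000

	// "kind" and "status.code" are exposed as int64 enum values
	// (SPAN_KIND_SERVER == 2, STATUS_CODE_OK == 1).
	fmt.Println(int64(span.Kind()), int64(span.Status().Code())) // 2 1
}
```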
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/value.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/value.go
new file mode 100644
index 00000000000..04c4881fc9b
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon/value.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlcommon // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+
+import (
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+func GetValue(val pcommon.Value) interface{} {
+ switch val.Type() {
+ case pcommon.ValueTypeStr:
+ return val.Str()
+ case pcommon.ValueTypeBool:
+ return val.Bool()
+ case pcommon.ValueTypeInt:
+ return val.Int()
+ case pcommon.ValueTypeDouble:
+ return val.Double()
+ case pcommon.ValueTypeMap:
+ return val.Map()
+ case pcommon.ValueTypeSlice:
+ return val.Slice()
+ case pcommon.ValueTypeBytes:
+ return val.Bytes().AsRaw()
+ }
+ return nil
+}
+
+func SetValue(value pcommon.Value, val interface{}) {
+ switch v := val.(type) {
+ case string:
+ value.SetStr(v)
+ case bool:
+ value.SetBool(v)
+ case int64:
+ value.SetInt(v)
+ case float64:
+ value.SetDouble(v)
+ case []byte:
+ value.SetEmptyBytes().FromRaw(v)
+ case []string:
+ value.SetEmptySlice().EnsureCapacity(len(v))
+ for _, str := range v {
+ value.Slice().AppendEmpty().SetStr(str)
+ }
+ case []bool:
+ value.SetEmptySlice().EnsureCapacity(len(v))
+ for _, b := range v {
+ value.Slice().AppendEmpty().SetBool(b)
+ }
+ case []int64:
+ value.SetEmptySlice().EnsureCapacity(len(v))
+ for _, i := range v {
+ value.Slice().AppendEmpty().SetInt(i)
+ }
+ case []float64:
+ value.SetEmptySlice().EnsureCapacity(len(v))
+ for _, f := range v {
+ value.Slice().AppendEmpty().SetDouble(f)
+ }
+ case [][]byte:
+ value.SetEmptySlice().EnsureCapacity(len(v))
+ for _, b := range v {
+ value.Slice().AppendEmpty().SetEmptyBytes().FromRaw(b)
+ }
+ case []any:
+ value.SetEmptySlice().EnsureCapacity(len(v))
+ for _, a := range v {
+ pval := value.Slice().AppendEmpty()
+ SetValue(pval, a)
+ }
+ case pcommon.Map:
+ v.CopyTo(value.SetEmptyMap())
+ case map[string]interface{}:
+ value.SetEmptyMap()
+ for mk, mv := range v {
+ SetMapValue(value.Map(), mk, mv)
+ }
+ }
+}
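
SetValue and GetValue are inverses over pdata's tagged Value type; slices are rebuilt element by element rather than assigned. A sketch of the same round-trip written against pcommon directly:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	v := pcommon.NewValueEmpty()

	// SetValue(v, []int64{1, 2, 3}) builds a pcommon.Slice one element at a time:
	v.SetEmptySlice().EnsureCapacity(3)
	for _, i := range []int64{1, 2, 3} {
		v.Slice().AppendEmpty().SetInt(i)
	}

	// GetValue switches on the stored type and returns the typed accessor:
	switch v.Type() {
	case pcommon.ValueTypeSlice:
		fmt.Println(v.Slice().AsRaw()) // [1 2 3]
	case pcommon.ValueTypeInt:
		fmt.Println(v.Int())
	}
}
```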
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/README.md
new file mode 100644
index 00000000000..bb8a36de62f
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/README.md
@@ -0,0 +1,66 @@
+# DataPoint Context
+
+The DataPoint Context is a Context implementation for [pdata DataPoints](https://github.com/open-telemetry/opentelemetry-collector/tree/main/pdata/pmetric), the collector's internal representation for OTLP metric data points. This Context should be used when interacting with individual OTLP data points.
+
+## Paths
+In general, the DataPoint Context supports accessing pdata using the field names from the [metrics proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto). All integers are returned and set via `int64`. All doubles are returned and set via `float64`.
+
+The following paths are supported.
+
+| path | field accessed | type |
+|------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------|
+| cache | the value of the current transform context's temporary cache. cache can be used as a temporary placeholder for data during complex transformations | pcommon.Map |
+| cache\[""\] | the value of an item in cache | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource | resource of the data point being processed | pcommon.Resource |
+| resource.attributes | resource attributes of the data point being processed | pcommon.Map |
+| resource.attributes\[""\] | the value of the resource attribute of the data point being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource.dropped_attributes_count | number of dropped attributes of the resource of the data point being processed | int64 |
+| instrumentation_scope | instrumentation scope of the data point being processed | pcommon.InstrumentationScope |
+| instrumentation_scope.name | name of the instrumentation scope of the data point being processed | string |
+| instrumentation_scope.version | version of the instrumentation scope of the data point being processed | string |
+| instrumentation_scope.dropped_attributes_count | number of dropped attributes of the instrumentation scope of the data point being processed | int64 |
+| instrumentation_scope.attributes | instrumentation scope attributes of the data point being processed | pcommon.Map |
+| instrumentation_scope.attributes\[""\] | the value of the instrumentation scope attribute of the data point being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| attributes | attributes of the data point being processed | pcommon.Map |
+| attributes\[""\] | the value of the attribute of the data point being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| metric | the metric to which the data point being processed belongs | pmetric.Metric |
+| metric.*                                        | All fields exposed by the [ottlmetric context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlmetric) can be accessed via `metric.` | varies                                                                   |
+| positive                                        | the positive buckets of the data point being processed                                                                                                                                 | pmetric.ExponentialHistogramDataPointBuckets                              |
+| positive.offset                                 | the offset of the positive buckets of the data point being processed                                                                                                                   | int64                                                                     |
+| positive.bucket_counts                          | the bucket_counts of the positive buckets of the data point being processed                                                                                                            | []uint64                                                                  |
+| negative                                        | the negative buckets of the data point being processed                                                                                                                                 | pmetric.ExponentialHistogramDataPointBuckets                              |
+| negative.offset                                 | the offset of the negative buckets of the data point being processed                                                                                                                   | int64                                                                     |
+| negative.bucket_counts                          | the bucket_counts of the negative buckets of the data point being processed                                                                                                            | []uint64                                                                  |
+| start_time_unix_nano | the start time in unix nano of the data point being processed | int64 |
+| time_unix_nano | the time in unix nano of the data point being processed | int64 |
+| value_double | the double value of the data point being processed | float64 |
+| value_int | the int value of the data point being processed | int64 |
+| exemplars | the exemplars of the data point being processed | pmetric.ExemplarSlice |
+| flags | the flags of the data point being processed | int64 |
+| count | the count of the data point being processed | int64 |
+| sum | the sum of the data point being processed | float64 |
+| bucket_counts                                   | the bucket counts of the data point being processed                                                                                                                                    | []uint64                                                                  |
+| explicit_bounds                                 | the explicit bounds of the data point being processed                                                                                                                                  | []float64                                                                 |
+| scale | the scale of the data point being processed | int64 |
+| zero_count | the zero_count of the data point being processed | int64 |
+| quantile_values | the quantile_values of the data point being processed | pmetric.SummaryDataPointValueAtQuantileSlice |
+
+## Enums
+
+The DataPoint Context supports the enum names from the [metrics proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto).
+
+In addition, it supports an enum for the metric data type, with the numeric values [defined by pdata](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pmetric/metrics.go).
+
+| Enum Symbol | Value |
+|----------------------------------------|-------|
+| FLAG_NONE | 0 |
+| FLAG_NO_RECORDED_VALUE | 1 |
+| AGGREGATION_TEMPORALITY_UNSPECIFIED | 0 |
+| AGGREGATION_TEMPORALITY_DELTA | 1 |
+| AGGREGATION_TEMPORALITY_CUMULATIVE | 2 |
+| METRIC_DATA_TYPE_NONE | 0 |
+| METRIC_DATA_TYPE_GAUGE | 1 |
+| METRIC_DATA_TYPE_SUM | 2 |
+| METRIC_DATA_TYPE_HISTOGRAM | 3 |
+| METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM | 4 |
+| METRIC_DATA_TYPE_SUMMARY | 5 |
\ No newline at end of file
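
The datapoint.go file below defines NewTransformContext, which bundles a data point with its owning metric, scope, and resource so the paths in the table above can resolve. A minimal sketch constructing one (the metric name and attribute values here are illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint"
)

func main() {
	metrics := pmetric.NewMetricSlice()
	metric := metrics.AppendEmpty()
	metric.SetName("queue.length")
	dp := metric.SetEmptyGauge().DataPoints().AppendEmpty()
	dp.SetIntValue(42)
	dp.Attributes().PutStr("queue", "ingest")

	tCtx := ottldatapoint.NewTransformContext(
		dp, metric, metrics,
		pcommon.NewInstrumentationScope(), pcommon.NewResource(),
	)

	// Paths like metric.name and attributes["queue"] resolve against this context.
	fmt.Println(tCtx.GetMetric().Name()) // queue.length
}
```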
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go
new file mode 100644
index 00000000000..be4a4bd3e06
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint/datapoint.go
@@ -0,0 +1,736 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottldatapoint // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+)
+
+var _ ottlcommon.ResourceContext = TransformContext{}
+var _ ottlcommon.InstrumentationScopeContext = TransformContext{}
+
+type TransformContext struct {
+ dataPoint interface{}
+ metric pmetric.Metric
+ metrics pmetric.MetricSlice
+ instrumentationScope pcommon.InstrumentationScope
+ resource pcommon.Resource
+ cache pcommon.Map
+}
+
+type Option func(*ottl.Parser[TransformContext])
+
+func NewTransformContext(dataPoint interface{}, metric pmetric.Metric, metrics pmetric.MetricSlice, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext {
+ return TransformContext{
+ dataPoint: dataPoint,
+ metric: metric,
+ metrics: metrics,
+ instrumentationScope: instrumentationScope,
+ resource: resource,
+ cache: pcommon.NewMap(),
+ }
+}
+
+func (tCtx TransformContext) GetDataPoint() interface{} {
+ return tCtx.dataPoint
+}
+
+func (tCtx TransformContext) GetInstrumentationScope() pcommon.InstrumentationScope {
+ return tCtx.instrumentationScope
+}
+
+func (tCtx TransformContext) GetResource() pcommon.Resource {
+ return tCtx.resource
+}
+
+func (tCtx TransformContext) GetMetric() pmetric.Metric {
+ return tCtx.metric
+}
+
+func (tCtx TransformContext) GetMetrics() pmetric.MetricSlice {
+ return tCtx.metrics
+}
+
+func (tCtx TransformContext) getCache() pcommon.Map {
+ return tCtx.cache
+}
+
+func NewParser(functions map[string]interface{}, telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) {
+ p, err := ottl.NewParser[TransformContext](
+ functions,
+ parsePath,
+ telemetrySettings,
+ ottl.WithEnumParser[TransformContext](parseEnum),
+ )
+ if err != nil {
+ return ottl.Parser[TransformContext]{}, err
+ }
+ for _, opt := range options {
+ opt(&p)
+ }
+ return p, nil
+}
+
+type StatementsOption func(*ottl.Statements[TransformContext])
+
+func WithErrorMode(errorMode ottl.ErrorMode) StatementsOption {
+ return func(s *ottl.Statements[TransformContext]) {
+ ottl.WithErrorMode[TransformContext](errorMode)(s)
+ }
+}
+
+func NewStatements(statements []*ottl.Statement[TransformContext], telemetrySettings component.TelemetrySettings, options ...StatementsOption) ottl.Statements[TransformContext] {
+ s := ottl.NewStatements(statements, telemetrySettings)
+ for _, op := range options {
+ op(&s)
+ }
+ return s
+}
+
+var symbolTable = map[ottl.EnumSymbol]ottl.Enum{
+ "FLAG_NONE": 0,
+ "FLAG_NO_RECORDED_VALUE": 1,
+}
+
+func init() {
+ for k, v := range ottlcommon.MetricSymbolTable {
+ symbolTable[k] = v
+ }
+}
+
+func parseEnum(val *ottl.EnumSymbol) (*ottl.Enum, error) {
+ if val != nil {
+ if enum, ok := symbolTable[*val]; ok {
+ return &enum, nil
+ }
+ return nil, fmt.Errorf("enum symbol, %s, not found", *val)
+ }
+ return nil, fmt.Errorf("enum symbol not provided")
+}
+
+func parsePath(val *ottl.Path) (ottl.GetSetter[TransformContext], error) {
+ if val != nil && len(val.Fields) > 0 {
+ return newPathGetSetter(val.Fields)
+ }
+ return nil, fmt.Errorf("bad path %v", val)
+}
+
+func newPathGetSetter(path []ottl.Field) (ottl.GetSetter[TransformContext], error) {
+ switch path[0].Name {
+ case "cache":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessCache(), nil
+ }
+ return accessCacheKey(mapKey), nil
+ case "resource":
+ return ottlcommon.ResourcePathGetSetter[TransformContext](path[1:])
+ case "instrumentation_scope":
+ return ottlcommon.ScopePathGetSetter[TransformContext](path[1:])
+ case "metric":
+ return ottlcommon.MetricPathGetSetter[TransformContext](path[1:])
+ case "attributes":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessAttributes(), nil
+ }
+ return accessAttributesKey(mapKey), nil
+ case "start_time_unix_nano":
+ return accessStartTimeUnixNano(), nil
+ case "time_unix_nano":
+ return accessTimeUnixNano(), nil
+ case "value_double":
+ return accessDoubleValue(), nil
+ case "value_int":
+ return accessIntValue(), nil
+ case "exemplars":
+ return accessExemplars(), nil
+ case "flags":
+ return accessFlags(), nil
+ case "count":
+ return accessCount(), nil
+ case "sum":
+ return accessSum(), nil
+ case "bucket_counts":
+ return accessBucketCounts(), nil
+ case "explicit_bounds":
+ return accessExplicitBounds(), nil
+ case "scale":
+ return accessScale(), nil
+ case "zero_count":
+ return accessZeroCount(), nil
+ case "positive":
+ if len(path) == 1 {
+ return accessPositive(), nil
+ }
+ switch path[1].Name {
+ case "offset":
+ return accessPositiveOffset(), nil
+ case "bucket_counts":
+ return accessPositiveBucketCounts(), nil
+ }
+ case "negative":
+ if len(path) == 1 {
+ return accessNegative(), nil
+ }
+ switch path[1].Name {
+ case "offset":
+ return accessNegativeOffset(), nil
+ case "bucket_counts":
+ return accessNegativeBucketCounts(), nil
+ }
+ case "quantile_values":
+ return accessQuantileValues(), nil
+ }
+ return nil, fmt.Errorf("invalid path expression %v", path)
+}
+
+func accessCache() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.getCache(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if m, ok := val.(pcommon.Map); ok {
+ m.CopyTo(tCtx.getCache())
+ }
+ return nil
+ },
+ }
+}
+
+func accessCacheKey(mapKey *string) ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return ottlcommon.GetMapValue(tCtx.getCache(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ ottlcommon.SetMapValue(tCtx.getCache(), *mapKey, val)
+ return nil
+ },
+ }
+}
+
+func accessAttributes() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Attributes(), nil
+ case pmetric.HistogramDataPoint:
+ return tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Attributes(), nil
+ case pmetric.ExponentialHistogramDataPoint:
+ return tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Attributes(), nil
+ case pmetric.SummaryDataPoint:
+ return tCtx.GetDataPoint().(pmetric.SummaryDataPoint).Attributes(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.CopyTo(tCtx.GetDataPoint().(pmetric.NumberDataPoint).Attributes())
+ }
+ case pmetric.HistogramDataPoint:
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.CopyTo(tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Attributes())
+ }
+ case pmetric.ExponentialHistogramDataPoint:
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.CopyTo(tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Attributes())
+ }
+ case pmetric.SummaryDataPoint:
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.CopyTo(tCtx.GetDataPoint().(pmetric.SummaryDataPoint).Attributes())
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessAttributesKey(mapKey *string) ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ return ottlcommon.GetMapValue(tCtx.GetDataPoint().(pmetric.NumberDataPoint).Attributes(), *mapKey), nil
+ case pmetric.HistogramDataPoint:
+ return ottlcommon.GetMapValue(tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Attributes(), *mapKey), nil
+ case pmetric.ExponentialHistogramDataPoint:
+ return ottlcommon.GetMapValue(tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Attributes(), *mapKey), nil
+ case pmetric.SummaryDataPoint:
+ return ottlcommon.GetMapValue(tCtx.GetDataPoint().(pmetric.SummaryDataPoint).Attributes(), *mapKey), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ ottlcommon.SetMapValue(tCtx.GetDataPoint().(pmetric.NumberDataPoint).Attributes(), *mapKey, val)
+ case pmetric.HistogramDataPoint:
+ ottlcommon.SetMapValue(tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Attributes(), *mapKey, val)
+ case pmetric.ExponentialHistogramDataPoint:
+ ottlcommon.SetMapValue(tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Attributes(), *mapKey, val)
+ case pmetric.SummaryDataPoint:
+ ottlcommon.SetMapValue(tCtx.GetDataPoint().(pmetric.SummaryDataPoint).Attributes(), *mapKey, val)
+ }
+ return nil
+ },
+ }
+}
+
+func accessStartTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ return tCtx.GetDataPoint().(pmetric.NumberDataPoint).StartTimestamp().AsTime().UnixNano(), nil
+ case pmetric.HistogramDataPoint:
+ return tCtx.GetDataPoint().(pmetric.HistogramDataPoint).StartTimestamp().AsTime().UnixNano(), nil
+ case pmetric.ExponentialHistogramDataPoint:
+ return tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).StartTimestamp().AsTime().UnixNano(), nil
+ case pmetric.SummaryDataPoint:
+ return tCtx.GetDataPoint().(pmetric.SummaryDataPoint).StartTimestamp().AsTime().UnixNano(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newTime, ok := val.(int64); ok {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ tCtx.GetDataPoint().(pmetric.NumberDataPoint).SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTime)))
+ case pmetric.HistogramDataPoint:
+ tCtx.GetDataPoint().(pmetric.HistogramDataPoint).SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTime)))
+ case pmetric.ExponentialHistogramDataPoint:
+ tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTime)))
+ case pmetric.SummaryDataPoint:
+ tCtx.GetDataPoint().(pmetric.SummaryDataPoint).SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTime)))
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Timestamp().AsTime().UnixNano(), nil
+ case pmetric.HistogramDataPoint:
+ return tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Timestamp().AsTime().UnixNano(), nil
+ case pmetric.ExponentialHistogramDataPoint:
+ return tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Timestamp().AsTime().UnixNano(), nil
+ case pmetric.SummaryDataPoint:
+ return tCtx.GetDataPoint().(pmetric.SummaryDataPoint).Timestamp().AsTime().UnixNano(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newTime, ok := val.(int64); ok {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ tCtx.GetDataPoint().(pmetric.NumberDataPoint).SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTime)))
+ case pmetric.HistogramDataPoint:
+ tCtx.GetDataPoint().(pmetric.HistogramDataPoint).SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTime)))
+ case pmetric.ExponentialHistogramDataPoint:
+ tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTime)))
+ case pmetric.SummaryDataPoint:
+ tCtx.GetDataPoint().(pmetric.SummaryDataPoint).SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTime)))
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessDoubleValue() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok {
+ return numberDataPoint.DoubleValue(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newDouble, ok := val.(float64); ok {
+ if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok {
+ numberDataPoint.SetDoubleValue(newDouble)
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessIntValue() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok {
+ return numberDataPoint.IntValue(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newInt, ok := val.(int64); ok {
+ if numberDataPoint, ok := tCtx.GetDataPoint().(pmetric.NumberDataPoint); ok {
+ numberDataPoint.SetIntValue(newInt)
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessExemplars() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ return tCtx.GetDataPoint().(pmetric.NumberDataPoint).Exemplars(), nil
+ case pmetric.HistogramDataPoint:
+ return tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Exemplars(), nil
+ case pmetric.ExponentialHistogramDataPoint:
+ return tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Exemplars(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newExemplars, ok := val.(pmetric.ExemplarSlice); ok {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ newExemplars.CopyTo(tCtx.GetDataPoint().(pmetric.NumberDataPoint).Exemplars())
+ case pmetric.HistogramDataPoint:
+ newExemplars.CopyTo(tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Exemplars())
+ case pmetric.ExponentialHistogramDataPoint:
+ newExemplars.CopyTo(tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Exemplars())
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessFlags() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ return int64(tCtx.GetDataPoint().(pmetric.NumberDataPoint).Flags()), nil
+ case pmetric.HistogramDataPoint:
+ return int64(tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Flags()), nil
+ case pmetric.ExponentialHistogramDataPoint:
+ return int64(tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Flags()), nil
+ case pmetric.SummaryDataPoint:
+ return int64(tCtx.GetDataPoint().(pmetric.SummaryDataPoint).Flags()), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newFlags, ok := val.(int64); ok {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.NumberDataPoint:
+ tCtx.GetDataPoint().(pmetric.NumberDataPoint).SetFlags(pmetric.DataPointFlags(newFlags))
+ case pmetric.HistogramDataPoint:
+ tCtx.GetDataPoint().(pmetric.HistogramDataPoint).SetFlags(pmetric.DataPointFlags(newFlags))
+ case pmetric.ExponentialHistogramDataPoint:
+ tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).SetFlags(pmetric.DataPointFlags(newFlags))
+ case pmetric.SummaryDataPoint:
+ tCtx.GetDataPoint().(pmetric.SummaryDataPoint).SetFlags(pmetric.DataPointFlags(newFlags))
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessCount() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.HistogramDataPoint:
+ return int64(tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Count()), nil
+ case pmetric.ExponentialHistogramDataPoint:
+ return int64(tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Count()), nil
+ case pmetric.SummaryDataPoint:
+ return int64(tCtx.GetDataPoint().(pmetric.SummaryDataPoint).Count()), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newCount, ok := val.(int64); ok {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.HistogramDataPoint:
+ tCtx.GetDataPoint().(pmetric.HistogramDataPoint).SetCount(uint64(newCount))
+ case pmetric.ExponentialHistogramDataPoint:
+ tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).SetCount(uint64(newCount))
+ case pmetric.SummaryDataPoint:
+ tCtx.GetDataPoint().(pmetric.SummaryDataPoint).SetCount(uint64(newCount))
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessSum() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.HistogramDataPoint:
+ return tCtx.GetDataPoint().(pmetric.HistogramDataPoint).Sum(), nil
+ case pmetric.ExponentialHistogramDataPoint:
+ return tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).Sum(), nil
+ case pmetric.SummaryDataPoint:
+ return tCtx.GetDataPoint().(pmetric.SummaryDataPoint).Sum(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newSum, ok := val.(float64); ok {
+ switch tCtx.GetDataPoint().(type) {
+ case pmetric.HistogramDataPoint:
+ tCtx.GetDataPoint().(pmetric.HistogramDataPoint).SetSum(newSum)
+ case pmetric.ExponentialHistogramDataPoint:
+ tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint).SetSum(newSum)
+ case pmetric.SummaryDataPoint:
+ tCtx.GetDataPoint().(pmetric.SummaryDataPoint).SetSum(newSum)
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessExplicitBounds() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok {
+ return histogramDataPoint.ExplicitBounds().AsRaw(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newExplicitBounds, ok := val.([]float64); ok {
+ if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok {
+ histogramDataPoint.ExplicitBounds().FromRaw(newExplicitBounds)
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessBucketCounts() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok {
+ return histogramDataPoint.BucketCounts().AsRaw(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newBucketCount, ok := val.([]uint64); ok {
+ if histogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.HistogramDataPoint); ok {
+ histogramDataPoint.BucketCounts().FromRaw(newBucketCount)
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessScale() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ return int64(expoHistogramDataPoint.Scale()), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newScale, ok := val.(int64); ok {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ expoHistogramDataPoint.SetScale(int32(newScale))
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessZeroCount() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ return int64(expoHistogramDataPoint.ZeroCount()), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newZeroCount, ok := val.(int64); ok {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ expoHistogramDataPoint.SetZeroCount(uint64(newZeroCount))
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessPositive() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ return expoHistogramDataPoint.Positive(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newPositive, ok := val.(pmetric.ExponentialHistogramDataPointBuckets); ok {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ newPositive.CopyTo(expoHistogramDataPoint.Positive())
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessPositiveOffset() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ return int64(expoHistogramDataPoint.Positive().Offset()), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newPositiveOffset, ok := val.(int64); ok {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ expoHistogramDataPoint.Positive().SetOffset(int32(newPositiveOffset))
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessPositiveBucketCounts() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ return expoHistogramDataPoint.Positive().BucketCounts().AsRaw(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newPositiveBucketCounts, ok := val.([]uint64); ok {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ expoHistogramDataPoint.Positive().BucketCounts().FromRaw(newPositiveBucketCounts)
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessNegative() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ return expoHistogramDataPoint.Negative(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newNegative, ok := val.(pmetric.ExponentialHistogramDataPointBuckets); ok {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ newNegative.CopyTo(expoHistogramDataPoint.Negative())
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessNegativeOffset() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ return int64(expoHistogramDataPoint.Negative().Offset()), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newNegativeOffset, ok := val.(int64); ok {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ expoHistogramDataPoint.Negative().SetOffset(int32(newNegativeOffset))
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessNegativeBucketCounts() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ return expoHistogramDataPoint.Negative().BucketCounts().AsRaw(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newNegativeBucketCounts, ok := val.([]uint64); ok {
+ if expoHistogramDataPoint, ok := tCtx.GetDataPoint().(pmetric.ExponentialHistogramDataPoint); ok {
+ expoHistogramDataPoint.Negative().BucketCounts().FromRaw(newNegativeBucketCounts)
+ }
+ }
+ return nil
+ },
+ }
+}
+
+func accessQuantileValues() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ if summaryDataPoint, ok := tCtx.GetDataPoint().(pmetric.SummaryDataPoint); ok {
+ return summaryDataPoint.QuantileValues(), nil
+ }
+ return nil, nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newQuantileValues, ok := val.(pmetric.SummaryDataPointValueAtQuantileSlice); ok {
+ if summaryDataPoint, ok := tCtx.GetDataPoint().(pmetric.SummaryDataPoint); ok {
+ newQuantileValues.CopyTo(summaryDataPoint.QuantileValues())
+ }
+ }
+ return nil
+ },
+ }
+}
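Every accessor in this vendored file follows the same shape: a type switch over the concrete data point kind, then a read (or a checked write) through the pdata setters, with unsigned pdata counts surfaced to OTTL as `int64`. Below is a minimal standalone sketch of that pattern; it is an editorial illustration, not part of the vendored diff. The `count` helper and its boolean return are inventions for the example, while the pdata calls are the same ones used in the accessors above.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// count mirrors the accessCount getter above: OTTL exposes the unsigned
// pdata count as int64, and only for point types that actually carry one.
func count(dp interface{}) (interface{}, bool) {
	switch p := dp.(type) {
	case pmetric.HistogramDataPoint:
		return int64(p.Count()), true
	case pmetric.ExponentialHistogramDataPoint:
		return int64(p.Count()), true
	case pmetric.SummaryDataPoint:
		return int64(p.Count()), true
	default: // e.g. NumberDataPoint has no count; the real getter returns nil, nil
		return nil, false
	}
}

func main() {
	hdp := pmetric.NewHistogramDataPoint()
	hdp.SetCount(42)
	v, ok := count(hdp)
	fmt.Println(v, ok) // 42 true

	ndp := pmetric.NewNumberDataPoint()
	v, ok = count(ndp)
	fmt.Println(v, ok) // <nil> false
}
```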
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/README.md
new file mode 100644
index 00000000000..f2defde785f
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/README.md
@@ -0,0 +1,70 @@
+# Log Context
+
+The Log Context is a Context implementation for [pdata Logs](https://github.com/open-telemetry/opentelemetry-collector/tree/main/pdata/plog), the collector's internal representation for OTLP log data. This Context should be used when interacting with OTLP logs.
+
+## Paths
+In general, the Log Context supports accessing pdata using the field names from the [logs proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/logs/v1/logs.proto). All integers are returned and set via `int64`. All doubles are returned and set via `float64`.
+
+All TraceIDs and SpanIDs are returned as pdata [SpanID](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pcommon/spanid.go) and [TraceID](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pcommon/traceid.go) types. Use the [SpanID function](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md#spanid) and [TraceID function](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md#traceid) when interacting with pdata representations of SpanID and TraceID. These IDs are never nil; to check whether one is unset, compare it against the all-zero value (`SpanID(0x0000000000000000)` and `TraceID(0x00000000000000000000000000000000)`).
+
+The following paths are supported.
+
+| path | field accessed | type |
+|------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------|
+| cache | the value of the current transform context's temporary cache. cache can be used as a temporary placeholder for data during complex transformations | pcommon.Map |
+| cache\[""\] | the value of an item in cache | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource | resource of the log being processed | pcommon.Resource |
+| resource.attributes | resource attributes of the log being processed | pcommon.Map |
+| resource.attributes\[""\] | the value of the resource attribute of the log being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource.dropped_attributes_count | number of dropped attributes of the resource of the log being processed | int64 |
+| instrumentation_scope | instrumentation scope of the log being processed | pcommon.InstrumentationScope |
+| instrumentation_scope.name | name of the instrumentation scope of the log being processed | string |
+| instrumentation_scope.version | version of the instrumentation scope of the log being processed | string |
+| instrumentation_scope.dropped_attributes_count | number of dropped attributes of the instrumentation scope of the log being processed | int64 |
+| instrumentation_scope.attributes | instrumentation scope attributes of the data point being processed | pcommon.Map |
+| instrumentation_scope.attributes\[""\] | the value of the instrumentation scope attribute of the data point being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| attributes | attributes of the log being processed | pcommon.Map |
+| attributes\[""\] | the value of the attribute of the log being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| trace_id | a byte slice representation of the trace id | pcommon.TraceID |
+| trace_id.string | a string representation of the trace id | string |
+| span_id | a byte slice representation of the span id | pcommon.SpanID |
+| span_id.string | a string representation of the span id | string |
+| time_unix_nano | the time in unix nano of the log being processed | int64 |
+| observed_time_unix_nano | the observed time in unix nano of the log being processed | int64 |
+| severity_number                                | the severity number of the log being processed                                                                                                       | int64                                                                    |
+| severity_text | the severity text of the log being processed | string |
+| body | the body of the log being processed | any |
+| dropped_attributes_count | the number of dropped attributes of the log being processed | int64 |
+| flags | the flags of the log being processed | int64 |
+
+## Enums
+
+The Log Context supports the enum names from the [logs proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/logs/v1/logs.proto).
+
+| Enum Symbol | Value |
+|-----------------------------|-------|
+| SEVERITY_NUMBER_UNSPECIFIED | 0 |
+| SEVERITY_NUMBER_TRACE | 1 |
+| SEVERITY_NUMBER_TRACE2 | 2 |
+| SEVERITY_NUMBER_TRACE3 | 3 |
+| SEVERITY_NUMBER_TRACE4 | 4 |
+| SEVERITY_NUMBER_DEBUG | 5 |
+| SEVERITY_NUMBER_DEBUG2 | 6 |
+| SEVERITY_NUMBER_DEBUG3 | 7 |
+| SEVERITY_NUMBER_DEBUG4 | 8 |
+| SEVERITY_NUMBER_INFO | 9 |
+| SEVERITY_NUMBER_INFO2 | 10 |
+| SEVERITY_NUMBER_INFO3 | 11 |
+| SEVERITY_NUMBER_INFO4 | 12 |
+| SEVERITY_NUMBER_WARN | 13 |
+| SEVERITY_NUMBER_WARN2 | 14 |
+| SEVERITY_NUMBER_WARN3 | 15 |
+| SEVERITY_NUMBER_WARN4 | 16 |
+| SEVERITY_NUMBER_ERROR | 17 |
+| SEVERITY_NUMBER_ERROR2 | 18 |
+| SEVERITY_NUMBER_ERROR3 | 19 |
+| SEVERITY_NUMBER_ERROR4 | 20 |
+| SEVERITY_NUMBER_FATAL | 21 |
+| SEVERITY_NUMBER_FATAL2 | 22 |
+| SEVERITY_NUMBER_FATAL3 | 23 |
+| SEVERITY_NUMBER_FATAL4 | 24 |
\ No newline at end of file
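The enum symbols in this table correspond one-to-one to `plog.SeverityNumber` values, which is what the `severity_number` path reads and writes as `int64`. A small sketch using the constructors from the vendored `log.go` below; the printed value assumes the proto numbering shown in the table above.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
)

func main() {
	lr := plog.NewLogRecord()
	lr.SetSeverityNumber(plog.SeverityNumberError) // OTTL enum SEVERITY_NUMBER_ERROR == 17
	lr.SetSeverityText("ERROR")

	// The context carries the record plus its owning scope and resource,
	// so paths like resource.attributes[...] resolve against the same value.
	tCtx := ottllog.NewTransformContext(lr, pcommon.NewInstrumentationScope(), pcommon.NewResource())
	fmt.Println(int64(tCtx.GetLogRecord().SeverityNumber())) // 17, what a severity_number path returns
}
```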
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go
new file mode 100644
index 00000000000..bb59edc7969
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog/log.go
@@ -0,0 +1,408 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottllog // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "time"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/plog"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+)
+
+var _ ottlcommon.ResourceContext = TransformContext{}
+var _ ottlcommon.InstrumentationScopeContext = TransformContext{}
+
+type TransformContext struct {
+ logRecord plog.LogRecord
+ instrumentationScope pcommon.InstrumentationScope
+ resource pcommon.Resource
+ cache pcommon.Map
+}
+
+type Option func(*ottl.Parser[TransformContext])
+
+func NewTransformContext(logRecord plog.LogRecord, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext {
+ return TransformContext{
+ logRecord: logRecord,
+ instrumentationScope: instrumentationScope,
+ resource: resource,
+ cache: pcommon.NewMap(),
+ }
+}
+
+func (tCtx TransformContext) GetLogRecord() plog.LogRecord {
+ return tCtx.logRecord
+}
+
+func (tCtx TransformContext) GetInstrumentationScope() pcommon.InstrumentationScope {
+ return tCtx.instrumentationScope
+}
+
+func (tCtx TransformContext) GetResource() pcommon.Resource {
+ return tCtx.resource
+}
+
+func (tCtx TransformContext) getCache() pcommon.Map {
+ return tCtx.cache
+}
+
+func NewParser(functions map[string]interface{}, telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) {
+ p, err := ottl.NewParser[TransformContext](
+ functions,
+ parsePath,
+ telemetrySettings,
+ ottl.WithEnumParser[TransformContext](parseEnum),
+ )
+ if err != nil {
+ return ottl.Parser[TransformContext]{}, err
+ }
+ for _, opt := range options {
+ opt(&p)
+ }
+ return p, nil
+}
+
+type StatementsOption func(*ottl.Statements[TransformContext])
+
+func WithErrorMode(errorMode ottl.ErrorMode) StatementsOption {
+ return func(s *ottl.Statements[TransformContext]) {
+ ottl.WithErrorMode[TransformContext](errorMode)(s)
+ }
+}
+
+func NewStatements(statements []*ottl.Statement[TransformContext], telemetrySettings component.TelemetrySettings, options ...StatementsOption) ottl.Statements[TransformContext] {
+ s := ottl.NewStatements(statements, telemetrySettings)
+ for _, op := range options {
+ op(&s)
+ }
+ return s
+}
+
+var symbolTable = map[ottl.EnumSymbol]ottl.Enum{
+ "SEVERITY_NUMBER_UNSPECIFIED": ottl.Enum(plog.SeverityNumberUnspecified),
+ "SEVERITY_NUMBER_TRACE": ottl.Enum(plog.SeverityNumberTrace),
+ "SEVERITY_NUMBER_TRACE2": ottl.Enum(plog.SeverityNumberTrace2),
+ "SEVERITY_NUMBER_TRACE3": ottl.Enum(plog.SeverityNumberTrace3),
+ "SEVERITY_NUMBER_TRACE4": ottl.Enum(plog.SeverityNumberTrace4),
+ "SEVERITY_NUMBER_DEBUG": ottl.Enum(plog.SeverityNumberDebug),
+ "SEVERITY_NUMBER_DEBUG2": ottl.Enum(plog.SeverityNumberDebug2),
+ "SEVERITY_NUMBER_DEBUG3": ottl.Enum(plog.SeverityNumberDebug3),
+ "SEVERITY_NUMBER_DEBUG4": ottl.Enum(plog.SeverityNumberDebug4),
+ "SEVERITY_NUMBER_INFO": ottl.Enum(plog.SeverityNumberInfo),
+ "SEVERITY_NUMBER_INFO2": ottl.Enum(plog.SeverityNumberInfo2),
+ "SEVERITY_NUMBER_INFO3": ottl.Enum(plog.SeverityNumberInfo3),
+ "SEVERITY_NUMBER_INFO4": ottl.Enum(plog.SeverityNumberInfo4),
+ "SEVERITY_NUMBER_WARN": ottl.Enum(plog.SeverityNumberWarn),
+ "SEVERITY_NUMBER_WARN2": ottl.Enum(plog.SeverityNumberWarn2),
+ "SEVERITY_NUMBER_WARN3": ottl.Enum(plog.SeverityNumberWarn3),
+ "SEVERITY_NUMBER_WARN4": ottl.Enum(plog.SeverityNumberWarn4),
+ "SEVERITY_NUMBER_ERROR": ottl.Enum(plog.SeverityNumberError),
+ "SEVERITY_NUMBER_ERROR2": ottl.Enum(plog.SeverityNumberError2),
+ "SEVERITY_NUMBER_ERROR3": ottl.Enum(plog.SeverityNumberError3),
+ "SEVERITY_NUMBER_ERROR4": ottl.Enum(plog.SeverityNumberError4),
+ "SEVERITY_NUMBER_FATAL": ottl.Enum(plog.SeverityNumberFatal),
+ "SEVERITY_NUMBER_FATAL2": ottl.Enum(plog.SeverityNumberFatal2),
+ "SEVERITY_NUMBER_FATAL3": ottl.Enum(plog.SeverityNumberFatal3),
+ "SEVERITY_NUMBER_FATAL4": ottl.Enum(plog.SeverityNumberFatal4),
+}
+
+func parseEnum(val *ottl.EnumSymbol) (*ottl.Enum, error) {
+ if val != nil {
+ if enum, ok := symbolTable[*val]; ok {
+ return &enum, nil
+ }
+ return nil, fmt.Errorf("enum symbol, %s, not found", *val)
+ }
+ return nil, fmt.Errorf("enum symbol not provided")
+}
+
+func parsePath(val *ottl.Path) (ottl.GetSetter[TransformContext], error) {
+ if val != nil && len(val.Fields) > 0 {
+ return newPathGetSetter(val.Fields)
+ }
+ return nil, fmt.Errorf("bad path %v", val)
+}
+
+func newPathGetSetter(path []ottl.Field) (ottl.GetSetter[TransformContext], error) {
+ switch path[0].Name {
+ case "cache":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessCache(), nil
+ }
+ return accessCacheKey(mapKey), nil
+ case "resource":
+ return ottlcommon.ResourcePathGetSetter[TransformContext](path[1:])
+ case "instrumentation_scope":
+ return ottlcommon.ScopePathGetSetter[TransformContext](path[1:])
+ case "time_unix_nano":
+ return accessTimeUnixNano(), nil
+ case "observed_time_unix_nano":
+ return accessObservedTimeUnixNano(), nil
+ case "severity_number":
+ return accessSeverityNumber(), nil
+ case "severity_text":
+ return accessSeverityText(), nil
+ case "body":
+ return accessBody(), nil
+ case "attributes":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessAttributes(), nil
+ }
+ return accessAttributesKey(mapKey), nil
+ case "dropped_attributes_count":
+ return accessDroppedAttributesCount(), nil
+ case "flags":
+ return accessFlags(), nil
+ case "trace_id":
+ if len(path) == 1 {
+ return accessTraceID(), nil
+ }
+ if path[1].Name == "string" {
+ return accessStringTraceID(), nil
+ }
+ case "span_id":
+ if len(path) == 1 {
+ return accessSpanID(), nil
+ }
+ if path[1].Name == "string" {
+ return accessStringSpanID(), nil
+ }
+ }
+
+ return nil, fmt.Errorf("invalid path expression %v", path)
+}
+
+func accessCache() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.getCache(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if m, ok := val.(pcommon.Map); ok {
+ m.CopyTo(tCtx.getCache())
+ }
+ return nil
+ },
+ }
+}
+
+func accessCacheKey(mapKey *string) ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return ottlcommon.GetMapValue(tCtx.getCache(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ ottlcommon.SetMapValue(tCtx.getCache(), *mapKey, val)
+ return nil
+ },
+ }
+}
+
+func accessTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.GetLogRecord().Timestamp().AsTime().UnixNano(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetLogRecord().SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i)))
+ }
+ return nil
+ },
+ }
+}
+
+func accessObservedTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.GetLogRecord().ObservedTimestamp().AsTime().UnixNano(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetLogRecord().SetObservedTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i)))
+ }
+ return nil
+ },
+ }
+}
+
+func accessSeverityNumber() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return int64(tCtx.GetLogRecord().SeverityNumber()), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetLogRecord().SetSeverityNumber(plog.SeverityNumber(i))
+ }
+ return nil
+ },
+ }
+}
+
+func accessSeverityText() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.GetLogRecord().SeverityText(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if s, ok := val.(string); ok {
+ tCtx.GetLogRecord().SetSeverityText(s)
+ }
+ return nil
+ },
+ }
+}
+
+func accessBody() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return ottlcommon.GetValue(tCtx.GetLogRecord().Body()), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ ottlcommon.SetValue(tCtx.GetLogRecord().Body(), val)
+ return nil
+ },
+ }
+}
+
+func accessAttributes() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.GetLogRecord().Attributes(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.CopyTo(tCtx.GetLogRecord().Attributes())
+ }
+ return nil
+ },
+ }
+}
+
+func accessAttributesKey(mapKey *string) ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return ottlcommon.GetMapValue(tCtx.GetLogRecord().Attributes(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ ottlcommon.SetMapValue(tCtx.GetLogRecord().Attributes(), *mapKey, val)
+ return nil
+ },
+ }
+}
+
+func accessDroppedAttributesCount() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return int64(tCtx.GetLogRecord().DroppedAttributesCount()), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetLogRecord().SetDroppedAttributesCount(uint32(i))
+ }
+ return nil
+ },
+ }
+}
+
+func accessFlags() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return int64(tCtx.GetLogRecord().Flags()), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if i, ok := val.(int64); ok {
+ tCtx.GetLogRecord().SetFlags(plog.LogRecordFlags(i))
+ }
+ return nil
+ },
+ }
+}
+
+func accessTraceID() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.GetLogRecord().TraceID(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newTraceID, ok := val.(pcommon.TraceID); ok {
+ tCtx.GetLogRecord().SetTraceID(newTraceID)
+ }
+ return nil
+ },
+ }
+}
+
+func accessStringTraceID() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ id := tCtx.GetLogRecord().TraceID()
+ return hex.EncodeToString(id[:]), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if str, ok := val.(string); ok {
+ id, err := ottlcommon.ParseTraceID(str)
+ if err != nil {
+ return err
+ }
+ tCtx.GetLogRecord().SetTraceID(id)
+ }
+ return nil
+ },
+ }
+}
+
+func accessSpanID() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.GetLogRecord().SpanID(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newSpanID, ok := val.(pcommon.SpanID); ok {
+ tCtx.GetLogRecord().SetSpanID(newSpanID)
+ }
+ return nil
+ },
+ }
+}
+
+func accessStringSpanID() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ id := tCtx.GetLogRecord().SpanID()
+ return hex.EncodeToString(id[:]), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if str, ok := val.(string); ok {
+ id, err := ottlcommon.ParseSpanID(str)
+ if err != nil {
+ return err
+ }
+ tCtx.GetLogRecord().SetSpanID(id)
+ }
+ return nil
+ },
+ }
+}
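`accessStringTraceID` above delegates hex parsing to the internal `ottlcommon.ParseTraceID` helper. The following self-contained sketch shows the same round trip using only the standard library and public pdata types; it is an approximation of what that helper presumably does, not its actual implementation.

```go
package main

import (
	"encoding/hex"
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	lr := plog.NewLogRecord()

	// Setter direction: decode 32 hex characters into the 16-byte array
	// backing pcommon.TraceID.
	raw, err := hex.DecodeString("0102030405060708090a0b0c0d0e0f10")
	if err != nil || len(raw) != 16 {
		panic("trace id must be exactly 32 hex characters")
	}
	var id [16]byte
	copy(id[:], raw)
	lr.SetTraceID(pcommon.TraceID(id))

	// Getter direction, exactly as in accessStringTraceID above.
	got := lr.TraceID()
	fmt.Println(hex.EncodeToString(got[:])) // 0102030405060708090a0b0c0d0e0f10
}
```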
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/README.md
new file mode 100644
index 00000000000..9ea07ab1bfd
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/README.md
@@ -0,0 +1,46 @@
+# Metric Context
+
+The Metric Context is a Context implementation for [pdata Metric](https://github.com/open-telemetry/opentelemetry-collector/tree/main/pdata/pmetric), the collector's internal representation for OTLP metrics. This Context should be used when interacting with individual OTLP metrics.
+
+## Paths
+In general, the Metric Context supports accessing pdata using the field names from the [metrics proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto). All integers are returned and set via `int64`. All doubles are returned and set via `float64`.
+
+The following paths are supported.
+
+| path | field accessed | type |
+|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------|
+| cache | the value of the current transform context's temporary cache. cache can be used as a temporary placeholder for data during complex transformations | pcommon.Map |
+| cache\[""\] | the value of an item in cache | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource | resource of the metric being processed | pcommon.Resource |
+| resource.attributes | resource attributes of the metric being processed | pcommon.Map |
+| resource.attributes\[""\] | the value of the resource attribute of the metric being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| instrumentation_scope | instrumentation scope of the metric being processed | pcommon.InstrumentationScope |
+| instrumentation_scope.name | name of the instrumentation scope of the metric being processed | string |
+| instrumentation_scope.version | version of the instrumentation scope of the metric being processed | string |
+| instrumentation_scope.attributes | instrumentation scope attributes of the metric being processed | pcommon.Map |
+| instrumentation_scope.attributes\[""\] | the value of the instrumentation scope attribute of the metric being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| name | the name of the metric | string |
+| description | the description of the metric | string |
+| unit | the unit of the metric | string |
+| type | the data type of the metric | int64 |
+| aggregation_temporality | the aggregation temporality of the metric | int64 |
+| is_monotonic | the monotonicity of the metric | bool |
+| data_points | the data points of the metric | pmetric.NumberDataPointSlice, pmetric.HistogramDataPointSlice, pmetric.ExponentialHistogramDataPointSlice, or pmetric.SummaryDataPointSlice |
+
+## Enums
+
+The Metrics Context supports the enum names from the [metrics proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto).
+
+In addition, it supports an enum for the metric data type, with the numeric value being [defined by pdata](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pmetric/metrics.go).
+
+| Enum Symbol | Value |
+|----------------------------------------|-------|
+| AGGREGATION_TEMPORALITY_UNSPECIFIED | 0 |
+| AGGREGATION_TEMPORALITY_DELTA | 1 |
+| AGGREGATION_TEMPORALITY_CUMULATIVE | 2 |
+| METRIC_DATA_TYPE_NONE | 0 |
+| METRIC_DATA_TYPE_GAUGE | 1 |
+| METRIC_DATA_TYPE_SUM | 2 |
+| METRIC_DATA_TYPE_HISTOGRAM | 3 |
+| METRIC_DATA_TYPE_EXPONENTIAL_HISTOGRAM | 4 |
+| METRIC_DATA_TYPE_SUMMARY | 5 |
\ No newline at end of file
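A short illustration of the data-type enum: switching a metric's data with one of the `SetEmpty*` calls changes what `Type()` reports, and the OTTL `type` path returns that value as `int64`. This is an editorial sketch; the `pmetric.MetricType` naming is assumed from the v0.74 pdata API.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric()
	m.SetName("http.server.duration")
	m.SetEmptyHistogram() // switches the metric's data type to histogram

	// The OTTL `type` path returns this as int64; METRIC_DATA_TYPE_HISTOGRAM == 3
	// in the enum table above, matching pdata's numbering.
	fmt.Println(int64(m.Type())) // 3
}
```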
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go
new file mode 100644
index 00000000000..65dbb2315be
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric/metrics.go
@@ -0,0 +1,159 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlmetric // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+)
+
+var _ ottlcommon.ResourceContext = TransformContext{}
+var _ ottlcommon.InstrumentationScopeContext = TransformContext{}
+var _ ottlcommon.MetricContext = TransformContext{}
+
+type TransformContext struct {
+ metric pmetric.Metric
+ instrumentationScope pcommon.InstrumentationScope
+ resource pcommon.Resource
+ cache pcommon.Map
+}
+
+type Option func(*ottl.Parser[TransformContext])
+
+func NewTransformContext(metric pmetric.Metric, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext {
+ return TransformContext{
+ metric: metric,
+ instrumentationScope: instrumentationScope,
+ resource: resource,
+ cache: pcommon.NewMap(),
+ }
+}
+
+func (tCtx TransformContext) GetMetric() pmetric.Metric {
+ return tCtx.metric
+}
+
+func (tCtx TransformContext) GetInstrumentationScope() pcommon.InstrumentationScope {
+ return tCtx.instrumentationScope
+}
+
+func (tCtx TransformContext) GetResource() pcommon.Resource {
+ return tCtx.resource
+}
+
+func (tCtx TransformContext) getCache() pcommon.Map {
+ return tCtx.cache
+}
+
+func NewParser(functions map[string]interface{}, telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) {
+ p, err := ottl.NewParser[TransformContext](
+ functions,
+ parsePath,
+ telemetrySettings,
+ ottl.WithEnumParser[TransformContext](parseEnum),
+ )
+ if err != nil {
+ return ottl.Parser[TransformContext]{}, err
+ }
+ for _, opt := range options {
+ opt(&p)
+ }
+	return p, nil
+}
+
+type StatementsOption func(*ottl.Statements[TransformContext])
+
+func WithErrorMode(errorMode ottl.ErrorMode) StatementsOption {
+ return func(s *ottl.Statements[TransformContext]) {
+ ottl.WithErrorMode[TransformContext](errorMode)(s)
+ }
+}
+
+func NewStatements(statements []*ottl.Statement[TransformContext], telemetrySettings component.TelemetrySettings, options ...StatementsOption) ottl.Statements[TransformContext] {
+ s := ottl.NewStatements(statements, telemetrySettings)
+ for _, op := range options {
+ op(&s)
+ }
+ return s
+}
+
+var symbolTable = ottlcommon.MetricSymbolTable
+
+func parseEnum(val *ottl.EnumSymbol) (*ottl.Enum, error) {
+ if val != nil {
+ if enum, ok := symbolTable[*val]; ok {
+ return &enum, nil
+ }
+ return nil, fmt.Errorf("enum symbol, %s, not found", *val)
+ }
+ return nil, fmt.Errorf("enum symbol not provided")
+}
+
+func parsePath(val *ottl.Path) (ottl.GetSetter[TransformContext], error) {
+ if val != nil && len(val.Fields) > 0 {
+ return newPathGetSetter(val.Fields)
+ }
+ return nil, fmt.Errorf("bad path %v", val)
+}
+
+func newPathGetSetter(path []ottl.Field) (ottl.GetSetter[TransformContext], error) {
+ switch path[0].Name {
+ case "cache":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessCache(), nil
+ }
+ return accessCacheKey(mapKey), nil
+ case "resource":
+ return ottlcommon.ResourcePathGetSetter[TransformContext](path[1:])
+ case "instrumentation_scope":
+ return ottlcommon.ScopePathGetSetter[TransformContext](path[1:])
+ default:
+ return ottlcommon.MetricPathGetSetter[TransformContext](path)
+ }
+}
+
+func accessCache() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.getCache(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if m, ok := val.(pcommon.Map); ok {
+ m.CopyTo(tCtx.getCache())
+ }
+ return nil
+ },
+ }
+}
+
+func accessCacheKey(mapKey *string) ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return ottlcommon.GetMapValue(tCtx.getCache(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ ottlcommon.SetMapValue(tCtx.getCache(), *mapKey, val)
+ return nil
+ },
+ }
+}
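The cache accessors above copy whole maps with `pcommon.Map.CopyTo` and read individual keys back out with `GetMapValue`/`SetMapValue`. A minimal sketch of those map semantics in isolation (the key names are made up for the example):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	cache := pcommon.NewMap() // each TransformContext starts with a fresh, empty map

	// Setting cache["name"] stores a scalar; setting the whole cache copies a
	// map over it, which is what the Setter above does via pcommon.Map.CopyTo.
	staging := pcommon.NewMap()
	staging.PutStr("name", "checkout")
	staging.PutInt("attempt", 3)
	staging.CopyTo(cache)

	if v, ok := cache.Get("name"); ok {
		fmt.Println(v.Str()) // checkout
	}
}
```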
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/README.md
new file mode 100644
index 00000000000..c37d855e20e
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/README.md
@@ -0,0 +1,20 @@
+# Resource Context
+
+The Resource Context is a Context implementation for [pdata Resources](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pcommon/generated_resource.go), the Collector's internal representation for an OTLP Resource. This Context should be used when interacting only with OTLP resources.
+
+## Paths
+In general, the Resource Context supports accessing pdata using the field names from the [resource proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/resource/v1/resource.proto). All integers are returned and set via `int64`. All doubles are returned and set via `float64`.
+
+The following paths are supported.
+
+| path | field accessed | type |
+|--------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------|
+| cache | the value of the current transform context's temporary cache. cache can be used as a temporary placeholder for data during complex transformations | pcommon.Map |
+| cache\[""\] | the value of an item in cache | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| attributes | attributes of the resource being processed | pcommon.Map |
+| attributes\[""\] | the value of the attribute of the resource being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| dropped_attributes_count | number of dropped attributes of the resource being processed | int64 |
+
+## Enums
+
+The Resource Context does not define any Enums at this time.
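For completeness, a tiny sketch of what the `attributes[""]` and `dropped_attributes_count` paths resolve against on this context; the attribute key and value are illustrative.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	res := pcommon.NewResource()
	res.Attributes().PutStr("service.name", "tempo-distributor")

	// attributes["service.name"] on this context resolves to the entry above.
	if v, ok := res.Attributes().Get("service.name"); ok {
		fmt.Println(v.Str())
	}
	fmt.Println(int64(res.DroppedAttributesCount())) // dropped_attributes_count path, 0 here
}
```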
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go
new file mode 100644
index 00000000000..6dcbe73b5f4
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource/resource.go
@@ -0,0 +1,132 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlresource // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+)
+
+var _ ottlcommon.ResourceContext = TransformContext{}
+
+type TransformContext struct {
+ resource pcommon.Resource
+ cache pcommon.Map
+}
+
+type Option func(*ottl.Parser[TransformContext])
+
+func NewTransformContext(resource pcommon.Resource) TransformContext {
+ return TransformContext{
+ resource: resource,
+ cache: pcommon.NewMap(),
+ }
+}
+
+func (tCtx TransformContext) GetResource() pcommon.Resource {
+ return tCtx.resource
+}
+
+func (tCtx TransformContext) getCache() pcommon.Map {
+ return tCtx.cache
+}
+
+func NewParser(functions map[string]interface{}, telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) {
+ p, err := ottl.NewParser[TransformContext](
+ functions,
+ parsePath,
+ telemetrySettings,
+ ottl.WithEnumParser[TransformContext](parseEnum),
+ )
+ if err != nil {
+ return ottl.Parser[TransformContext]{}, err
+ }
+ for _, opt := range options {
+ opt(&p)
+ }
+ return p, nil
+}
+
+type StatementsOption func(*ottl.Statements[TransformContext])
+
+func WithErrorMode(errorMode ottl.ErrorMode) StatementsOption {
+ return func(s *ottl.Statements[TransformContext]) {
+ ottl.WithErrorMode[TransformContext](errorMode)(s)
+ }
+}
+
+func NewStatements(statements []*ottl.Statement[TransformContext], telemetrySettings component.TelemetrySettings, options ...StatementsOption) ottl.Statements[TransformContext] {
+ s := ottl.NewStatements(statements, telemetrySettings)
+ for _, op := range options {
+ op(&s)
+ }
+ return s
+}
+
+func parseEnum(_ *ottl.EnumSymbol) (*ottl.Enum, error) {
+ return nil, fmt.Errorf("resource context does not provide Enum support")
+}
+
+func parsePath(val *ottl.Path) (ottl.GetSetter[TransformContext], error) {
+ if val != nil && len(val.Fields) > 0 {
+ return newPathGetSetter(val.Fields)
+ }
+ return nil, fmt.Errorf("bad path %v", val)
+}
+
+func newPathGetSetter(path []ottl.Field) (ottl.GetSetter[TransformContext], error) {
+ switch path[0].Name {
+ case "cache":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessCache(), nil
+ }
+ return accessCacheKey(mapKey), nil
+ default:
+ return ottlcommon.ResourcePathGetSetter[TransformContext](path)
+ }
+}
+
+func accessCache() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.getCache(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if m, ok := val.(pcommon.Map); ok {
+ m.CopyTo(tCtx.getCache())
+ }
+ return nil
+ },
+ }
+}
+
+func accessCacheKey(mapKey *string) ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return ottlcommon.GetMapValue(tCtx.getCache(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ ottlcommon.SetMapValue(tCtx.getCache(), *mapKey, val)
+ return nil
+ },
+ }
+}
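Wiring the pieces together: a compile-only sketch of building `Statements` for this context with an error mode. `ottl.IgnoreError` and `ottl.PropagateError` are assumed to be the `ottl.ErrorMode` values at v0.74, and a real caller would pass its component's `TelemetrySettings` rather than the zero value used here.

```go
package main

import (
	"go.opentelemetry.io/collector/component"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource"
)

func main() {
	// parsed would normally come from ottl.Parser.ParseStatements; left empty
	// here so the sketch stays self-contained.
	var parsed []*ottl.Statement[ottlresource.TransformContext]

	stmts := ottlresource.NewStatements(parsed, component.TelemetrySettings{},
		ottlresource.WithErrorMode(ottl.IgnoreError))
	_ = stmts
}
```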
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/README.md
new file mode 100644
index 00000000000..aa6e77b89eb
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/README.md
@@ -0,0 +1,64 @@
+# Span Context
+
+The Span Context is a Context implementation for [pdata Spans](https://github.com/open-telemetry/opentelemetry-collector/tree/main/pdata/ptrace), the collector's internal representation for OTLP span data. This Context should be used when interacting with OTLP spans.
+
+## Paths
+In general, the Span Context supports accessing pdata using the field names from the [traces proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto). All integers are returned and set via `int64`. All doubles are returned and set via `float64`.
+
+All TraceIDs and SpanIDs are returned as pdata [SpanID](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pcommon/spanid.go) and [TraceID](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/pcommon/traceid.go) types. Use the [SpanID function](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md#spanid) and [TraceID function](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/ottlfuncs/README.md#traceid) when interacting with pdata representations of SpanID and TraceID. These IDs are never nil; to check whether one is unset, compare it against the all-zero value (`SpanID(0x0000000000000000)` and `TraceID(0x00000000000000000000000000000000)`).
+
+The following paths are supported.
+
+| path | field accessed | type |
+|------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------|
+| cache | the value of the current transform context's temporary cache. cache can be used as a temporary placeholder for data during complex transformations | pcommon.Map |
+| cache\[""\] | the value of an item in cache | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource | resource of the span being processed | pcommon.Resource |
+| resource.attributes | resource attributes of the span being processed | pcommon.Map |
+| resource.attributes\[""\] | the value of the resource attribute of the span being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource.dropped_attributes_count | number of dropped attributes of the resource of the span being processed | int64 |
+| instrumentation_scope | instrumentation scope of the span being processed | pcommon.InstrumentationScope |
+| instrumentation_scope.name | name of the instrumentation scope of the span being processed | string |
+| instrumentation_scope.version | version of the instrumentation scope of the span being processed | string |
+| instrumentation_scope.dropped_attributes_count | number of dropped attributes of the instrumentation scope of the span being processed | int64 |
+| instrumentation_scope.attributes | instrumentation scope attributes of the span being processed | pcommon.Map |
+| instrumentation_scope.attributes\[""\] | the value of the instrumentation scope attribute of the span being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| attributes | attributes of the span being processed | pcommon.Map |
+| attributes\[""\] | the value of the attribute of the span being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| trace_id | a byte slice representation of the trace id | pcommon.TraceID |
+| trace_id.string | a string representation of the trace id | string |
+| span_id | a byte slice representation of the span id | pcommon.SpanID |
+| span_id.string | a string representation of the span id | string |
+| parent_span_id | a byte slice representation of the parent span id | pcommon.SpanID |
+| parent_span_id.string | a string representation of the parent span id | string |
+| trace_state | the trace state of the current span | string |
+| trace_state\[""\] | an individual entry in the trace state | string |
+| status | the status of the span being processed | ptrace.Status |
+| status.code | the status code of the span being processed | int64 |
+| status.message | the status message of the span being processed | string |
+| name | the name of the span | string |
+| kind | the kind of the span | int64 |
+| start_time_unix_nano | the start time in unix nano of the span | int64 |
+| end_time_unix_nano | the end time in unix nano of the span | int64 |
+| dropped_attributes_count | the dropped attributes count of the span | int64 |
+| events | the events of the span | ptrace.SpanEventSlice |
+| dropped_events_count | the dropped events count of the span | int64 |
+| links | the links of the span | ptrace.SpanLinkSlice |
+| dropped_links_count | the dropped links count of the span | int64 |
+
+## Enums
+
+The Span Context supports the enum names from the [traces proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto).
+
+| Enum Symbol | Value |
+|-----------------------|-------|
+| SPAN_KIND_UNSPECIFIED | 0 |
+| SPAN_KIND_INTERNAL | 1 |
+| SPAN_KIND_SERVER | 2 |
+| SPAN_KIND_CLIENT | 3 |
+| SPAN_KIND_PRODUCER | 4 |
+| SPAN_KIND_CONSUMER | 5 |
+| STATUS_CODE_UNSET | 0 |
+| STATUS_CODE_OK | 1 |
+| STATUS_CODE_ERROR | 2 |
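+
+As an illustrative sketch, enum symbols can be used wherever an `int64` is expected, for example: `set(status.code, STATUS_CODE_OK) where kind == SPAN_KIND_INTERNAL`.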
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go
new file mode 100644
index 00000000000..e52b9446a97
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan/span.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlspan // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/ptrace"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+)
+
+var _ ottlcommon.ResourceContext = TransformContext{}
+var _ ottlcommon.InstrumentationScopeContext = TransformContext{}
+
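+// TransformContext contains the span being transformed together with its
+// instrumentation scope and resource, plus a temporary cache that statements
+// can use to stash intermediate values.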
+type TransformContext struct {
+ span ptrace.Span
+ instrumentationScope pcommon.InstrumentationScope
+ resource pcommon.Resource
+ cache pcommon.Map
+}
+
+type Option func(*ottl.Parser[TransformContext])
+
+func NewTransformContext(span ptrace.Span, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext {
+ return TransformContext{
+ span: span,
+ instrumentationScope: instrumentationScope,
+ resource: resource,
+ cache: pcommon.NewMap(),
+ }
+}
+
+func (tCtx TransformContext) GetSpan() ptrace.Span {
+ return tCtx.span
+}
+
+func (tCtx TransformContext) GetInstrumentationScope() pcommon.InstrumentationScope {
+ return tCtx.instrumentationScope
+}
+
+func (tCtx TransformContext) GetResource() pcommon.Resource {
+ return tCtx.resource
+}
+
+func (tCtx TransformContext) getCache() pcommon.Map {
+ return tCtx.cache
+}
+
+func NewParser(functions map[string]interface{}, telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) {
+ p, err := ottl.NewParser[TransformContext](
+ functions,
+ parsePath,
+ telemetrySettings,
+ ottl.WithEnumParser[TransformContext](parseEnum),
+ )
+ if err != nil {
+ return ottl.Parser[TransformContext]{}, err
+ }
+ for _, opt := range options {
+ opt(&p)
+ }
+ return p, nil
+}
+
+type StatementsOption func(*ottl.Statements[TransformContext])
+
+func WithErrorMode(errorMode ottl.ErrorMode) StatementsOption {
+ return func(s *ottl.Statements[TransformContext]) {
+ ottl.WithErrorMode[TransformContext](errorMode)(s)
+ }
+}
+
+func NewStatements(statements []*ottl.Statement[TransformContext], telemetrySettings component.TelemetrySettings, options ...StatementsOption) ottl.Statements[TransformContext] {
+ s := ottl.NewStatements(statements, telemetrySettings)
+ for _, op := range options {
+ op(&s)
+ }
+ return s
+}
+
+func parseEnum(val *ottl.EnumSymbol) (*ottl.Enum, error) {
+ if val != nil {
+ if enum, ok := ottlcommon.SpanSymbolTable[*val]; ok {
+ return &enum, nil
+ }
+ return nil, fmt.Errorf("enum symbol, %s, not found", *val)
+ }
+ return nil, fmt.Errorf("enum symbol not provided")
+}
+
+func parsePath(val *ottl.Path) (ottl.GetSetter[TransformContext], error) {
+ if val != nil && len(val.Fields) > 0 {
+ return newPathGetSetter(val.Fields)
+ }
+ return nil, fmt.Errorf("bad path %v", val)
+}
+
+func newPathGetSetter(path []ottl.Field) (ottl.GetSetter[TransformContext], error) {
+ switch path[0].Name {
+ case "cache":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessCache(), nil
+ }
+ return accessCacheKey(mapKey), nil
+ case "resource":
+ return ottlcommon.ResourcePathGetSetter[TransformContext](path[1:])
+ case "instrumentation_scope":
+ return ottlcommon.ScopePathGetSetter[TransformContext](path[1:])
+ default:
+ return ottlcommon.SpanPathGetSetter[TransformContext](path)
+ }
+}
+
+func accessCache() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.getCache(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if m, ok := val.(pcommon.Map); ok {
+ m.CopyTo(tCtx.getCache())
+ }
+ return nil
+ },
+ }
+}
+
+func accessCacheKey(mapKey *string) ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return ottlcommon.GetMapValue(tCtx.getCache(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ ottlcommon.SetMapValue(tCtx.getCache(), *mapKey, val)
+ return nil
+ },
+ }
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/README.md
new file mode 100644
index 00000000000..9050939de91
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/README.md
@@ -0,0 +1,44 @@
+# Span Event Context
+
+The Span Event Context is a Context implementation for [pdata SpanEvents](https://github.com/open-telemetry/opentelemetry-collector/blob/main/pdata/ptrace/generated_spanevent.go), the Collector's internal representation for OTLP Span Event data. This Context should be used when interacting with individual OTLP Span Events.
+
+## Paths
+In general, the Span Event Context supports accessing pdata using the field names from the [traces proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto). All integers are returned and set via `int64`. All doubles are returned and set via `float64`.
+
+The following paths are supported.
+
+| path | field accessed | type |
+|----------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------|
+| cache | the value of the current transform context's temporary cache. cache can be used as a temporary placeholder for data during complex transformations | pcommon.Map |
+| cache\[""\] | the value of an item in cache | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| resource | resource of the span event being processed | pcommon.Resource |
+| resource.attributes | resource attributes of the span event being processed | pcommon.Map |
+| resource.attributes\[""\] | the value of the resource attribute of the span event being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| instrumentation_scope | instrumentation scope of the span event being processed | pcommon.InstrumentationScope |
+| instrumentation_scope.name | name of the instrumentation scope of the span event being processed | string |
+| instrumentation_scope.version | version of the instrumentation scope of the span event being processed | string |
+| instrumentation_scope.attributes | instrumentation scope attributes of the span event being processed | pcommon.Map |
+| instrumentation_scope.attributes\[""\] | the value of the instrumentation scope attribute of the span event being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| span | span of the span event being processed | ptrace.Span |
+| span.*                                  | All fields exposed by the [ottlspan context](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/contexts/ottlspan) can be accessed via `span.` | varies                                                                   |
+| attributes | attributes of the span event being processed | pcommon.Map |
+| attributes\[""\] | the value of the attribute of the span event being processed | string, bool, int64, float64, pcommon.Map, pcommon.Slice, []byte or nil |
+| time_unix_nano | time_unix_nano of the span event being processed | int64 |
+| name | name of the span event being processed | string |
+| dropped_attributes_count | dropped_attributes_count of the span event being processed | int64 |
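+
+As an illustrative sketch (the attribute name is hypothetical), an event's offset from its span's start could be computed with: `set(attributes["offset_ns"], time_unix_nano - span.start_time_unix_nano)`.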
+
+## Enums
+
+The Span Event Context supports the enum names from the [traces proto](https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/trace/v1/trace.proto).
+
+| Enum Symbol | Value |
+|-----------------------|-------|
+| SPAN_KIND_UNSPECIFIED | 0 |
+| SPAN_KIND_INTERNAL | 1 |
+| SPAN_KIND_SERVER | 2 |
+| SPAN_KIND_CLIENT | 3 |
+| SPAN_KIND_PRODUCER | 4 |
+| SPAN_KIND_CONSUMER | 5 |
+| STATUS_CODE_UNSET | 0 |
+| STATUS_CODE_OK | 1 |
+| STATUS_CODE_ERROR | 2 |
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go
new file mode 100644
index 00000000000..cf7a7c092a3
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent/span_events.go
@@ -0,0 +1,246 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlspanevent // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/ptrace"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon"
+)
+
+var _ ottlcommon.ResourceContext = TransformContext{}
+var _ ottlcommon.InstrumentationScopeContext = TransformContext{}
+var _ ottlcommon.SpanContext = TransformContext{}
+
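+// TransformContext carries the span event being transformed along with its
+// owning span, instrumentation scope, resource, and a temporary cache.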
+type TransformContext struct {
+ spanEvent ptrace.SpanEvent
+ span ptrace.Span
+ instrumentationScope pcommon.InstrumentationScope
+ resource pcommon.Resource
+ cache pcommon.Map
+}
+
+type Option func(*ottl.Parser[TransformContext])
+
+func NewTransformContext(spanEvent ptrace.SpanEvent, span ptrace.Span, instrumentationScope pcommon.InstrumentationScope, resource pcommon.Resource) TransformContext {
+ return TransformContext{
+ spanEvent: spanEvent,
+ span: span,
+ instrumentationScope: instrumentationScope,
+ resource: resource,
+ cache: pcommon.NewMap(),
+ }
+}
+
+func (tCtx TransformContext) GetSpanEvent() ptrace.SpanEvent {
+ return tCtx.spanEvent
+}
+
+func (tCtx TransformContext) GetSpan() ptrace.Span {
+ return tCtx.span
+}
+
+func (tCtx TransformContext) GetInstrumentationScope() pcommon.InstrumentationScope {
+ return tCtx.instrumentationScope
+}
+
+func (tCtx TransformContext) GetResource() pcommon.Resource {
+ return tCtx.resource
+}
+
+func (tCtx TransformContext) getCache() pcommon.Map {
+ return tCtx.cache
+}
+
+func NewParser(functions map[string]interface{}, telemetrySettings component.TelemetrySettings, options ...Option) (ottl.Parser[TransformContext], error) {
+ p, err := ottl.NewParser[TransformContext](
+ functions,
+ parsePath,
+ telemetrySettings,
+ ottl.WithEnumParser[TransformContext](parseEnum),
+ )
+ if err != nil {
+ return ottl.Parser[TransformContext]{}, err
+ }
+ for _, opt := range options {
+ opt(&p)
+ }
+ return p, nil
+}
+
+type StatementsOption func(*ottl.Statements[TransformContext])
+
+func WithErrorMode(errorMode ottl.ErrorMode) StatementsOption {
+ return func(s *ottl.Statements[TransformContext]) {
+ ottl.WithErrorMode[TransformContext](errorMode)(s)
+ }
+}
+
+func NewStatements(statements []*ottl.Statement[TransformContext], telemetrySettings component.TelemetrySettings, options ...StatementsOption) ottl.Statements[TransformContext] {
+ s := ottl.NewStatements(statements, telemetrySettings)
+ for _, op := range options {
+ op(&s)
+ }
+ return s
+}
+
+func parseEnum(val *ottl.EnumSymbol) (*ottl.Enum, error) {
+ if val != nil {
+ if enum, ok := ottlcommon.SpanSymbolTable[*val]; ok {
+ return &enum, nil
+ }
+ return nil, fmt.Errorf("enum symbol, %s, not found", *val)
+ }
+ return nil, fmt.Errorf("enum symbol not provided")
+}
+
+func parsePath(val *ottl.Path) (ottl.GetSetter[TransformContext], error) {
+ if val != nil && len(val.Fields) > 0 {
+ return newPathGetSetter(val.Fields)
+ }
+ return nil, fmt.Errorf("bad path %v", val)
+}
+
+func newPathGetSetter(path []ottl.Field) (ottl.GetSetter[TransformContext], error) {
+ switch path[0].Name {
+ case "cache":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessCache(), nil
+ }
+ return accessCacheKey(mapKey), nil
+ case "resource":
+ return ottlcommon.ResourcePathGetSetter[TransformContext](path[1:])
+ case "instrumentation_scope":
+ return ottlcommon.ScopePathGetSetter[TransformContext](path[1:])
+ case "span":
+ return ottlcommon.SpanPathGetSetter[TransformContext](path[1:])
+ case "time_unix_nano":
+ return accessSpanEventTimeUnixNano(), nil
+ case "name":
+ return accessSpanEventName(), nil
+ case "attributes":
+ mapKey := path[0].MapKey
+ if mapKey == nil {
+ return accessSpanEventAttributes(), nil
+ }
+ return accessSpanEventAttributesKey(mapKey), nil
+ case "dropped_attributes_count":
+ return accessSpanEventDroppedAttributeCount(), nil
+ }
+
+	return nil, fmt.Errorf("invalid path expression %v", path)
+}
+
+func accessCache() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.getCache(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if m, ok := val.(pcommon.Map); ok {
+ m.CopyTo(tCtx.getCache())
+ }
+ return nil
+ },
+ }
+}
+
+func accessCacheKey(mapKey *string) ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return ottlcommon.GetMapValue(tCtx.getCache(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ ottlcommon.SetMapValue(tCtx.getCache(), *mapKey, val)
+ return nil
+ },
+ }
+}
+
+func accessSpanEventTimeUnixNano() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.GetSpanEvent().Timestamp().AsTime().UnixNano(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newTimestamp, ok := val.(int64); ok {
+ tCtx.GetSpanEvent().SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, newTimestamp)))
+ }
+ return nil
+ },
+ }
+}
+
+func accessSpanEventName() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.GetSpanEvent().Name(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newName, ok := val.(string); ok {
+ tCtx.GetSpanEvent().SetName(newName)
+ }
+ return nil
+ },
+ }
+}
+
+func accessSpanEventAttributes() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return tCtx.GetSpanEvent().Attributes(), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.CopyTo(tCtx.GetSpanEvent().Attributes())
+ }
+ return nil
+ },
+ }
+}
+
+func accessSpanEventAttributesKey(mapKey *string) ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return ottlcommon.GetMapValue(tCtx.GetSpanEvent().Attributes(), *mapKey), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ ottlcommon.SetMapValue(tCtx.GetSpanEvent().Attributes(), *mapKey, val)
+ return nil
+ },
+ }
+}
+
+func accessSpanEventDroppedAttributeCount() ottl.StandardGetSetter[TransformContext] {
+ return ottl.StandardGetSetter[TransformContext]{
+ Getter: func(ctx context.Context, tCtx TransformContext) (interface{}, error) {
+ return int64(tCtx.GetSpanEvent().DroppedAttributesCount()), nil
+ },
+ Setter: func(ctx context.Context, tCtx TransformContext, val interface{}) error {
+ if newCount, ok := val.(int64); ok {
+ tCtx.GetSpanEvent().SetDroppedAttributesCount(uint32(newCount))
+ }
+ return nil
+ },
+ }
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go
new file mode 100644
index 00000000000..a65c013cbda
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/expression.go
@@ -0,0 +1,184 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+
+import (
+ "context"
+ "fmt"
+)
+
+type ExprFunc[K any] func(ctx context.Context, tCtx K) (interface{}, error)
+
+type Expr[K any] struct {
+ exprFunc ExprFunc[K]
+}
+
+func (e Expr[K]) Eval(ctx context.Context, tCtx K) (interface{}, error) {
+ return e.exprFunc(ctx, tCtx)
+}
+
+type Getter[K any] interface {
+ Get(ctx context.Context, tCtx K) (interface{}, error)
+}
+
+type Setter[K any] interface {
+ Set(ctx context.Context, tCtx K, val interface{}) error
+}
+
+type GetSetter[K any] interface {
+ Getter[K]
+ Setter[K]
+}
+
+type StandardGetSetter[K any] struct {
+ Getter func(ctx context.Context, tCtx K) (interface{}, error)
+ Setter func(ctx context.Context, tCtx K, val interface{}) error
+}
+
+func (path StandardGetSetter[K]) Get(ctx context.Context, tCtx K) (interface{}, error) {
+ return path.Getter(ctx, tCtx)
+}
+
+func (path StandardGetSetter[K]) Set(ctx context.Context, tCtx K, val interface{}) error {
+ return path.Setter(ctx, tCtx, val)
+}
+
+type literal[K any] struct {
+ value interface{}
+}
+
+func (l literal[K]) Get(context.Context, K) (interface{}, error) {
+ return l.value, nil
+}
+
+type exprGetter[K any] struct {
+ expr Expr[K]
+}
+
+func (g exprGetter[K]) Get(ctx context.Context, tCtx K) (interface{}, error) {
+ return g.expr.Eval(ctx, tCtx)
+}
+
+type listGetter[K any] struct {
+ slice []Getter[K]
+}
+
+func (l *listGetter[K]) Get(ctx context.Context, tCtx K) (interface{}, error) {
+ evaluated := make([]any, len(l.slice))
+
+ for i, v := range l.slice {
+ val, err := v.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ evaluated[i] = val
+ }
+
+ return evaluated, nil
+}
+
+type StringGetter[K any] interface {
+ Get(ctx context.Context, tCtx K) (string, error)
+}
+
+type IntGetter[K any] interface {
+ Get(ctx context.Context, tCtx K) (int64, error)
+}
+
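+// StandardTypeGetter wraps an untyped Getter and asserts its result to T,
+// returning an error when the underlying value is nil or of a different type.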
+type StandardTypeGetter[K any, T any] struct {
+ Getter func(ctx context.Context, tCtx K) (interface{}, error)
+}
+
+func (g StandardTypeGetter[K, T]) Get(ctx context.Context, tCtx K) (T, error) {
+ var v T
+ val, err := g.Getter(ctx, tCtx)
+ if err != nil {
+ return v, err
+ }
+ if val == nil {
+ return v, fmt.Errorf("expected %T but got nil", v)
+ }
+ v, ok := val.(T)
+ if !ok {
+ return v, fmt.Errorf("expected %T but got %T", v, val)
+ }
+ return v, nil
+}
+
+func (p *Parser[K]) newGetter(val value) (Getter[K], error) {
+ if val.IsNil != nil && *val.IsNil {
+ return &literal[K]{value: nil}, nil
+ }
+
+ if s := val.String; s != nil {
+ return &literal[K]{value: *s}, nil
+ }
+ if b := val.Bool; b != nil {
+ return &literal[K]{value: bool(*b)}, nil
+ }
+ if b := val.Bytes; b != nil {
+ return &literal[K]{value: ([]byte)(*b)}, nil
+ }
+
+ if val.Enum != nil {
+ enum, err := p.enumParser(val.Enum)
+ if err != nil {
+ return nil, err
+ }
+ return &literal[K]{value: int64(*enum)}, nil
+ }
+
+ if eL := val.Literal; eL != nil {
+ if f := eL.Float; f != nil {
+ return &literal[K]{value: *f}, nil
+ }
+ if i := eL.Int; i != nil {
+ return &literal[K]{value: *i}, nil
+ }
+ if eL.Path != nil {
+ return p.pathParser(eL.Path)
+ }
+ if eL.Converter != nil {
+ call, err := p.newFunctionCall(invocation{
+ Function: eL.Converter.Function,
+ Arguments: eL.Converter.Arguments,
+ })
+ if err != nil {
+ return nil, err
+ }
+ return &exprGetter[K]{
+ expr: call,
+ }, nil
+ }
+ }
+
+ if val.List != nil {
+ lg := listGetter[K]{slice: make([]Getter[K], len(val.List.Values))}
+ for i, v := range val.List.Values {
+ getter, err := p.newGetter(v)
+ if err != nil {
+ return nil, err
+ }
+ lg.slice[i] = getter
+ }
+ return &lg, nil
+ }
+
+ if val.MathExpression == nil {
+ // In practice, can't happen since the DSL grammar guarantees one is set
+ return nil, fmt.Errorf("no value field set. This is a bug in the OpenTelemetry Transformation Language")
+ }
+ return p.evaluateMathExpression(val.MathExpression)
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go
new file mode 100644
index 00000000000..53cbdbc7f76
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/functions.go
@@ -0,0 +1,231 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
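+// PathExpressionParser resolves a Path from the grammar into a GetSetter for
+// the context type K; each OTTL context provides its own implementation.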
+type PathExpressionParser[K any] func(*Path) (GetSetter[K], error)
+
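+// EnumParser resolves an enum symbol from a statement into its Enum value.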
+type EnumParser func(*EnumSymbol) (*Enum, error)
+
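+// Enum is the int64 value that an enum symbol resolves to.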
+type Enum int64
+
+func (p *Parser[K]) newFunctionCall(inv invocation) (Expr[K], error) {
+ f, ok := p.functions[inv.Function]
+ if !ok {
+ return Expr[K]{}, fmt.Errorf("undefined function %v", inv.Function)
+ }
+ args, err := p.buildArgs(inv, reflect.TypeOf(f))
+ if err != nil {
+ return Expr[K]{}, fmt.Errorf("error while parsing arguments for call to '%v': %w", inv.Function, err)
+ }
+
+ returnVals := reflect.ValueOf(f).Call(args)
+
+ if returnVals[1].IsNil() {
+ err = nil
+ } else {
+ err = returnVals[1].Interface().(error)
+ }
+
+ return Expr[K]{exprFunc: returnVals[0].Interface().(ExprFunc[K])}, err
+}
+
+func (p *Parser[K]) buildArgs(inv invocation, fType reflect.Type) ([]reflect.Value, error) {
+ var args []reflect.Value
+ // Some function arguments may be intended to take values from the calling processor
+ // instead of being passed by the caller of the OTTL function, so we have to keep
+ // track of the index of the argument passed within the DSL.
+ // e.g. TelemetrySettings, which is provided by the processor to the OTTL Parser struct.
+ DSLArgumentIndex := 0
+ for i := 0; i < fType.NumIn(); i++ {
+ argType := fType.In(i)
+
+ arg, isInternalArg := p.buildInternalArg(argType)
+ if isInternalArg {
+ args = append(args, reflect.ValueOf(arg))
+ continue
+ }
+
+ if DSLArgumentIndex >= len(inv.Arguments) {
+ return nil, fmt.Errorf("not enough arguments")
+ }
+
+ argVal := inv.Arguments[DSLArgumentIndex]
+
+ var val any
+ var err error
+ if argType.Kind() == reflect.Slice {
+ val, err = p.buildSliceArg(argVal, argType)
+ } else {
+ val, err = p.buildArg(argVal, argType)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("invalid argument at position %v: %w", DSLArgumentIndex, err)
+ }
+ args = append(args, reflect.ValueOf(val))
+
+ DSLArgumentIndex++
+ }
+
+ if len(inv.Arguments) > DSLArgumentIndex {
+ return nil, fmt.Errorf("too many arguments")
+ }
+
+ return args, nil
+}
+
+func (p *Parser[K]) buildSliceArg(argVal value, argType reflect.Type) (any, error) {
+ name := argType.Elem().Name()
+ switch {
+ case name == reflect.Uint8.String():
+ if argVal.Bytes == nil {
+ return nil, fmt.Errorf("slice parameter must be a byte slice literal")
+ }
+ return ([]byte)(*argVal.Bytes), nil
+ case name == reflect.String.String():
+ arg, err := buildSlice[string](argVal, argType, p.buildArg, name)
+ if err != nil {
+ return nil, err
+ }
+ return arg, nil
+ case name == reflect.Float64.String():
+ arg, err := buildSlice[float64](argVal, argType, p.buildArg, name)
+ if err != nil {
+ return nil, err
+ }
+ return arg, nil
+ case name == reflect.Int64.String():
+ arg, err := buildSlice[int64](argVal, argType, p.buildArg, name)
+ if err != nil {
+ return nil, err
+ }
+ return arg, nil
+ case strings.HasPrefix(name, "Getter"):
+ arg, err := buildSlice[Getter[K]](argVal, argType, p.buildArg, name)
+ if err != nil {
+ return nil, err
+ }
+ return arg, nil
+ default:
+ return nil, fmt.Errorf("unsupported slice type '%s' for function", argType.Elem().Name())
+ }
+}
+
+// Handle interfaces that can be passed as arguments to OTTL function invocations.
+func (p *Parser[K]) buildArg(argVal value, argType reflect.Type) (any, error) {
+ name := argType.Name()
+ switch {
+ case strings.HasPrefix(name, "Setter"):
+ fallthrough
+ case strings.HasPrefix(name, "GetSetter"):
+ if argVal.Literal == nil || argVal.Literal.Path == nil {
+ return nil, fmt.Errorf("must be a Path")
+ }
+ arg, err := p.pathParser(argVal.Literal.Path)
+ if err != nil {
+ return nil, err
+ }
+ return arg, nil
+ case strings.HasPrefix(name, "Getter"):
+ arg, err := p.newGetter(argVal)
+ if err != nil {
+ return nil, err
+ }
+ return arg, nil
+ case strings.HasPrefix(name, "StringGetter"):
+ arg, err := p.newGetter(argVal)
+ if err != nil {
+ return nil, err
+ }
+ return StandardTypeGetter[K, string]{Getter: arg.Get}, nil
+ case strings.HasPrefix(name, "IntGetter"):
+ arg, err := p.newGetter(argVal)
+ if err != nil {
+ return nil, err
+ }
+ return StandardTypeGetter[K, int64]{Getter: arg.Get}, nil
+ case name == "Enum":
+ arg, err := p.enumParser(argVal.Enum)
+ if err != nil {
+ return nil, fmt.Errorf("must be an Enum")
+ }
+ return *arg, nil
+ case name == reflect.String.String():
+ if argVal.String == nil {
+ return nil, fmt.Errorf("must be a string")
+ }
+ return *argVal.String, nil
+ case name == reflect.Float64.String():
+ if argVal.Literal == nil || argVal.Literal.Float == nil {
+ return nil, fmt.Errorf("must be a float")
+ }
+ return *argVal.Literal.Float, nil
+ case name == reflect.Int64.String():
+ if argVal.Literal == nil || argVal.Literal.Int == nil {
+ return nil, fmt.Errorf("must be an int")
+ }
+ return *argVal.Literal.Int, nil
+ case name == reflect.Bool.String():
+ if argVal.Bool == nil {
+ return nil, fmt.Errorf("must be a bool")
+ }
+ return bool(*argVal.Bool), nil
+ default:
+ return nil, errors.New("unsupported argument type")
+ }
+}
+
+// Handle interfaces that can be declared as parameters to a OTTL function, but will
+// never be called in an invocation. Returns whether the arg is an internal arg.
+func (p *Parser[K]) buildInternalArg(argType reflect.Type) (any, bool) {
+ if argType.Name() == "TelemetrySettings" {
+ return p.telemetrySettings, true
+ }
+ return nil, false
+}
+
+type buildArgFunc func(value, reflect.Type) (any, error)
+
+func buildSlice[T any](argVal value, argType reflect.Type, buildArg buildArgFunc, name string) (any, error) {
+ if argVal.List == nil {
+ return nil, fmt.Errorf("must be a list of type %v", name)
+ }
+
+ vals := []T{}
+ values := argVal.List.Values
+ for j := 0; j < len(values); j++ {
+ untypedVal, err := buildArg(values[j], argType.Elem())
+ if err != nil {
+ return nil, fmt.Errorf("error while parsing list argument at index %v: %w", j, err)
+ }
+
+ val, ok := untypedVal.(T)
+
+ if !ok {
+ return nil, fmt.Errorf("invalid element type at list index %v, must be of type %v", j, name)
+ }
+
+ vals = append(vals, val)
+ }
+
+ return vals, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go
new file mode 100644
index 00000000000..0d9a170d0ba
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/grammar.go
@@ -0,0 +1,435 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+
+import (
+ "encoding/hex"
+ "fmt"
+
+ "github.com/alecthomas/participle/v2/lexer"
+)
+
+// parsedStatement represents a parsed statement. It is the entry point into the statement DSL.
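+// A typical statement looks like: set(attributes["test"], "pass") where name == "GET /health".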
+type parsedStatement struct {
+ Invocation invocation `parser:"(@@"`
+ // If converter is matched then return error
+ Converter *converter `parser:"|@@)"`
+ WhereClause *booleanExpression `parser:"( 'where' @@ )?"`
+}
+
+func (p *parsedStatement) checkForCustomError() error {
+ if p.Converter != nil {
+ return fmt.Errorf("invocation names must start with a lowercase letter but got '%v'", p.Converter.Function)
+ }
+ err := p.Invocation.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ if p.WhereClause != nil {
+ return p.WhereClause.checkForCustomError()
+ }
+ return nil
+}
+
+// booleanValue represents something that evaluates to a boolean --
+// either an equality or inequality, explicit true or false, or
+// a parenthesized subexpression.
+type booleanValue struct {
+ Negation *string `parser:"@OpNot?"`
+ Comparison *comparison `parser:"( @@"`
+ ConstExpr *boolean `parser:"| @Boolean"`
+ SubExpr *booleanExpression `parser:"| '(' @@ ')' )"`
+}
+
+func (b *booleanValue) checkForCustomError() error {
+ if b.Comparison != nil {
+ return b.Comparison.checkForCustomError()
+ }
+ if b.SubExpr != nil {
+ return b.SubExpr.checkForCustomError()
+ }
+ return nil
+}
+
+// opAndBooleanValue represents the right side of an AND boolean expression.
+type opAndBooleanValue struct {
+ Operator string `parser:"@OpAnd"`
+ Value *booleanValue `parser:"@@"`
+}
+
+func (b *opAndBooleanValue) checkForCustomError() error {
+ return b.Value.checkForCustomError()
+}
+
+// term represents an arbitrary number of boolean values joined by AND.
+type term struct {
+ Left *booleanValue `parser:"@@"`
+ Right []*opAndBooleanValue `parser:"@@*"`
+}
+
+func (b *term) checkForCustomError() error {
+ err := b.Left.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ for _, r := range b.Right {
+ err = r.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// opOrTerm represents the right side of an OR boolean expression.
+type opOrTerm struct {
+ Operator string `parser:"@OpOr"`
+ Term *term `parser:"@@"`
+}
+
+func (b *opOrTerm) checkForCustomError() error {
+ return b.Term.checkForCustomError()
+}
+
+// booleanExpression represents a true/false decision expressed
+// as an arbitrary number of terms separated by OR.
+type booleanExpression struct {
+ Left *term `parser:"@@"`
+ Right []*opOrTerm `parser:"@@*"`
+}
+
+func (b *booleanExpression) checkForCustomError() error {
+ err := b.Left.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ for _, r := range b.Right {
+ err = r.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// compareOp is the type of a comparison operator.
+type compareOp int
+
+// These are the allowed values of a compareOp
+const (
+ EQ compareOp = iota
+ NE
+ LT
+ LTE
+ GTE
+ GT
+)
+
+// a fast way to get from a string to a compareOp
+var compareOpTable = map[string]compareOp{
+ "==": EQ,
+ "!=": NE,
+ "<": LT,
+ "<=": LTE,
+ ">": GT,
+ ">=": GTE,
+}
+
+// Capture is how the parser converts an operator string to a compareOp.
+func (c *compareOp) Capture(values []string) error {
+ op, ok := compareOpTable[values[0]]
+ if !ok {
+ return fmt.Errorf("'%s' is not a valid operator", values[0])
+ }
+ *c = op
+ return nil
+}
+
+// String() for compareOp gives us more legible test results and error messages.
+func (c *compareOp) String() string {
+ switch *c {
+ case EQ:
+ return "EQ"
+ case NE:
+ return "NE"
+ case LT:
+ return "LT"
+ case LTE:
+ return "LTE"
+ case GTE:
+ return "GTE"
+ case GT:
+ return "GT"
+ default:
+ return "UNKNOWN OP!"
+ }
+}
+
+// comparison represents an optional boolean condition.
+type comparison struct {
+ Left value `parser:"@@"`
+ Op compareOp `parser:"@OpComparison"`
+ Right value `parser:"@@"`
+}
+
+func (c *comparison) checkForCustomError() error {
+ err := c.Left.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ err = c.Right.checkForCustomError()
+ return err
+}
+
+// invocation represents the function call of a statement.
+type invocation struct {
+ Function string `parser:"@(Lowercase(Uppercase | Lowercase)*)"`
+ Arguments []value `parser:"'(' ( @@ ( ',' @@ )* )? ')'"`
+}
+
+func (i *invocation) checkForCustomError() error {
+ var err error
+ for _, arg := range i.Arguments {
+ err = arg.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// converter represents a converter function call.
+type converter struct {
+ Function string `parser:"@(Uppercase(Uppercase | Lowercase)*)"`
+ Arguments []value `parser:"'(' ( @@ ( ',' @@ )* )? ')'"`
+}
+
+// value represents a part of a parsed statement which is resolved to a value of some sort. This can be a telemetry path
+// mathExpression, function call, or literal.
+type value struct {
+ IsNil *isNil `parser:"( @'nil'"`
+ Literal *mathExprLiteral `parser:"| @@ (?! OpAddSub | OpMultDiv)"`
+ MathExpression *mathExpression `parser:"| @@"`
+ Bytes *byteSlice `parser:"| @Bytes"`
+ String *string `parser:"| @String"`
+ Bool *boolean `parser:"| @Boolean"`
+ Enum *EnumSymbol `parser:"| @Uppercase"`
+ List *list `parser:"| @@)"`
+}
+
+func (v *value) checkForCustomError() error {
+ if v.Literal != nil {
+ return v.Literal.checkForCustomError()
+ }
+ if v.MathExpression != nil {
+ return v.MathExpression.checkForCustomError()
+ }
+ return nil
+}
+
+// Path represents a telemetry path mathExpression.
+type Path struct {
+ Fields []Field `parser:"@@ ( '.' @@ )*"`
+}
+
+// Field is an item within a Path.
+type Field struct {
+ Name string `parser:"@Lowercase"`
+ MapKey *string `parser:"( '[' @String ']' )?"`
+}
+
+type list struct {
+ Values []value `parser:"'[' (@@)* (',' @@)* ']'"`
+}
+
+// byteSlice type for capturing byte slices
+type byteSlice []byte
+
+func (b *byteSlice) Capture(values []string) error {
+ rawStr := values[0][2:]
+ newBytes, err := hex.DecodeString(rawStr)
+ if err != nil {
+ return err
+ }
+ *b = newBytes
+ return nil
+}
+
+// boolean Type for capturing booleans, see:
+// https://github.com/alecthomas/participle#capturing-boolean-value
+type boolean bool
+
+func (b *boolean) Capture(values []string) error {
+ *b = values[0] == "true"
+ return nil
+}
+
+type isNil bool
+
+func (n *isNil) Capture(_ []string) error {
+ *n = true
+ return nil
+}
+
+type mathExprLiteral struct {
+ // If invocation is matched then error
+ Invocation *invocation `parser:"( @@"`
+ Converter *converter `parser:"| @@"`
+ Float *float64 `parser:"| @Float"`
+ Int *int64 `parser:"| @Int"`
+ Path *Path `parser:"| @@ )"`
+}
+
+func (m *mathExprLiteral) checkForCustomError() error {
+ if m.Invocation != nil {
+ return fmt.Errorf("converter names must start with an uppercase letter but got '%v'", m.Invocation.Function)
+ }
+ return nil
+}
+
+type mathValue struct {
+ Literal *mathExprLiteral `parser:"( @@"`
+ SubExpression *mathExpression `parser:"| '(' @@ ')' )"`
+}
+
+func (m *mathValue) checkForCustomError() error {
+ if m.Literal != nil {
+ return m.Literal.checkForCustomError()
+ }
+ return m.SubExpression.checkForCustomError()
+}
+
+type opMultDivValue struct {
+ Operator mathOp `parser:"@OpMultDiv"`
+ Value *mathValue `parser:"@@"`
+}
+
+func (m *opMultDivValue) checkForCustomError() error {
+ return m.Value.checkForCustomError()
+}
+
+type addSubTerm struct {
+ Left *mathValue `parser:"@@"`
+ Right []*opMultDivValue `parser:"@@*"`
+}
+
+func (m *addSubTerm) checkForCustomError() error {
+ err := m.Left.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ for _, r := range m.Right {
+ err = r.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type opAddSubTerm struct {
+ Operator mathOp `parser:"@OpAddSub"`
+ Term *addSubTerm `parser:"@@"`
+}
+
+func (m *opAddSubTerm) checkForCustomError() error {
+ return m.Term.checkForCustomError()
+}
+
+type mathExpression struct {
+ Left *addSubTerm `parser:"@@"`
+ Right []*opAddSubTerm `parser:"@@*"`
+}
+
+func (m *mathExpression) checkForCustomError() error {
+ err := m.Left.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ for _, r := range m.Right {
+ err = r.checkForCustomError()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type mathOp int
+
+const (
+ ADD mathOp = iota
+ SUB
+ MULT
+ DIV
+)
+
+var mathOpTable = map[string]mathOp{
+ "+": ADD,
+ "-": SUB,
+ "*": MULT,
+ "/": DIV,
+}
+
+func (m *mathOp) Capture(values []string) error {
+ op, ok := mathOpTable[values[0]]
+ if !ok {
+ return fmt.Errorf("'%s' is not a valid operator", values[0])
+ }
+ *m = op
+ return nil
+}
+
+func (m *mathOp) String() string {
+ switch *m {
+ case ADD:
+ return "+"
+ case SUB:
+ return "-"
+ case MULT:
+ return "*"
+ case DIV:
+ return "/"
+ default:
+ return "UNKNOWN OP!"
+ }
+}
+
+type EnumSymbol string
+
+// buildLexer constructs a SimpleLexer definition.
+// Note that the ordering of these rules matters.
+// It's in a separate function so it can be easily tested alone (see lexer_test.go).
+func buildLexer() *lexer.StatefulDefinition {
+ return lexer.MustSimple([]lexer.SimpleRule{
+ {Name: `Bytes`, Pattern: `0x[a-fA-F0-9]+`},
+ {Name: `Float`, Pattern: `[-+]?\d*\.\d+([eE][-+]?\d+)?`},
+ {Name: `Int`, Pattern: `[-+]?\d+`},
+ {Name: `String`, Pattern: `"(\\"|[^"])*"`},
+ {Name: `OpNot`, Pattern: `\b(not)\b`},
+ {Name: `OpOr`, Pattern: `\b(or)\b`},
+ {Name: `OpAnd`, Pattern: `\b(and)\b`},
+ {Name: `OpComparison`, Pattern: `==|!=|>=|<=|>|<`},
+ {Name: `OpAddSub`, Pattern: `\+|\-`},
+ {Name: `OpMultDiv`, Pattern: `\/|\*`},
+ {Name: `Boolean`, Pattern: `\b(true|false)\b`},
+ {Name: `LParen`, Pattern: `\(`},
+ {Name: `RParen`, Pattern: `\)`},
+ {Name: `Punct`, Pattern: `[,.\[\]]`},
+ {Name: `Uppercase`, Pattern: `[A-Z][A-Z0-9_]*`},
+ {Name: `Lowercase`, Pattern: `[a-z][a-z0-9_]*`},
+ {Name: "whitespace", Pattern: `\s+`},
+ })
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/math.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/math.go
new file mode 100644
index 00000000000..b4cdb13db91
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/math.go
@@ -0,0 +1,135 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+
+import (
+ "context"
+ "fmt"
+)
+
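+// Math expressions follow the usual precedence: a mathExpression is a sum of
+// addSubTerms and each addSubTerm is a product of mathValues, so `*` and `/`
+// bind more tightly than `+` and `-`.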
+func (p *Parser[K]) evaluateMathExpression(expr *mathExpression) (Getter[K], error) {
+ mainGetter, err := p.evaluateAddSubTerm(expr.Left)
+ if err != nil {
+ return nil, err
+ }
+ for _, rhs := range expr.Right {
+ getter, err := p.evaluateAddSubTerm(rhs.Term)
+ if err != nil {
+ return nil, err
+ }
+ mainGetter = attemptMathOperation(mainGetter, rhs.Operator, getter)
+ }
+
+ return mainGetter, nil
+}
+
+func (p *Parser[K]) evaluateAddSubTerm(term *addSubTerm) (Getter[K], error) {
+ mainGetter, err := p.evaluateMathValue(term.Left)
+ if err != nil {
+ return nil, err
+ }
+ for _, rhs := range term.Right {
+ getter, err := p.evaluateMathValue(rhs.Value)
+ if err != nil {
+ return nil, err
+ }
+ mainGetter = attemptMathOperation(mainGetter, rhs.Operator, getter)
+ }
+
+ return mainGetter, nil
+}
+
+func (p *Parser[K]) evaluateMathValue(val *mathValue) (Getter[K], error) {
+ switch {
+ case val.Literal != nil:
+ return p.newGetter(value{Literal: val.Literal})
+ case val.SubExpression != nil:
+ return p.evaluateMathExpression(val.SubExpression)
+ }
+
+ return nil, fmt.Errorf("unsupported mathematical value %v", val)
+}
+
+func attemptMathOperation[K any](lhs Getter[K], op mathOp, rhs Getter[K]) Getter[K] {
+ return exprGetter[K]{
+ expr: Expr[K]{
+ exprFunc: func(ctx context.Context, tCtx K) (interface{}, error) {
+ x, err := lhs.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ y, err := rhs.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ switch newX := x.(type) {
+ case int64:
+ switch newY := y.(type) {
+ case int64:
+ result, err := performOp[int64](newX, newY, op)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+ case float64:
+ result, err := performOp[float64](float64(newX), newY, op)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, fmt.Errorf("%v must be int64 or float64", y)
+ }
+ case float64:
+ switch newY := y.(type) {
+ case int64:
+ result, err := performOp[float64](newX, float64(newY), op)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+ case float64:
+ result, err := performOp[float64](newX, newY, op)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+ default:
+ return nil, fmt.Errorf("%v must be int64 or float64", y)
+ }
+ default:
+ return nil, fmt.Errorf("%v must be int64 or float64", x)
+ }
+ },
+ },
+ }
+}
+
+func performOp[N int64 | float64](x N, y N, op mathOp) (N, error) {
+ switch op {
+ case ADD:
+ return x + y, nil
+ case SUB:
+ return x - y, nil
+ case MULT:
+ return x * y, nil
+ case DIV:
+ if y == 0 {
+ return 0, fmt.Errorf("attempted to divide by 0")
+ }
+ return x / y, nil
+ }
+ return 0, fmt.Errorf("invalid operation %v", op)
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md
new file mode 100644
index 00000000000..e1666209de9
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/README.md
@@ -0,0 +1,443 @@
+# OTTL Functions
+
+The following functions are intended to be used in implementations of the OpenTelemetry Transformation Language that interact with OTel data via the collector's internal data model, [pdata](https://github.com/open-telemetry/opentelemetry-collector/tree/main/pdata). These functions may make assumptions about the types of the data returned by Paths.
+
+## Functions
+
+Functions are how components that use OTTL transform telemetry.
+
+Functions:
+- Are allowed to transform telemetry. When a Function is invoked the expectation is that the underlying telemetry is modified in some way.
+- May have side effects. Some Functions may generate telemetry and add it to the telemetry payload to be processed in this batch.
+- May return values. Although not common, Functions may return values, but they do not have to.
+
+List of available Functions:
+- [delete_key](#delete_key)
+- [delete_matching_keys](#delete_matching_keys)
+- [keep_keys](#keep_keys)
+- [limit](#limit)
+- [merge_maps](#merge_maps)
+- [replace_all_matches](#replace_all_matches)
+- [replace_all_patterns](#replace_all_patterns)
+- [replace_match](#replace_match)
+- [replace_pattern](#replace_pattern)
+- [set](#set)
+- [truncate_all](#truncate_all)
+
+## Converters
+
+Converters are functions that help translate between the OTTL grammar and the underlying pdata structure.
+They manipulate the OTTL grammar value into a form that will make working with the telemetry easier or more efficient.
+
+Converters:
+- Are pure functions. They should never change the underlying telemetry and the same inputs should always result in the same output.
+- Always return something.
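+
+Because Converters are pure, they are typically nested inside Function invocations, for example: `set(name, ConvertCase(name, "lower"))`.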
+
+List of available Converters:
+- [Concat](#concat)
+- [ConvertCase](#convertcase)
+- [Int](#int)
+- [IsMatch](#ismatch)
+- [ParseJSON](#ParseJSON)
+- [SpanID](#spanid)
+- [Split](#split)
+- [TraceID](#traceid)
+- [Substring](#substring)
+
+### Concat
+
+`Concat(values[], delimiter)`
+
+The `Concat` factory function takes a delimiter and a sequence of values and concatenates their string representation. Unsupported values, such as lists or maps that may substantially increase payload size, are not added to the resulting string.
+
+`values` is a list of values passed as arguments. It supports paths, primitive values, and byte slices (such as trace IDs or span IDs).
+
+`delimiter` is a string value that is placed between strings during concatenation. If no delimiter is desired, then simply pass an empty string.
+
+Examples:
+
+- `Concat([attributes["http.method"], attributes["http.path"]], ": ")`
+
+
+- `Concat([name, 1], " ")`
+
+
+- `Concat(["HTTP method is: ", attributes["http.method"]], "")`
+
+### ConvertCase
+
+`ConvertCase(target, toCase)`
+
+The `ConvertCase` factory function converts the `target` string into the desired case `toCase`.
+
+`target` is a string. `toCase` is a string.
+
+If the `target` is not a string or does not exist, the `ConvertCase` factory function will return `nil`.
+
+`toCase` can be:
+
+- `lower`: Converts the `target` string to lowercase (e.g. `MY_METRIC` to `my_metric`)
+- `upper`: Converts the `target` string to uppercase (e.g. `my_metric` to `MY_METRIC`)
+- `snake`: Converts the `target` string to snakecase (e.g. `myMetric` to `my_metric`)
+- `camel`: Converts the `target` string to camelcase (e.g. `my_metric` to `MyMetric`)
+
+If `toCase` is any value other than the options above, the `ConvertCase` factory function will return an error during collector startup.
+
+Examples:
+
+- `ConvertCase(metric.name, "snake")`
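+
+
+- `ConvertCase(attributes["http.method"], "lower")` (an illustrative `lower` conversion; the attribute is hypothetical)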
+
+### Int
+
+`Int(value)`
+
+The `Int` factory function converts the `value` to int type.
+
+The returned type is int64.
+
+The input `value` types:
+* float64. The fraction is discarded (truncation towards zero).
+* string. The function tries to parse an integer from the string; if parsing fails, nil is returned.
+* bool. If `value` is true, the function returns 1, otherwise 0.
+* int64. The function returns the `value` unchanged.
+
+If `value` is of any other type, or the conversion fails, nil is always returned.
+
+The `value` is either a path expression to a telemetry field to retrieve or a literal.
+
+Examples:
+
+- `Int(attributes["http.status_code"])`
+
+
+- `Int("2.0")`
+
+### IsMatch
+
+`IsMatch(target, pattern)`
+
+The `IsMatch` factory function returns true if the `target` matches the regex `pattern`.
+
+`target` is either a path expression to a telemetry field to retrieve or a literal string. `pattern` is a regexp pattern.
+The matching semantics are identical to `regexp.MatchString`.
+
+The function matches the target against the pattern, returning true if the match is successful and false otherwise.
+If target is not a string, it will be converted to one:
+
+- booleans, ints and floats will be converted using `strconv`
+- byte slices will be encoded using base64
+- OTLP Maps and Slices will be JSON encoded
+- other OTLP Values will use their canonical string representation via `AsString`
+
+If target is nil, false is always returned.
+
+Examples:
+
+- `IsMatch(attributes["http.path"], "foo")`
+
+
+- `IsMatch("string", ".*ring")`
+
+### ParseJSON
+
+`ParseJSON(target)`
+
+The `ParseJSON` factory function returns a `pcommon.Map` struct that is the result of parsing the target string as JSON.
+
+`target` is a Getter that returns a string. This string should be in JSON format.
+
+Unmarshalling is done using [jsoniter](https://github.com/json-iterator/go).
+Each JSON type is converted into a `pdata.Value` using the following map:
+
+```
+JSON boolean -> bool
+JSON number -> float64
+JSON string -> string
+JSON null -> nil
+JSON arrays -> pdata.SliceValue
+JSON objects -> map[string]any
+```
+
+Examples:
+
+- `ParseJSON("{\"attr\":true}")`
+
+
+- `ParseJSON(attributes["kubernetes"])`
+
+
+- `ParseJSON(body)`
+
+### SpanID
+
+`SpanID(bytes)`
+
+The `SpanID` factory function returns a `pdata.SpanID` struct from the given byte slice.
+
+`bytes` is a byte slice of exactly 8 bytes.
+
+Examples:
+
+- `SpanID(0x0000000000000000)`
+
+### Split
+
+`Split(target, delimiter)`
+
+The `Split` factory function separates a string by the delimiter, and returns an array of substrings.
+
+`target` is a string. `delimiter` is a string.
+
+If the `target` is not a string or does not exist, the `Split` factory function will return `nil`.
+
+Examples:
+
+- ```Split("A|B|C", "|")```
+
+### TraceID
+
+`TraceID(bytes)`
+
+The `TraceID` factory function returns a `pdata.TraceID` struct from the given byte slice.
+
+`bytes` is a byte slice of exactly 16 bytes.
+
+Examples:
+
+- `TraceID(0x00000000000000000000000000000000)`
+
+### Substring
+
+`Substring(target, start, length)`
+
+The `Substring` Converter returns the substring of `target` that starts at index `start` and extends for `length` characters.
+
+`target` is a string. `start` and `length` are `int64`.
+
+The `Substring` Converter will return `nil` if the given parameters are invalid, e.g. if `target` is not a string, or if `start`/`length` exceed the length of the `target` string.
+
+Examples:
+
+- `Substring("123456789", 0, 3)`
+
+### delete_key
+
+`delete_key(target, key)`
+
+The `delete_key` function removes a key from a `pdata.Map`.
+
+`target` is a path expression to a `pdata.Map` type field. `key` is a string that is a key in the map.
+
+The key will be deleted from the map.
+
+Examples:
+
+- `delete_key(attributes, "http.request.header.authorization")`
+
+
+- `delete_key(resource.attributes, "http.request.header.authorization")`
+
+### delete_matching_keys
+
+`delete_matching_keys(target, pattern)`
+
+The `delete_matching_keys` function removes all keys from a `pdata.Map` that match a regex pattern.
+
+`target` is a path expression to a `pdata.Map` type field. `pattern` is a regex string.
+
+All keys that match the pattern will be deleted from the map.
+
+Examples:
+
+- `delete_matching_keys(attributes, "(?i).*password.*")`
+
+
+- `delete_matching_keys(resource.attributes, "(?i).*password.*")`
+
+### keep_keys
+
+`keep_keys(target, keys[])`
+
+The `keep_keys` function removes all keys from the `pdata.Map` that do not match one of the supplied keys.
+
+`target` is a path expression to a `pdata.Map` type field. `keys` is a slice of one or more strings.
+
+The map will be changed to only contain the keys specified by the list of strings.
+
+Examples:
+
+- `keep_keys(attributes, ["http.method"])`
+
+
+- `keep_keys(resource.attributes, ["http.method", "http.route", "http.url"])`
+
+### limit
+
+`limit(target, limit, priority_keys[])`
+
+The `limit` function reduces the number of elements in a `pdata.Map` to be no greater than the limit.
+
+`target` is a path expression to a `pdata.Map` type field. `limit` is a non-negative integer.
+`priority_keys` is a list of strings of attribute keys that won't be dropped during limiting.
+
+The number of priority keys must be less than the supplied `limit`.
+
+The map will be mutated such that the number of items does not exceed the limit.
+The map is not copied or reallocated.
+
+Which items are dropped is random; provide keys in `priority_keys` to preserve required keys.
+
+Examples:
+
+- `limit(attributes, 100, [])`
+
+
+- `limit(resource.attributes, 50, ["http.host", "http.method"])`
+
+### merge_maps
+
+`merge_maps(target, source, strategy)`
+
+The `merge_maps` function merges the source map into the target map using the supplied strategy to handle conflicts.
+
+`target` is a `pdata.Map` type field. `source` is a `pdata.Map` type field. `strategy` is a string that must be one of `insert`, `update`, or `upsert`.
+
+If strategy is:
+- `insert`: Insert the value from `source` into `target` where the key does not already exist.
+- `update`: Update the entry in `target` with the value from `source` where the key does exist.
+- `upsert`: Performs insert or update. Insert the value from `source` into `target` where the key does not already exist and update the entry in `target` with the value from `source` where the key does exist.
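+
+As a worked illustration (maps shown as JSON for readability): with `target` = `{"a": 1}` and `source` = `{"a": 2, "b": 3}`, `insert` yields `{"a": 1, "b": 3}`, `update` yields `{"a": 2}`, and `upsert` yields `{"a": 2, "b": 3}`.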
+
+`merge_maps` is a special case of the [`set` function](#set). If you need to completely override `target`, use `set` instead.
+
+Examples:
+
+- `merge_maps(attributes, ParseJSON(body), "upsert")`
+
+
+- `merge_maps(attributes, ParseJSON(attributes["kubernetes"]), "update")`
+
+
+- `merge_maps(attributes, resource.attributes, "insert")`
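+
+A minimal sketch of the three strategies, using the `Range`/`Get`/`PutEmpty`/`CopyTo` calls from the vendored `MergeMaps` implementation later in this diff (the helper name `merge` is illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+// merge applies one of the three conflict strategies to each source entry.
+func merge(target, source pcommon.Map, strategy string) {
+	source.Range(func(k string, v pcommon.Value) bool {
+		_, exists := target.Get(k)
+		switch strategy {
+		case "insert":
+			if exists {
+				return true // keep the existing target value
+			}
+		case "update":
+			if !exists {
+				return true // only touch keys that already exist in target
+			}
+		}
+		// "upsert", plus the surviving insert/update cases, write the value.
+		v.CopyTo(target.PutEmpty(k))
+		return true
+	})
+}
+
+func main() {
+	target := pcommon.NewMap()
+	target.PutStr("a", "old")
+	source := pcommon.NewMap()
+	source.PutStr("a", "new")
+	source.PutStr("b", "added")
+
+	merge(target, source, "insert")
+	fmt.Println(target.AsRaw()) // map[a:old b:added]
+}
+```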
+
+### replace_all_matches
+
+`replace_all_matches(target, pattern, replacement)`
+
+The `replace_all_matches` function replaces any matching string value with the replacement string.
+
+`target` is a path expression to a `pdata.Map` type field. `pattern` is a string following [filepath.Match syntax](https://pkg.go.dev/path/filepath#Match). `replacement` is a string.
+
+Each string value in `target` that matches `pattern` will get replaced with `replacement`. Non-string values are ignored.
+
+Examples:
+
+- `replace_all_matches(attributes, "/user/*/list/*", "/user/{userId}/list/{listId}")`
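+
+A minimal sketch of the match-and-replace pass; note that the vendored implementation later in this diff compiles the pattern with `github.com/gobwas/glob`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/gobwas/glob"
+	"go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+func main() {
+	attrs := pcommon.NewMap()
+	attrs.PutStr("http.target", "/user/123/list/456")
+
+	// Core of replace_all_matches: replace whole string values that match the glob.
+	g := glob.MustCompile("/user/*/list/*")
+	attrs.Range(func(_ string, value pcommon.Value) bool {
+		if g.Match(value.Str()) {
+			value.SetStr("/user/{userId}/list/{listId}")
+		}
+		return true
+	})
+
+	fmt.Println(attrs.AsRaw()) // map[http.target:/user/{userId}/list/{listId}]
+}
+```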
+
+### replace_all_patterns
+
+`replace_all_patterns(target, mode, regex, replacement)`
+
+The `replace_all_patterns` function replaces any segments in a string value or key that match the regex pattern with the replacement string.
+
+`target` is a path expression to a `pdata.Map` type field. `regex` is a regex string indicating a segment to replace. `replacement` is a string.
+
+`mode` determines whether the match and replace will occur on the map's value or key. Valid values are `key` and `value`.
+
+If one or more sections of `target` match `regex`, they will be replaced with `replacement`.
+
+The `replacement` string can refer to matched groups using [regexp.Expand syntax](https://pkg.go.dev/regexp#Regexp.Expand).
+
+Examples:
+
+- `replace_all_patterns(attributes, "value", "/account/\\d{4}", "/account/{accountId}")`
+- `replace_all_patterns(attributes, "key", "/account/\\d{4}", "/account/{accountId}")`
+- `replace_all_patterns(attributes, "key", "^kube_([0-9A-Za-z]+_)", "k8s.$$1.")`
+
+Note that when using OTTL within the collector's configuration file, `$` must be escaped to `$$` to bypass
+environment variable substitution logic. To input a literal `$` from the configuration file, use `$$$`.
+If using OTTL outside of collector configuration, `$` should not be escaped and a literal `$` can be entered using `$$`.
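+
+A short Go sketch of the group expansion, assuming the `$$` in the collector configuration has already been resolved to `$` by the time the replacement string reaches the regex engine:
+
+```go
+package main
+
+import (
+	"fmt"
+	"regexp"
+)
+
+func main() {
+	re := regexp.MustCompile("^kube_([0-9A-Za-z]+_)")
+
+	// The configuration value "k8s.$$1." arrives here as "k8s.$1.".
+	fmt.Println(re.ReplaceAllString("kube_pod_info", "k8s.$1.")) // k8s.pod_.info
+}
+```
+
+Note that in the vendored implementation later in this diff, `key` mode uses `ReplaceAllLiteralString`, so group references are expanded only for values.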
+
+### replace_pattern
+
+`replace_pattern(target, regex, replacement)`
+
+The `replace_pattern` function allows replacing all string sections that match a regex pattern with a new value.
+
+`target` is a path expression to a telemetry field. `regex` is a regex string indicating a segment to replace. `replacement` is a string.
+
+If one or more sections of `target` match `regex`, they will be replaced with `replacement`.
+
+The `replacement` string can refer to matched groups using [regexp.Expand syntax](https://pkg.go.dev/regexp#Regexp.Expand).
+
+Examples:
+
+- `replace_pattern(resource.attributes["process.command_line"], "password\\=[^\\s]*(\\s?)", "password=***")`
+- `replace_pattern(name, "^kube_([0-9A-Za-z]+_)", "k8s.$$1.")`
+
+Note that when using OTTL within the collector's configuration file, `$` must be escaped to `$$` to bypass
+environment variable substitution logic. To input a literal `$` from the configuration file, use `$$$`.
+If using OTTL outside of collector configuration, `$` should not be escaped and a literal `$` can be entered using `$$`.
+
+### replace_match
+
+`replace_match(target, pattern, replacement)`
+
+The `replace_match` function allows replacing entire strings if they match a glob pattern.
+
+`target` is a path expression to a telemetry field. `pattern` is a string following [filepath.Match syntax](https://pkg.go.dev/path/filepath#Match). `replacement` is a string.
+
+If `target` matches `pattern` it will get replaced with `replacement`.
+
+Examples:
+
+- `replace_match(attributes["http.target"], "/user/*/list/*", "/user/{userId}/list/{listId}")`
+
+### set
+
+`set(target, value)`
+
+The `set` function allows users to set a telemetry field using a value.
+
+`target` is a path expression to a telemetry field. `value` is any value type. If `value` resolves to `nil` (e.g. it references an unset map value), no action is taken.
+
+How the underlying telemetry field is updated is decided by the path expression implementation provided by the user to `ottl.ParseStatements`.
+
+Examples:
+
+- `set(attributes["http.path"], "/foo")`
+
+
+- `set(name, attributes["http.route"])`
+
+
+- `set(trace_state["svc"], "example")`
+
+
+- `set(attributes["source"], trace_state["source"])`
+
+### truncate_all
+
+`truncate_all(target, limit)`
+
+The `truncate_all` function truncates all string values in a `pdata.Map` so that none are longer than the limit.
+
+`target` is a path expression to a `pdata.Map` type field. `limit` is a non-negative integer.
+
+The map will be mutated such that the number of characters in all string values is less than or equal to the limit. Non-string values are ignored.
+
+Examples:
+
+- `truncate_all(attributes, 100)`
+
+
+- `truncate_all(resource.attributes, 50)`
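+
+A minimal sketch of the truncation pass; the vendored implementation later in this diff slices bytes rather than runes, so a multi-byte UTF-8 character at the boundary can be split:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/collector/pdata/pcommon"
+)
+
+func main() {
+	attrs := pcommon.NewMap()
+	attrs.PutStr("note", "this value is too long")
+
+	// Core of truncate_all: shorten every string value to at most limit bytes.
+	const limit = 10
+	attrs.Range(func(_ string, value pcommon.Value) bool {
+		if s := value.Str(); int64(len(s)) > limit {
+			value.SetStr(s[:limit])
+		}
+		return true
+	})
+
+	fmt.Println(attrs.AsRaw()) // map[note:this value]
+}
+```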
+
+## Function syntax
+
+Functions should be named and formatted according to the following standards.
+- Function names MUST start with a verb unless it is a Factory that creates a new type.
+- Factory functions MUST be UpperCamelCase.
+- Function names that contain multiple words MUST separate those words with `_`.
+- Functions that interact with multiple items MUST have plurality in the name. Ex: `truncate_all`, `keep_keys`, `replace_all_matches`.
+- Functions that interact with a single item MUST NOT have plurality in the name. If a function would interact with multiple items due to a condition, like `where`, it is still considered singular. Ex: `set`, `delete`, `replace_match`.
+- Functions that change a specific target MUST set the target as the first parameter.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_concat.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_concat.go
new file mode 100644
index 00000000000..f674ef31013
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_concat.go
@@ -0,0 +1,54 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func Concat[K any](vals []ottl.Getter[K], delimiter string) (ottl.ExprFunc[K], error) {
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ builder := strings.Builder{}
+ for i, rv := range vals {
+ val, err := rv.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ switch v := val.(type) {
+ case string:
+ builder.WriteString(v)
+ case []byte:
+ builder.WriteString(fmt.Sprintf("%x", v))
+ case int64:
+ builder.WriteString(fmt.Sprint(v))
+ case float64:
+ builder.WriteString(fmt.Sprint(v))
+ case bool:
+ builder.WriteString(fmt.Sprint(v))
+ case nil:
+ builder.WriteString(fmt.Sprint(v))
+ }
+
+ if i != len(vals)-1 {
+ builder.WriteString(delimiter)
+ }
+ }
+ return builder.String(), nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_case.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_case.go
new file mode 100644
index 00000000000..5cc79ac2783
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_convert_case.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/iancoleman/strcase"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func ConvertCase[K any](target ottl.Getter[K], toCase string) (ottl.ExprFunc[K], error) {
+ if toCase != "lower" && toCase != "upper" && toCase != "snake" && toCase != "camel" {
+ return nil, fmt.Errorf("invalid case: %s, allowed cases are: lower, upper, snake, camel", toCase)
+ }
+
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if valStr, ok := val.(string); ok {
+
+ if valStr == "" {
+ return valStr, nil
+ }
+
+ switch toCase {
+ // Convert string to lowercase (SOME_NAME -> some_name)
+ case "lower":
+ return strings.ToLower(valStr), nil
+
+ // Convert string to uppercase (some_name -> SOME_NAME)
+ case "upper":
+ return strings.ToUpper(valStr), nil
+
+ // Convert string to snake case (someName -> some_name)
+ case "snake":
+ return strcase.ToSnake(valStr), nil
+
+ // Convert string to camel case (some_name -> SomeName)
+ case "camel":
+ return strcase.ToCamel(valStr), nil
+
+ default:
+ return nil, fmt.Errorf("error handling unexpected case: %s", toCase)
+ }
+ }
+
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_delete_key.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_delete_key.go
new file mode 100644
index 00000000000..8013c9a910e
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_delete_key.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func DeleteKey[K any](target ottl.Getter[K], key string) (ottl.ExprFunc[K], error) {
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if val == nil {
+ return nil, nil
+ }
+
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.Remove(key)
+ }
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_delete_matching_keys.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_delete_matching_keys.go
new file mode 100644
index 00000000000..0f0b84b2cde
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_delete_matching_keys.go
@@ -0,0 +1,48 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func DeleteMatchingKeys[K any](target ottl.Getter[K], pattern string) (ottl.ExprFunc[K], error) {
+ compiledPattern, err := regexp.Compile(pattern)
+ if err != nil {
+ return nil, fmt.Errorf("the regex pattern supplied to delete_matching_keys is not a valid pattern: %w", err)
+ }
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if val == nil {
+ return nil, nil
+ }
+
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.RemoveIf(func(key string, _ pcommon.Value) bool {
+ return compiledPattern.MatchString(key)
+ })
+ }
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_int.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_int.go
new file mode 100644
index 00000000000..b53beeac52f
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_int.go
@@ -0,0 +1,51 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "strconv"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func Int[K any](target ottl.Getter[K]) (ottl.ExprFunc[K], error) {
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ value, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ switch value := value.(type) {
+ case int64:
+ return value, nil
+ case string:
+ intValue, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return nil, nil
+ }
+
+ return intValue, nil
+ case float64:
+ return int64(value), nil
+ case bool:
+ if value {
+ return int64(1), nil
+ }
+ return int64(0), nil
+ default:
+ return nil, nil
+ }
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_match.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_match.go
new file mode 100644
index 00000000000..c46bd308477
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_is_match.go
@@ -0,0 +1,74 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+
+ jsoniter "github.com/json-iterator/go"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func IsMatch[K any](target ottl.Getter[K], pattern string) (ottl.ExprFunc[K], error) {
+ compiledPattern, err := regexp.Compile(pattern)
+ if err != nil {
+ return nil, fmt.Errorf("the pattern supplied to IsMatch is not a valid regexp pattern: %w", err)
+ }
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if val == nil {
+ return false, nil
+ }
+
+ switch v := val.(type) {
+ case string:
+ return compiledPattern.MatchString(v), nil
+ case bool:
+ return compiledPattern.MatchString(strconv.FormatBool(v)), nil
+ case int64:
+ return compiledPattern.MatchString(strconv.FormatInt(v, 10)), nil
+ case float64:
+ return compiledPattern.MatchString(strconv.FormatFloat(v, 'f', -1, 64)), nil
+ case []byte:
+ return compiledPattern.MatchString(base64.StdEncoding.EncodeToString(v)), nil
+ case pcommon.Map:
+ result, err := jsoniter.MarshalToString(v.AsRaw())
+ if err != nil {
+ return nil, err
+ }
+ return compiledPattern.MatchString(result), nil
+ case pcommon.Slice:
+ result, err := jsoniter.MarshalToString(v.AsRaw())
+ if err != nil {
+ return nil, err
+ }
+ return compiledPattern.MatchString(result), nil
+ case pcommon.Value:
+ return compiledPattern.MatchString(v.AsString()), nil
+ default:
+ return nil, errors.New("unsupported type")
+ }
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go
new file mode 100644
index 00000000000..1ade013ae02
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_keep_keys.go
@@ -0,0 +1,51 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func KeepKeys[K any](target ottl.GetSetter[K], keys []string) (ottl.ExprFunc[K], error) {
+ keySet := make(map[string]struct{}, len(keys))
+ for _, key := range keys {
+ keySet[key] = struct{}{}
+ }
+
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if val == nil {
+ return nil, nil
+ }
+
+ if attrs, ok := val.(pcommon.Map); ok {
+ attrs.RemoveIf(func(key string, value pcommon.Value) bool {
+ _, ok := keySet[key]
+ return !ok
+ })
+ if attrs.Len() == 0 {
+ attrs.Clear()
+ }
+ }
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go
new file mode 100644
index 00000000000..baa2478f020
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_limit.go
@@ -0,0 +1,80 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func Limit[K any](target ottl.GetSetter[K], limit int64, priorityKeys []string) (ottl.ExprFunc[K], error) {
+ if limit < 0 {
+ return nil, fmt.Errorf("invalid limit for limit function, %d cannot be negative", limit)
+ }
+ if limit < int64(len(priorityKeys)) {
+ return nil, fmt.Errorf(
+ "invalid limit for limit function, %d cannot be less than number of priority attributes %d",
+ limit, len(priorityKeys),
+ )
+ }
+ keep := make(map[string]struct{}, len(priorityKeys))
+ for _, key := range priorityKeys {
+ keep[key] = struct{}{}
+ }
+
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if val == nil {
+ return nil, nil
+ }
+
+ attrs, ok := val.(pcommon.Map)
+ if !ok {
+ return nil, nil
+ }
+
+ if int64(attrs.Len()) <= limit {
+ return nil, nil
+ }
+
+ count := int64(0)
+ for _, key := range priorityKeys {
+ if _, ok := attrs.Get(key); ok {
+ count++
+ }
+ }
+
+ attrs.RemoveIf(func(key string, value pcommon.Value) bool {
+ if _, ok := keep[key]; ok {
+ return false
+ }
+ if count < limit {
+ count++
+ return false
+ }
+ return true
+ })
+ // TODO: Write log when limiting is performed
+ // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/9730
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_merge_maps.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_merge_maps.go
new file mode 100644
index 00000000000..85b5eb78823
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_merge_maps.go
@@ -0,0 +1,85 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+const (
+ INSERT = "insert"
+ UPDATE = "update"
+ UPSERT = "upsert"
+)
+
+// MergeMaps function merges the source map into the target map using the supplied strategy to handle conflicts.
+// Strategy definitions:
+//
+// insert: Insert the value from `source` into `target` where the key does not already exist.
+// update: Update the entry in `target` with the value from `source` where the key does exist
+// upsert: Performs insert or update. Insert the value from `source` into `target` where the key does not already exist and update the entry in `target` with the value from `source` where the key does exist.
+func MergeMaps[K any](target ottl.Getter[K], source ottl.Getter[K], strategy string) (ottl.ExprFunc[K], error) {
+ if strategy != INSERT && strategy != UPDATE && strategy != UPSERT {
+ return nil, fmt.Errorf("invalid value for strategy, %v, must be 'insert', 'update' or 'upsert'", strategy)
+ }
+
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ targetVal, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ if targetMap, ok := targetVal.(pcommon.Map); ok {
+ val, err := source.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ if valueMap, ok := val.(pcommon.Map); ok {
+ switch strategy {
+ case INSERT:
+ valueMap.Range(func(k string, v pcommon.Value) bool {
+ if _, ok := targetMap.Get(k); !ok {
+ tv := targetMap.PutEmpty(k)
+ v.CopyTo(tv)
+ }
+ return true
+ })
+ case UPDATE:
+ valueMap.Range(func(k string, v pcommon.Value) bool {
+ if tv, ok := targetMap.Get(k); ok {
+ v.CopyTo(tv)
+ }
+ return true
+ })
+ case UPSERT:
+ valueMap.Range(func(k string, v pcommon.Value) bool {
+ tv := targetMap.PutEmpty(k)
+ v.CopyTo(tv)
+ return true
+ })
+ default:
+ return nil, fmt.Errorf("unknown strategy, %v", strategy)
+ }
+ }
+ }
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go
new file mode 100644
index 00000000000..bfb03506fc5
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_parse_json.go
@@ -0,0 +1,55 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ jsoniter "github.com/json-iterator/go"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+// ParseJSON factory function returns a `pcommon.Map` struct that is a result of parsing the target string as JSON
+// Each JSON type is converted into a `pdata.Value` using the following map:
+//
+// JSON boolean -> bool
+// JSON number -> float64
+// JSON string -> string
+// JSON null -> nil
+// JSON arrays -> pdata.SliceValue
+// JSON objects -> map[string]any
+func ParseJSON[K any](target ottl.Getter[K]) (ottl.ExprFunc[K], error) {
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ targetVal, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ jsonStr, ok := targetVal.(string)
+ if !ok {
+ return nil, fmt.Errorf("target must be a string but got %T", targetVal)
+ }
+ var parsedValue map[string]interface{}
+ err = jsoniter.UnmarshalFromString(jsonStr, &parsedValue)
+ if err != nil {
+ return nil, err
+ }
+ result := pcommon.NewMap()
+ err = result.FromRaw(parsedValue)
+ return result, err
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_all_matches.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_all_matches.go
new file mode 100644
index 00000000000..141794c662c
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_all_matches.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/gobwas/glob"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func ReplaceAllMatches[K any](target ottl.GetSetter[K], pattern string, replacement string) (ottl.ExprFunc[K], error) {
+ glob, err := glob.Compile(pattern)
+ if err != nil {
+ return nil, fmt.Errorf("the pattern supplied to replace_match is not a valid pattern: %w", err)
+ }
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if val == nil {
+ return nil, nil
+ }
+ attrs, ok := val.(pcommon.Map)
+ if !ok {
+ return nil, nil
+ }
+ updated := pcommon.NewMap()
+ attrs.CopyTo(updated)
+ updated.Range(func(key string, value pcommon.Value) bool {
+ if glob.Match(value.Str()) {
+ value.SetStr(replacement)
+ }
+ return true
+ })
+ err = target.Set(ctx, tCtx, updated)
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_all_patterns.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_all_patterns.go
new file mode 100644
index 00000000000..d52053f33c2
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_all_patterns.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+const (
+ modeKey = "key"
+ modeValue = "value"
+)
+
+func ReplaceAllPatterns[K any](target ottl.GetSetter[K], mode string, regexPattern string, replacement string) (ottl.ExprFunc[K], error) {
+ compiledPattern, err := regexp.Compile(regexPattern)
+ if err != nil {
+ return nil, fmt.Errorf("the regex pattern supplied to replace_all_patterns is not a valid pattern: %w", err)
+ }
+ if mode != modeValue && mode != modeKey {
+ return nil, fmt.Errorf("invalid mode %v, must be either 'key' or 'value'", mode)
+ }
+
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if val == nil {
+ return nil, nil
+ }
+ attrs, ok := val.(pcommon.Map)
+ if !ok {
+ return nil, nil
+ }
+ updated := pcommon.NewMap()
+ updated.EnsureCapacity(attrs.Len())
+ attrs.Range(func(key string, originalValue pcommon.Value) bool {
+ switch mode {
+ case modeValue:
+ if compiledPattern.MatchString(originalValue.Str()) {
+ updatedString := compiledPattern.ReplaceAllString(originalValue.Str(), replacement)
+ updated.PutStr(key, updatedString)
+ } else {
+ updated.PutStr(key, originalValue.Str())
+ }
+ case modeKey:
+ if compiledPattern.MatchString(key) {
+ updatedKey := compiledPattern.ReplaceAllLiteralString(key, replacement)
+ updated.PutStr(updatedKey, originalValue.Str())
+ } else {
+ updated.PutStr(key, originalValue.Str())
+ }
+ }
+ return true
+ })
+ err = target.Set(ctx, tCtx, updated)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_match.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_match.go
new file mode 100644
index 00000000000..db5796a3245
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_match.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/gobwas/glob"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func ReplaceMatch[K any](target ottl.GetSetter[K], pattern string, replacement string) (ottl.ExprFunc[K], error) {
+ glob, err := glob.Compile(pattern)
+ if err != nil {
+ return nil, fmt.Errorf("the pattern supplied to replace_match is not a valid pattern: %w", err)
+ }
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if val == nil {
+ return nil, nil
+ }
+ if valStr, ok := val.(string); ok {
+ if glob.Match(valStr) {
+ err = target.Set(ctx, tCtx, replacement)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_pattern.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_pattern.go
new file mode 100644
index 00000000000..57fdb3e0b4e
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_replace_pattern.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func ReplacePattern[K any](target ottl.GetSetter[K], regexPattern string, replacement string) (ottl.ExprFunc[K], error) {
+ compiledPattern, err := regexp.Compile(regexPattern)
+ if err != nil {
+ return nil, fmt.Errorf("the regex pattern supplied to replace_pattern is not a valid pattern: %w", err)
+ }
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ originalVal, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if originalVal == nil {
+ return nil, nil
+ }
+ if originalValStr, ok := originalVal.(string); ok {
+ if compiledPattern.MatchString(originalValStr) {
+ updatedStr := compiledPattern.ReplaceAllString(originalValStr, replacement)
+ err = target.Set(ctx, tCtx, updatedStr)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_set.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_set.go
new file mode 100644
index 00000000000..617cd39383c
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_set.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func Set[K any](target ottl.Setter[K], value ottl.Getter[K]) (ottl.ExprFunc[K], error) {
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := value.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+
+ // No fields currently support `null` as a valid type.
+ if val != nil {
+ err = target.Set(ctx, tCtx, val)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_span_id.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_span_id.go
new file mode 100644
index 00000000000..1ec1febe88b
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_span_id.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "errors"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func SpanID[K any](bytes []byte) (ottl.ExprFunc[K], error) {
+ if len(bytes) != 8 {
+ return nil, errors.New("span ids must be 8 bytes")
+ }
+ var idArr [8]byte
+ copy(idArr[:8], bytes)
+ id := pcommon.SpanID(idArr)
+ return func(context.Context, K) (interface{}, error) {
+ return id, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_split.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_split.go
new file mode 100644
index 00000000000..fee994288d9
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_split.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "strings"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func Split[K any](target ottl.Getter[K], delimiter string) (ottl.ExprFunc[K], error) {
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if val != nil {
+ if valStr, ok := val.(string); ok {
+ return strings.Split(valStr, delimiter), nil
+ }
+ }
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_substring.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_substring.go
new file mode 100644
index 00000000000..5bdaf81cbc3
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_substring.go
@@ -0,0 +1,45 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func Substring[K any](target ottl.Getter[K], start int64, length int64) (ottl.ExprFunc[K], error) {
+ if start < 0 {
+ return nil, fmt.Errorf("invalid start for substring function, %d cannot be negative", start)
+ }
+ if length <= 0 {
+ return nil, fmt.Errorf("invalid length for substring function, %d cannot be negative or zero", length)
+ }
+
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if valStr, ok := val.(string); ok {
+ if (start + length) > int64(len(valStr)) {
+ return nil, fmt.Errorf("invalid range for substring function, %d cannot be greater than the length of target string(%d)", start+length, len(valStr))
+ }
+ return valStr[start : start+length], nil
+ }
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_trace_id.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_trace_id.go
new file mode 100644
index 00000000000..e09438a620a
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_trace_id.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "errors"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func TraceID[K any](bytes []byte) (ottl.ExprFunc[K], error) {
+ if len(bytes) != 16 {
+ return nil, errors.New("traces ids must be 16 bytes")
+ }
+ var idArr [16]byte
+ copy(idArr[:16], bytes)
+ id := pcommon.TraceID(idArr)
+ return func(context.Context, K) (interface{}, error) {
+ return id, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_all.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_all.go
new file mode 100644
index 00000000000..6ef7f246bf0
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs/func_truncate_all.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pcommon"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func TruncateAll[K any](target ottl.GetSetter[K], limit int64) (ottl.ExprFunc[K], error) {
+ if limit < 0 {
+ return nil, fmt.Errorf("invalid limit for truncate_all function, %d cannot be negative", limit)
+ }
+ return func(ctx context.Context, tCtx K) (interface{}, error) {
+ if limit < 0 {
+ return nil, nil
+ }
+
+ val, err := target.Get(ctx, tCtx)
+ if err != nil {
+ return nil, err
+ }
+ if val == nil {
+ return nil, nil
+ }
+
+ attrs, ok := val.(pcommon.Map)
+ if !ok {
+ return nil, nil
+ }
+
+ updated := pcommon.NewMap()
+ attrs.CopyTo(updated)
+ updated.Range(func(key string, value pcommon.Value) bool {
+ stringVal := value.Str()
+ if int64(len(stringVal)) > limit {
+ value.SetStr(stringVal[:limit])
+ }
+ return true
+ })
+ err = target.Set(ctx, tCtx, updated)
+ if err != nil {
+ return nil, err
+ }
+ // TODO: Write log when truncation is performed
+ // https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/9730
+ return nil, nil
+ }, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go
new file mode 100644
index 00000000000..afc98dc9d4b
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/parser.go
@@ -0,0 +1,235 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ottl // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/alecthomas/participle/v2"
+ "go.opentelemetry.io/collector/component"
+ "go.uber.org/zap"
+)
+
+type ErrorMode string
+
+const (
+ IgnoreError ErrorMode = "ignore"
+ PropagateError ErrorMode = "propagate"
+)
+
+func (e *ErrorMode) UnmarshalText(text []byte) error {
+ str := ErrorMode(strings.ToLower(string(text)))
+ switch str {
+ case IgnoreError, PropagateError:
+ *e = str
+ return nil
+ default:
+ return fmt.Errorf("unknown error mode %v", str)
+ }
+}
+
+type Parser[K any] struct {
+ functions map[string]interface{}
+ pathParser PathExpressionParser[K]
+ enumParser EnumParser
+ telemetrySettings component.TelemetrySettings
+}
+
+// Statement holds a top level Statement for processing telemetry data. A Statement is a combination of a function
+// invocation and the boolean expression to match telemetry for invoking the function.
+type Statement[K any] struct {
+ function Expr[K]
+ condition BoolExpr[K]
+ origText string
+}
+
+// Execute is a function that will execute the statement's function if the statement's condition is met.
+// Returns true if the function was run, returns false otherwise.
+// If the statement contains no condition, the function will run and true will be returned.
+// In addition, the function's return value is always returned.
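+//
+// For example (variable names illustrative):
+//
+//	result, ran, err := statement.Execute(ctx, tCtx)
+//	// ran reports whether the condition matched and the function was invoked.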
+func (s *Statement[K]) Execute(ctx context.Context, tCtx K) (any, bool, error) {
+ condition, err := s.condition.Eval(ctx, tCtx)
+ if err != nil {
+ return nil, false, err
+ }
+ var result any
+ if condition {
+ result, err = s.function.Eval(ctx, tCtx)
+ if err != nil {
+ return nil, true, err
+ }
+ }
+ return result, condition, nil
+}
+
+func NewParser[K any](
+ functions map[string]interface{},
+ pathParser PathExpressionParser[K],
+ settings component.TelemetrySettings,
+ options ...Option[K],
+) (Parser[K], error) {
+ if settings.Logger == nil {
+ return Parser[K]{}, fmt.Errorf("logger cannot be nil")
+ }
+ p := Parser[K]{
+ functions: functions,
+ pathParser: pathParser,
+ enumParser: func(*EnumSymbol) (*Enum, error) {
+ return nil, fmt.Errorf("enums aren't supported for the current context: %T", new(K))
+ },
+ telemetrySettings: settings,
+ }
+ for _, opt := range options {
+ opt(&p)
+ }
+ return p, nil
+}
+
+type Option[K any] func(*Parser[K])
+
+func WithEnumParser[K any](parser EnumParser) Option[K] {
+ return func(p *Parser[K]) {
+ p.enumParser = parser
+ }
+}
+
+func (p *Parser[K]) ParseStatements(statements []string) ([]*Statement[K], error) {
+ var parsedStatements []*Statement[K]
+ for _, statement := range statements {
+ ps, err := p.ParseStatement(statement)
+ if err != nil {
+ return nil, err
+ }
+ parsedStatements = append(parsedStatements, ps)
+ }
+ return parsedStatements, nil
+}
+
+func (p *Parser[K]) ParseStatement(statement string) (*Statement[K], error) {
+ parsed, err := parseStatement(statement)
+ if err != nil {
+ return nil, err
+ }
+ function, err := p.newFunctionCall(parsed.Invocation)
+ if err != nil {
+ return nil, err
+ }
+ expression, err := p.newBoolExpr(parsed.WhereClause)
+ if err != nil {
+ return nil, err
+ }
+ return &Statement[K]{
+ function: function,
+ condition: expression,
+ origText: statement,
+ }, nil
+}
+
+var parser = newParser[parsedStatement]()
+
+func parseStatement(raw string) (*parsedStatement, error) {
+ parsed, err := parser.ParseString("", raw)
+
+ if err != nil {
+ return nil, fmt.Errorf("unable to parse OTTL statement: %w", err)
+ }
+ err = parsed.checkForCustomError()
+ if err != nil {
+ return nil, err
+ }
+
+ return parsed, nil
+}
+
+// newParser returns a parser that can be used to read a string into a parsedStatement. An error will be returned if the string
+// is not formatted for the DSL.
+func newParser[G any]() *participle.Parser[G] {
+ lex := buildLexer()
+ parser, err := participle.Build[G](
+ participle.Lexer(lex),
+ participle.Unquote("String"),
+ participle.Elide("whitespace"),
+ participle.UseLookahead(participle.MaxLookahead), // Allows negative lookahead to work properly in 'value' for 'mathExprLiteral'.
+ )
+ if err != nil {
+ panic("Unable to initialize parser; this is a programming error in the transformprocessor:" + err.Error())
+ }
+ return parser
+}
+
+// Statements represents a list of statements that will be executed sequentially for a TransformContext.
+type Statements[K any] struct {
+ statements []*Statement[K]
+ errorMode ErrorMode
+ telemetrySettings component.TelemetrySettings
+}
+
+type StatementsOption[K any] func(*Statements[K])
+
+func WithErrorMode[K any](errorMode ErrorMode) StatementsOption[K] {
+ return func(s *Statements[K]) {
+ s.errorMode = errorMode
+ }
+}
+
+func NewStatements[K any](statements []*Statement[K], telemetrySettings component.TelemetrySettings, options ...StatementsOption[K]) Statements[K] {
+ s := Statements[K]{
+ statements: statements,
+ telemetrySettings: telemetrySettings,
+ }
+ for _, op := range options {
+ op(&s)
+ }
+ return s
+}
+
+// Execute is a function that will execute all the statements in the Statements list.
+func (s *Statements[K]) Execute(ctx context.Context, tCtx K) error {
+ for _, statement := range s.statements {
+ _, _, err := statement.Execute(ctx, tCtx)
+ if err != nil {
+ if s.errorMode == PropagateError {
+ err = fmt.Errorf("failed to execute statement: %v, %w", statement.origText, err)
+ return err
+ }
+ s.telemetrySettings.Logger.Warn("failed to execute statement", zap.Error(err), zap.String("statement", statement.origText))
+ }
+ }
+ return nil
+}
+
+// Eval returns true if any statement's condition is true and returns false otherwise.
+// Does not execute the statement's function.
+// When errorMode is `propagate`, errors cause the evaluation to be false and an error is returned.
+// When errorMode is `ignore`, errors cause evaluation to continue to the next statement.
+func (s *Statements[K]) Eval(ctx context.Context, tCtx K) (bool, error) {
+ for _, statement := range s.statements {
+ match, err := statement.condition.Eval(ctx, tCtx)
+ if err != nil {
+ if s.errorMode == PropagateError {
+ err = fmt.Errorf("failed to eval statement: %v, %w", statement.origText, err)
+ return false, err
+ }
+ s.telemetrySettings.Logger.Warn("failed to eval statement", zap.Error(err), zap.String("statement", statement.origText))
+ continue
+ }
+ if match {
+ return true, nil
+ }
+ }
+ return false, nil
+}
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go
new file mode 100644
index 00000000000..03376ba953e
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil/hash.go
@@ -0,0 +1,170 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pdatautil // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil"
+
+import (
+ "encoding/binary"
+ "hash"
+ "math"
+ "sort"
+ "sync"
+
+ "github.com/cespare/xxhash/v2"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+)
+
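+// Single-byte tags mixed into the hash stream. They separate value types and
+// mark container boundaries so that structurally different inputs cannot
+// collide into the same byte stream.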
+var (
+ extraByte = []byte{'\xf3'}
+ keyPrefix = []byte{'\xf4'}
+ valEmpty = []byte{'\xf5'}
+ valBytesPrefix = []byte{'\xf6'}
+ valStrPrefix = []byte{'\xf7'}
+ valBoolTrue = []byte{'\xf8'}
+ valBoolFalse = []byte{'\xf9'}
+ valIntPrefix = []byte{'\xfa'}
+ valDoublePrefix = []byte{'\xfb'}
+ valMapPrefix = []byte{'\xfc'}
+ valMapSuffix = []byte{'\xfd'}
+ valSlicePrefix = []byte{'\xfe'}
+ valSliceSuffix = []byte{'\xff'}
+)
+
+type hashWriter struct {
+ h hash.Hash
+ strBuf []byte
+ keysBuf []string
+ sumHash []byte
+ numBuf []byte
+}
+
+func newHashWriter() *hashWriter {
+ return &hashWriter{
+ h: xxhash.New(),
+ strBuf: make([]byte, 0, 128),
+ keysBuf: make([]string, 0, 16),
+ sumHash: make([]byte, 0, 16),
+ numBuf: make([]byte, 8),
+ }
+}
+
+var hashWriterPool = &sync.Pool{
+ New: func() interface{} { return newHashWriter() },
+}
+
+// MapHash returns a hash for the provided map.
+// Maps with the same underlying key/value pairs in different order produce the same deterministic hash value.
+func MapHash(m pcommon.Map) [16]byte {
+ hw := hashWriterPool.Get().(*hashWriter)
+ defer hashWriterPool.Put(hw)
+ hw.h.Reset()
+ hw.writeMapHash(m)
+ return hw.hashSum128()
+}
+
+// ValueHash returns a hash for the provided pcommon.Value.
+func ValueHash(v pcommon.Value) [16]byte {
+ hw := hashWriterPool.Get().(*hashWriter)
+ defer hashWriterPool.Put(hw)
+ hw.h.Reset()
+ hw.writeValueHash(v)
+ return hw.hashSum128()
+}
+
+func (hw *hashWriter) writeMapHash(m pcommon.Map) {
+ // For each recursive call into this function we want to preserve the previous buffer state
+ // while also adding new keys to the buffer. nextIndex is the index of the first new key
+ // added to the buffer for this call of the function.
+ // This also works for the first non-recursive call of this function because the buffer is always empty
+	// on the first call due to it being cleared of any added keys at the end of the function.
+ nextIndex := len(hw.keysBuf)
+
+ m.Range(func(k string, v pcommon.Value) bool {
+ hw.keysBuf = append(hw.keysBuf, k)
+ return true
+ })
+
+ // Get only the newly added keys from the buffer by slicing the buffer from nextIndex to the end
+ workingKeySet := hw.keysBuf[nextIndex:]
+
+ sort.Strings(workingKeySet)
+ for _, k := range workingKeySet {
+ v, _ := m.Get(k)
+ hw.strBuf = hw.strBuf[:0]
+ hw.strBuf = append(hw.strBuf, keyPrefix...)
+ hw.strBuf = append(hw.strBuf, k...)
+ hw.h.Write(hw.strBuf)
+ hw.writeValueHash(v)
+ }
+
+ // Remove all keys that were added to the buffer during this call of the function
+ hw.keysBuf = hw.keysBuf[:nextIndex]
+}
+
+func (hw *hashWriter) writeSliceHash(sl pcommon.Slice) {
+ for i := 0; i < sl.Len(); i++ {
+ hw.writeValueHash(sl.At(i))
+ }
+}
+
+func (hw *hashWriter) writeValueHash(v pcommon.Value) {
+ switch v.Type() {
+ case pcommon.ValueTypeStr:
+ hw.strBuf = hw.strBuf[:0]
+ hw.strBuf = append(hw.strBuf, valStrPrefix...)
+ hw.strBuf = append(hw.strBuf, v.Str()...)
+ hw.h.Write(hw.strBuf)
+ case pcommon.ValueTypeBool:
+ if v.Bool() {
+ hw.h.Write(valBoolTrue)
+ } else {
+ hw.h.Write(valBoolFalse)
+ }
+ case pcommon.ValueTypeInt:
+ hw.h.Write(valIntPrefix)
+ binary.LittleEndian.PutUint64(hw.numBuf, uint64(v.Int()))
+ hw.h.Write(hw.numBuf)
+ case pcommon.ValueTypeDouble:
+ hw.h.Write(valDoublePrefix)
+ binary.LittleEndian.PutUint64(hw.numBuf, math.Float64bits(v.Double()))
+ hw.h.Write(hw.numBuf)
+ case pcommon.ValueTypeMap:
+ hw.h.Write(valMapPrefix)
+ hw.writeMapHash(v.Map())
+ hw.h.Write(valMapSuffix)
+ case pcommon.ValueTypeSlice:
+ hw.h.Write(valSlicePrefix)
+ hw.writeSliceHash(v.Slice())
+ hw.h.Write(valSliceSuffix)
+ case pcommon.ValueTypeBytes:
+ hw.h.Write(valBytesPrefix)
+ hw.h.Write(v.Bytes().AsRaw())
+ case pcommon.ValueTypeEmpty:
+ hw.h.Write(valEmpty)
+ }
+}
+
+// hashSum128 returns a [16]byte hash sum.
+func (hw *hashWriter) hashSum128() [16]byte {
+ b := hw.sumHash[:0]
+ b = hw.h.Sum(b)
+
+ // Append an extra byte to generate another part of the hash sum
+ _, _ = hw.h.Write(extraByte)
+ b = hw.h.Sum(b)
+
+ res := [16]byte{}
+ copy(res[:], b)
+ return res
+}
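+
+// Illustrative sketch (not part of the upstream file): MapHash is insensitive
+// to key insertion order, so these two maps produce identical hashes
+// (pcommon.Map's PutStr setter is assumed here):
+//
+//	m1 := pcommon.NewMap()
+//	m1.PutStr("a", "1")
+//	m1.PutStr("b", "2")
+//
+//	m2 := pcommon.NewMap()
+//	m2.PutStr("b", "2")
+//	m2.PutStr("a", "1")
+//
+//	equal := MapHash(m1) == MapHash(m2) // true: [16]byte values compare with ==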
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/LICENSE b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/LICENSE
new file mode 100644
index 00000000000..261eeb9e9f8
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/Makefile b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/Makefile
new file mode 100644
index 00000000000..ded7a36092d
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/Makefile
@@ -0,0 +1 @@
+include ../../Makefile.Common
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md
new file mode 100644
index 00000000000..5430e58f467
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/README.md
@@ -0,0 +1,390 @@
+# Filter Processor
+
+| Status | |
+| ------------------------ | --------------------- |
+| Stability | [alpha] |
+| Supported pipeline types | metrics, logs, traces |
+| Distributions | [core], [contrib] |
+
+The filter processor can be configured to include or exclude:
+
+- Logs, based on OTTL conditions or resource attributes using the `strict` or `regexp` match types
+- Metrics, based on OTTL conditions or metric name in the case of the `strict` or `regexp` match types,
+  or based on other metric attributes in the case of the `expr` match type.
+  Please refer to [config.go](./config.go) for the config spec.
+- Data points, based on OTTL conditions
+- Spans, based on OTTL conditions or span names and resource attributes, all with full regex support
+- Span events, based on OTTL conditions
+
+For OTTL conditions configuration see [OTTL](#ottl). For all other options, continue reading.
+
+It takes a pipeline type, of which `logs`, `metrics`, and `traces` are supported, followed
+by an action:
+
+- `include`: Any names NOT matching filters are excluded from the remainder of the pipeline
+- `exclude`: Any names matching filters are excluded from the remainder of the pipeline
+
+For the actions the following parameters are required:
+
+For logs:
+
+- `match_type`: `strict`|`regexp`
+- `resource_attributes`: ResourceAttributes defines a list of possible resource
+ attributes to match logs against.
+ A match occurs if any resource attribute matches all expressions in this given list.
+- `record_attributes`: RecordAttributes defines a list of possible record
+ attributes to match logs against.
+ A match occurs if any record attribute matches all expressions in this given list.
+- `severity_texts`: SeverityTexts defines a list of possible severity texts to match the logs against.
+ A match occurs if the record matches any expression in this given list.
+- `bodies`: Bodies defines a list of possible log bodies to match the logs against.
+ A match occurs if the record matches any expression in this given list.
+- `severity_number`: SeverityNumber defines how to match a record based on its SeverityNumber.
+ The following can be configured for matching a log record's SeverityNumber:
+  - `min`: Min defines the minimum severity a log record must have to match.
+    e.g. if this is "WARN", all log records with "WARN" severity and above (WARN[2-4], ERROR[2-4], FATAL[2-4]) are matched.
+    The list of valid severities that may be used for this option can be found [here](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#displaying-severity). You may use either the numerical "SeverityNumber" or the "Short Name".
+  - `match_undefined`: MatchUndefinedSeverity defines whether logs with undefined severity should also match when using the `min` matching option.
+    By default, this is `false`.
+
+For metrics:
+
+- `match_type`: `strict`|`regexp`|`expr`
+- `metric_names`: (only for a `match_type` of `strict` or `regexp`) list of strings
+ or re2 regex patterns
+- `expressions`: (only for a `match_type` of `expr`) list of `expr` expressions
+ (see "Using an `expr` match_type" below)
+- `resource_attributes`: ResourceAttributes defines a list of possible resource
+ attributes to match metrics against.
+ A match occurs if any resource attribute matches all expressions in this given list.
+
+This processor uses [re2 regex][re2_regex] for regex syntax.
+
+[re2_regex]: https://github.com/google/re2/wiki/Syntax
+
+More details can be found at [include/exclude metrics](../attributesprocessor/README.md#includeexclude-filtering).
+
+Examples:
+
+```yaml
+processors:
+ filter/1:
+ metrics:
+ include:
+ match_type: regexp
+ metric_names:
+ - prefix/.*
+ - prefix_.*
+ resource_attributes:
+ - key: container.name
+ value: app_container_1
+ exclude:
+ match_type: strict
+ metric_names:
+ - hello_world
+ - hello/world
+ filter/2:
+ logs:
+ include:
+ match_type: strict
+ resource_attributes:
+ - key: host.name
+ value: just_this_one_hostname
+ filter/regexp:
+ logs:
+ include:
+ match_type: regexp
+ resource_attributes:
+ - key: host.name
+ value: prefix.*
+ filter/regexp_record:
+ logs:
+ include:
+ match_type: regexp
+ record_attributes:
+ - key: record_attr
+ value: prefix_.*
+ # Filter on severity text field
+ filter/severity_text:
+ logs:
+ include:
+ match_type: regexp
+ severity_texts:
+ - INFO[2-4]?
+ - WARN[2-4]?
+ - ERROR[2-4]?
+ # Filter out logs below INFO (no DEBUG or TRACE level logs),
+ # retaining logs with undefined severity
+ filter/severity_number:
+ logs:
+ include:
+ severity_number:
+ min: "INFO"
+ match_undefined: true
+ filter/bodies:
+ logs:
+ include:
+ match_type: regexp
+ bodies:
+ - ^IMPORTANT RECORD
+```
+
+Refer to the config files in [testdata](./testdata) for detailed
+examples on using the processor.
+
+## Using an "expr" match_type
+
+In addition to matching metric names with the `strict` or `regexp` match types, the filter processor
+supports matching entire `Metric`s using the [expr](https://github.com/antonmedv/expr) expression engine.
+
+The `expr` filter evaluates the supplied boolean expressions _per datapoint_ on a metric, and returns a result
+for the entire metric. If any datapoint evaluates to true then the entire metric evaluates to true, otherwise
+false.
+
+Made available to the expression environment are the following:
+
+* `MetricName`
+ a variable containing the current Metric's name
+* `MetricType`
+ a variable containing the current Metric's type: "Gauge", "Sum", "Histogram", "ExponentialHistogram" or "Summary".
+* `Label(name)`
+ a function that takes a label name string as an argument and returns a string: the value of a label with that
+ name if one exists, or ""
+* `HasLabel(name)`
+ a function that takes a label name string as an argument and returns a boolean: true if the datapoint has a label
+ with that name, false otherwise
+
+Example:
+
+```yaml
+processors:
+ filter/1:
+ metrics:
+ exclude:
+ match_type: expr
+ expressions:
+ - MetricName == "my.metric" && Label("my_label") == "abc123"
+ - MetricType == "Histogram"
+```
+
+The above config will filter out any Metric that both has the name "my.metric" and has at least one datapoint
+with a label of 'my_label="abc123"', as well as any Metric whose type is "Histogram".
+
+### Support for multiple expressions
+
+As with `strict` and `regexp`, multiple `expr` expressions are allowed.
+
+For example, the following two filters have the same effect: they filter out metrics named "system.cpu.time" and
+"system.disk.io".
+
+```yaml
+processors:
+ filter/expr:
+ metrics:
+ exclude:
+ match_type: expr
+ expressions:
+ - MetricName == "system.cpu.time"
+ - MetricName == "system.disk.io"
+ filter/strict:
+ metrics:
+ exclude:
+ match_type: strict
+ metric_names:
+ - system.cpu.time
+ - system.disk.io
+```
+
+The expressions are effectively ORed per datapoint. So for the above `expr` configuration, given a datapoint, if its
+parent Metric's name is "system.cpu.time" or "system.disk.io" then there's a match. The conditions are tested against
+all the datapoints in a Metric until there's a match, in which case the entire Metric is considered a match, and in
+the above example the Metric will be excluded. If, after testing all the datapoints in a Metric against all the
+expressions, there is no match, the entire Metric is considered not to match.
+
+
+### Filter metrics using resource attributes
+
+In addition to metric names, metrics can be filtered using resource attributes. `resource_attributes` takes a list of resource attributes to filter metrics against.
+
+The following example includes only the metrics coming from `app_container_1` (the value of the `container.name` resource attribute is `app_container_1`).
+
+```yaml
+processors:
+ filter/resource_attributes_include:
+ metrics:
+ include:
+ match_type: strict
+ metric_names:
+ - hello_world
+ - hello/world
+ resource_attributes:
+ - key: container.name
+ value: app_container_1
+```
+
+The following example excludes all the metrics coming from `app_container_1` (the value of the `container.name` resource attribute is `app_container_1`).
+
+```yaml
+processors:
+ filter/resource_attributes_exclude:
+ metrics:
+ exclude:
+ match_type: strict
+ metric_names:
+ - hello_world
+ - hello/world
+ resource_attributes:
+ - key: container.name
+ value: app_container_1
+```
+
+We can also use `regexp` to filter metrics using resource attributes. The following example excludes all the metrics coming from `app_container_1` or `app_container_2` (the value of the `container.name` resource attribute is either `app_container_1` or `app_container_2`).
+
+```yaml
+processors:
+ filter/resource_attributes_regexp:
+ metrics:
+ exclude:
+ match_type: regexp
+ metric_names:
+ - hello_world
+ - hello/world
+ resource_attributes:
+ - key: container.name
+            value: (app_container_1|app_container_2)
+```
+
+If no metric names are provided (`metric_names` is empty), filtering is done only at the resource level.
+
+### Filter Spans from Traces
+
+* This pipeline is able to drop spans and whole traces
+* Note: If this drops a parent span, it does not search out its children, leading to a missing span in your trace visualization
+
+See the documentation in the [attribute processor](../attributesprocessor/README.md) for syntax.
+
+For spans, one of Services, SpanNames, Attributes, Resources or Libraries must be specified with a
+non-empty value for a valid configuration.
+
+```yaml
+processors:
+ filter/spans:
+ spans:
+ include:
+ match_type: strict
+ services:
+ - app_3
+ exclude:
+ match_type: regexp
+ services:
+ - app_1
+ - app_2
+ span_names:
+ - hello_world
+ - hello/world
+ attributes:
+ - key: container.name
+ value: (app_container_1|app_container_2)
+ libraries:
+ - name: opentelemetry
+ version: 0.0-beta
+ resources:
+ - key: container.host
+ value: (localhost|127.0.0.1)
+```
+
+## OTTL
+
+The [OpenTelemetry Transformation Language](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/README.md) is a language for interacting with telemetry within the collector in generic ways.
+The filterprocessor can be configured to use OTTL conditions to determine when to drop telemetry.
+If any condition is met, the telemetry is dropped (the conditions are ORed together).
+Each configuration option corresponds with a different type of telemetry and OTTL Context.
+See the table below for details on each context and the fields it exposes.
+
+| Config | OTTL Context |
+|---------------------|------------------------------------------------------------------------------------------------------------------------------------|
+| `traces.span` | [Span](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspan/README.md) |
+| `traces.spanevent` | [SpanEvent](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlspanevent/README.md) |
+| `metrics.metric` | [Metric](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottlmetric/README.md) |
+| `metrics.datapoint` | [DataPoint](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottldatapoint/README.md) |
+| `logs.log_record` | [Log](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/contexts/ottllog/README.md) |
+
+The OTTL allows the use of `and`, `or`, and `()` in conditions.
+See [OTTL Boolean Expressions](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/ottl/README.md#boolean-expressions) for more details.
+
+For conditions that apply to the same signal, such as spans and span events, if the "higher" level telemetry matches a condition and is dropped, the "lower" level condition will not be checked.
+This means that if a span is dropped but a span event condition was defined, the span event condition will not be checked.
+The same relationship applies to metrics and datapoints.
+
+If all span events for a span are dropped, the span will be left intact.
+If all datapoints for a metric are dropped, the metric will also be dropped.
+
+The filter processor also allows configuring an optional field, `error_mode`, which will determine how the processor reacts to errors that occur while processing an OTTL condition.
+
+| error_mode | description |
+|-----------------------|----------------------------------------------------------------------------------------------------------------------------|
+| ignore | The processor ignores errors returned by conditions and continues on to the next condition. This is the recommended mode. |
+| propagate | The processor returns the error up the pipeline. This will result in the payload being dropped from the collector. |
+
+If not specified, `propagate` will be used.
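+
+For example, a minimal (illustrative) configuration that tolerates condition errors while
+dropping low-severity logs:
+
+```yaml
+processors:
+  filter/tolerant:
+    error_mode: ignore
+    logs:
+      log_record:
+        - 'severity_number < SEVERITY_NUMBER_WARN'
+```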
+
+### OTTL Functions
+
+The filter processor has access to all the [factory functions of the OTTL](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#ottl-functions).
+
+In addition, the processor defines a few of its own functions:
+
+**Metrics only functions**
+- [HasAttrKeyOnDatapoint](#HasAttrKeyOnDatapoint)
+- [HasAttrOnDatapoint](#HasAttrOnDatapoint)
+
+#### HasAttrKeyOnDatapoint
+
+`HasAttrKeyOnDatapoint(key)`
+
+Returns `true` if the given key appears in the attribute map of any datapoint on a metric.
+`key` must be a string.
+
+Examples:
+
+- `HasAttrKeyOnDatapoint("http.method")`
+
+#### HasAttrOnDatapoint
+
+`HasAttrOnDatapoint(key, value)`
+
+Returns `true` if the given key and value appear in the attribute map of any datapoint on a metric.
+`key` and `value` must both be strings.
+
+Examples:
+
+- `HasAttrOnDatapoint("http.method", "GET")`
+
+### OTTL Examples
+
+```yaml
+processors:
+ filter/ottl:
+ error_mode: ignore
+ traces:
+ span:
+ - 'attributes["container.name"] == "app_container_1"'
+ - 'resource.attributes["host.name"] == "localhost"'
+ - 'name == "app_3"'
+ spanevent:
+ - 'attributes["grpc"] == true'
+ - 'IsMatch(name, ".*grpc.*") == true'
+ metrics:
+ metric:
+ - 'name == "my.metric" and resource.attributes["my_label"] == "abc123"'
+ - 'type == METRIC_DATA_TYPE_HISTOGRAM'
+ datapoint:
+ - 'metric.type == METRIC_DATA_TYPE_SUMMARY'
+ - 'resource.attributes["service.name"] == "my_service_name"'
+ logs:
+ log_record:
+ - 'IsMatch(body, ".*password.*") == true'
+ - 'severity_number < SEVERITY_NUMBER_WARN'
+```
+
+[alpha]:https://github.com/open-telemetry/opentelemetry-collector#alpha
+[contrib]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib
+[core]:https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/config.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/config.go
new file mode 100644
index 00000000000..804854faa45
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/config.go
@@ -0,0 +1,328 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/plog"
+ "go.uber.org/multierr"
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/common"
+)
+
+// Config defines configuration for the filter processor.
+type Config struct {
+ // ErrorMode determines how the processor reacts to errors that occur while processing an OTTL condition.
+ // Valid values are `ignore` and `propagate`.
+ // `ignore` means the processor ignores errors returned by conditions and continues on to the next condition. This is the recommended mode.
+ // `propagate` means the processor returns the error up the pipeline. This will result in the payload being dropped from the collector.
+ // The default value is `propagate`.
+ ErrorMode ottl.ErrorMode `mapstructure:"error_mode"`
+
+ Metrics MetricFilters `mapstructure:"metrics"`
+
+ Logs LogFilters `mapstructure:"logs"`
+
+ Spans filterconfig.MatchConfig `mapstructure:"spans"`
+
+ Traces TraceFilters `mapstructure:"traces"`
+}
+
+// MetricFilters filters by Metric properties.
+type MetricFilters struct {
+ // Include match properties describe metrics that should be included in the Collector Service pipeline,
+ // all other metrics should be dropped from further processing.
+ // If both Include and Exclude are specified, Include filtering occurs first.
+ Include *filtermetric.MatchProperties `mapstructure:"include"`
+
+ // Exclude match properties describe metrics that should be excluded from the Collector Service pipeline,
+ // all other metrics should be included.
+ // If both Include and Exclude are specified, Include filtering occurs first.
+ Exclude *filtermetric.MatchProperties `mapstructure:"exclude"`
+
+ // RegexpConfig specifies options for the Regexp match type
+ RegexpConfig *regexp.Config `mapstructure:"regexp"`
+
+ // MetricConditions is a list of OTTL conditions for an ottlmetric context.
+ // If any condition resolves to true, the metric will be dropped.
+ // Supports `and`, `or`, and `()`
+ MetricConditions []string `mapstructure:"metric"`
+
+ // DataPointConditions is a list of OTTL conditions for an ottldatapoint context.
+ // If any condition resolves to true, the datapoint will be dropped.
+ // Supports `and`, `or`, and `()`
+ DataPointConditions []string `mapstructure:"datapoint"`
+}
+
+// TraceFilters filters by OTTL conditions
+type TraceFilters struct {
+ // SpanConditions is a list of OTTL conditions for an ottlspan context.
+ // If any condition resolves to true, the span will be dropped.
+ // Supports `and`, `or`, and `()`
+ SpanConditions []string `mapstructure:"span"`
+
+ // SpanEventConditions is a list of OTTL conditions for an ottlspanevent context.
+ // If any condition resolves to true, the span event will be dropped.
+ // Supports `and`, `or`, and `()`
+ SpanEventConditions []string `mapstructure:"spanevent"`
+}
+
+// LogFilters filters by Log properties.
+type LogFilters struct {
+ // Include match properties describe logs that should be included in the Collector Service pipeline,
+ // all other logs should be dropped from further processing.
+ // If both Include and Exclude are specified, Include filtering occurs first.
+ Include *LogMatchProperties `mapstructure:"include"`
+ // Exclude match properties describe logs that should be excluded from the Collector Service pipeline,
+ // all other logs should be included.
+ // If both Include and Exclude are specified, Include filtering occurs first.
+ Exclude *LogMatchProperties `mapstructure:"exclude"`
+
+ // LogConditions is a list of OTTL conditions for an ottllog context.
+ // If any condition resolves to true, the log event will be dropped.
+ // Supports `and`, `or`, and `()`
+ LogConditions []string `mapstructure:"log_record"`
+}
+
+// LogMatchType specifies the strategy for matching against `plog.Log`s.
+type LogMatchType string
+
+// These are the MatchTypes that users can specify for filtering
+// `plog.Log`s.
+const (
+ Strict = LogMatchType(filterset.Strict)
+ Regexp = LogMatchType(filterset.Regexp)
+)
+
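+// severityToNumber maps both the numeric severity strings ("1"-"24") and the
+// log data model short names ("TRACE" through "FATAL4") to plog severity
+// numbers. Lookups are performed on the upper-cased input, making matching
+// case-insensitive.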
+var severityToNumber = map[string]plog.SeverityNumber{
+ "1": plog.SeverityNumberTrace,
+ "2": plog.SeverityNumberTrace2,
+ "3": plog.SeverityNumberTrace3,
+ "4": plog.SeverityNumberTrace4,
+ "5": plog.SeverityNumberDebug,
+ "6": plog.SeverityNumberDebug2,
+ "7": plog.SeverityNumberDebug3,
+ "8": plog.SeverityNumberDebug4,
+ "9": plog.SeverityNumberInfo,
+ "10": plog.SeverityNumberInfo2,
+ "11": plog.SeverityNumberInfo3,
+ "12": plog.SeverityNumberInfo4,
+ "13": plog.SeverityNumberWarn,
+ "14": plog.SeverityNumberWarn2,
+ "15": plog.SeverityNumberWarn3,
+ "16": plog.SeverityNumberWarn4,
+ "17": plog.SeverityNumberError,
+ "18": plog.SeverityNumberError2,
+ "19": plog.SeverityNumberError3,
+ "20": plog.SeverityNumberError4,
+ "21": plog.SeverityNumberFatal,
+ "22": plog.SeverityNumberFatal2,
+ "23": plog.SeverityNumberFatal3,
+ "24": plog.SeverityNumberFatal4,
+ "TRACE": plog.SeverityNumberTrace,
+ "TRACE2": plog.SeverityNumberTrace2,
+ "TRACE3": plog.SeverityNumberTrace3,
+ "TRACE4": plog.SeverityNumberTrace4,
+ "DEBUG": plog.SeverityNumberDebug,
+ "DEBUG2": plog.SeverityNumberDebug2,
+ "DEBUG3": plog.SeverityNumberDebug3,
+ "DEBUG4": plog.SeverityNumberDebug4,
+ "INFO": plog.SeverityNumberInfo,
+ "INFO2": plog.SeverityNumberInfo2,
+ "INFO3": plog.SeverityNumberInfo3,
+ "INFO4": plog.SeverityNumberInfo4,
+ "WARN": plog.SeverityNumberWarn,
+ "WARN2": plog.SeverityNumberWarn2,
+ "WARN3": plog.SeverityNumberWarn3,
+ "WARN4": plog.SeverityNumberWarn4,
+ "ERROR": plog.SeverityNumberError,
+ "ERROR2": plog.SeverityNumberError2,
+ "ERROR3": plog.SeverityNumberError3,
+ "ERROR4": plog.SeverityNumberError4,
+ "FATAL": plog.SeverityNumberFatal,
+ "FATAL2": plog.SeverityNumberFatal2,
+ "FATAL3": plog.SeverityNumberFatal3,
+ "FATAL4": plog.SeverityNumberFatal4,
+}
+
+var errInvalidSeverity = errors.New("not a valid severity")
+
+// logSeverity is a type that represents a SeverityNumber as a string
+type logSeverity string
+
+// validate checks that the logSeverity is valid
+func (l logSeverity) validate() error {
+ if l == "" {
+ // No severity specified, which means to ignore this field.
+ return nil
+ }
+
+ capsSeverity := strings.ToUpper(string(l))
+ if _, ok := severityToNumber[capsSeverity]; !ok {
+ return fmt.Errorf("'%s' is not a valid severity: %w", string(l), errInvalidSeverity)
+ }
+ return nil
+}
+
+// severityNumber returns the severity number that the logSeverity represents
+func (l logSeverity) severityNumber() plog.SeverityNumber {
+ capsSeverity := strings.ToUpper(string(l))
+ return severityToNumber[capsSeverity]
+}
+
+// LogMatchProperties specifies the set of properties in a log to match against and the
+// type of string pattern matching to use.
+type LogMatchProperties struct {
+ // LogMatchType specifies the type of matching desired
+ LogMatchType LogMatchType `mapstructure:"match_type"`
+
+ // ResourceAttributes defines a list of possible resource attributes to match logs against.
+ // A match occurs if any resource attribute matches all expressions in this given list.
+ ResourceAttributes []filterconfig.Attribute `mapstructure:"resource_attributes"`
+
+ // RecordAttributes defines a list of possible record attributes to match logs against.
+ // A match occurs if any record attribute matches at least one expression in this given list.
+ RecordAttributes []filterconfig.Attribute `mapstructure:"record_attributes"`
+
+ // SeverityTexts is a list of strings that the LogRecord's severity text field must match
+ // against.
+ SeverityTexts []string `mapstructure:"severity_texts"`
+
+ // SeverityNumberProperties defines how to match against a log record's SeverityNumber, if defined.
+ SeverityNumberProperties *LogSeverityNumberMatchProperties `mapstructure:"severity_number"`
+
+ // LogBodies is a list of strings that the LogRecord's body field must match
+ // against.
+ LogBodies []string `mapstructure:"bodies"`
+}
+
+// validate checks that the LogMatchProperties is valid
+func (lmp LogMatchProperties) validate() error {
+ if lmp.SeverityNumberProperties != nil {
+ return lmp.SeverityNumberProperties.validate()
+ }
+ return nil
+}
+
+// isEmpty returns true if the properties are "empty" (meaning no filters are specified);
+// if this is the case, the filter should be ignored.
+func (lmp LogMatchProperties) isEmpty() bool {
+ return len(lmp.ResourceAttributes) == 0 && len(lmp.RecordAttributes) == 0 &&
+ len(lmp.SeverityTexts) == 0 && len(lmp.LogBodies) == 0 &&
+ lmp.SeverityNumberProperties == nil
+}
+
+// matchProperties converts the LogMatchProperties to a corresponding filterconfig.MatchProperties
+func (lmp LogMatchProperties) matchProperties() *filterconfig.MatchProperties {
+ mp := &filterconfig.MatchProperties{
+ Config: filterset.Config{
+ MatchType: filterset.MatchType(lmp.LogMatchType),
+ },
+ Resources: lmp.ResourceAttributes,
+ Attributes: lmp.RecordAttributes,
+ LogSeverityTexts: lmp.SeverityTexts,
+ LogBodies: lmp.LogBodies,
+ }
+
+ // Include SeverityNumberProperties if defined
+ if lmp.SeverityNumberProperties != nil {
+ mp.LogSeverityNumber = &filterconfig.LogSeverityNumberMatchProperties{
+ Min: lmp.SeverityNumberProperties.Min.severityNumber(),
+ MatchUndefined: lmp.SeverityNumberProperties.MatchUndefined,
+ }
+ }
+
+ return mp
+}
+
+type LogSeverityNumberMatchProperties struct {
+ // Min is the minimum severity needed for the log record to match.
+ // This corresponds to the short names specified here:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#displaying-severity
+ // this field is case-insensitive ("INFO" == "info")
+ Min logSeverity `mapstructure:"min"`
+
+	// MatchUndefined lets log records with "unknown" severity match.
+	// If Min is not set, this field is ignored, as fields are not matched based on severity.
+ MatchUndefined bool `mapstructure:"match_undefined"`
+}
+
+// validate checks that the LogSeverityNumberMatchProperties is valid
+func (lmp LogSeverityNumberMatchProperties) validate() error {
+ return lmp.Min.validate()
+}
+
+var _ component.Config = (*Config)(nil)
+
+// Validate checks if the processor configuration is valid
+func (cfg *Config) Validate() error {
+ if (cfg.Traces.SpanConditions != nil || cfg.Traces.SpanEventConditions != nil) && (cfg.Spans.Include != nil || cfg.Spans.Exclude != nil) {
+ return fmt.Errorf("cannot use ottl conditions and include/exclude for spans at the same time")
+ }
+ if (cfg.Metrics.MetricConditions != nil || cfg.Metrics.DataPointConditions != nil) && (cfg.Metrics.Include != nil || cfg.Metrics.Exclude != nil) {
+ return fmt.Errorf("cannot use ottl conditions and include/exclude for metrics at the same time")
+ }
+ if cfg.Logs.LogConditions != nil && (cfg.Logs.Include != nil || cfg.Logs.Exclude != nil) {
+ return fmt.Errorf("cannot use ottl conditions and include/exclude for logs at the same time")
+ }
+
+ var errors error
+
+ if cfg.Traces.SpanConditions != nil {
+ _, err := filterottl.NewBoolExprForSpan(cfg.Traces.SpanConditions, filterottl.StandardSpanFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()})
+ errors = multierr.Append(errors, err)
+ }
+
+ if cfg.Traces.SpanEventConditions != nil {
+ _, err := filterottl.NewBoolExprForSpanEvent(cfg.Traces.SpanEventConditions, filterottl.StandardSpanEventFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()})
+ errors = multierr.Append(errors, err)
+ }
+
+ if cfg.Metrics.MetricConditions != nil {
+ _, err := filterottl.NewBoolExprForMetric(cfg.Metrics.MetricConditions, common.MetricFunctions(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()})
+ errors = multierr.Append(errors, err)
+ }
+
+ if cfg.Metrics.DataPointConditions != nil {
+ _, err := filterottl.NewBoolExprForDataPoint(cfg.Metrics.DataPointConditions, filterottl.StandardDataPointFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()})
+ errors = multierr.Append(errors, err)
+ }
+
+ if cfg.Logs.LogConditions != nil {
+ _, err := filterottl.NewBoolExprForLog(cfg.Logs.LogConditions, filterottl.StandardLogFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()})
+ errors = multierr.Append(errors, err)
+ }
+
+	if cfg.Logs.Include != nil {
+		errors = multierr.Append(errors, cfg.Logs.Include.validate())
+	}
+
+	if cfg.Logs.Exclude != nil {
+		errors = multierr.Append(errors, cfg.Logs.Exclude.validate())
+	}
+
+ return errors
+}
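+
+// Illustrative sketch (not part of the upstream file): Validate rejects mixing
+// OTTL conditions with include/exclude matching for the same signal, so a
+// config like the following returns a non-nil error:
+//
+//	cfg := &Config{
+//		Logs: LogFilters{
+//			LogConditions: []string{`severity_number < SEVERITY_NUMBER_WARN`},
+//			Include:       &LogMatchProperties{LogMatchType: Strict},
+//		},
+//	}
+//	err := cfg.Validate() // "cannot use ottl conditions and include/exclude for logs at the same time"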
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/doc.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/doc.go
new file mode 100644
index 00000000000..e68bc0dfd39
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/doc.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package filterprocessor implements a processor for filtering
+// (dropping) spans, metrics, and/or logs by various properties.
+package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor"
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go
new file mode 100644
index 00000000000..9c454cf27d9
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/factory.go
@@ -0,0 +1,109 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/processor"
+ "go.opentelemetry.io/collector/processor/processorhelper"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+const (
+ // The value of "type" key in configuration.
+ typeStr = "filter"
+ // The stability level of the processor.
+ stability = component.StabilityLevelAlpha
+)
+
+var processorCapabilities = consumer.Capabilities{MutatesData: true}
+
+// NewFactory returns a new factory for the Filter processor.
+func NewFactory() processor.Factory {
+ return processor.NewFactory(
+ typeStr,
+ createDefaultConfig,
+ processor.WithMetrics(createMetricsProcessor, stability),
+ processor.WithLogs(createLogsProcessor, stability),
+ processor.WithTraces(createTracesProcessor, stability),
+ )
+}
+
+func createDefaultConfig() component.Config {
+ return &Config{
+ ErrorMode: ottl.PropagateError,
+ }
+}
+
+func createMetricsProcessor(
+ ctx context.Context,
+ set processor.CreateSettings,
+ cfg component.Config,
+ nextConsumer consumer.Metrics,
+) (processor.Metrics, error) {
+ fp, err := newFilterMetricProcessor(set.TelemetrySettings, cfg.(*Config))
+ if err != nil {
+ return nil, err
+ }
+ return processorhelper.NewMetricsProcessor(
+ ctx,
+ set,
+ cfg,
+ nextConsumer,
+ fp.processMetrics,
+ processorhelper.WithCapabilities(processorCapabilities))
+}
+
+func createLogsProcessor(
+ ctx context.Context,
+ set processor.CreateSettings,
+ cfg component.Config,
+ nextConsumer consumer.Logs,
+) (processor.Logs, error) {
+ fp, err := newFilterLogsProcessor(set.TelemetrySettings, cfg.(*Config))
+ if err != nil {
+ return nil, err
+ }
+ return processorhelper.NewLogsProcessor(
+ ctx,
+ set,
+ cfg,
+ nextConsumer,
+ fp.processLogs,
+ processorhelper.WithCapabilities(processorCapabilities))
+}
+
+func createTracesProcessor(
+ ctx context.Context,
+ set processor.CreateSettings,
+ cfg component.Config,
+ nextConsumer consumer.Traces,
+) (processor.Traces, error) {
+ fp, err := newFilterSpansProcessor(set.TelemetrySettings, cfg.(*Config))
+ if err != nil {
+ return nil, err
+ }
+ return processorhelper.NewTracesProcessor(
+ ctx,
+ set,
+ cfg,
+ nextConsumer,
+ fp.processTraces,
+ processorhelper.WithCapabilities(processorCapabilities))
+}
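
As a usage note, a custom collector distribution registers this factory keyed by its Type(); a hedged sketch (the `processorFactories` helper is hypothetical):

```go
package main

import (
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/processor"

	"github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor"
)

// processorFactories is a hypothetical helper a distribution might use to
// gather its processor factories by component type.
func processorFactories() map[component.Type]processor.Factory {
	f := filterprocessor.NewFactory() // supports metrics, logs, and traces
	return map[component.Type]processor.Factory{
		f.Type(): f, // keyed as "filter"
	}
}
```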
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/common/functions.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/common/functions.go
new file mode 100644
index 00000000000..ce8d56b96a6
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/common/functions.go
@@ -0,0 +1,118 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/common"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/pdata/pmetric"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
+)
+
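+// MetricFunctions returns filterottl's standard metric functions, augmented
+// with the HasAttrKeyOnDatapoint and HasAttrOnDatapoint conditions, which
+// inspect the attributes of the metric's individual data points.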
+func MetricFunctions() map[string]interface{} {
+ funcs := filterottl.StandardMetricFuncs()
+ funcs["HasAttrKeyOnDatapoint"] = hasAttributeKeyOnDatapoint
+ funcs["HasAttrOnDatapoint"] = hasAttributeOnDatapoint
+ return funcs
+}
+
+func hasAttributeOnDatapoint(key string, expectedVal string) (ottl.ExprFunc[ottlmetric.TransformContext], error) {
+ return func(ctx context.Context, tCtx ottlmetric.TransformContext) (interface{}, error) {
+ return checkDataPoints(tCtx, key, &expectedVal)
+ }, nil
+}
+
+func hasAttributeKeyOnDatapoint(key string) (ottl.ExprFunc[ottlmetric.TransformContext], error) {
+ return func(ctx context.Context, tCtx ottlmetric.TransformContext) (interface{}, error) {
+ return checkDataPoints(tCtx, key, nil)
+ }, nil
+}
+
+func checkDataPoints(tCtx ottlmetric.TransformContext, key string, expectedVal *string) (interface{}, error) {
+ metric := tCtx.GetMetric()
+ switch metric.Type() {
+ case pmetric.MetricTypeSum:
+ return checkNumberDataPointSlice(metric.Sum().DataPoints(), key, expectedVal), nil
+ case pmetric.MetricTypeGauge:
+ return checkNumberDataPointSlice(metric.Gauge().DataPoints(), key, expectedVal), nil
+ case pmetric.MetricTypeHistogram:
+ return checkHistogramDataPointSlice(metric.Histogram().DataPoints(), key, expectedVal), nil
+ case pmetric.MetricTypeExponentialHistogram:
+ return checkExponentialHistogramDataPointSlice(metric.ExponentialHistogram().DataPoints(), key, expectedVal), nil
+ case pmetric.MetricTypeSummary:
+ return checkSummaryDataPointSlice(metric.Summary().DataPoints(), key, expectedVal), nil
+ }
+ return nil, fmt.Errorf("unknown metric type")
+}
+
+func checkNumberDataPointSlice(dps pmetric.NumberDataPointSlice, key string, expectedVal *string) bool {
+ for i := 0; i < dps.Len(); i++ {
+ dp := dps.At(i)
+ value, ok := dp.Attributes().Get(key)
+ if ok {
+ if expectedVal != nil {
+ return value.Str() == *expectedVal
+ }
+ return true
+ }
+ }
+ return false
+}
+
+func checkHistogramDataPointSlice(dps pmetric.HistogramDataPointSlice, key string, expectedVal *string) bool {
+ for i := 0; i < dps.Len(); i++ {
+ dp := dps.At(i)
+ value, ok := dp.Attributes().Get(key)
+ if ok {
+ if expectedVal != nil {
+ return value.Str() == *expectedVal
+ }
+ return true
+ }
+ }
+ return false
+}
+
+func checkExponentialHistogramDataPointSlice(dps pmetric.ExponentialHistogramDataPointSlice, key string, expectedVal *string) bool {
+ for i := 0; i < dps.Len(); i++ {
+ dp := dps.At(i)
+ value, ok := dp.Attributes().Get(key)
+ if ok {
+ if expectedVal != nil {
+ return value.Str() == *expectedVal
+ }
+ return true
+ }
+ }
+ return false
+}
+
+func checkSummaryDataPointSlice(dps pmetric.SummaryDataPointSlice, key string, expectedVal *string) bool {
+ for i := 0; i < dps.Len(); i++ {
+ dp := dps.At(i)
+ value, ok := dp.Attributes().Get(key)
+ if ok {
+ if expectedVal != nil {
+ return value.Str() == *expectedVal
+ }
+ return true
+ }
+ }
+ return false
+}
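
The two extra functions become available inside metric OTTL conditions. Below is a sketch mirroring how Config.Validate compiles such conditions; the two condition strings are hypothetical examples:

```go
package common

import (
	"go.opentelemetry.io/collector/component"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr"
	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
)

// buildMetricSkipExpr compiles two example conditions that use the
// datapoint helpers registered by MetricFunctions above.
func buildMetricSkipExpr() (expr.BoolExpr[ottlmetric.TransformContext], error) {
	conditions := []string{
		`HasAttrOnDatapoint("host.name", "canary-01")`,
		`HasAttrKeyOnDatapoint("debug.trace_id")`,
	}
	return filterottl.NewBoolExprForMetric(
		conditions,
		MetricFunctions(),
		ottl.PropagateError,
		component.TelemetrySettings{Logger: zap.NewNop()},
	)
}
```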
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go
new file mode 100644
index 00000000000..34b5db6eee9
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/logs.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/plog"
+ "go.opentelemetry.io/collector/processor/processorhelper"
+ "go.uber.org/multierr"
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog"
+)
+
+type filterLogProcessor struct {
+ skipExpr expr.BoolExpr[ottllog.TransformContext]
+ logger *zap.Logger
+}
+
+func newFilterLogsProcessor(set component.TelemetrySettings, cfg *Config) (*filterLogProcessor, error) {
+ flp := &filterLogProcessor{
+ logger: set.Logger,
+ }
+ if cfg.Logs.LogConditions != nil {
+ skipExpr, err := filterottl.NewBoolExprForLog(cfg.Logs.LogConditions, filterottl.StandardLogFuncs(), cfg.ErrorMode, set)
+ if err != nil {
+ return nil, err
+ }
+ flp.skipExpr = skipExpr
+ return flp, nil
+ }
+
+ cfgMatch := filterconfig.MatchConfig{}
+ if cfg.Logs.Include != nil && !cfg.Logs.Include.isEmpty() {
+ cfgMatch.Include = cfg.Logs.Include.matchProperties()
+ }
+
+ if cfg.Logs.Exclude != nil && !cfg.Logs.Exclude.isEmpty() {
+ cfgMatch.Exclude = cfg.Logs.Exclude.matchProperties()
+ }
+
+ skipExpr, err := filterlog.NewSkipExpr(&cfgMatch)
+ if err != nil {
+ return nil, fmt.Errorf("failed to build skip matcher: %w", err)
+ }
+ flp.skipExpr = skipExpr
+
+ return flp, nil
+}
+
+func (flp *filterLogProcessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) {
+ if flp.skipExpr == nil {
+ return ld, nil
+ }
+
+ var errors error
+ ld.ResourceLogs().RemoveIf(func(rl plog.ResourceLogs) bool {
+ resource := rl.Resource()
+ rl.ScopeLogs().RemoveIf(func(sl plog.ScopeLogs) bool {
+ scope := sl.Scope()
+ lrs := sl.LogRecords()
+ lrs.RemoveIf(func(lr plog.LogRecord) bool {
+ skip, err := flp.skipExpr.Eval(ctx, ottllog.NewTransformContext(lr, scope, resource))
+ if err != nil {
+ errors = multierr.Append(errors, err)
+ return false
+ }
+ return skip
+ })
+
+ return sl.LogRecords().Len() == 0
+ })
+ return rl.ScopeLogs().Len() == 0
+ })
+
+ if errors != nil {
+ flp.logger.Error("failed processing logs", zap.Error(errors))
+ return ld, errors
+ }
+ if ld.ResourceLogs().Len() == 0 {
+ return ld, processorhelper.ErrSkipProcessingData
+ }
+ return ld, nil
+}
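
The traversal above follows a cascade: remove matching log records, then prune scopes and resources left empty. A standalone sketch of the same nested RemoveIf pattern (the empty-body condition is hypothetical):

```go
package main

import "go.opentelemetry.io/collector/pdata/plog"

// dropEmptyBodies removes log records with empty bodies, then prunes
// scope and resource entries that end up with nothing in them.
func dropEmptyBodies(ld plog.Logs) {
	ld.ResourceLogs().RemoveIf(func(rl plog.ResourceLogs) bool {
		rl.ScopeLogs().RemoveIf(func(sl plog.ScopeLogs) bool {
			sl.LogRecords().RemoveIf(func(lr plog.LogRecord) bool {
				return lr.Body().AsString() == ""
			})
			return sl.LogRecords().Len() == 0
		})
		return rl.ScopeLogs().Len() == 0
	})
}
```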
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go
new file mode 100644
index 00000000000..3d22045786d
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/metrics.go
@@ -0,0 +1,280 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/pcommon"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+ "go.opentelemetry.io/collector/processor/processorhelper"
+ "go.uber.org/multierr"
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/common"
+)
+
+type filterMetricProcessor struct {
+ skipResourceExpr expr.BoolExpr[ottlresource.TransformContext]
+ skipMetricExpr expr.BoolExpr[ottlmetric.TransformContext]
+ skipDataPointExpr expr.BoolExpr[ottldatapoint.TransformContext]
+ logger *zap.Logger
+}
+
+func newFilterMetricProcessor(set component.TelemetrySettings, cfg *Config) (*filterMetricProcessor, error) {
+ var err error
+ fsp := &filterMetricProcessor{
+ logger: set.Logger,
+ }
+ if cfg.Metrics.MetricConditions != nil || cfg.Metrics.DataPointConditions != nil {
+ if cfg.Metrics.MetricConditions != nil {
+ fsp.skipMetricExpr, err = filterottl.NewBoolExprForMetric(cfg.Metrics.MetricConditions, common.MetricFunctions(), cfg.ErrorMode, set)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if cfg.Metrics.DataPointConditions != nil {
+ fsp.skipDataPointExpr, err = filterottl.NewBoolExprForDataPoint(cfg.Metrics.DataPointConditions, filterottl.StandardDataPointFuncs(), cfg.ErrorMode, set)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return fsp, nil
+ }
+
+ fsp.skipResourceExpr, err = newSkipResExpr(cfg.Metrics.Include, cfg.Metrics.Exclude)
+ if err != nil {
+ return nil, err
+ }
+
+ fsp.skipMetricExpr, err = filtermetric.NewSkipExpr(cfg.Metrics.Include, cfg.Metrics.Exclude)
+ if err != nil {
+ return nil, err
+ }
+
+ includeMatchType := ""
+ var includeExpressions []string
+ var includeMetricNames []string
+ var includeResourceAttributes []filterconfig.Attribute
+ if cfg.Metrics.Include != nil {
+ includeMatchType = string(cfg.Metrics.Include.MatchType)
+ includeExpressions = cfg.Metrics.Include.Expressions
+ includeMetricNames = cfg.Metrics.Include.MetricNames
+ includeResourceAttributes = cfg.Metrics.Include.ResourceAttributes
+ }
+
+ excludeMatchType := ""
+ var excludeExpressions []string
+ var excludeMetricNames []string
+ var excludeResourceAttributes []filterconfig.Attribute
+ if cfg.Metrics.Exclude != nil {
+ excludeMatchType = string(cfg.Metrics.Exclude.MatchType)
+ excludeExpressions = cfg.Metrics.Exclude.Expressions
+ excludeMetricNames = cfg.Metrics.Exclude.MetricNames
+ excludeResourceAttributes = cfg.Metrics.Exclude.ResourceAttributes
+ }
+
+ set.Logger.Info(
+ "Metric filter configured",
+ zap.String("include match_type", includeMatchType),
+ zap.Strings("include expressions", includeExpressions),
+ zap.Strings("include metric names", includeMetricNames),
+ zap.Any("include metrics with resource attributes", includeResourceAttributes),
+ zap.String("exclude match_type", excludeMatchType),
+ zap.Strings("exclude expressions", excludeExpressions),
+ zap.Strings("exclude metric names", excludeMetricNames),
+ zap.Any("exclude metrics with resource attributes", excludeResourceAttributes),
+ )
+
+ return fsp, nil
+}
+
+// processMetrics filters the given metrics based on the filterMetricProcessor's filters.
+func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) {
+ if fmp.skipResourceExpr == nil && fmp.skipMetricExpr == nil && fmp.skipDataPointExpr == nil {
+ return md, nil
+ }
+
+ var errors error
+ md.ResourceMetrics().RemoveIf(func(rmetrics pmetric.ResourceMetrics) bool {
+ resource := rmetrics.Resource()
+ if fmp.skipResourceExpr != nil {
+ skip, err := fmp.skipResourceExpr.Eval(ctx, ottlresource.NewTransformContext(resource))
+ if err != nil {
+ errors = multierr.Append(errors, err)
+ return false
+ }
+ if skip {
+ return true
+ }
+ }
+ rmetrics.ScopeMetrics().RemoveIf(func(smetrics pmetric.ScopeMetrics) bool {
+ scope := smetrics.Scope()
+ smetrics.Metrics().RemoveIf(func(metric pmetric.Metric) bool {
+ if fmp.skipMetricExpr != nil {
+ skip, err := fmp.skipMetricExpr.Eval(ctx, ottlmetric.NewTransformContext(metric, scope, resource))
+ if err != nil {
+ errors = multierr.Append(errors, err)
+ }
+ if skip {
+ return true
+ }
+ }
+ if fmp.skipDataPointExpr != nil {
+ switch metric.Type() {
+ case pmetric.MetricTypeSum:
+ errors = multierr.Append(errors, fmp.handleNumberDataPoints(ctx, metric.Sum().DataPoints(), metric, smetrics.Metrics(), scope, resource))
+ return metric.Sum().DataPoints().Len() == 0
+ case pmetric.MetricTypeGauge:
+ errors = multierr.Append(errors, fmp.handleNumberDataPoints(ctx, metric.Gauge().DataPoints(), metric, smetrics.Metrics(), scope, resource))
+ return metric.Gauge().DataPoints().Len() == 0
+ case pmetric.MetricTypeHistogram:
+ errors = multierr.Append(errors, fmp.handleHistogramDataPoints(ctx, metric.Histogram().DataPoints(), metric, smetrics.Metrics(), scope, resource))
+ return metric.Histogram().DataPoints().Len() == 0
+ case pmetric.MetricTypeExponentialHistogram:
+ errors = multierr.Append(errors, fmp.handleExponentialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metric, smetrics.Metrics(), scope, resource))
+ return metric.ExponentialHistogram().DataPoints().Len() == 0
+ case pmetric.MetricTypeSummary:
+ errors = multierr.Append(errors, fmp.handleSummaryDataPoints(ctx, metric.Summary().DataPoints(), metric, smetrics.Metrics(), scope, resource))
+ return metric.Summary().DataPoints().Len() == 0
+ default:
+ return false
+ }
+ }
+ return false
+ })
+ return smetrics.Metrics().Len() == 0
+ })
+ return rmetrics.ScopeMetrics().Len() == 0
+ })
+
+ if errors != nil {
+ fmp.logger.Error("failed processing metrics", zap.Error(errors))
+ return md, errors
+ }
+ if md.ResourceMetrics().Len() == 0 {
+ return md, processorhelper.ErrSkipProcessingData
+ }
+ return md, nil
+}
+
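+// newSkipResExpr composes the include/exclude resource-attribute matchers
+// into a single skip expression: a resource is skipped when it fails the
+// include matcher or matches the exclude matcher.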
+func newSkipResExpr(include *filtermetric.MatchProperties, exclude *filtermetric.MatchProperties) (expr.BoolExpr[ottlresource.TransformContext], error) {
+ var matchers []expr.BoolExpr[ottlresource.TransformContext]
+ inclExpr, err := newResExpr(include)
+ if err != nil {
+ return nil, err
+ }
+ if inclExpr != nil {
+ matchers = append(matchers, expr.Not(inclExpr))
+ }
+ exclExpr, err := newResExpr(exclude)
+ if err != nil {
+ return nil, err
+ }
+ if exclExpr != nil {
+ matchers = append(matchers, exclExpr)
+ }
+ return expr.Or(matchers...), nil
+}
+
+type resExpr filtermatcher.AttributesMatcher
+
+func (r resExpr) Eval(_ context.Context, tCtx ottlresource.TransformContext) (bool, error) {
+ return filtermatcher.AttributesMatcher(r).Match(tCtx.GetResource().Attributes()), nil
+}
+
+func newResExpr(mp *filtermetric.MatchProperties) (expr.BoolExpr[ottlresource.TransformContext], error) {
+ if mp == nil {
+ return nil, nil
+ }
+ attributeMatcher, err := filtermatcher.NewAttributesMatcher(
+ filterset.Config{
+ MatchType: filterset.MatchType(mp.MatchType),
+ RegexpConfig: mp.RegexpConfig,
+ },
+ mp.ResourceAttributes,
+ )
+ if err != nil {
+ return nil, err
+ }
+ if attributeMatcher == nil {
+ return nil, nil
+ }
+ return resExpr(attributeMatcher), nil
+}
+
+func (fmp *filterMetricProcessor) handleNumberDataPoints(ctx context.Context, dps pmetric.NumberDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error {
+ var errors error
+ dps.RemoveIf(func(datapoint pmetric.NumberDataPoint) bool {
+ skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource))
+ if err != nil {
+ errors = multierr.Append(errors, err)
+ return false
+ }
+ return skip
+ })
+ return errors
+}
+
+func (fmp *filterMetricProcessor) handleHistogramDataPoints(ctx context.Context, dps pmetric.HistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error {
+ var errors error
+ dps.RemoveIf(func(datapoint pmetric.HistogramDataPoint) bool {
+ skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource))
+ if err != nil {
+ errors = multierr.Append(errors, err)
+ return false
+ }
+ return skip
+ })
+ return errors
+}
+
+func (fmp *filterMetricProcessor) handleExponentialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error {
+ var errors error
+ dps.RemoveIf(func(datapoint pmetric.ExponentialHistogramDataPoint) bool {
+ skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource))
+ if err != nil {
+ errors = multierr.Append(errors, err)
+ return false
+ }
+ return skip
+ })
+ return errors
+}
+
+func (fmp *filterMetricProcessor) handleSummaryDataPoints(ctx context.Context, dps pmetric.SummaryDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error {
+ var errors error
+ dps.RemoveIf(func(datapoint pmetric.SummaryDataPoint) bool {
+ skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource))
+ if err != nil {
+ errors = multierr.Append(errors, err)
+ return false
+ }
+ return skip
+ })
+ return errors
+}
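
newSkipResExpr builds expr.Or(expr.Not(include), exclude), so a resource is dropped when it fails a configured include matcher or hits a configured exclude matcher. A hypothetical restatement of that truth table:

```go
// skipResource restates the expression composed by newSkipResExpr;
// the "has" flags stand in for the optional matchers.
func skipResource(hasInclude, includeMatches, hasExclude, excludeMatches bool) bool {
	if hasInclude && !includeMatches {
		return true // resource fails the include matcher
	}
	if hasExclude && excludeMatches {
		return true // resource hits the exclude matcher
	}
	return false
}
```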
diff --git a/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go
new file mode 100644
index 00000000000..c9d36943d09
--- /dev/null
+++ b/vendor/github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/traces.go
@@ -0,0 +1,130 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filterprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/pdata/ptrace"
+ "go.opentelemetry.io/collector/processor/processorhelper"
+ "go.uber.org/multierr"
+ "go.uber.org/zap"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent"
+)
+
+type filterSpanProcessor struct {
+ skipSpanExpr expr.BoolExpr[ottlspan.TransformContext]
+ skipSpanEventExpr expr.BoolExpr[ottlspanevent.TransformContext]
+ logger *zap.Logger
+}
+
+func newFilterSpansProcessor(set component.TelemetrySettings, cfg *Config) (*filterSpanProcessor, error) {
+ var err error
+ fsp := &filterSpanProcessor{
+ logger: set.Logger,
+ }
+ if cfg.Traces.SpanConditions != nil || cfg.Traces.SpanEventConditions != nil {
+ if cfg.Traces.SpanConditions != nil {
+ fsp.skipSpanExpr, err = filterottl.NewBoolExprForSpan(cfg.Traces.SpanConditions, filterottl.StandardSpanFuncs(), cfg.ErrorMode, set)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if cfg.Traces.SpanEventConditions != nil {
+ fsp.skipSpanEventExpr, err = filterottl.NewBoolExprForSpanEvent(cfg.Traces.SpanEventConditions, filterottl.StandardSpanEventFuncs(), cfg.ErrorMode, set)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return fsp, nil
+ }
+
+ fsp.skipSpanExpr, err = filterspan.NewSkipExpr(&cfg.Spans)
+ if err != nil {
+ return nil, err
+ }
+
+ includeMatchType, excludeMatchType := "[None]", "[None]"
+ if cfg.Spans.Include != nil {
+ includeMatchType = string(cfg.Spans.Include.MatchType)
+ }
+
+ if cfg.Spans.Exclude != nil {
+ excludeMatchType = string(cfg.Spans.Exclude.MatchType)
+ }
+
+ set.Logger.Info(
+ "Span filter configured",
+ zap.String("[Include] match_type", includeMatchType),
+ zap.String("[Exclude] match_type", excludeMatchType),
+ )
+
+ return fsp, nil
+}
+
+// processTraces filters the spans (and span events) of the given traces based on the filterSpanProcessor's filters.
+func (fsp *filterSpanProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) {
+ if fsp.skipSpanExpr == nil && fsp.skipSpanEventExpr == nil {
+ return td, nil
+ }
+
+ var errors error
+ td.ResourceSpans().RemoveIf(func(rs ptrace.ResourceSpans) bool {
+ resource := rs.Resource()
+ rs.ScopeSpans().RemoveIf(func(ss ptrace.ScopeSpans) bool {
+ scope := ss.Scope()
+ ss.Spans().RemoveIf(func(span ptrace.Span) bool {
+ if fsp.skipSpanExpr != nil {
+ skip, err := fsp.skipSpanExpr.Eval(ctx, ottlspan.NewTransformContext(span, scope, resource))
+ if err != nil {
+ errors = multierr.Append(errors, err)
+ return false
+ }
+ if skip {
+ return true
+ }
+ }
+ if fsp.skipSpanEventExpr != nil {
+ span.Events().RemoveIf(func(spanEvent ptrace.SpanEvent) bool {
+ skip, err := fsp.skipSpanEventExpr.Eval(ctx, ottlspanevent.NewTransformContext(spanEvent, span, scope, resource))
+ if err != nil {
+ errors = multierr.Append(errors, err)
+ return false
+ }
+ return skip
+ })
+ }
+ return false
+ })
+ return ss.Spans().Len() == 0
+ })
+ return rs.ScopeSpans().Len() == 0
+ })
+
+ if errors != nil {
+ fsp.logger.Error("failed processing traces", zap.Error(errors))
+ return td, errors
+ }
+ if td.ResourceSpans().Len() == 0 {
+ return td, processorhelper.ErrSkipProcessingData
+ }
+ return td, nil
+}
diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go
new file mode 100644
index 00000000000..3ffc3adc750
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/logs.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package processorhelper // import "go.opentelemetry.io/collector/processor/processorhelper"
+
+import (
+ "context"
+ "errors"
+
+ "go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/pdata/plog"
+ "go.opentelemetry.io/collector/processor"
+)
+
+// ProcessLogsFunc is a helper function that processes the incoming data and returns the data to be sent to the next component.
+// If an error is returned, the returned data is ignored. It MUST NOT call the next component.
+type ProcessLogsFunc func(context.Context, plog.Logs) (plog.Logs, error)
+
+type logProcessor struct {
+ component.StartFunc
+ component.ShutdownFunc
+ consumer.Logs
+}
+
+// NewLogsProcessor creates a processor.Logs that ensures context propagation and that the right tags are set.
+func NewLogsProcessor(
+ _ context.Context,
+ set processor.CreateSettings,
+ _ component.Config,
+ nextConsumer consumer.Logs,
+ logsFunc ProcessLogsFunc,
+ options ...Option,
+) (processor.Logs, error) {
+ // TODO: Add observability metrics support
+ if logsFunc == nil {
+ return nil, errors.New("nil logsFunc")
+ }
+
+ if nextConsumer == nil {
+ return nil, component.ErrNilNextConsumer
+ }
+
+ eventOptions := spanAttributes(set.ID)
+ bs := fromOptions(options)
+ logsConsumer, err := consumer.NewLogs(func(ctx context.Context, ld plog.Logs) error {
+ span := trace.SpanFromContext(ctx)
+ span.AddEvent("Start processing.", eventOptions)
+ var err error
+ ld, err = logsFunc(ctx, ld)
+ span.AddEvent("End processing.", eventOptions)
+ if err != nil {
+ if errors.Is(err, ErrSkipProcessingData) {
+ return nil
+ }
+ return err
+ }
+ return nextConsumer.ConsumeLogs(ctx, ld)
+ }, bs.consumerOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &logProcessor{
+ StartFunc: bs.StartFunc,
+ ShutdownFunc: bs.ShutdownFunc,
+ Logs: logsConsumer,
+ }, nil
+}
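
A hedged sketch of building a logs processor on this helper; the drop condition is hypothetical, and returning ErrSkipProcessingData silently ends the pipeline for a batch:

```go
package main

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/processor"
	"go.opentelemetry.io/collector/processor/processorhelper"
)

// newDropEmptyBatchesProcessor builds a processor that swallows batches
// containing no log records instead of forwarding them.
func newDropEmptyBatchesProcessor(ctx context.Context, set processor.CreateSettings, cfg component.Config, next consumer.Logs) (processor.Logs, error) {
	return processorhelper.NewLogsProcessor(ctx, set, cfg, next,
		func(_ context.Context, ld plog.Logs) (plog.Logs, error) {
			if ld.LogRecordCount() == 0 {
				// Swallowed by the helper: the batch is dropped and no
				// error reaches the previous component.
				return ld, processorhelper.ErrSkipProcessingData
			}
			return ld, nil
		})
}
```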
diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go
new file mode 100644
index 00000000000..e5b0483b971
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/metrics.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package processorhelper // import "go.opentelemetry.io/collector/processor/processorhelper"
+
+import (
+ "context"
+ "errors"
+
+ "go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/pdata/pmetric"
+ "go.opentelemetry.io/collector/processor"
+)
+
+// ProcessMetricsFunc is a helper function that processes the incoming data and returns the data to be sent to the next component.
+// If an error is returned, the returned data is ignored. It MUST NOT call the next component.
+type ProcessMetricsFunc func(context.Context, pmetric.Metrics) (pmetric.Metrics, error)
+
+type metricsProcessor struct {
+ component.StartFunc
+ component.ShutdownFunc
+ consumer.Metrics
+}
+
+// NewMetricsProcessor creates a processor.Metrics that ensures context propagation and that the right tags are set.
+func NewMetricsProcessor(
+ _ context.Context,
+ set processor.CreateSettings,
+ _ component.Config,
+ nextConsumer consumer.Metrics,
+ metricsFunc ProcessMetricsFunc,
+ options ...Option,
+) (processor.Metrics, error) {
+ // TODO: Add observability metrics support
+ if metricsFunc == nil {
+ return nil, errors.New("nil metricsFunc")
+ }
+
+ if nextConsumer == nil {
+ return nil, component.ErrNilNextConsumer
+ }
+
+ eventOptions := spanAttributes(set.ID)
+ bs := fromOptions(options)
+ metricsConsumer, err := consumer.NewMetrics(func(ctx context.Context, md pmetric.Metrics) error {
+ span := trace.SpanFromContext(ctx)
+ span.AddEvent("Start processing.", eventOptions)
+ var err error
+ md, err = metricsFunc(ctx, md)
+ span.AddEvent("End processing.", eventOptions)
+ if err != nil {
+ if errors.Is(err, ErrSkipProcessingData) {
+ return nil
+ }
+ return err
+ }
+ return nextConsumer.ConsumeMetrics(ctx, md)
+ }, bs.consumerOptions...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &metricsProcessor{
+ StartFunc: bs.StartFunc,
+ ShutdownFunc: bs.ShutdownFunc,
+ Metrics: metricsConsumer,
+ }, nil
+}
diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go
new file mode 100644
index 00000000000..281db20c3a6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/processor.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package processorhelper // import "go.opentelemetry.io/collector/processor/processorhelper"
+
+import (
+ "errors"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics"
+)
+
+// ErrSkipProcessingData is a sentinel value to indicate that traces, metrics, or logs should intentionally be dropped
+// from further processing in the pipeline because the data has been determined to be irrelevant. A processor can return
+// this error to stop further processing without propagating an error back up the pipeline.
+var ErrSkipProcessingData = errors.New("sentinel error to skip processing data from the remainder of the pipeline")
+
+// Option apply changes to internalOptions.
+type Option func(*baseSettings)
+
+// WithStart overrides the default Start function for a processor.
+// The default Start function does nothing and always returns nil.
+func WithStart(start component.StartFunc) Option {
+ return func(o *baseSettings) {
+ o.StartFunc = start
+ }
+}
+
+// WithShutdown overrides the default Shutdown function for a processor.
+// The default shutdown function does nothing and always returns nil.
+func WithShutdown(shutdown component.ShutdownFunc) Option {
+ return func(o *baseSettings) {
+ o.ShutdownFunc = shutdown
+ }
+}
+
+// WithCapabilities overrides the default GetCapabilities function for a processor.
+// The default GetCapabilities function returns mutable capabilities.
+func WithCapabilities(capabilities consumer.Capabilities) Option {
+ return func(o *baseSettings) {
+ o.consumerOptions = append(o.consumerOptions, consumer.WithCapabilities(capabilities))
+ }
+}
+
+type baseSettings struct {
+ component.StartFunc
+ component.ShutdownFunc
+ consumerOptions []consumer.Option
+}
+
+// fromOptions returns the internal settings starting from the default and applying all options.
+func fromOptions(options []Option) *baseSettings {
+ // Start from the default options:
+ opts := &baseSettings{
+ consumerOptions: []consumer.Option{consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})},
+ }
+
+ for _, op := range options {
+ op(opts)
+ }
+
+ return opts
+}
+
+func spanAttributes(id component.ID) trace.EventOption {
+ return trace.WithAttributes(attribute.String(obsmetrics.ProcessorKey, id.String()))
+}
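
The Option hooks above compose as follows; a minimal sketch with placeholder bodies:

```go
package main

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.opentelemetry.io/collector/processor/processorhelper"
)

// helperOptions demonstrates the three hooks; each body is a placeholder.
func helperOptions() []processorhelper.Option {
	return []processorhelper.Option{
		processorhelper.WithStart(func(context.Context, component.Host) error {
			return nil // e.g. dial downstream connections
		}),
		processorhelper.WithShutdown(func(context.Context) error {
			return nil // e.g. flush buffers, stop goroutines
		}),
		// Override the MutatesData default installed by fromOptions
		// when the processor only inspects data.
		processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}),
	}
}
```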
diff --git a/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go b/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go
new file mode 100644
index 00000000000..f349aa62a51
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/processor/processorhelper/traces.go
@@ -0,0 +1,83 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package processorhelper // import "go.opentelemetry.io/collector/processor/processorhelper"
+
+import (
+ "context"
+ "errors"
+
+ "go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/pdata/ptrace"
+ "go.opentelemetry.io/collector/processor"
+)
+
+// ProcessTracesFunc is a helper function that processes the incoming data and returns the data to be sent to the next component.
+// If an error is returned, the returned data is ignored. It MUST NOT call the next component.
+type ProcessTracesFunc func(context.Context, ptrace.Traces) (ptrace.Traces, error)
+
+type tracesProcessor struct {
+ component.StartFunc
+ component.ShutdownFunc
+ consumer.Traces
+}
+
+// NewTracesProcessor creates a processor.Traces that ensures context propagation and that the right tags are set.
+func NewTracesProcessor(
+ _ context.Context,
+ set processor.CreateSettings,
+ _ component.Config,
+ nextConsumer consumer.Traces,
+ tracesFunc ProcessTracesFunc,
+ options ...Option,
+) (processor.Traces, error) {
+ // TODO: Add observability traces support
+ if tracesFunc == nil {
+ return nil, errors.New("nil tracesFunc")
+ }
+
+ if nextConsumer == nil {
+ return nil, component.ErrNilNextConsumer
+ }
+
+ eventOptions := spanAttributes(set.ID)
+ bs := fromOptions(options)
+ traceConsumer, err := consumer.NewTraces(func(ctx context.Context, td ptrace.Traces) error {
+ span := trace.SpanFromContext(ctx)
+ span.AddEvent("Start processing.", eventOptions)
+ var err error
+ td, err = tracesFunc(ctx, td)
+ span.AddEvent("End processing.", eventOptions)
+ if err != nil {
+ if errors.Is(err, ErrSkipProcessingData) {
+ return nil
+ }
+ return err
+ }
+ return nextConsumer.ConsumeTraces(ctx, td)
+ }, bs.consumerOptions...)
+
+ if err != nil {
+ return nil, err
+ }
+
+ return &tracesProcessor{
+ StartFunc: bs.StartFunc,
+ ShutdownFunc: bs.ShutdownFunc,
+ Traces: traceConsumer,
+ }, nil
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 6dfead71274..aba5b7bd104 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -65,6 +65,10 @@ github.com/VividCortex/gohistogram
# github.com/alecthomas/kong v0.7.1
## explicit; go 1.18
github.com/alecthomas/kong
+# github.com/alecthomas/participle/v2 v2.0.0-beta.5
+## explicit; go 1.18
+github.com/alecthomas/participle/v2
+github.com/alecthomas/participle/v2/lexer
# github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
## explicit; go 1.15
github.com/alecthomas/units
@@ -81,6 +85,20 @@ github.com/alicebob/miniredis/v2/server
# github.com/andybalholm/brotli v1.0.4
## explicit; go 1.12
github.com/andybalholm/brotli
+# github.com/antonmedv/expr v1.12.3
+## explicit; go 1.13
+github.com/antonmedv/expr
+github.com/antonmedv/expr/ast
+github.com/antonmedv/expr/builtin
+github.com/antonmedv/expr/checker
+github.com/antonmedv/expr/compiler
+github.com/antonmedv/expr/conf
+github.com/antonmedv/expr/file
+github.com/antonmedv/expr/optimizer
+github.com/antonmedv/expr/parser
+github.com/antonmedv/expr/parser/lexer
+github.com/antonmedv/expr/vm
+github.com/antonmedv/expr/vm/runtime
# github.com/apache/thrift v0.18.1
## explicit; go 1.19
github.com/apache/thrift/lib/go/thrift
@@ -290,6 +308,16 @@ github.com/go-redis/redis/v8/internal/util
# github.com/go-test/deep v1.0.8
## explicit; go 1.16
github.com/go-test/deep
+# github.com/gobwas/glob v0.2.3
+## explicit
+github.com/gobwas/glob
+github.com/gobwas/glob/compiler
+github.com/gobwas/glob/match
+github.com/gobwas/glob/syntax
+github.com/gobwas/glob/syntax/ast
+github.com/gobwas/glob/syntax/lexer
+github.com/gobwas/glob/util/runes
+github.com/gobwas/glob/util/strings
# github.com/gogo/googleapis v1.4.1
## explicit; go 1.12
github.com/gogo/googleapis/google/api
@@ -473,6 +501,9 @@ github.com/hashicorp/serf/coordinate
# github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d
## explicit
github.com/hashicorp/yamux
+# github.com/iancoleman/strcase v0.2.0
+## explicit; go 1.16
+github.com/iancoleman/strcase
# github.com/inconshreveable/mousetrap v1.0.1
## explicit; go 1.18
github.com/inconshreveable/mousetrap
@@ -722,9 +753,36 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/traceutil
+# github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.74.0
+## explicit; go 1.19
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterconfig
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterexpr
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterlog
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermatcher
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filtermetric
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/regexp
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterset/strict
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterspan
# github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.74.0
## explicit; go 1.19
github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.74.0
+## explicit; go 1.19
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/internal/ottlcommon
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspanevent
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs
+# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.74.0
+## explicit; go 1.19
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil
# github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.74.0
## explicit; go 1.19
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger
@@ -736,6 +794,10 @@ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencen
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/internal/zipkin
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2
+# github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.74.0
+## explicit; go 1.19
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor/internal/common
# github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.74.0
## explicit; go 1.19
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver
@@ -1144,6 +1206,7 @@ go.opentelemetry.io/collector/otelcol/internal/configunmarshaler
go.opentelemetry.io/collector/otelcol/internal/grpclog
go.opentelemetry.io/collector/otelcol/internal/sharedgate
go.opentelemetry.io/collector/processor
+go.opentelemetry.io/collector/processor/processorhelper
go.opentelemetry.io/collector/service
go.opentelemetry.io/collector/service/extensions
go.opentelemetry.io/collector/service/internal/capabilityconsumer