Skip to content

Commit

Permalink
Add initial scraping of prometheus targets
Browse files Browse the repository at this point in the history
Add discovery and scraping services

Update discoverers to use ID and be Initers

Remove ApplyConditionalDefaults from all discoverers

Update scraper service config to dash names and Initer

Update scraper service to not block on configuration update

Add service tests for all discoverers

Add redacting to marathon discovery and scraper config

Add validation to scraper service configuration

Add DB/RP to scrape targets data

Add blacklisting to scraper

Update name of file discovery to files

Fix deadlock for scraper service on update before open

Update prom discovery logging to use prom log interface

Update server tests to have discovery/scraper expectations

Add basic configuration for scrapers/discoverers to kapacitor.conf

Remove debug output from circle testing

Update locking for scraper service

Add support for k8s to be a list or a single entry
  • Loading branch information
goller authored and nathanielc committed Apr 29, 2017
1 parent 6eb087a commit dc98421
Show file tree
Hide file tree
Showing 59 changed files with 3,954 additions and 137 deletions.
7 changes: 4 additions & 3 deletions Gopkg.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 5 additions & 0 deletions Gopkg.toml
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,11 @@ required = ["github.com/benbjohnson/tmpl","github.com/golang/protobuf/protoc-gen
branch = "master"
name = "github.com/mitchellh/mapstructure"

[[dependencies]]
branch = "go-discovery-logger"
name = "github.com/prometheus/prometheus"
source = "github.com/goller/prometheus"

[[dependencies]]
branch = "master"
name = "github.com/shurcooL/markdownfmt"
Expand Down
125 changes: 125 additions & 0 deletions etc/kapacitor/kapacitor.conf
Original file line number Diff line number Diff line change
Expand Up @@ -452,3 +452,128 @@ default-retention-policy = ""
batch-size = 1000
batch-pending = 5
batch-timeout = "1s"

#[[scrapers]]
# enabled = false
# name = "myscraper"
# discoverer-id = ""
# discoverer-service = ""
# db = "mydb"
# rp = "myrp"
# type = "prometheus"
# scheme = "http"
# metrics-path = "/metrics"
# scrape-interval = "1m0s"
# scrape-timeout = "10s"
# username = ""
# password = ""
# bearer-token = ""
# ssl-ca = ""
# ssl-cert = ""
# ssl-key = ""
# ssl-server-name = ""
# insecure-skip-verify = false
#
#[[azure]]
# enabled = false
# id = "myazure"
# port = 80
# subscription-id = ""
# tenant-id = ""
# client-id = ""
# client-secret = ""
# refresh-interval = "5m0s"
#
#[[consul]]
# enabled = false
# id = "myconsul"
# address = "127.0.0.1:8500"
# token = ""
# datacenter = ""
# tag-separator = ","
# scheme = "http"
# username = ""
# password = ""
# ssl-ca = ""
# ssl-cert = ""
# ssl-key = ""
# ssl-server-name = ""
# insecure-skip-verify = false
#
#[[dns]]
# enabled = false
# id = "mydns"
# refresh-interval = "30s"
# ## Type can be SRV, A, or AAAA
# type = "SRV"
# ## Port is the port to scrape for records returned by A or AAAA types
# port = 80
#
#[[ec2]]
# enabled = false
# id = "myec2"
# region = "us-east-1"
# access-key = ""
# secret-key = ""
# profile = ""
# refresh-interval = "1m0s"
# port = 80
#
#[[files]]
# enabled = false
# id = "myfile"
# refresh-interval = "5m0s"
#
#[[gce]]
# enabled = false
# id = "mygce"
# project = ""
# zone = ""
# filter = ""
# refresh-interval = "1m0s"
# port = 80
# tag-separator = ","
#
#[[marathon]]
# enabled = false
# id = "mymarathon"
# timeout = "30s"
# refresh-interval = "30s"
# bearer-token = ""
# ssl-ca = ""
# ssl-cert = ""
# ssl-key = ""
# ssl-server-name = ""
# insecure-skip-verify = false
#
#[[nerve]]
# enabled = false
# id = "mynerve"
# timeout = "10s"
#
#[[serverset]]
# enabled = false
# id = "myserverset"
# timeout = "10s"
#
#[[static]]
# enabled = false
# id = "mystatic"
# targets = []
# [static.labels]
#
#[[triton]]
# enabled = false
# id = "mytriton"
# account = ""
# dns-suffix = ""
# endpoint = ""
# port = 9163
# refresh-interval = "1m0s"
# version = 1
# ssl-ca = ""
# ssl-cert = ""
# ssl-key = ""
# ssl-server-name = ""
# insecure-skip-verify = false
#
2 changes: 1 addition & 1 deletion integrations/helpers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ type k8sScales struct {
func (k k8sAutoscale) Versions() (k8s.APIVersions, error) {
return k8s.APIVersions{}, nil
}
func (k k8sAutoscale) Client() (k8s.Client, error) {
func (k k8sAutoscale) Client(string) (k8s.Client, error) {
return k, nil
}
func (k k8sAutoscale) Scales(namespace string) k8s.ScalesInterface {
Expand Down
2 changes: 1 addition & 1 deletion k8s_autoscale.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ type K8sAutoscaleNode struct {

// Create a new K8sAutoscaleNode which can trigger autoscale event for a Kubernetes cluster.
func newK8sAutoscaleNode(et *ExecutingTask, n *pipeline.K8sAutoscaleNode, l *log.Logger) (*K8sAutoscaleNode, error) {
client, err := et.tm.K8sService.Client()
client, err := et.tm.K8sService.Client(n.Cluster)
if err != nil {
return nil, fmt.Errorf("cannot use the k8sAutoscale node, could not create kubernetes client: %v", err)
}
Expand Down
58 changes: 58 additions & 0 deletions listmap/listmap.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
package listmap

import (
"bytes"
"fmt"
"reflect"

"github.com/BurntSushi/toml"
"github.com/pkg/errors"
)

// DoUnmarshalTOML unmarshals either a list of maps or just a single map into dst.
// The argument dst must be a pointer to a slice.
func DoUnmarshalTOML(dst, src interface{}) error {
	out := reflect.Indirect(reflect.ValueOf(dst))
	if !out.CanSet() {
		return errors.New("dst must be settable")
	}
	if out.Kind() != reflect.Slice {
		return errors.New("dst must be a slice")
	}

	// Normalize src into a list of values: a slice contributes each of its
	// elements, a lone map becomes a one-element list.
	in := reflect.ValueOf(src)
	var elems []reflect.Value
	switch k := in.Kind(); k {
	case reflect.Slice:
		elems = make([]reflect.Value, in.Len())
		for i := range elems {
			elems[i] = in.Index(i)
		}
	case reflect.Map:
		elems = []reflect.Value{in}
	default:
		return fmt.Errorf("src must be a slice or map, got %v", k)
	}

	// We want to preserve the TOML decoding behavior exactly, so each source
	// value is re-encoded to TOML text and then decoded again, this time
	// directly into the corresponding element of the destination slice.
	out.Set(reflect.MakeSlice(out.Type(), len(elems), len(elems)))
	var buf bytes.Buffer
	for i, e := range elems {
		buf.Reset()
		if err := toml.NewEncoder(&buf).Encode(e.Interface()); err != nil {
			return errors.Wrap(err, "failed to reencode toml data")
		}
		elem := reflect.New(out.Type().Elem())
		if _, err := toml.Decode(buf.String(), elem.Interface()); err != nil {
			return err
		}
		out.Index(i).Set(reflect.Indirect(elem))
	}
	return nil
}
4 changes: 4 additions & 0 deletions pipeline/k8s_autoscale.go
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,9 @@ const (
type K8sAutoscaleNode struct {
chainnode

// Cluster is the name of the Kubernetes cluster to use.
Cluster string

// Namespace is the namespace of the resource, if empty the default namespace will be used.
Namespace string

Expand Down Expand Up @@ -141,6 +144,7 @@ type K8sAutoscaleNode struct {
func newK8sAutoscaleNode(e EdgeType) *K8sAutoscaleNode {
k := &K8sAutoscaleNode{
chainnode: newBasicChainNode("k8s_autoscale", e, StreamEdge),
Cluster: "default",
Min: 1,
Kind: client.DeploymentsKind,
NamespaceTag: DefaultNamespaceTag,
Expand Down
Loading

0 comments on commit dc98421

Please sign in to comment.