Add ability to add selectors as tags in kube_inventory (influxdata#7267)
jimmyseto authored Jun 18, 2020
1 parent 8c01766 commit c8b2423
Showing 14 changed files with 1,269 additions and 34 deletions.
26 changes: 21 additions & 5 deletions plugins/inputs/kube_inventory/README.md
@@ -4,10 +4,13 @@ This plugin generates metrics derived from the state of the following Kubernetes resources:

- daemonsets
- deployments
- endpoints
- ingress
- nodes
- persistentvolumes
- persistentvolumeclaims
- pods (containers)
- services
- statefulsets

Kubernetes is a fast-moving project, with a new minor release every 3 months. As
@@ -60,6 +63,12 @@ avoid cardinality issues:
## Overrides resource_exclude if both are set.
# resource_include = [ "deployments", "nodes", "statefulsets" ]

## Selectors to include and exclude as tags. Globs accepted.
## Note that an empty array for both will include all selectors as tags;
## selector_exclude overrides selector_include if both are set.
selector_include = []
selector_exclude = ["*"]

## Optional TLS Config
# tls_ca = "/path/to/cafile"
# tls_cert = "/path/to/certfile"
@@ -126,6 +135,7 @@ subjects:
- tags:
- daemonset_name
- namespace
- selector (\*varies)
- fields:
- generation
- current_number_scheduled
@@ -140,6 +150,7 @@ subjects:
- tags:
- deployment_name
- namespace
- selector (\*varies)
- fields:
- replicas_available
- replicas_unavailable
@@ -200,6 +211,7 @@ subjects:
- namespace
- phase
- storageclass
- selector (\*varies)
- fields:
- phase_type (int, [see below](#pvc-phase_type))
@@ -209,6 +221,7 @@
- namespace
- node_name
- pod_name
- node_selector (\*varies)
- state
- readiness
- fields:
@@ -229,6 +242,7 @@
- port_protocol
- external_name
- cluster_ip
- selector (\*varies)
- fields:
- created
- generation
@@ -239,6 +253,7 @@
- tags:
- statefulset_name
- namespace
- selector (\*varies)
- fields:
- created
- generation
@@ -277,14 +292,15 @@ The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated

```
 kubernetes_configmap,configmap_name=envoy-config,namespace=default,resource_version=56593031 created=1544103867000000000i 1547597616000000000
-kubernetes_daemonset,daemonset_name=telegraf,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000
-kubernetes_deployment,deployment_name=deployd,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000
+kubernetes_daemonset,daemonset_name=telegraf,selector_select1=s1,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000
+kubernetes_deployment,deployment_name=deployd,selector_select1=s1,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000
 kubernetes_node,node_name=ip-172-17-0-2.internal allocatable_pods=110i,capacity_memory_bytes=128837533696,capacity_pods=110i,capacity_cpu_cores=16i,allocatable_cpu_cores=16i,allocatable_memory_bytes=128732676096 1547597616000000000
 kubernetes_persistentvolume,phase=Released,pv_name=pvc-aaaaaaaa-bbbb-cccc-1111-222222222222,storageclass=ebs-1-retain phase_type=3i 1547597616000000000
-kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,storageclass=ebs-1-retain phase_type=0i 1547597615000000000
+kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,selector_select1=s1,storageclass=ebs-1-retain phase_type=0i 1547597615000000000
 kubernetes_pod,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1 last_transition_time=1547578322000000000i,ready="false" 1547597616000000000
-kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",resource_requests_memory_bytes=524288000 1547597616000000000
-kubernetes_statefulset,namespace=default,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000
+kubernetes_service,cluster_ip=172.29.61.80,namespace=redis-cache-0001,port_name=redis,port_protocol=TCP,selector_app=myapp,selector_io.kompose.service=redis,selector_role=slave,service_name=redis-slave created=1588690034000000000i,generation=0i,port=6379i,target_port=0i 1547597616000000000
+kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,node_selector_node-role.kubernetes.io/compute=true,pod_name=tick1,state=running,readiness=ready resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,state_reason="",resource_requests_memory_bytes=524288000 1547597616000000000
+kubernetes_statefulset,namespace=default,selector_select1=s1,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000
```
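Putting the selector options together: a minimal configuration sketch that keeps only selectors whose keys begin with `app` (the URL is a placeholder, and every other plugin option is omitted):

```toml
[[inputs.kube_inventory]]
  ## Placeholder API server address; point this at your cluster.
  url = "https://127.0.0.1"

  ## Tag only selectors whose keys start with "app".
  ## Nothing is excluded, so the include list alone decides.
  selector_include = ["app*"]
  selector_exclude = []
```

Note that with the defaults shown earlier (`selector_include = []`, `selector_exclude = ["*"]`) every key is excluded, so no selector tags are emitted until the exclude list is relaxed.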
[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
5 changes: 5 additions & 0 deletions plugins/inputs/kube_inventory/daemonset.go
@@ -38,6 +38,11 @@ func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) error {
"daemonset_name": d.Metadata.GetName(),
"namespace": d.Metadata.GetNamespace(),
}
for key, val := range d.GetSpec().GetSelector().GetMatchLabels() {
if ki.selectorFilter.Match(key) {
tags["selector_"+key] = val
}
}

if d.Metadata.CreationTimestamp.GetSeconds() != 0 {
fields["created"] = time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano()
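The hunk above tests each key with `ki.selectorFilter.Match(key)`, but the filter's construction is not shown in this excerpt. A minimal sketch of `createSelectorFilters`, assuming it delegates to Telegraf's `filter` package — whose `NewIncludeExcludeFilter` already implements the documented semantics (an empty include list matches everything; exclude wins over include):

```go
package kube_inventory

import "github.com/influxdata/telegraf/filter"

// createSelectorFilters compiles the SelectorInclude/SelectorExclude globs
// once so each gather call can test selector keys cheaply.
// Sketch only; the shipped implementation may differ.
func (ki *KubernetesInventory) createSelectorFilters() error {
	f, err := filter.NewIncludeExcludeFilter(ki.SelectorInclude, ki.SelectorExclude)
	if err != nil {
		return err
	}
	ki.selectorFilter = f
	return nil
}
```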
190 changes: 187 additions & 3 deletions plugins/inputs/kube_inventory/daemonset_test.go
@@ -1,6 +1,8 @@
package kube_inventory

import (
"reflect"
"strings"
"testing"
"time"

@@ -12,6 +14,8 @@ import (

func TestDaemonSet(t *testing.T) {
cli := &client{}
selectInclude := []string{}
selectExclude := []string{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())
tests := []struct {
@@ -55,6 +59,14 @@ func TestDaemonSet(t *testing.T) {
},
CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
},
Spec: &v1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
},
},
},
@@ -75,8 +87,10 @@
"created": now.UnixNano(),
},
Tags: map[string]string{
"daemonset_name": "daemon1",
"namespace": "ns1",
"daemonset_name": "daemon1",
"namespace": "ns1",
"selector_select1": "s1",
"selector_select2": "s2",
},
},
},
@@ -87,8 +101,11 @@

for _, v := range tests {
ks := &KubernetesInventory{
-client: cli,
+client: cli,
+SelectorInclude: selectInclude,
+SelectorExclude: selectExclude,
}
ks.createSelectorFilters()
acc := new(testutil.Accumulator)
for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
err := ks.gatherDaemonSet(*dset, acc)
@@ -121,3 +138,170 @@
}
}
}

func TestDaemonSetSelectorFilter(t *testing.T) {
cli := &client{}
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location())

responseMap := map[string]interface{}{
"/daemonsets/": &v1.DaemonSetList{
Items: []*v1.DaemonSet{
{
Status: &v1.DaemonSetStatus{
CurrentNumberScheduled: toInt32Ptr(3),
DesiredNumberScheduled: toInt32Ptr(5),
NumberAvailable: toInt32Ptr(2),
NumberMisscheduled: toInt32Ptr(2),
NumberReady: toInt32Ptr(1),
NumberUnavailable: toInt32Ptr(1),
UpdatedNumberScheduled: toInt32Ptr(2),
},
Metadata: &metav1.ObjectMeta{
Generation: toInt64Ptr(11221),
Namespace: toStrPtr("ns1"),
Name: toStrPtr("daemon1"),
Labels: map[string]string{
"lab1": "v1",
"lab2": "v2",
},
CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())},
},
Spec: &v1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"select1": "s1",
"select2": "s2",
},
},
},
},
},
},
}

tests := []struct {
name string
handler *mockHandler
hasError bool
include []string
exclude []string
expected map[string]string
}{
{
name: "nil filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: nil,
exclude: nil,
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "empty filters equals all selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{},
exclude: []string{},
expected: map[string]string{
"selector_select1": "s1",
"selector_select2": "s2",
},
},
{
name: "include filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"select1"},
exclude: []string{},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude filter equals only non-excluded selectors (overrides include filter)",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{},
exclude: []string{"select2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "include glob filter equals only include-matched selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{"*1"},
exclude: []string{},
expected: map[string]string{
"selector_select1": "s1",
},
},
{
name: "exclude glob filter equals only non-excluded selectors",
handler: &mockHandler{
responseMap: responseMap,
},
hasError: false,
include: []string{},
exclude: []string{"*2"},
expected: map[string]string{
"selector_select1": "s1",
},
},
}
for _, v := range tests {
ks := &KubernetesInventory{
client: cli,
}
ks.SelectorInclude = v.include
ks.SelectorExclude = v.exclude
ks.createSelectorFilters()
acc := new(testutil.Accumulator)
for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items {
err := ks.gatherDaemonSet(*dset, acc)
if err != nil {
t.Errorf("Failed to gather daemonset - %s", err.Error())
}
}

// Grab selector tags
actual := map[string]string{}
for _, metric := range acc.Metrics {
for key, val := range metric.Tags {
if strings.Contains(key, "selector_") {
actual[key] = val
}
}
}

if !reflect.DeepEqual(v.expected, actual) {
t.Fatalf("actual selector tags (%v) do not match expected selector tags (%v)", actual, v.expected)
}
}
}
5 changes: 5 additions & 0 deletions plugins/inputs/kube_inventory/deployment.go
@@ -32,6 +32,11 @@ func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) error {
"deployment_name": d.Metadata.GetName(),
"namespace": d.Metadata.GetNamespace(),
}
for key, val := range d.GetSpec().GetSelector().GetMatchLabels() {
if ki.selectorFilter.Match(key) {
tags["selector_"+key] = val
}
}

acc.AddFields(deploymentMeasurement, fields, tags)

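The same match-labels loop now appears verbatim in `daemonset.go`, `deployment.go`, and (per the file list) the rest of the gatherers. A shared helper — hypothetical, not part of this diff — would keep the `selector_` prefix and filter logic in one place:

```go
package kube_inventory

// setSelectorTags copies filter-matched match labels into a metric's tags
// under the "selector_" prefix used throughout this commit.
// Hypothetical refactor; the commit itself inlines this loop per gatherer.
func (ki *KubernetesInventory) setSelectorTags(tags, matchLabels map[string]string) {
	for key, val := range matchLabels {
		if ki.selectorFilter.Match(key) {
			tags["selector_"+key] = val
		}
	}
}
```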