From 1f3957339d3d5ece5335b1c0c88eec17ee5aa353 Mon Sep 17 00:00:00 2001
From: Jaime Soriano Pastor
Date: Tue, 26 Feb 2019 18:32:37 +0100
Subject: [PATCH 1/6] Ensure kubernetes caches don't expire if they are being read

---
 CHANGELOG.next.asciidoc                       |  1 +
 .../module/kubernetes/util/metrics_cache.go   | 80 +++++++++++--------
 .../kubernetes/util/metrics_cache_test.go     | 43 ++++++----
 3 files changed, 75 insertions(+), 49 deletions(-)

diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc
index adbe33d0e6f0..a868105a791b 100644
--- a/CHANGELOG.next.asciidoc
+++ b/CHANGELOG.next.asciidoc
@@ -176,6 +176,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d
 - Added function to close sql database connection. {pull}10355[10355]
 - Fix issue with `elasticsearch/node_stats` metricset (x-pack) not indexing `source_node` field. {pull}10639[10639]
 - Migrate docker autodiscover to ECS. {issue}10757[10757] {pull}10862[10862]
+- Fix issue in kubernetes module preventing usage percentages from being properly calculated. {pull}10946[10946]
 
 *Packetbeat*
 
diff --git a/metricbeat/module/kubernetes/util/metrics_cache.go b/metricbeat/module/kubernetes/util/metrics_cache.go
index ffa8f235ed52..76644f36656a 100644
--- a/metricbeat/module/kubernetes/util/metrics_cache.go
+++ b/metricbeat/module/kubernetes/util/metrics_cache.go
@@ -18,6 +18,7 @@ package util
 
 import (
+    "context"
     "sync"
     "time"
 )
@@ -28,22 +29,22 @@ var PerfMetrics = NewPerfMetricsCache()
 const defaultTimeout = 120 * time.Second
 
 var now = time.Now
-var sleep = time.Sleep
+var after = time.After
 
 // NewPerfMetricsCache initializes and returns a new PerfMetricsCache
 func NewPerfMetricsCache() *PerfMetricsCache {
+    ctx := context.TODO()
     return &PerfMetricsCache{
-        NodeMemAllocatable:   newValueMap(defaultTimeout),
-        NodeCoresAllocatable: newValueMap(defaultTimeout),
+        NodeMemAllocatable:   newValueMap(ctx, defaultTimeout),
+        NodeCoresAllocatable: newValueMap(ctx, defaultTimeout),
 
-        ContainerMemLimit:   newValueMap(defaultTimeout),
-        ContainerCoresLimit: newValueMap(defaultTimeout),
+        ContainerMemLimit:   newValueMap(ctx, defaultTimeout),
+        ContainerCoresLimit: newValueMap(ctx, defaultTimeout),
     }
 }
 
 // PerfMetricsCache stores known metrics from Kubernetes nodes and containers
 type PerfMetricsCache struct {
-    mutex sync.RWMutex
     NodeMemAllocatable   *valueMap
     NodeCoresAllocatable *valueMap
 
@@ -51,18 +52,19 @@ type PerfMetricsCache struct {
     ContainerCoresLimit *valueMap
 }
 
-func newValueMap(timeout time.Duration) *valueMap {
-    return &valueMap{
-        values:  map[string]value{},
+func newValueMap(ctx context.Context, timeout time.Duration) *valueMap {
+    m := &valueMap{
+        values:  map[string]*value{},
         timeout: timeout,
     }
+    m.startWorkers(ctx)
+    return m
 }
 
 type valueMap struct {
-    sync.RWMutex
-    running bool
+    sync.Mutex
     timeout time.Duration
-    values  map[string]value
+    values  map[string]*value
 }
 
 type value struct {
@@ -70,6 +72,10 @@ type value struct {
     expires int64
 }
 
+func (v *value) renew(timeout time.Duration) {
+    v.expires = now().Add(timeout).Unix()
+}
+
 // ContainerUID creates an unique ID for from namespace, pod name and container name
 func ContainerUID(namespace, pod, container string) string {
     return namespace + "-" + pod + "-" + container
@@ -77,17 +83,20 @@ func ContainerUID(namespace, pod, container string) string {
 
 // Get value
 func (m *valueMap) Get(name string) float64 {
-    m.RLock()
-    defer m.RUnlock()
-    return m.values[name].value
+    return m.getWithDefault(name, 0)
 }
 
 // Get value
 func (m *valueMap) GetWithDefault(name string, def float64) float64 {
-    m.RLock()
-    defer m.RUnlock()
+    return m.getWithDefault(name, def)
+}
+
+func (m *valueMap) getWithDefault(name string, def float64) float64 {
+    m.Lock()
+    defer m.Unlock()
     val, ok := m.values[name]
     if ok {
+        val.renew(m.timeout)
         return val.value
     }
     return def
@@ -97,26 +106,27 @@ func (m *valueMap) GetWithDefault(name string, def float64) float64 {
 func (m *valueMap) Set(name string, val float64) {
     m.Lock()
     defer m.Unlock()
-    m.ensureCleanupWorker()
-    m.values[name] = value{val, now().Add(m.timeout).Unix()}
+    v := &value{value: val}
+    v.renew(m.timeout)
+    m.values[name] = v
 }
 
-func (m *valueMap) ensureCleanupWorker() {
-    if !m.running {
-        // Run worker to cleanup expired entries
-        m.running = true
-        go func() {
-            for {
-                sleep(m.timeout)
-                m.Lock()
-                now := now().Unix()
-                for name, val := range m.values {
-                    if now > val.expires {
-                        delete(m.values, name)
-                    }
+func (m *valueMap) startWorkers(ctx context.Context) {
+    go func() {
+        for {
+            var now time.Time
+            select {
+            case now = <-after(m.timeout):
+            case <-ctx.Done():
+                return
+            }
+            m.Lock()
+            for name, val := range m.values {
+                if now.Unix() > val.expires {
+                    delete(m.values, name)
                 }
-                m.Unlock()
             }
-        }()
-    }
+            m.Unlock()
+        }
+    }()
 }
diff --git a/metricbeat/module/kubernetes/util/metrics_cache_test.go b/metricbeat/module/kubernetes/util/metrics_cache_test.go
index d5ce7bd2bb89..227bc29c42ed 100644
--- a/metricbeat/module/kubernetes/util/metrics_cache_test.go
+++ b/metricbeat/module/kubernetes/util/metrics_cache_test.go
@@ -18,6 +18,7 @@ package util
 
 import (
+    "context"
     "testing"
     "time"
 
@@ -26,40 +27,52 @@ import (
 func TestTimeout(t *testing.T) {
     // Mock monotonic time:
-    fakeTimeCh := make(chan int64)
+    fakeTimeCh := make(chan time.Time)
+    fakeTime := time.Now()
     go func() {
-        fakeTime := time.Now().Unix()
         for {
-            fakeTime++
+            fakeTime = fakeTime.Add(1 * time.Millisecond)
             fakeTimeCh <- fakeTime
         }
     }()
 
     now = func() time.Time {
-        return time.Unix(<-fakeTimeCh, 0)
+        return <-fakeTimeCh
     }
 
-    // Blocking sleep:
-    sleepCh := make(chan struct{})
-    sleep = func(time.Duration) {
-        <-sleepCh
+    // Blocking after:
+    afterCh := make(chan time.Time)
+    after = func(time.Duration) <-chan time.Time {
+        return afterCh
     }
 
-    test := newValueMap(1 * time.Second)
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+    test := newValueMap(ctx, defaultTimeout)
+
     test.Set("foo", 3.14)
 
+    // Check it is not removed if it is being read
+    for i := 0; i < 20; i++ {
+        fakeTime = fakeTime.Add(10 * time.Second)
+        afterCh <- fakeTime
+        assert.Equal(t, 3.14, test.Get("foo"))
+    }
+
     // Let cleanup do its job
-    sleepCh <- struct{}{}
-    sleepCh <- struct{}{}
-    sleepCh <- struct{}{}
+    for i := 0; i < 3; i++ {
+        fakeTime = fakeTime.Add(defaultTimeout)
+        afterCh <- fakeTime
+    }
 
     // Check it expired
     assert.Equal(t, 0.0, test.Get("foo"))
 }
 
 func TestValueMap(t *testing.T) {
-    test := newValueMap(defaultTimeout)
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+    test := newValueMap(ctx, defaultTimeout)
 
     // no value
     assert.Equal(t, 0.0, test.Get("foo"))
@@ -70,7 +83,9 @@ func TestValueMap(t *testing.T) {
 }
 
 func TestGetWithDefault(t *testing.T) {
-    test := newValueMap(defaultTimeout)
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+    test := newValueMap(ctx, defaultTimeout)
 
     // Empty + default
     assert.Equal(t, 0.0, test.Get("foo"))

From a7637b7c5c19974f25b0d577b8d13780a717c76b Mon Sep 17 00:00:00 2001
From: Jaime Soriano Pastor
Date: Wed, 27 Feb 2019 15:11:04 +0100
Subject: [PATCH 2/6] Avoid globals and fix race conditions

---
 .../module/kubernetes/util/metrics_cache.go   | 76 ++++++++++++-------
 .../kubernetes/util/metrics_cache_test.go     | 67 ++++++++--------
 2 files changed, 84 insertions(+), 59 deletions(-)

diff --git a/metricbeat/module/kubernetes/util/metrics_cache.go b/metricbeat/module/kubernetes/util/metrics_cache.go
index 76644f36656a..8c8aa429952c 100644
--- a/metricbeat/module/kubernetes/util/metrics_cache.go
+++ b/metricbeat/module/kubernetes/util/metrics_cache.go
@@ -18,7 +18,6 @@ package util
 
 import (
-    "context"
     "sync"
     "time"
 )
@@ -28,18 +27,14 @@ var PerfMetrics = NewPerfMetricsCache()
 
 const defaultTimeout = 120 * time.Second
 
-var now = time.Now
-var after = time.After
-
 // NewPerfMetricsCache initializes and returns a new PerfMetricsCache
 func NewPerfMetricsCache() *PerfMetricsCache {
-    ctx := context.TODO()
     return &PerfMetricsCache{
-        NodeMemAllocatable:   newValueMap(ctx, defaultTimeout),
-        NodeCoresAllocatable: newValueMap(ctx, defaultTimeout),
+        NodeMemAllocatable:   newValueMap(defaultTimeout),
+        NodeCoresAllocatable: newValueMap(defaultTimeout),
 
-        ContainerMemLimit:   newValueMap(ctx, defaultTimeout),
-        ContainerCoresLimit: newValueMap(ctx, defaultTimeout),
+        ContainerMemLimit:   newValueMap(defaultTimeout),
+        ContainerCoresLimit: newValueMap(defaultTimeout),
     }
 }
 
@@ -52,19 +47,44 @@ type PerfMetricsCache struct {
     ContainerCoresLimit *valueMap
 }
 
-func newValueMap(ctx context.Context, timeout time.Duration) *valueMap {
-    m := &valueMap{
+func newValueMap(timeout time.Duration) *valueMap {
+    return newValueMapWithClock(timeout, clock{})
+}
+
+func newValueMapWithClock(timeout time.Duration, clock clock) *valueMap {
+    m := valueMap{
         values:  map[string]*value{},
         timeout: timeout,
+        time:    clock,
+    }
+    m.startWorkers()
+    return &m
+}
+
+type clock struct {
+    now   func() time.Time
+    after func(time.Duration) <-chan time.Time
+}
+
+func (c *clock) Now() time.Time {
+    if c.now != nil {
+        return c.now()
+    }
+    return time.Now()
+}
+
+func (c *clock) After(d time.Duration) <-chan time.Time {
+    if c.after != nil {
+        return c.after(d)
     }
-    m.startWorkers(ctx)
-    return m
+    return time.After(d)
 }
 
 type valueMap struct {
     sync.Mutex
     timeout time.Duration
     values  map[string]*value
+    time    clock
 }
 
 type value struct {
@@ -72,10 +92,6 @@ type value struct {
     expires int64
 }
 
-func (v *value) renew(timeout time.Duration) {
-    v.expires = now().Add(timeout).Unix()
-}
-
 // ContainerUID creates an unique ID for from namespace, pod name and container name
 func ContainerUID(namespace, pod, container string) string {
     return namespace + "-" + pod + "-" + container
@@ -96,7 +112,7 @@ func (m *valueMap) getWithDefault(name string, def float64) float64 {
     defer m.Unlock()
     val, ok := m.values[name]
     if ok {
-        val.renew(m.timeout)
+        m.renew(val)
         return val.value
     }
     return def
@@ -106,20 +122,24 @@ func (m *valueMap) getWithDefault(name string, def float64) float64 {
 func (m *valueMap) Set(name string, val float64) {
     m.Lock()
     defer m.Unlock()
-    v := &value{value: val}
-    v.renew(m.timeout)
-    m.values[name] = v
+    v, ok := m.values[name]
+    if ok {
+        v.value = val
+    } else {
+        v = &value{value: val}
+        m.values[name] = v
+    }
+    m.renew(v)
+}
+
+func (m *valueMap) renew(v *value) {
+    v.expires = m.time.Now().Add(m.timeout).Unix()
 }
 
-func (m *valueMap) startWorkers(ctx context.Context) {
+func (m *valueMap) startWorkers() {
     go func() {
         for {
-            var now time.Time
-            select {
-            case now = <-after(m.timeout):
-            case <-ctx.Done():
-                return
-            }
+            now := <-m.time.After(m.timeout)
             m.Lock()
             for name, val := range m.values {
                 if now.Unix() > val.expires {
diff --git a/metricbeat/module/kubernetes/util/metrics_cache_test.go b/metricbeat/module/kubernetes/util/metrics_cache_test.go
index 227bc29c42ed..152031c45807 100644
--- a/metricbeat/module/kubernetes/util/metrics_cache_test.go
+++ b/metricbeat/module/kubernetes/util/metrics_cache_test.go
@@ -18,61 +18,68 @@ package util
 
 import (
-    "context"
+    "sync"
     "testing"
     "time"
 
     "github.com/stretchr/testify/assert"
 )
 
+type fakeClock struct {
+    sync.RWMutex
+    time int64
+}
+
+func (c *fakeClock) get() time.Time {
+    c.Lock()
+    defer c.Unlock()
+    c.time++
+    return time.Unix(c.time, 0)
+}
+
+func (c *fakeClock) advance(n int64) {
+    c.Lock()
+    defer c.Unlock()
+    c.time += n
+}
+
 func TestTimeout(t *testing.T) {
     // Mock monotonic time:
-    fakeTimeCh := make(chan time.Time)
-    fakeTime := time.Now()
-    go func() {
-        for {
-            fakeTime = fakeTime.Add(1 * time.Millisecond)
-            fakeTimeCh <- fakeTime
-        }
-    }()
-
-    now = func() time.Time {
-        return <-fakeTimeCh
-    }
+    fakeTime := fakeClock{}
 
     // Blocking after:
     afterCh := make(chan time.Time)
-    after = func(time.Duration) <-chan time.Time {
+    after := func(time.Duration) <-chan time.Time {
         return afterCh
     }
 
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    test := newValueMap(ctx, defaultTimeout)
-
+    c := clock{
+        now:   fakeTime.get,
+        after: after,
+    }
+    test := newValueMapWithClock(defaultTimeout, c)
     test.Set("foo", 3.14)
 
     // Check it is not removed if it is being read
-    for i := 0; i < 20; i++ {
-        fakeTime = fakeTime.Add(10 * time.Second)
-        afterCh <- fakeTime
+    for i := 0; i < 4; i++ {
         assert.Equal(t, 3.14, test.Get("foo"))
+        fakeTime.advance(int64(defaultTimeout.Seconds()) / 2)
+        afterCh <- fakeTime.get()
+        afterCh <- fakeTime.get()
     }
 
     // Let cleanup do its job
-    for i := 0; i < 3; i++ {
-        fakeTime = fakeTime.Add(defaultTimeout)
-        afterCh <- fakeTime
-    }
+    fakeTime.advance(int64(defaultTimeout.Seconds()))
+    afterCh <- fakeTime.get()
+    afterCh <- fakeTime.get()
+    afterCh <- fakeTime.get()
 
     // Check it expired
     assert.Equal(t, 0.0, test.Get("foo"))
 }
 
 func TestValueMap(t *testing.T) {
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    test := newValueMap(ctx, defaultTimeout)
+    test := newValueMap(defaultTimeout)
 
     // no value
     assert.Equal(t, 0.0, test.Get("foo"))
@@ -83,9 +90,7 @@ func TestValueMap(t *testing.T) {
 }
 
 func TestGetWithDefault(t *testing.T) {
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    test := newValueMap(ctx, defaultTimeout)
+    test := newValueMap(defaultTimeout)
 
     // Empty + default
     assert.Equal(t, 0.0, test.Get("foo"))

From 7249a729d09d4605cc415dd1bccf2839a8d4c215 Mon Sep 17 00:00:00 2001
From: Jaime Soriano Pastor
Date: Wed, 27 Feb 2019 15:15:09 +0100
Subject: [PATCH 3/6] Remove unneeded line

---
 metricbeat/module/kubernetes/util/metrics_cache_test.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/metricbeat/module/kubernetes/util/metrics_cache_test.go b/metricbeat/module/kubernetes/util/metrics_cache_test.go
index 152031c45807..bec781f8525c 100644
--- a/metricbeat/module/kubernetes/util/metrics_cache_test.go
+++ b/metricbeat/module/kubernetes/util/metrics_cache_test.go
@@ -72,7 +72,6 @@ func TestTimeout(t *testing.T) {
     fakeTime.advance(int64(defaultTimeout.Seconds()))
     afterCh <- fakeTime.get()
     afterCh <- fakeTime.get()
-    afterCh <- fakeTime.get()
 
     // Check it expired
     assert.Equal(t, 0.0, test.Get("foo"))

From 0ad1a6373be1b2c8e336d4dbafeff382bd9e9990 Mon Sep 17 00:00:00 2001
From: Jaime Soriano Pastor
Date: Wed, 27 Feb 2019 16:42:34 +0100
Subject: [PATCH 4/6] No read lock used, changing to simple mutex

---
 metricbeat/module/kubernetes/util/metrics_cache_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/metricbeat/module/kubernetes/util/metrics_cache_test.go b/metricbeat/module/kubernetes/util/metrics_cache_test.go
index bec781f8525c..cd24cc2b8188 100644
--- a/metricbeat/module/kubernetes/util/metrics_cache_test.go
+++ b/metricbeat/module/kubernetes/util/metrics_cache_test.go
@@ -26,7 +26,7 @@ import (
 )
 
 type fakeClock struct {
-    sync.RWMutex
+    sync.Mutex
     time int64
 }
 

From a3376e12c2d1f2f523d1fc0ca2791cdc72d0b847 Mon Sep 17 00:00:00 2001
From: Jaime Soriano Pastor
Date: Thu, 28 Feb 2019 19:54:01 +0100
Subject: [PATCH 5/6] Use common cache for kubernetes caches

---
 .../module/kubernetes/util/metrics_cache.go   | 117 ++++++------------
 .../kubernetes/util/metrics_cache_test.go     |  54 --------
 2 files changed, 40 insertions(+), 131 deletions(-)

diff --git a/metricbeat/module/kubernetes/util/metrics_cache.go b/metricbeat/module/kubernetes/util/metrics_cache.go
index 8c8aa429952c..b63d7ccbf76b 100644
--- a/metricbeat/module/kubernetes/util/metrics_cache.go
+++ b/metricbeat/module/kubernetes/util/metrics_cache.go
@@ -18,13 +18,18 @@ package util
 
 import (
-    "sync"
     "time"
+
+    "github.com/elastic/beats/libbeat/common"
 )
 
 // PerfMetrics stores known metrics from Kubernetes nodes and containers
 var PerfMetrics = NewPerfMetricsCache()
 
+func init() {
+    PerfMetrics.Start()
+}
+
 const defaultTimeout = 120 * time.Second
 
 // NewPerfMetricsCache initializes and returns a new PerfMetricsCache
@@ -47,106 +52,64 @@ type PerfMetricsCache struct {
     ContainerCoresLimit *valueMap
 }
 
-func newValueMap(timeout time.Duration) *valueMap {
-    return newValueMapWithClock(timeout, clock{})
-}
-
-func newValueMapWithClock(timeout time.Duration, clock clock) *valueMap {
-    m := valueMap{
-        values:  map[string]*value{},
-        timeout: timeout,
-        time:    clock,
-    }
-    m.startWorkers()
-    return &m
+// Start cache workers
+func (c *PerfMetricsCache) Start() {
+    c.NodeMemAllocatable.Start()
+    c.NodeCoresAllocatable.Start()
+    c.ContainerMemLimit.Start()
+    c.ContainerCoresLimit.Start()
 }
 
-type clock struct {
-    now   func() time.Time
-    after func(time.Duration) <-chan time.Time
-}
-
-func (c *clock) Now() time.Time {
-    if c.now != nil {
-        return c.now()
-    }
-    return time.Now()
-}
-
-func (c *clock) After(d time.Duration) <-chan time.Time {
-    if c.after != nil {
-        return c.after(d)
-    }
-    return time.After(d)
+// Stop cache workers
+func (c *PerfMetricsCache) Stop() {
+    c.NodeMemAllocatable.Stop()
+    c.NodeCoresAllocatable.Stop()
+    c.ContainerMemLimit.Stop()
+    c.ContainerCoresLimit.Stop()
 }
 
 type valueMap struct {
-    sync.Mutex
+    cache   *common.Cache
     timeout time.Duration
-    values  map[string]*value
-    time    clock
-}
-
-type value struct {
-    value   float64
-    expires int64
 }
 
-// ContainerUID creates an unique ID for from namespace, pod name and container name
-func ContainerUID(namespace, pod, container string) string {
-    return namespace + "-" + pod + "-" + container
+func newValueMap(timeout time.Duration) *valueMap {
+    return &valueMap{
+        cache:   common.NewCache(timeout, 0),
+        timeout: timeout,
+    }
 }
 
 // Get value
 func (m *valueMap) Get(name string) float64 {
-    return m.getWithDefault(name, 0)
+    return m.GetWithDefault(name, 0.0)
 }
 
 // Get value
 func (m *valueMap) GetWithDefault(name string, def float64) float64 {
-    return m.getWithDefault(name, def)
-}
-
-func (m *valueMap) getWithDefault(name string, def float64) float64 {
-    m.Lock()
-    defer m.Unlock()
-    val, ok := m.values[name]
-    if ok {
-        m.renew(val)
-        return val.value
+    v := m.cache.Get(name)
+    if v, ok := v.(float64); ok {
+        return v
     }
     return def
 }
 
 // Set value
 func (m *valueMap) Set(name string, val float64) {
-    m.Lock()
-    defer m.Unlock()
-    v, ok := m.values[name]
-    if ok {
-        v.value = val
-    } else {
-        v = &value{value: val}
-        m.values[name] = v
-    }
-    m.renew(v)
+    m.cache.PutWithTimeout(name, val, m.timeout)
+}
+
+// Start cache workers
+func (m *valueMap) Start() {
+    m.cache.StartJanitor(m.timeout)
 }
 
-func (m *valueMap) renew(v *value) {
-    v.expires = m.time.Now().Add(m.timeout).Unix()
+// Stop cache workers
+func (m *valueMap) Stop() {
+    m.cache.StopJanitor()
 }
 
-func (m *valueMap) startWorkers() {
-    go func() {
-        for {
-            now := <-m.time.After(m.timeout)
-            m.Lock()
-            for name, val := range m.values {
-                if now.Unix() > val.expires {
-                    delete(m.values, name)
-                }
-            }
-            m.Unlock()
-        }
-    }()
+// ContainerUID creates an unique ID for from namespace, pod name and container name
+func ContainerUID(namespace, pod, container string) string {
+    return namespace + "-" + pod + "-" + container
 }
diff --git a/metricbeat/module/kubernetes/util/metrics_cache_test.go b/metricbeat/module/kubernetes/util/metrics_cache_test.go
index cd24cc2b8188..4ee253709503 100644
--- a/metricbeat/module/kubernetes/util/metrics_cache_test.go
+++ b/metricbeat/module/kubernetes/util/metrics_cache_test.go
@@ -18,65 +18,11 @@ package util
 
 import (
-    "sync"
     "testing"
-    "time"
 
     "github.com/stretchr/testify/assert"
 )
 
-type fakeClock struct {
-    sync.Mutex
-    time int64
-}
-
-func (c *fakeClock) get() time.Time {
-    c.Lock()
-    defer c.Unlock()
-    c.time++
-    return time.Unix(c.time, 0)
-}
-
-func (c *fakeClock) advance(n int64) {
-    c.Lock()
-    defer c.Unlock()
-    c.time += n
-}
-
-func TestTimeout(t *testing.T) {
-    // Mock monotonic time:
-    fakeTime := fakeClock{}
-
-    // Blocking after:
-    afterCh := make(chan time.Time)
-    after := func(time.Duration) <-chan time.Time {
-        return afterCh
-    }
-
-    c := clock{
-        now:   fakeTime.get,
-        after: after,
-    }
-    test := newValueMapWithClock(defaultTimeout, c)
-    test.Set("foo", 3.14)
-
-    // Check it is not removed if it is being read
-    for i := 0; i < 4; i++ {
-        assert.Equal(t, 3.14, test.Get("foo"))
-        fakeTime.advance(int64(defaultTimeout.Seconds()) / 2)
-        afterCh <- fakeTime.get()
-        afterCh <- fakeTime.get()
-    }
-
-    // Let cleanup do its job
-    fakeTime.advance(int64(defaultTimeout.Seconds()))
-    afterCh <- fakeTime.get()
-    afterCh <- fakeTime.get()
-
-    // Check it expired
-    assert.Equal(t, 0.0, test.Get("foo"))
-}
-
 func TestValueMap(t *testing.T) {
     test := newValueMap(defaultTimeout)
 

From 304bec53333af7343582a4c2fd436faba575a268 Mon Sep 17 00:00:00 2001
From: Jaime Soriano Pastor
Date: Thu, 28 Feb 2019 20:01:09 +0100
Subject: [PATCH 6/6] Use a character not valid for kubernetes names to join values for unique UID

---
 metricbeat/module/kubernetes/util/metrics_cache.go      | 2 +-
 metricbeat/module/kubernetes/util/metrics_cache_test.go | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/metricbeat/module/kubernetes/util/metrics_cache.go b/metricbeat/module/kubernetes/util/metrics_cache.go
index b63d7ccbf76b..7ffff06edfea 100644
--- a/metricbeat/module/kubernetes/util/metrics_cache.go
+++ b/metricbeat/module/kubernetes/util/metrics_cache.go
@@ -111,5 +111,5 @@ func (m *valueMap) Stop() {
 
 // ContainerUID creates an unique ID for from namespace, pod name and container name
 func ContainerUID(namespace, pod, container string) string {
-    return namespace + "-" + pod + "-" + container
+    return namespace + "/" + pod + "/" + container
 }
diff --git a/metricbeat/module/kubernetes/util/metrics_cache_test.go b/metricbeat/module/kubernetes/util/metrics_cache_test.go
index 4ee253709503..649c1f5fb868 100644
--- a/metricbeat/module/kubernetes/util/metrics_cache_test.go
+++ b/metricbeat/module/kubernetes/util/metrics_cache_test.go
@@ -47,5 +47,5 @@ func TestGetWithDefault(t *testing.T) {
 }
 
 func TestContainerUID(t *testing.T) {
-    assert.Equal(t, "a-b-c", ContainerUID("a", "b", "c"))
+    assert.Equal(t, "a/b/c", ContainerUID("a", "b", "c"))
 }
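
For context, and not part of the patches themselves: the short Go sketch below illustrates how the reworked cache is meant to be consumed by the kubernetes metricsets once patches 5 and 6 are applied. The node, pod and container names are made-up values, the import path simply mirrors the github.com paths used in the diffs above, and only functions that appear in the patches (PerfMetrics, Set, Get, GetWithDefault, Start/Stop, ContainerUID) are used.

package main

import (
    "fmt"

    "github.com/elastic/beats/metricbeat/module/kubernetes/util"
)

func main() {
    // The package-level cache is created and started by util's init()
    // (patch 5); Start/Stop only manage the janitor goroutines.
    cache := util.PerfMetrics

    // A state metricset records what the node can allocate...
    cache.NodeCoresAllocatable.Set("node-1", 4)

    // ...and a usage metricset reads it later to derive a percentage,
    // which is the calculation the changelog entry refers to.
    usedCores := 1.2
    allocatable := cache.NodeCoresAllocatable.GetWithDefault("node-1", 0)
    if allocatable > 0 {
        fmt.Printf("node cpu usage: %.1f%%\n", 100*usedCores/allocatable)
    }

    // Container entries are keyed with ContainerUID; after patch 6 the parts
    // are joined with "/", which cannot appear in kubernetes names, so IDs
    // from different namespaces, pods and containers cannot collide.
    uid := util.ContainerUID("kube-system", "metricbeat-abc12", "metricbeat")
    cache.ContainerMemLimit.Set(uid, 200*1024*1024)
    fmt.Println(uid, cache.ContainerMemLimit.Get(uid))
}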