Merge pull request #2500 from thanos-io/release-0.12
Merge v0.12.1 into master
squat authored Apr 22, 2020
2 parents 822bc7c + 60a2c9e commit 45a6bc4
Showing 14 changed files with 224 additions and 81 deletions.
9 changes: 8 additions & 1 deletion CHANGELOG.md
@@ -9,7 +9,14 @@ NOTE: As semantic versioning states all 0.y.z releases can contain breaking chan

We use *breaking* word for marking changes that are not backward compatible (relates only to v0.y.z releases.)

## Unreleased
## [v0.12.1](https://github.com/thanos-io/thanos/releases/tag/v0.12.1) - 2020.04.20

### Fixed

- [#2411](https://github.com/thanos-io/thanos/pull/2411) Query: fix a bug where queries might not time out sometimes due to issues with one or more StoreAPIs.
- [#2474](https://github.com/thanos-io/thanos/pull/2474) Store: fix a panic caused by concurrent memory access during block filtering.
- [#2472](https://github.com/thanos-io/thanos/pull/2472) Compact: fix a bug where partial blocks were never deleted, causing spam of warnings.
- [#2484](https://github.com/thanos-io/thanos/pull/2484) Query/Ruler: fix issue #2483, when web.route-prefix is set, it is added twice in HTTP router prefix.

### Fixed

2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
0.12.0
0.12.1
2 changes: 1 addition & 1 deletion cmd/thanos/compact.go
@@ -420,7 +420,7 @@ func runCompact(
}

// No need to resync before partial uploads and delete marked blocks. Last sync should be valid.
compact.BestEffortCleanAbortedPartialUploads(ctx, logger, sy.Partial(), bkt, partialUploadDeleteAttempts, blocksMarkedForDeletion)
compact.BestEffortCleanAbortedPartialUploads(ctx, logger, sy.Partial(), bkt, partialUploadDeleteAttempts, blocksCleaned, blockCleanupFailures)
if err := blocksCleaner.DeleteMarkedBlocks(ctx); err != nil {
return errors.Wrap(err, "error cleaning blocks")
}
3 changes: 1 addition & 2 deletions cmd/thanos/query.go
@@ -8,7 +8,6 @@ import (
"fmt"
"math"
"net/http"
"path"
"strings"
"time"

@@ -357,7 +356,7 @@ func runQuery(

api := v1.NewAPI(logger, reg, engine, queryableCreator, enableAutodownsampling, enablePartialResponse, replicaLabels, instantDefaultMaxSourceResolution)

api.Register(router.WithPrefix(path.Join(webRoutePrefix, "/api/v1")), tracer, logger, ins)
api.Register(router.WithPrefix("/api/v1"), tracer, logger, ins)

srv := httpserver.New(logger, reg, comp, httpProbe,
httpserver.WithListen(httpBindAddr),
3 changes: 1 addition & 2 deletions cmd/thanos/rule.go
@@ -8,7 +8,6 @@ import (
"math/rand"
"net/http"
"net/url"
"path"
"path/filepath"
"strconv"
"strings"
@@ -576,7 +575,7 @@ func runRule(
ui.NewRuleUI(logger, reg, ruleMgr, alertQueryURL.String(), webExternalPrefix, webPrefixHeaderName).Register(router, ins)

api := v1.NewAPI(logger, reg, ruleMgr)
api.Register(router.WithPrefix(path.Join(webRoutePrefix, "/api/v1")), tracer, logger, ins)
api.Register(router.WithPrefix("/api/v1"), tracer, logger, ins)

srv := httpserver.New(logger, reg, comp, httpProbe,
httpserver.WithListen(httpBindAddr),
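The query.go and rule.go hunks above both drop the extra `path.Join(webRoutePrefix, "/api/v1")` when registering the v1 API, which is the fix described in changelog entry #2484 (the route prefix ended up in the HTTP router path twice). Below is a minimal sketch, not Thanos code, of how such doubling arises when the handler tree is already served under the prefix; the prefix value is hypothetical:

```go
// Minimal sketch (not Thanos code): if the handler tree is already mounted
// under webRoutePrefix, joining the prefix again while registering the API
// doubles it in the external path.
package main

import (
	"fmt"
	"path"
)

func main() {
	webRoutePrefix := "/thanos" // hypothetical value of --web.route-prefix

	// Before the fix: the API sub-router was created with
	// path.Join(webRoutePrefix, "/api/v1"), but the router as a whole is
	// served under webRoutePrefix too, so the prefix appeared twice.
	before := path.Join(webRoutePrefix, path.Join(webRoutePrefix, "/api/v1"))
	fmt.Println(before) // /thanos/thanos/api/v1

	// After the fix: register plain "/api/v1" and let the outer mount add
	// the prefix exactly once.
	after := path.Join(webRoutePrefix, "/api/v1")
	fmt.Println(after) // /thanos/api/v1
}
```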
2 changes: 1 addition & 1 deletion docs/release-process.md
@@ -33,7 +33,7 @@ Release shepherd responsibilities:
| Release | Time of first RC | Shepherd (GitHub handle) |
|-----------|--------------------------|--------------------------|
| v0.13.0 | (planned) 2020.05.13 | `TBD` |
| v0.12.0 | (planned) 2020.04.01 | `@squat` |
| v0.12.0 | 2020.04.15 | `@squat` |
| v0.11.0 | 2020.02.19 | `@metalmatze` |
| v0.10.0 | 2020.01.08 | `@GiedriusS` |
| v0.9.0 | 2019.11.26 | `@bwplotka` |
3 changes: 3 additions & 0 deletions pkg/block/fetcher.go
@@ -550,6 +550,7 @@ var _ MetadataFilter = &DeduplicateFilter{}
// Not go-routine safe.
type DeduplicateFilter struct {
duplicateIDs []ulid.ULID
mu sync.Mutex
}

// NewDeduplicateFilter creates DeduplicateFilter.
@@ -603,11 +604,13 @@ func (f *DeduplicateFilter) filterForResolution(root *Node, metaSlice []*metadat

duplicateULIDs := getNonRootIDs(root)
for _, id := range duplicateULIDs {
f.mu.Lock()
if metas[id] != nil {
f.duplicateIDs = append(f.duplicateIDs, id)
}
synced.WithLabelValues(duplicateMeta).Inc()
delete(metas, id)
f.mu.Unlock()
}
}

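The fetcher.go hunks add a `sync.Mutex` to `DeduplicateFilter` and hold it around the `duplicateIDs` append and the `delete(metas, id)` call, addressing the panic from concurrent memory access during block filtering (changelog entry #2474). A minimal sketch of the same locking pattern, using placeholder types rather than the actual `Node`/`metadata.Meta` types:

```go
// Minimal sketch (not Thanos code): guard the shared slice and map with a
// mutex so concurrent filtering cannot race on duplicateIDs or metas.
package main

import (
	"fmt"
	"sync"
)

type dedupFilter struct {
	mu           sync.Mutex
	duplicateIDs []string // stands in for []ulid.ULID
}

// filter records and removes duplicate IDs; the mutex makes concurrent calls safe.
func (f *dedupFilter) filter(metas map[string]struct{}, duplicates []string) {
	for _, id := range duplicates {
		f.mu.Lock()
		if _, ok := metas[id]; ok {
			f.duplicateIDs = append(f.duplicateIDs, id)
		}
		delete(metas, id)
		f.mu.Unlock()
	}
}

func main() {
	f := &dedupFilter{}
	metas := map[string]struct{}{"block-a": {}, "block-b": {}}

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f.filter(metas, []string{"block-a", "block-b"})
		}()
	}
	wg.Wait()
	fmt.Println(len(f.duplicateIDs)) // 2: each duplicate is recorded exactly once
}
```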
14 changes: 10 additions & 4 deletions pkg/compact/clean.go
@@ -27,7 +27,8 @@ func BestEffortCleanAbortedPartialUploads(
partial map[ulid.ULID]error,
bkt objstore.Bucket,
deleteAttempts prometheus.Counter,
blocksMarkedForDeletion prometheus.Counter,
blockCleanups prometheus.Counter,
blockCleanupFailures prometheus.Counter,
) {
level.Info(logger).Log("msg", "started cleaning of aborted partial uploads")

@@ -45,10 +46,15 @@

deleteAttempts.Inc()
level.Info(logger).Log("msg", "found partially uploaded block; marking for deletion", "block", id)
if err := block.MarkForDeletion(ctx, logger, bkt, id, blocksMarkedForDeletion); err != nil {
level.Warn(logger).Log("msg", "failed to delete aborted partial upload; skipping", "block", id, "thresholdAge", PartialUploadThresholdAge, "err", err)
return
// We don't gather any information about deletion marks for partial blocks, so let's simply remove it. We waited
// long PartialUploadThresholdAge already.
// TODO(bwplotka): Fix some edge cases: https://github.com/thanos-io/thanos/issues/2470 .
if err := block.Delete(ctx, logger, bkt, id); err != nil {
blockCleanupFailures.Inc()
level.Warn(logger).Log("msg", "failed to delete aborted partial upload; will retry in next iteration", "block", id, "thresholdAge", PartialUploadThresholdAge, "err", err)
continue
}
blockCleanups.Inc()
level.Info(logger).Log("msg", "deleted aborted partial upload", "block", id, "thresholdAge", PartialUploadThresholdAge)
}
level.Info(logger).Log("msg", "cleaning of aborted partial uploads done")
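With this change, `BestEffortCleanAbortedPartialUploads` deletes an aborted partial upload directly via `block.Delete`, bumps a failure counter and `continue`s on error instead of returning early, and counts successful cleanups. A minimal sketch of that best-effort loop, under the assumption that `deleteBlock` stands in for `block.Delete`; the counter names are illustrative:

```go
// Minimal sketch (not the Thanos implementation) of the best-effort cleanup
// loop: a failed delete increments a failure counter and moves on to the next
// block instead of aborting the whole pass.
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	promtest "github.com/prometheus/client_golang/prometheus/testutil"
)

// deleteBlock stands in for block.Delete; here it fails for one block ID.
func deleteBlock(id string) error {
	if id == "broken" {
		return errors.New("object storage unavailable")
	}
	return nil
}

func main() {
	// Counter names are illustrative only; nil registerer skips registration.
	blockCleanups := promauto.With(nil).NewCounter(prometheus.CounterOpts{Name: "aborted_upload_cleanups_total"})
	blockCleanupFailures := promauto.With(nil).NewCounter(prometheus.CounterOpts{Name: "aborted_upload_cleanup_failures_total"})

	for _, id := range []string{"01ABC", "broken", "01DEF"} {
		if err := deleteBlock(id); err != nil {
			blockCleanupFailures.Inc()
			continue // best effort: retry in the next compactor iteration
		}
		blockCleanups.Inc()
	}

	fmt.Println(promtest.ToFloat64(blockCleanups), promtest.ToFloat64(blockCleanupFailures)) // 2 1
}
```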
15 changes: 6 additions & 9 deletions pkg/compact/clean_test.go
@@ -59,22 +59,19 @@ func TestBestEffortCleanAbortedPartialUploads(t *testing.T) {
testutil.Ok(t, bkt.Upload(ctx, path.Join(shouldIgnoreID2.String(), "chunks", "000001"), &fakeChunk))

deleteAttempts := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
blocksMarkedForDeletion := promauto.With(nil).NewCounter(prometheus.CounterOpts{})

blockCleanups := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
blockCleanupFailures := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
_, partial, err := metaFetcher.Fetch(ctx)
testutil.Ok(t, err)

BestEffortCleanAbortedPartialUploads(ctx, logger, partial, bkt, deleteAttempts, blocksMarkedForDeletion)
BestEffortCleanAbortedPartialUploads(ctx, logger, partial, bkt, deleteAttempts, blockCleanups, blockCleanupFailures)
testutil.Equals(t, 1.0, promtest.ToFloat64(deleteAttempts))
testutil.Equals(t, 1.0, promtest.ToFloat64(blockCleanups))
testutil.Equals(t, 0.0, promtest.ToFloat64(blockCleanupFailures))

exists, err := bkt.Exists(ctx, path.Join(shouldDeleteID.String(), "chunks", "000001"))
testutil.Ok(t, err)
testutil.Equals(t, true, exists)

exists, err = bkt.Exists(ctx, path.Join(shouldDeleteID.String(), metadata.DeletionMarkFilename))
testutil.Ok(t, err)
testutil.Equals(t, true, exists)
testutil.Equals(t, 1.0, promtest.ToFloat64(blocksMarkedForDeletion))
testutil.Equals(t, false, exists)

exists, err = bkt.Exists(ctx, path.Join(shouldIgnoreID1.String(), "chunks", "000001"))
testutil.Ok(t, err)