From 3c6309b9451267061d6f296422f542d751b10465 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 1 Dec 2017 14:23:14 -0500 Subject: [PATCH] Use context instead of checking core values directly --- vault/core.go | 7 +++---- vault/expiration.go | 42 ++++++++++++++++++++---------------------- 2 files changed, 23 insertions(+), 26 deletions(-) diff --git a/vault/core.go b/vault/core.go index 5ec32443066c..9a9df431be47 100644 --- a/vault/core.go +++ b/vault/core.go @@ -1498,8 +1498,6 @@ func (c *Core) sealInternal() error { // Signal the standby goroutine to shutdown, wait for completion close(c.standbyStopCh) - c.requestContext = nil - // Release the lock while we wait to avoid deadlocking c.stateLock.Unlock() <-c.standbyDoneCh @@ -1536,9 +1534,8 @@ func (c *Core) postUnseal() (retErr error) { defer metrics.MeasureSince([]string{"core", "post_unseal"}, time.Now()) defer func() { if retErr != nil { + c.requestContextCancelFunc() c.preSeal() - } else { - c.requestContext, c.requestContextCancelFunc = context.WithCancel(context.Background()) } }() c.logger.Info("core: post-unseal setup starting") @@ -1559,6 +1556,8 @@ func (c *Core) postUnseal() (retErr error) { c.seal.SetRecoveryConfig(nil) } + c.requestContext, c.requestContextCancelFunc = context.WithCancel(context.Background()) + if err := enterprisePostUnseal(c); err != nil { return err } diff --git a/vault/expiration.go b/vault/expiration.go index 47e7ba41ff76..8c88ba63f4c8 100644 --- a/vault/expiration.go +++ b/vault/expiration.go @@ -12,6 +12,7 @@ import ( "github.com/armon/go-metrics" log "github.com/mgutz/logxi/v1" + context "golang.org/x/net/context" "github.com/hashicorp/errwrap" multierror "github.com/hashicorp/go-multierror" @@ -51,13 +52,11 @@ const ( // If a secret is not renewed in timely manner, it may be expired, and // the ExpirationManager will handle doing automatic revocation. 
type ExpirationManager struct { - core *Core - router *Router - idView *BarrierView - tokenView *BarrierView - tokenStore *TokenStore - logger log.Logger - coreStateLock *sync.RWMutex + router *Router + idView *BarrierView + tokenView *BarrierView + tokenStore *TokenStore + logger log.Logger pending map[string]*time.Timer pendingLock sync.RWMutex @@ -70,26 +69,30 @@ type ExpirationManager struct { restoreLocks []*locksutil.LockEntry restoreLoaded sync.Map quitCh chan struct{} + + coreStateLock *sync.RWMutex + quitContext context.Context } // NewExpirationManager creates a new ExpirationManager that is backed // using a given view, and uses the provided router for revocation. func NewExpirationManager(c *Core, view *BarrierView) *ExpirationManager { exp := &ExpirationManager{ - core: c, - router: c.router, - idView: view.SubView(leaseViewPrefix), - tokenView: view.SubView(tokenViewPrefix), - tokenStore: c.tokenStore, - logger: c.logger, - pending: make(map[string]*time.Timer), - coreStateLock: &c.stateLock, + router: c.router, + idView: view.SubView(leaseViewPrefix), + tokenView: view.SubView(tokenViewPrefix), + tokenStore: c.tokenStore, + logger: c.logger, + pending: make(map[string]*time.Timer), // new instances of the expiration manager will go immediately into // restore mode restoreMode: 1, restoreLocks: locksutil.CreateLocks(), quitCh: make(chan struct{}), + + coreStateLock: &c.stateLock, + quitContext: c.requestContext, } if exp.logger == nil { @@ -979,13 +982,8 @@ func (m *ExpirationManager) expireID(leaseID string) { } m.coreStateLock.RLock() - if m.core.sealed { - m.logger.Error("expiration: sealed, not attempting further revocation of lease", "lease_id", leaseID) - m.coreStateLock.RUnlock() - return - } - if m.core.standby { - m.logger.Error("expiration: standby, not attempting further revocation of lease", "lease_id", leaseID) + if m.quitContext.Err() == context.Canceled { + m.logger.Error("expiration: core context canceled, not attempting further revocation of lease", "lease_id", leaseID) m.coreStateLock.RUnlock() return }