Seal migration with Raft (#8103)
* Seal migration after unsealing

* Refactor migration fields into migrationInformation in core

* Perform seal migration as part of postUnseal

* Remove the sleep logic

* Use proper seal in the unseal function

* Fix migration from Auto to Shamir

* Fix the recovery config missing issue

* Address the non-HA migration case

* Fix the multi cluster case

* Avoid re-running seal migration

* Run the post migration code in new leaders

* Fix the issue of wrong recovery being set

* Address review feedback

* Add more complete testing coverage for seal migrations.   (#8247)

* Add more complete testing coverage for seal migrations. Also remove the VAULT_ACC gate from some tests that just depend on Docker, clean up the dangling recovery config in storage after migration, and fix a call in adjustCoreForSealMigration that seems broken.

* Fix the issue of wrong recovery key being set

* Adapt tests to work with multiple cores.

* Add missing line to disable raft join.

Co-authored-by: Vishal Nayak <[email protected]>

* Fix all known issues

* Remove warning

* Review feedback.

* Revert my previous change that broke raft tests.  We'll need to come back and at least comment
this once we better understand why it's needed.

* Don't allow migration between same types for now

* Disable auto-to-auto tests for now, since they use migration between same seal types, which is not allowed

* Update vault/core.go

Co-Authored-By: Brian Kassouf <[email protected]>

* Add migration logs

* Address review comments

* Add the recovery config check back

* Skip a few steps if migration is already done

* Return from waitForLeadership if migration fails

Co-authored-by: ncabatoff <[email protected]>
Co-authored-by: Brian Kassouf <[email protected]>
3 people authored Feb 13, 2020
1 parent ab473c6 commit 9f980ad
Showing 8 changed files with 625 additions and 575 deletions.
152 changes: 152 additions & 0 deletions command/seal_migration_oss_test.go
@@ -0,0 +1,152 @@
// +build !enterprise

package command

import (
	"context"
	"encoding/base64"
	"testing"

	wrapping "github.com/hashicorp/go-kms-wrapping"
	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/helper/testhelpers"
	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
	vaulthttp "github.com/hashicorp/vault/http"
	"github.com/hashicorp/vault/vault"
	"github.com/hashicorp/vault/vault/seal"
)

func TestSealMigration_AutoToShamir(t *testing.T) {
	t.Parallel()
	t.Run("inmem", func(t *testing.T) {
		t.Parallel()
		testSealMigrationAutoToShamir(t, teststorage.InmemBackendSetup)
	})

	t.Run("file", func(t *testing.T) {
		t.Parallel()
		testSealMigrationAutoToShamir(t, teststorage.FileBackendSetup)
	})

	t.Run("consul", func(t *testing.T) {
		t.Parallel()
		testSealMigrationAutoToShamir(t, teststorage.ConsulBackendSetup)
	})

	t.Run("raft", func(t *testing.T) {
		t.Parallel()
		testSealMigrationAutoToShamir(t, teststorage.RaftBackendSetup)
	})
}

func testSealMigrationAutoToShamir(t *testing.T, setup teststorage.ClusterSetupMutator) {
	// Create a transit seal server to act as the auto-seal we migrate away from.
	tcluster := newTransitSealServer(t)
	defer tcluster.Cleanup()

	tcluster.makeKey(t, "key1")
	var seals []vault.Seal
	conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{
		DisableSealWrap: true,
	}, &vault.TestClusterOptions{
		HandlerFunc: vaulthttp.Handler,
		SkipInit:    true,
		NumCores:    3,
		SealFunc: func() vault.Seal {
			tseal := tcluster.makeSeal(t, "key1")
			seals = append(seals, tseal)
			return tseal
		},
	},
		setup,
	)
	opts.SetupFunc = nil
	cluster := vault.NewTestCluster(t, conf, opts)
	cluster.Start()
	defer cluster.Cleanup()

	client := cluster.Cores[0].Client
	initResp, err := client.Sys().Init(&api.InitRequest{
		RecoveryShares:    5,
		RecoveryThreshold: 3,
	})
	if err != nil {
		t.Fatal(err)
	}
	for _, k := range initResp.RecoveryKeysB64 {
		b, _ := base64.RawStdEncoding.DecodeString(k)
		cluster.RecoveryKeys = append(cluster.RecoveryKeys, b)
	}

	testhelpers.WaitForActiveNode(t, cluster)

	rootToken := initResp.RootToken
	client.SetToken(rootToken)
	if err := client.Sys().Seal(); err != nil {
		t.Fatal(err)
	}

	// Create the Shamir seal we are migrating to.
	logger := cluster.Logger.Named("shamir")
	shamirSeal := vault.NewDefaultSeal(&seal.Access{
		Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
			Logger: logger,
		}),
	})

	// Flag the core for migration from the transit auto-seal to Shamir.
	if err := adjustCoreForSealMigration(logger, cluster.Cores[0].Core, shamirSeal, seals[0]); err != nil {
		t.Fatal(err)
	}

	// Although we're unsealing using the recovery keys, this is still an
	// autounseal; if we stopped the transit cluster this would fail.
	var resp *api.SealStatusResponse
	for _, key := range initResp.RecoveryKeysB64 {
		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
		if err == nil {
			t.Fatal("expected error due to lack of migrate parameter")
		}
		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key, Migrate: true})
		if err != nil {
			t.Fatal(err)
		}
		if resp == nil || !resp.Sealed {
			break
		}
	}
	if resp == nil || resp.Sealed {
		t.Fatalf("expected unsealed state; got %#v", resp)
	}
	testhelpers.WaitForActiveNode(t, cluster)

	// Seal and unseal again to verify that things are working fine.
	if err := client.Sys().Seal(); err != nil {
		t.Fatal(err)
	}

	// Shut down the transit cluster; the migrated cores should no longer need it.
	tcluster.Cleanup()
	// Assign nil to Cores so the deferred Cleanup doesn't break.
	tcluster.Cores = nil

	// Now the recovery keys are actually the barrier unseal keys.
	for _, key := range initResp.RecoveryKeysB64 {
		resp, err = client.Sys().UnsealWithOptions(&api.UnsealOpts{Key: key})
		if err != nil {
			t.Fatal(err)
		}
		if resp == nil || !resp.Sealed {
			break
		}
	}
	if resp == nil || resp.Sealed {
		t.Fatalf("expected unsealed state; got %#v", resp)
	}

	// Verify the stored barrier config is now Shamir and the recovery config is gone.
	b, r, err := cluster.Cores[0].Core.PhysicalSealConfigs(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	verifyBarrierConfig(t, b, wrapping.Shamir, 5, 3, 1)
	if r != nil {
		t.Fatalf("expected nil recovery config, got: %#v", r)
	}
}
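For reference, verifyBarrierConfig is defined in another file of this change set, in the same package as the test above. A sketch consistent with the call site, assuming the standard vault.SealConfig field names (Type, SecretShares, SecretThreshold, StoredShares); the actual helper may differ:

// verifyBarrierConfig checks a stored seal configuration against expected
// values. Sketch only; the real helper lives elsewhere in this diff.
func verifyBarrierConfig(t *testing.T, cfg *vault.SealConfig, sealType string, shares, threshold, stored int) {
	t.Helper()
	if cfg == nil {
		t.Fatal("expected non-nil seal config")
	}
	if cfg.Type != sealType {
		t.Fatalf("expected seal type %q, got %q", sealType, cfg.Type)
	}
	if cfg.SecretShares != shares {
		t.Fatalf("expected %d shares, got %d", shares, cfg.SecretShares)
	}
	if cfg.SecretThreshold != threshold {
		t.Fatalf("expected threshold %d, got %d", threshold, cfg.SecretThreshold)
	}
	if cfg.StoredShares != stored {
		t.Fatalf("expected %d stored shares, got %d", stored, cfg.StoredShares)
	}
}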