remotecache: fix inline cache in manifest lists
Signed-off-by: Tonis Tiigi <[email protected]>
tonistiigi committed Aug 13, 2019
1 parent 2b2bdac commit c114e43
Showing 5 changed files with 208 additions and 13 deletions.
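In outline, the change stops parsing every per-platform image config into a single shared cache chain and instead builds one chain per manifest digest, then serves lookups through a combined, read-only cache manager. Below is a minimal, self-contained sketch of that shape; the types are hypothetical stand-ins, not BuildKit's real v1/solver interfaces.

package main

import "fmt"

// cacheChain stands in for the parsed cache records of a single image config
// (one per manifest in the index).
type cacheChain struct {
	records map[string]string // cache key -> layer digest
}

// cacheManager is a stand-in for a per-config cache lookup.
type cacheManager interface {
	Load(key string) (string, bool)
}

type chainManager struct{ chain *cacheChain }

func (m *chainManager) Load(key string) (string, bool) {
	v, ok := m.chain.records[key]
	return v, ok
}

// combinedManager consults each per-manifest manager in turn; with no main
// manager to write back to, it is purely read-through.
type combinedManager struct{ cms []cacheManager }

func (c *combinedManager) Load(key string) (string, bool) {
	for _, cm := range c.cms {
		if v, ok := cm.Load(key); ok {
			return v, true
		}
	}
	return "", false
}

func main() {
	// One chain per manifest digest (e.g. linux/amd64 and linux/arm/v7)
	// instead of a single chain shared by every config in the list.
	chains := map[string]*cacheChain{
		"sha256:aaaa": {records: map[string]string{"amd64/step1": "layer-a"}},
		"sha256:bbbb": {records: map[string]string{"arm/step1": "layer-b"}},
	}

	cms := make([]cacheManager, 0, len(chains))
	for _, cc := range chains {
		cms = append(cms, &chainManager{chain: cc})
	}
	combined := &combinedManager{cms: cms}

	fmt.Println(combined.Load("arm/step1")) // layer-b true
}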
23 changes: 13 additions & 10 deletions cache/remotecache/import.go
@@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"io"
-"sync"
"time"

"github.com/containerd/containerd/content"
@@ -110,8 +109,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
return nil, err
}

-var mu sync.Mutex
-cc := v1.NewCacheChains()
+var cMap = map[digest.Digest]*v1.CacheChains{}

eg, ctx := errgroup.WithContext(ctx)
for dgst, dt := range m {
@@ -183,12 +181,11 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
if err != nil {
return errors.WithStack(err)
}
-
-mu.Lock()
+cc := v1.NewCacheChains()
if err := v1.ParseConfig(config, layers, cc); err != nil {
return err
}
-mu.Unlock()
+cMap[dgst] = cc
return nil
})
}(dgst, dt)
@@ -198,11 +195,17 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
return nil, err
}

-keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
-if err != nil {
-return nil, err
+cms := make([]solver.CacheManager, 0, len(cMap))
+
+for _, cc := range cMap {
+keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
+if err != nil {
+return nil, err
+}
+cms = append(cms, solver.NewCacheManager(id, keysStorage, resultStorage))
}
-return solver.NewCacheManager(id, keysStorage, resultStorage), nil
+
+return solver.NewCombinedCacheManager(cms, nil), nil
}

func (ci *contentCacheImporter) allDistributionManifests(ctx context.Context, dt []byte, m map[digest.Digest][]byte) error {
7 changes: 7 additions & 0 deletions cache/remotecache/inline/inline.go
@@ -31,6 +31,12 @@ func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) {
return nil, nil
}

func (ce *exporter) reset() {
cc := v1.NewCacheChains()
ce.CacheExporterTarget = cc
ce.chains = cc
}

func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) {
config, descs, err := ce.chains.Marshal()
if err != nil {
@@ -82,6 +88,7 @@ func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) {
if err != nil {
return nil, err
}
ce.reset()

return dt, nil
}
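The reset() added above swaps a fresh chain into the exporter once a layer set has been marshaled, presumably so that each per-manifest ExportForLayers call in a manifest list starts from clean records. A stand-in sketch of that behaviour, with simplified hypothetical types rather than the real exporter:

package main

import "fmt"

// exporter is a stand-in for the inline cache exporter; records plays the
// role of the accumulated cache chains.
type exporter struct {
	records []string
}

func (e *exporter) add(rec string) { e.records = append(e.records, rec) }

// exportForLayers marshals the current records and then resets, mirroring
// the ce.reset() call added at the end of ExportForLayers.
func (e *exporter) exportForLayers() []string {
	out := append([]string(nil), e.records...)
	e.reset()
	return out
}

// reset swaps in empty state, as reset() swaps in a fresh v1.NewCacheChains().
func (e *exporter) reset() { e.records = nil }

func main() {
	e := &exporter{}
	e.add("amd64/step1")
	fmt.Println(e.exportForLayers()) // [amd64/step1]
	e.add("arm/step1")
	fmt.Println(e.exportForLayers()) // [arm/step1], not contaminated by the amd64 export
}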
182 changes: 182 additions & 0 deletions frontend/dockerfile/dockerfile_test.go
@@ -34,6 +34,7 @@ import (
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/upload/uploadprovider"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/testutil"
"github.com/moby/buildkit/util/testutil/httpserver"
"github.com/moby/buildkit/util/testutil/integration"
@@ -90,6 +91,7 @@ var allTests = []integration.Test{
testTarExporter,
testDefaultEnvWithArgs,
testEnvEmptyFormatting,
testCacheMultiPlatformImportExport,
}

var fileOpTests = []integration.Test{
@@ -3371,6 +3373,131 @@ LABEL foo=bar
require.Equal(t, "baz", v)
}

func testCacheMultiPlatformImportExport(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)

registry, err := sb.NewRegistry()
if errors.Cause(err) == integration.ErrorRequirements {
t.Skip(err.Error())
}
require.NoError(t, err)

dockerfile := []byte(`
FROM --platform=$BUILDPLATFORM busybox AS base
ARG TARGETARCH
RUN echo -n $TARGETARCH> arch && cat /dev/urandom | head -c 100 | sha256sum > unique
FROM scratch
COPY --from=base unique /
COPY --from=base arch /
`)

dir, err := tmpdir(
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
defer os.RemoveAll(dir)

c, err := client.New(context.TODO(), sb.Address())
require.NoError(t, err)
defer c.Close()

target := registry + "/buildkit/testexportdf:multi"

// exportCache := []client.CacheOptionsEntry{
// {
// Type: "registry",
// Attrs: map[string]string{"ref": target},
// },
// }
// importCache := target

exportCache := []client.CacheOptionsEntry{
{
Type: "inline",
},
}
importCache := target + "-img"

_, err = f.Solve(context.TODO(), c, client.SolveOpt{
Exports: []client.ExportEntry{
{
Type: client.ExporterImage,
Attrs: map[string]string{
"push": "true",
"name": target + "-img",
},
},
},
CacheExports: exportCache,
FrontendAttrs: map[string]string{
"platform": "linux/amd64,linux/arm/v7",
},
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
},
}, nil)
require.NoError(t, err)

desc, provider, err := contentutil.ProviderFromRef(target + "-img")
require.NoError(t, err)

imgMap, err := readIndex(provider, desc)
require.NoError(t, err)

require.Equal(t, 2, len(imgMap))

require.Equal(t, "amd64", string(imgMap["linux/amd64"].layers[1]["arch"].Data))
dtamd := imgMap["linux/amd64"].layers[0]["unique"].Data
dtarm := imgMap["linux/arm/v7"].layers[0]["unique"].Data
require.NotEqual(t, dtamd, dtarm)

for i := 0; i < 2; i++ {
err = c.Prune(context.TODO(), nil, client.PruneAll)
require.NoError(t, err)

checkAllRemoved(t, c, sb)

_, err = f.Solve(context.TODO(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
"cache-from": importCache,
"platform": "linux/amd64,linux/arm/v7",
},
Exports: []client.ExportEntry{
{
Type: client.ExporterImage,
Attrs: map[string]string{
"push": "true",
"name": target + "-img",
},
},
},
CacheExports: exportCache,
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
},
}, nil)
require.NoError(t, err)

desc2, provider, err := contentutil.ProviderFromRef(target + "-img")
require.NoError(t, err)

require.Equal(t, desc.Digest, desc2.Digest)

imgMap, err = readIndex(provider, desc2)
require.NoError(t, err)

require.Equal(t, 2, len(imgMap))

require.Equal(t, "arm", string(imgMap["linux/arm/v7"].layers[1]["arch"].Data))
dtamd2 := imgMap["linux/amd64"].layers[0]["unique"].Data
dtarm2 := imgMap["linux/arm/v7"].layers[0]["unique"].Data
require.Equal(t, string(dtamd), string(dtamd2))
require.Equal(t, string(dtarm), string(dtarm2))
}
}

func testCacheImportExport(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)

@@ -4286,3 +4413,58 @@ func fixedWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser
return wc, nil
}
}

type imageInfo struct {
desc ocispec.Descriptor
layers []map[string]*testutil.TarItem
}

func readIndex(p content.Provider, desc ocispec.Descriptor) (map[string]*imageInfo, error) {
ctx := context.TODO()
dt, err := content.ReadBlob(ctx, p, desc)
if err != nil {
return nil, err
}
var idx ocispec.Index
if err := json.Unmarshal(dt, &idx); err != nil {
return nil, err
}

mi := map[string]*imageInfo{}

for _, m := range idx.Manifests {
img, err := readImage(p, m)
if err != nil {
return nil, err
}
mi[platforms.Format(*m.Platform)] = img
}
return mi, nil
}
func readImage(p content.Provider, desc ocispec.Descriptor) (*imageInfo, error) {
ii := &imageInfo{desc: desc}

ctx := context.TODO()
dt, err := content.ReadBlob(ctx, p, desc)
if err != nil {
return nil, err
}

var mfst ocispec.Manifest
if err := json.Unmarshal(dt, &mfst); err != nil {
return nil, err
}

for _, l := range mfst.Layers {
dt, err := content.ReadBlob(ctx, p, l)
if err != nil {
return nil, err
}
m, err := testutil.ReadTarToMap(dt, true)
if err != nil {
return nil, err
}
ii.layers = append(ii.layers, m)
}
return ii, nil
}
7 changes: 5 additions & 2 deletions solver/combinedcache.go
@@ -11,7 +11,7 @@ import (
"golang.org/x/sync/errgroup"
)

-func newCombinedCacheManager(cms []CacheManager, main CacheManager) CacheManager {
+func NewCombinedCacheManager(cms []CacheManager, main CacheManager) CacheManager {
return &combinedCacheManager{cms: cms, main: main}
}

@@ -80,7 +80,7 @@ func (cm *combinedCacheManager) Load(ctx context.Context, rec *CacheRecord) (res
res.Result.Release(context.TODO())
}
}()
-if rec.cacheManager != cm.main {
+if rec.cacheManager != cm.main && cm.main != nil {
for _, res := range results {
if _, err := cm.main.Save(res.CacheKey, res.Result, res.CacheResult.CreatedAt); err != nil {
return nil, err
@@ -91,6 +91,9 @@
}

func (cm *combinedCacheManager) Save(key *CacheKey, s Result, createdAt time.Time) (*ExportableCacheKey, error) {
+if cm.main == nil {
+return nil, nil
+}
return cm.main.Save(key, s, createdAt)
}

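Exporting the constructor and tolerating a nil main manager lets the combined manager be used purely for reads: Load no longer copies results into a main cache when there is none, and Save degrades to a no-op. A stand-in sketch of that guard, again with simplified hypothetical types rather than the solver package:

package main

import "fmt"

type result string

// manager is a stand-in for a cache manager that can store and look up results.
type manager interface {
	Load(key string) (result, bool)
	Save(key string, r result) error
}

type mapManager map[string]result

func (m mapManager) Load(key string) (result, bool) { r, ok := m[key]; return r, ok }
func (m mapManager) Save(key string, r result) error { m[key] = r; return nil }

// combined mirrors the guarded behaviour: main may be nil, in which case
// nothing is ever written back.
type combined struct {
	cms  []manager
	main manager // may be nil for a read-only combination
}

func (c *combined) Load(key string) (result, bool) {
	for _, cm := range c.cms {
		if r, ok := cm.Load(key); ok {
			if c.main != nil { // only copy into main when one exists
				_ = c.main.Save(key, r)
			}
			return r, true
		}
	}
	return "", false
}

func (c *combined) Save(key string, r result) error {
	if c.main == nil { // no-op without a main manager
		return nil
	}
	return c.main.Save(key, r)
}

func main() {
	ro := &combined{cms: []manager{mapManager{"k": "v"}}}
	fmt.Println(ro.Load("k"))        // v true
	fmt.Println(ro.Save("k2", "v2")) // <nil>, silently ignored
}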
2 changes: 1 addition & 1 deletion solver/jobs.go
@@ -141,7 +141,7 @@ func (s *state) combinedCacheManager() CacheManager {
return s.mainCache
}

-return newCombinedCacheManager(cms, s.mainCache)
+return NewCombinedCacheManager(cms, s.mainCache)
}

func (s *state) Release() {
