From 3f474cc2f93ab3045998096d86e6c704b8d4aa30 Mon Sep 17 00:00:00 2001 From: "Jonathan A. Sternberg" Date: Tue, 5 Nov 2024 11:51:54 -0600 Subject: [PATCH] bake: prototype for composable bake attributes This allows using either the csv syntax or a map to specify certain attributes. Signed-off-by: Jonathan A. Sternberg --- bake/bake.go | 254 +++++---- bake/bake_test.go | 105 ++-- bake/compose.go | 13 +- bake/compose_test.go | 42 +- bake/hcl_test.go | 4 + bake/hclparser/LICENSE | 355 ++++++++++++ bake/hclparser/gohcl/decode.go | 351 ++++++++++++ bake/hclparser/gohcl/decode_test.go | 813 ++++++++++++++++++++++++++++ bake/hclparser/gohcl/doc.go | 65 +++ bake/hclparser/gohcl/encode.go | 194 +++++++ bake/hclparser/gohcl/encode_test.go | 67 +++ bake/hclparser/gohcl/schema.go | 184 +++++++ bake/hclparser/gohcl/schema_test.go | 233 ++++++++ bake/hclparser/gohcl/types.go | 19 + bake/hclparser/hclparser.go | 11 +- bake/hclparser/type_implied.go | 161 ++++++ bake/hclparser/type_implied_ext.go | 49 ++ controller/pb/export.go | 1 + go.mod | 2 +- util/buildflags/cty.go | 44 ++ util/buildflags/export.go | 200 +++++-- 21 files changed, 2939 insertions(+), 228 deletions(-) create mode 100644 bake/hclparser/LICENSE create mode 100644 bake/hclparser/gohcl/decode.go create mode 100644 bake/hclparser/gohcl/decode_test.go create mode 100644 bake/hclparser/gohcl/doc.go create mode 100644 bake/hclparser/gohcl/encode.go create mode 100644 bake/hclparser/gohcl/encode_test.go create mode 100644 bake/hclparser/gohcl/schema.go create mode 100644 bake/hclparser/gohcl/schema_test.go create mode 100644 bake/hclparser/gohcl/types.go create mode 100644 bake/hclparser/type_implied.go create mode 100644 bake/hclparser/type_implied_ext.go create mode 100644 util/buildflags/cty.go diff --git a/bake/bake.go b/bake/bake.go index e56980e7d526..86e5b7fd1df5 100644 --- a/bake/bake.go +++ b/bake/bake.go @@ -2,6 +2,7 @@ package bake import ( "context" + "encoding" "io" "os" "path" @@ -28,7 +29,6 @@ import ( 
"github.com/moby/buildkit/session/auth/authprovider" "github.com/moby/buildkit/util/entitlements" "github.com/pkg/errors" - "github.com/tonistiigi/go-csvvalue" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/convert" ) @@ -496,7 +496,9 @@ func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[st if err != nil { return err } - t2.Outputs = []string{"type=cacheonly"} + t2.Outputs = []*buildflags.ExportEntry{ + {Type: "cacheonly"}, + } t2.linked = true m[target] = t2 } @@ -695,59 +697,61 @@ type Target struct { // Inherits is the only field that cannot be overridden with --set Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional" cty:"inherits"` - Annotations []string `json:"annotations,omitempty" hcl:"annotations,optional" cty:"annotations"` - Attest []string `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"` - Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"` - Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"` - Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"` - DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional" cty:"dockerfile-inline"` - Args map[string]*string `json:"args,omitempty" hcl:"args,optional" cty:"args"` - Labels map[string]*string `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"` - Tags []string `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"` - CacheFrom []string `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"` - CacheTo []string `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"` - Target *string `json:"target,omitempty" hcl:"target,optional" cty:"target"` - Secrets []string `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"` - SSH []string `json:"ssh,omitempty" hcl:"ssh,optional" cty:"ssh"` - Platforms []string `json:"platforms,omitempty" 
hcl:"platforms,optional" cty:"platforms"` - Outputs []string `json:"output,omitempty" hcl:"output,optional" cty:"output"` - Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"` - NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"` - NetworkMode *string `json:"network,omitempty" hcl:"network,optional" cty:"network"` - NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"` - ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional"` - Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional"` - Call *string `json:"call,omitempty" hcl:"call,optional" cty:"call"` - Entitlements []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"` + Annotations []string `json:"annotations,omitempty" hcl:"annotations,optional" cty:"annotations"` + Attest []string `json:"attest,omitempty" hcl:"attest,optional" cty:"attest"` + Context *string `json:"context,omitempty" hcl:"context,optional" cty:"context"` + Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional" cty:"contexts"` + Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional" cty:"dockerfile"` + DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional" cty:"dockerfile-inline"` + Args map[string]*string `json:"args,omitempty" hcl:"args,optional" cty:"args"` + Labels map[string]*string `json:"labels,omitempty" hcl:"labels,optional" cty:"labels"` + Tags []string `json:"tags,omitempty" hcl:"tags,optional" cty:"tags"` + CacheFrom []string `json:"cache-from,omitempty" hcl:"cache-from,optional" cty:"cache-from"` + CacheTo []string `json:"cache-to,omitempty" hcl:"cache-to,optional" cty:"cache-to"` + Target *string `json:"target,omitempty" hcl:"target,optional" cty:"target"` + Secrets []string `json:"secret,omitempty" hcl:"secret,optional" cty:"secret"` + SSH []string `json:"ssh,omitempty" hcl:"ssh,optional" 
cty:"ssh"` + Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional" cty:"platforms"` + Outputs []*buildflags.ExportEntry `json:"output,omitempty" hcl:"output,optional" cty:"output"` + Pull *bool `json:"pull,omitempty" hcl:"pull,optional" cty:"pull"` + NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional" cty:"no-cache"` + NetworkMode *string `json:"network,omitempty" hcl:"network,optional" cty:"network"` + NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional" cty:"no-cache-filter"` + ShmSize *string `json:"shm-size,omitempty" hcl:"shm-size,optional"` + Ulimits []string `json:"ulimits,omitempty" hcl:"ulimits,optional"` + Call *string `json:"call,omitempty" hcl:"call,optional" cty:"call"` + Entitlements []string `json:"entitlements,omitempty" hcl:"entitlements,optional" cty:"entitlements"` // IMPORTANT: if you add more fields here, do not forget to update newOverrides/AddOverrides and docs/bake-reference.md. // linked is a private field to mark a target used as a linked one linked bool } -var _ hclparser.WithEvalContexts = &Target{} -var _ hclparser.WithGetName = &Target{} -var _ hclparser.WithEvalContexts = &Group{} -var _ hclparser.WithGetName = &Group{} +var ( + _ hclparser.WithEvalContexts = &Target{} + _ hclparser.WithGetName = &Target{} + _ hclparser.WithEvalContexts = &Group{} + _ hclparser.WithGetName = &Group{} +) func (t *Target) normalize() { - t.Annotations = removeDupes(t.Annotations) + t.Annotations = removeDupesStr(t.Annotations) t.Attest = removeAttestDupes(t.Attest) - t.Tags = removeDupes(t.Tags) - t.Secrets = removeDupes(t.Secrets) - t.SSH = removeDupes(t.SSH) - t.Platforms = removeDupes(t.Platforms) - t.CacheFrom = removeDupes(t.CacheFrom) - t.CacheTo = removeDupes(t.CacheTo) + t.Tags = removeDupesStr(t.Tags) + t.Secrets = removeDupesStr(t.Secrets) + t.SSH = removeDupesStr(t.SSH) + t.Platforms = removeDupesStr(t.Platforms) + t.CacheFrom = removeDupesStr(t.CacheFrom) + t.CacheTo = 
removeDupesStr(t.CacheTo) t.Outputs = removeDupes(t.Outputs) - t.NoCacheFilter = removeDupes(t.NoCacheFilter) - t.Ulimits = removeDupes(t.Ulimits) + t.NoCacheFilter = removeDupesStr(t.NoCacheFilter) + t.Ulimits = removeDupesStr(t.Ulimits) if t.NetworkMode != nil && *t.NetworkMode == "host" { t.Entitlements = append(t.Entitlements, "network.host") } - t.Entitlements = removeDupes(t.Entitlements) + t.Entitlements = removeDupesStr(t.Entitlements) for k, v := range t.Contexts { if v == "" { @@ -904,7 +908,11 @@ func (t *Target) AddOverrides(overrides map[string]Override) error { case "platform": t.Platforms = o.ArrValue case "output": - t.Outputs = o.ArrValue + outputs, err := parseArrValue[buildflags.ExportEntry](o.ArrValue) + if err != nil { + return errors.Wrap(err, "invalid value for outputs") + } + t.Outputs = outputs case "entitlements": t.Entitlements = append(t.Entitlements, o.ArrValue...) case "annotations": @@ -1354,10 +1362,11 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) { } bo.CacheTo = controllerapi.CreateCaches(cacheExports) - outputs, err := buildflags.ParseExports(t.Outputs) - if err != nil { - return nil, err + outputs := make([]*controllerapi.ExportEntry, len(t.Outputs)) + for i, output := range t.Outputs { + outputs[i] = output.ToPB() } + bo.Exports, err = controllerapi.CreateExports(outputs) if err != nil { return nil, err @@ -1403,7 +1412,35 @@ func defaultTarget() *Target { return &Target{} } -func removeDupes(s []string) []string { +type hasEqual[E any] interface { + Equal(other E) bool +} + +func removeDupes[E hasEqual[E]](s []E) []E { + // Move backwards through the slice. + // For each element, any elements after the current element are unique. + // If we find our current element conflicts with an existing element, + // then we swap the offender with the end of the slice and chop it off. + + // Start at the second to last element. + // The last element is always unique. 
+ for i := len(s) - 2; i > 0; i-- { + elem := s[i] + // Check for duplicates after our current element. + for j := i + 1; j < len(s); j++ { + if elem.Equal(s[j]) { + // Found a duplicate, exchange the + // duplicate with the last element. + s[j], s[len(s)-1] = s[len(s)-1], s[j] + s = s[:len(s)-1] + break + } + } + } + return s +} + +func removeDupesStr(s []string) []string { i := 0 seen := make(map[string]struct{}, len(s)) for _, v := range s { @@ -1440,86 +1477,76 @@ func removeAttestDupes(s []string) []string { return res } -func parseOutput(str string) map[string]string { - fields, err := csvvalue.Fields(str, nil) - if err != nil { - return nil - } - res := map[string]string{} - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) == 2 { - res[parts[0]] = parts[1] - } - } - return res -} - -func parseOutputType(str string) string { - if out := parseOutput(str); out != nil { - if v, ok := out["type"]; ok { - return v +func setPushOverride(outputs []*buildflags.ExportEntry, push bool) []*buildflags.ExportEntry { + if !push { + // Disable push for any relevant export types + for i := 0; i < len(outputs); { + output := outputs[i] + switch output.Type { + case "registry": + // Filter out registry output type + outputs[i], outputs[len(outputs)-1] = outputs[len(outputs)-1], outputs[i] + outputs = outputs[:len(outputs)-1] + continue + case "image": + // Override push attribute + output.Attrs["push"] = "false" + } + i++ } + return outputs } - return "" -} -func setPushOverride(outputs []string, push bool) []string { - var out []string + // Force push to be enabled setPush := true for _, output := range outputs { - typ := parseOutputType(output) - if typ == "image" || typ == "registry" { - // no need to set push if image or registry types already defined + if output.Type != "docker" { + // If there is an output type that is not docker, don't set "push" setPush = false - if typ == "registry" { - if !push { - // don't set registry output 
if "push" is false - continue - } - // no need to set "push" attribute to true for registry - out = append(out, output) - continue - } - out = append(out, output+",push="+strconv.FormatBool(push)) - } else { - if typ != "docker" { - // if there is any output that is not docker, don't set "push" - setPush = false - } - out = append(out, output) + } + + // Set push attribute for image + if output.Type == "image" { + output.Attrs["push"] = "true" } } - if push && setPush { - out = append(out, "type=image,push=true") + + if setPush { + // No existing output that pushes so add one + outputs = append(outputs, &buildflags.ExportEntry{ + Type: "image", + Attrs: map[string]string{ + "push": "true", + }, + }) } - return out + return outputs } -func setLoadOverride(outputs []string, load bool) []string { +func setLoadOverride(outputs []*buildflags.ExportEntry, load bool) []*buildflags.ExportEntry { if !load { return outputs } - setLoad := true + for _, output := range outputs { - if typ := parseOutputType(output); typ == "docker" { - if v := parseOutput(output); v != nil { - // dest set means we want to output as tar so don't set load - if _, ok := v["dest"]; !ok { - setLoad = false - break - } + switch output.Type { + case "docker": + // if dest is not set, we can reuse this entry and do not need to set load + if output.Destination == "" { + return outputs } - } else if typ != "image" && typ != "registry" && typ != "oci" { + case "image", "registry", "oci": + // Ignore + default: // if there is any output that is not an image, registry // or oci, don't set "load" similar to push override - setLoad = false - break + return outputs } } - if setLoad { - outputs = append(outputs, "type=docker") - } + + outputs = append(outputs, &buildflags.ExportEntry{ + Type: "docker", + }) return outputs } @@ -1558,3 +1585,20 @@ func toNamedContexts(m map[string]string) map[string]build.NamedContext { } return m2 } + +type arrValue[B any] interface { + encoding.TextUnmarshaler + *B +} + +func 
parseArrValue[T any, PT arrValue[T]](s []string) ([]*T, error) { + outputs := make([]*T, 0, len(s)) + for _, text := range s { + output := new(T) + if err := PT(output).UnmarshalText([]byte(text)); err != nil { + return nil, err + } + outputs = append(outputs, output) + } + return outputs, nil +} diff --git a/bake/bake_test.go b/bake/bake_test.go index bb95499a76a8..001311046b28 100644 --- a/bake/bake_test.go +++ b/bake/bake_test.go @@ -2,6 +2,7 @@ package bake import ( "context" + "fmt" "os" "path/filepath" "sort" @@ -228,7 +229,7 @@ func TestPushOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil) require.NoError(t, err) require.Equal(t, 1, len(m["app"].Outputs)) - require.Equal(t, "type=image,push=true", m["app"].Outputs[0]) + require.Equal(t, "type=image,push=true", m["app"].Outputs[0].String()) }) t.Run("type image", func(t *testing.T) { @@ -242,7 +243,7 @@ func TestPushOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil) require.NoError(t, err) require.Equal(t, 1, len(m["app"].Outputs)) - require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0]) + require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0].String()) }) t.Run("type image push false", func(t *testing.T) { @@ -256,7 +257,7 @@ func TestPushOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=false"}, nil) require.NoError(t, err) require.Equal(t, 1, len(m["app"].Outputs)) - require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0]) + require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0].String()) }) t.Run("type registry", func(t *testing.T) { @@ -270,7 +271,7 @@ func TestPushOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.push=true"}, nil) 
require.NoError(t, err) require.Equal(t, 1, len(m["app"].Outputs)) - require.Equal(t, "type=registry", m["app"].Outputs[0]) + require.Equal(t, "type=registry", m["app"].Outputs[0].String()) }) t.Run("type registry push false", func(t *testing.T) { @@ -300,9 +301,9 @@ func TestPushOverride(t *testing.T) { require.NoError(t, err) require.Equal(t, 2, len(m)) require.Equal(t, 1, len(m["foo"].Outputs)) - require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs) + require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs)) require.Equal(t, 1, len(m["bar"].Outputs)) - require.Equal(t, []string{"type=image,push=true"}, m["bar"].Outputs) + require.Equal(t, []string{"type=image,push=true"}, stringify(m["bar"].Outputs)) }) } @@ -317,7 +318,7 @@ func TestLoadOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil) require.NoError(t, err) require.Equal(t, 1, len(m["app"].Outputs)) - require.Equal(t, "type=docker", m["app"].Outputs[0]) + require.Equal(t, "type=docker", m["app"].Outputs[0].String()) }) t.Run("type docker", func(t *testing.T) { @@ -331,7 +332,7 @@ func TestLoadOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil) require.NoError(t, err) require.Equal(t, 1, len(m["app"].Outputs)) - require.Equal(t, []string{"type=docker"}, m["app"].Outputs) + require.Equal(t, []string{"type=docker"}, stringify(m["app"].Outputs)) }) t.Run("type image", func(t *testing.T) { @@ -345,7 +346,7 @@ func TestLoadOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil) require.NoError(t, err) require.Equal(t, 2, len(m["app"].Outputs)) - require.Equal(t, []string{"type=image", "type=docker"}, m["app"].Outputs) + require.Equal(t, []string{"type=docker", "type=image"}, stringify(m["app"].Outputs)) }) t.Run("type image load false", func(t *testing.T) { @@ -359,7 
+360,7 @@ func TestLoadOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=false"}, nil) require.NoError(t, err) require.Equal(t, 1, len(m["app"].Outputs)) - require.Equal(t, []string{"type=image"}, m["app"].Outputs) + require.Equal(t, []string{"type=image"}, stringify(m["app"].Outputs)) }) t.Run("type registry", func(t *testing.T) { @@ -373,7 +374,7 @@ func TestLoadOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil) require.NoError(t, err) require.Equal(t, 2, len(m["app"].Outputs)) - require.Equal(t, []string{"type=registry", "type=docker"}, m["app"].Outputs) + require.Equal(t, []string{"type=docker", "type=registry"}, stringify(m["app"].Outputs)) }) t.Run("type oci", func(t *testing.T) { @@ -387,7 +388,7 @@ func TestLoadOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil) require.NoError(t, err) require.Equal(t, 2, len(m["app"].Outputs)) - require.Equal(t, []string{"type=oci,dest=out", "type=docker"}, m["app"].Outputs) + require.Equal(t, []string{"type=docker", "type=oci,dest=out"}, stringify(m["app"].Outputs)) }) t.Run("type docker with dest", func(t *testing.T) { @@ -401,7 +402,7 @@ func TestLoadOverride(t *testing.T) { m, _, err := ReadTargets(context.TODO(), []File{fp}, []string{"app"}, []string{"*.load=true"}, nil) require.NoError(t, err) require.Equal(t, 2, len(m["app"].Outputs)) - require.Equal(t, []string{"type=docker,dest=out", "type=docker"}, m["app"].Outputs) + require.Equal(t, []string{"type=docker", "type=docker,dest=out"}, stringify(m["app"].Outputs)) }) t.Run("type local and empty target", func(t *testing.T) { @@ -418,9 +419,9 @@ func TestLoadOverride(t *testing.T) { require.NoError(t, err) require.Equal(t, 2, len(m)) require.Equal(t, 1, len(m["foo"].Outputs)) - require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs) + require.Equal(t, 
[]string{"type=local,dest=out"}, stringify(m["foo"].Outputs)) require.Equal(t, 1, len(m["bar"].Outputs)) - require.Equal(t, []string{"type=docker"}, m["bar"].Outputs) + require.Equal(t, []string{"type=docker"}, stringify(m["bar"].Outputs)) }) } @@ -440,12 +441,10 @@ func TestLoadAndPushOverride(t *testing.T) { require.Equal(t, 2, len(m)) require.Equal(t, 1, len(m["foo"].Outputs)) - sort.Strings(m["foo"].Outputs) - require.Equal(t, []string{"type=local,dest=out"}, m["foo"].Outputs) + require.Equal(t, []string{"type=local,dest=out"}, stringify(m["foo"].Outputs)) require.Equal(t, 2, len(m["bar"].Outputs)) - sort.Strings(m["bar"].Outputs) - require.Equal(t, []string{"type=docker", "type=image,push=true"}, m["bar"].Outputs) + require.Equal(t, []string{"type=docker", "type=image,push=true"}, stringify(m["bar"].Outputs)) }) t.Run("type registry", func(t *testing.T) { @@ -461,8 +460,7 @@ func TestLoadAndPushOverride(t *testing.T) { require.Equal(t, 1, len(m)) require.Equal(t, 2, len(m["foo"].Outputs)) - sort.Strings(m["foo"].Outputs) - require.Equal(t, []string{"type=docker", "type=registry"}, m["foo"].Outputs) + require.Equal(t, []string{"type=docker", "type=registry"}, stringify(m["foo"].Outputs)) }) } @@ -674,7 +672,7 @@ func TestOverrideMerge(t *testing.T) { require.Equal(t, []string{"linux/arm", "linux/ppc64le"}, m["app"].Platforms) require.Equal(t, 1, len(m["app"].Outputs)) - require.Equal(t, "type=registry", m["app"].Outputs[0]) + require.Equal(t, "type=registry", m["app"].Outputs[0].String()) } func TestReadContexts(t *testing.T) { @@ -840,7 +838,7 @@ func TestReadContextFromTargetChain(t *testing.T) { mid, ok := m["mid"] require.True(t, ok) require.Equal(t, 1, len(mid.Outputs)) - require.Equal(t, "type=cacheonly", mid.Outputs[0]) + require.Equal(t, "type=cacheonly", mid.Outputs[0].String()) require.Equal(t, 1, len(mid.Contexts)) base, ok := m["base"] @@ -924,7 +922,8 @@ func TestReadTargetsDefault(t *testing.T) { Data: []byte(` target "default" { dockerfile = 
"test" -}`)} +}`), + } m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil) require.NoError(t, err) @@ -942,7 +941,8 @@ func TestReadTargetsSpecified(t *testing.T) { Data: []byte(` target "image" { dockerfile = "test" -}`)} +}`), + } _, _, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil) require.Error(t, err) @@ -967,7 +967,8 @@ group "foo" { } target "image" { dockerfile = "test" -}`)} +}`), + } m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil) require.NoError(t, err) @@ -993,7 +994,8 @@ target "foo" { } target "image" { dockerfile = "test" -}`)} +}`), + } m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil) require.NoError(t, err) @@ -1036,7 +1038,8 @@ target "image-release" { inherits = ["image"] output = ["type=image,push=true"] tags = ["user/app:latest"] -}`)} +}`), + } fyml := File{ Name: "docker-compose.yml", @@ -1060,7 +1063,8 @@ services: args: CT_ECR: foo CT_TAG: bar - image: ct-fake-aws:bar`)} + image: ct-fake-aws:bar`), + } fjson := File{ Name: "docker-bake.json", @@ -1081,7 +1085,8 @@ services: ] } } - }`)} + }`), + } m, g, err := ReadTargets(ctx, []File{fhcl}, []string{"default"}, nil, nil) require.NoError(t, err) @@ -1089,7 +1094,7 @@ services: require.Equal(t, []string{"image"}, g["default"].Targets) require.Equal(t, 1, len(m)) require.Equal(t, 1, len(m["image"].Outputs)) - require.Equal(t, "type=docker", m["image"].Outputs[0]) + require.Equal(t, "type=docker", m["image"].Outputs[0].String()) m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image-release"}, nil, nil) require.NoError(t, err) @@ -1097,7 +1102,7 @@ services: require.Equal(t, []string{"image-release"}, g["default"].Targets) require.Equal(t, 1, len(m)) require.Equal(t, 1, len(m["image-release"].Outputs)) - require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0]) + require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0].String()) m, g, err = ReadTargets(ctx, []File{fhcl}, 
[]string{"image", "image-release"}, nil, nil) require.NoError(t, err) @@ -1106,7 +1111,7 @@ services: require.Equal(t, 2, len(m)) require.Equal(t, ".", *m["image"].Context) require.Equal(t, 1, len(m["image-release"].Outputs)) - require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0]) + require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0].String()) m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"default"}, nil, nil) require.NoError(t, err) @@ -1166,7 +1171,8 @@ target "foo" { } target "image" { output = ["type=docker"] -}`)} +}`), + } m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil) require.NoError(t, err) @@ -1200,7 +1206,8 @@ target "foo" { } target "image" { output = ["type=docker"] -}`)} +}`), + } m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil) require.NoError(t, err) @@ -1209,7 +1216,7 @@ target "image" { require.Equal(t, []string{"foo", "image"}, g["foo"].Targets) require.Equal(t, 2, len(m)) require.Equal(t, "bar", *m["foo"].Dockerfile) - require.Equal(t, "type=docker", m["image"].Outputs[0]) + require.Equal(t, "type=docker", m["image"].Outputs[0].String()) m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "image"}, nil, nil) require.NoError(t, err) @@ -1218,7 +1225,7 @@ target "image" { require.Equal(t, []string{"foo", "image"}, g["foo"].Targets) require.Equal(t, 2, len(m)) require.Equal(t, "bar", *m["foo"].Dockerfile) - require.Equal(t, "type=docker", m["image"].Outputs[0]) + require.Equal(t, "type=docker", m["image"].Outputs[0].String()) } func TestNestedInherits(t *testing.T) { @@ -1247,7 +1254,8 @@ target "c" { } target "d" { inherits = ["b", "c"] -}`)} +}`), + } cases := []struct { name string @@ -1315,7 +1323,8 @@ group "default" { "child1", "child2" ] -}`)} +}`), + } cases := []struct { name string @@ -1351,9 +1360,9 @@ group "default" { require.Equal(t, []string{"child1", "child2"}, g["default"].Targets) require.Equal(t, 2, len(m)) require.Equal(t, tt.wantch1, 
m["child1"].Args) - require.Equal(t, []string{"type=docker"}, m["child1"].Outputs) + require.Equal(t, []string{"type=docker"}, stringify(m["child1"].Outputs)) require.Equal(t, tt.wantch2, m["child2"].Args) - require.Equal(t, []string{"type=docker"}, m["child2"].Outputs) + require.Equal(t, []string{"type=docker"}, stringify(m["child2"].Outputs)) }) } } @@ -1442,7 +1451,8 @@ group "e" { target "f" { context = "./foo" -}`)} +}`), + } cases := []struct { names []string @@ -1721,7 +1731,7 @@ func TestAnnotations(t *testing.T) { require.Equal(t, 1, len(m)) require.Contains(t, m, "app") - require.Equal(t, "type=image,name=foo", m["app"].Outputs[0]) + require.Equal(t, "type=image,name=foo", m["app"].Outputs[0].String()) require.Equal(t, "manifest[linux/amd64]:foo=bar", m["app"].Annotations[0]) require.Len(t, bo["app"].Exports, 1) @@ -1856,3 +1866,12 @@ func TestNetNone(t *testing.T) { require.Len(t, bo["app"].Allow, 0) require.Equal(t, "none", bo["app"].NetworkMode) } + +func stringify[V fmt.Stringer](values []V) []string { + s := make([]string, len(values)) + for i, v := range values { + s[i] = v.String() + } + sort.Strings(s) + return s +} diff --git a/bake/compose.go b/bake/compose.go index 6036e53473a6..ecd08eee028b 100644 --- a/bake/compose.go +++ b/bake/compose.go @@ -12,6 +12,7 @@ import ( "github.com/compose-spec/compose-go/v2/dotenv" "github.com/compose-spec/compose-go/v2/loader" composetypes "github.com/compose-spec/compose-go/v2/types" + "github.com/docker/buildx/util/buildflags" dockeropts "github.com/docker/cli/opts" "github.com/docker/go-units" "github.com/pkg/errors" @@ -292,8 +293,10 @@ type xbake struct { // https://github.com/docker/docs/blob/main/content/build/bake/compose-file.md#extension-field-with-x-bake } -type stringMap map[string]string -type stringArray []string +type ( + stringMap map[string]string + stringArray []string +) func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error { var multi []string 
@@ -345,7 +348,11 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error { t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...)) } if len(xb.Outputs) > 0 { - t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...)) + outputs, err := parseArrValue[buildflags.ExportEntry](xb.Outputs) + if err != nil { + return err + } + t.Outputs = removeDupes(append(t.Outputs, outputs...)) } if xb.Pull != nil { t.Pull = xb.Pull diff --git a/bake/compose_test.go b/bake/compose_test.go index 28541095f484..70a942869326 100644 --- a/bake/compose_test.go +++ b/bake/compose_test.go @@ -12,7 +12,7 @@ import ( ) func TestParseCompose(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: db: build: ./db @@ -89,7 +89,7 @@ secrets: } func TestNoBuildOutOfTreeService(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: external: image: "verycooldb:1337" @@ -103,7 +103,7 @@ services: } func TestParseComposeTarget(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: db: build: @@ -129,7 +129,7 @@ services: } func TestComposeBuildWithoutContext(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: db: build: @@ -153,7 +153,7 @@ services: } func TestBuildArgEnvCompose(t *testing.T) { - var dt = []byte(` + dt := []byte(` version: "3.8" services: example: @@ -179,7 +179,7 @@ services: } func TestInconsistentComposeFile(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: webapp: entrypoint: echo 1 @@ -190,7 +190,7 @@ services: } func TestAdvancedNetwork(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: db: networks: @@ -215,7 +215,7 @@ networks: } func TestTags(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: example: image: example @@ -233,7 +233,7 @@ services: } func TestDependsOnList(t *testing.T) { - var dt = []byte(` + dt := []byte(` version: "3.8" services: @@ -269,7 +269,7 @@ networks: } func TestComposeExt(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: addon: image: 
ct-addon:bar @@ -345,14 +345,14 @@ services: require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets) require.Equal(t, []string{"default"}, c.Targets[1].SSH) require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms) - require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs) + require.Equal(t, []string{"type=docker"}, stringify(c.Targets[1].Outputs)) require.Equal(t, newBool(true), c.Targets[1].NoCache) require.Equal(t, ptrstr("128MiB"), c.Targets[1].ShmSize) require.Equal(t, []string{"nofile=1024:1024"}, c.Targets[1].Ulimits) } func TestComposeExtDedup(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: webapp: image: app:bar @@ -396,7 +396,7 @@ func TestEnv(t *testing.T) { _, err = envf.WriteString("FOO=bsdf -csdf\n") require.NoError(t, err) - var dt = []byte(` + dt := []byte(` services: scratch: build: @@ -424,7 +424,7 @@ func TestDotEnv(t *testing.T) { err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644) require.NoError(t, err) - var dt = []byte(` + dt := []byte(` services: scratch: build: @@ -443,7 +443,7 @@ services: } func TestPorts(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: foo: build: @@ -664,7 +664,7 @@ target "default" { } func TestComposeNullArgs(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: scratch: build: @@ -680,7 +680,7 @@ services: } func TestDependsOn(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: foo: build: @@ -711,7 +711,7 @@ services: `), 0644) require.NoError(t, err) - var dt = []byte(` + dt := []byte(` include: - compose-foo.yml @@ -740,7 +740,7 @@ services: } func TestDevelop(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: scratch: build: @@ -759,7 +759,7 @@ services: } func TestCgroup(t *testing.T) { - var dt = []byte(` + dt := []byte(` services: scratch: build: @@ -772,7 +772,7 @@ services: } func TestProjectName(t *testing.T) { - var dt = []byte(` + dt := []byte(` 
services: scratch: build: diff --git a/bake/hcl_test.go b/bake/hcl_test.go index ac5e61eb6951..ef9865dbffa7 100644 --- a/bake/hcl_test.go +++ b/bake/hcl_test.go @@ -17,6 +17,7 @@ func TestHCLBasic(t *testing.T) { target "db" { context = "./db" tags = ["docker.io/tonistiigi/db"] + output = ["type=image"] } target "webapp" { @@ -25,6 +26,9 @@ func TestHCLBasic(t *testing.T) { args = { buildno = "123" } + output = [ + { type = "image" } + ] } target "cross" { diff --git a/bake/hclparser/LICENSE b/bake/hclparser/LICENSE new file mode 100644 index 000000000000..e25da5fad948 --- /dev/null +++ b/bake/hclparser/LICENSE @@ -0,0 +1,355 @@ +Copyright (c) 2014 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. 
+ +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. 
under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. 
Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. 
Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. 
If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/bake/hclparser/gohcl/decode.go b/bake/hclparser/gohcl/decode.go new file mode 100644 index 000000000000..d5139a7940ac --- /dev/null +++ b/bake/hclparser/gohcl/decode.go @@ -0,0 +1,351 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gohcl + +import ( + "fmt" + "reflect" + + "github.com/hashicorp/hcl/v2" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" +) + +// DecodeOptions allows customizing sections of the decoding process. +type DecodeOptions struct { + ImpliedType func(gv interface{}) (cty.Type, error) + Convert func(in cty.Value, want cty.Type) (cty.Value, error) +} + +func (o DecodeOptions) DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + o = o.withDefaults() + + rv := reflect.ValueOf(val) + if rv.Kind() != reflect.Ptr { + panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String())) + } + + return o.decodeBodyToValue(body, ctx, rv.Elem()) +} + +// DecodeBody extracts the configuration within the given body into the given +// value. This value must be a non-nil pointer to either a struct or +// a map, where in the former case the configuration will be decoded using +// struct tags and in the latter case only attributes are allowed and their +// values are decoded into the map. 
+// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. +func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + return DecodeOptions{}.DecodeBody(body, ctx, val) +} + +func (o DecodeOptions) decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { + et := val.Type() + switch et.Kind() { + case reflect.Struct: + return o.decodeBodyToStruct(body, ctx, val) + case reflect.Map: + return o.decodeBodyToMap(body, ctx, val) + default: + panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String())) + } +} + +func (o DecodeOptions) decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics { + schema, partial := ImpliedBodySchema(val.Interface()) + + var content *hcl.BodyContent + var leftovers hcl.Body + var diags hcl.Diagnostics + if partial { + content, leftovers, diags = body.PartialContent(schema) + } else { + content, diags = body.Content(schema) + } + if content == nil { + return diags + } + + tags := getFieldTags(val.Type()) + + if tags.Body != nil { + fieldIdx := *tags.Body + field := val.Type().Field(fieldIdx) + fieldV := val.Field(fieldIdx) + switch { + case bodyType.AssignableTo(field.Type): + fieldV.Set(reflect.ValueOf(body)) + + default: + diags = append(diags, o.decodeBodyToValue(body, ctx, fieldV)...) 
+ } + } + + if tags.Remain != nil { + fieldIdx := *tags.Remain + field := val.Type().Field(fieldIdx) + fieldV := val.Field(fieldIdx) + switch { + case bodyType.AssignableTo(field.Type): + fieldV.Set(reflect.ValueOf(leftovers)) + case attrsType.AssignableTo(field.Type): + attrs, attrsDiags := leftovers.JustAttributes() + if len(attrsDiags) > 0 { + diags = append(diags, attrsDiags...) + } + fieldV.Set(reflect.ValueOf(attrs)) + default: + diags = append(diags, o.decodeBodyToValue(leftovers, ctx, fieldV)...) + } + } + + for name, fieldIdx := range tags.Attributes { + attr := content.Attributes[name] + field := val.Type().Field(fieldIdx) + fieldV := val.Field(fieldIdx) + + if attr == nil { + if !exprType.AssignableTo(field.Type) { + continue + } + + // As a special case, if the target is of type hcl.Expression then + // we'll assign an actual expression that evalues to a cty null, + // so the caller can deal with it within the cty realm rather + // than within the Go realm. + synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange()) + fieldV.Set(reflect.ValueOf(synthExpr)) + continue + } + + switch { + case attrType.AssignableTo(field.Type): + fieldV.Set(reflect.ValueOf(attr)) + case exprType.AssignableTo(field.Type): + fieldV.Set(reflect.ValueOf(attr.Expr)) + default: + diags = append(diags, o.DecodeExpression( + attr.Expr, ctx, fieldV.Addr().Interface(), + )...) 
+ } + } + + blocksByType := content.Blocks.ByType() + + for typeName, fieldIdx := range tags.Blocks { + blocks := blocksByType[typeName] + field := val.Type().Field(fieldIdx) + + ty := field.Type + isSlice := false + isPtr := false + if ty.Kind() == reflect.Slice { + isSlice = true + ty = ty.Elem() + } + if ty.Kind() == reflect.Ptr { + isPtr = true + ty = ty.Elem() + } + + if len(blocks) > 1 && !isSlice { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", typeName), + Detail: fmt.Sprintf( + "Only one %s block is allowed. Another was defined at %s.", + typeName, blocks[0].DefRange.String(), + ), + Subject: &blocks[1].DefRange, + }) + continue + } + + if len(blocks) == 0 { + if isSlice || isPtr { + if val.Field(fieldIdx).IsNil() { + val.Field(fieldIdx).Set(reflect.Zero(field.Type)) + } + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", typeName), + Detail: fmt.Sprintf("A %s block is required.", typeName), + Subject: body.MissingItemRange().Ptr(), + }) + } + continue + } + + switch { + + case isSlice: + elemType := ty + if isPtr { + elemType = reflect.PtrTo(ty) + } + sli := val.Field(fieldIdx) + if sli.IsNil() { + sli = reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks)) + } + + for i, block := range blocks { + if isPtr { + if i >= sli.Len() { + sli = reflect.Append(sli, reflect.New(ty)) + } + v := sli.Index(i) + if v.IsNil() { + v = reflect.New(ty) + } + diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...) + sli.Index(i).Set(v) + } else { + if i >= sli.Len() { + sli = reflect.Append(sli, reflect.Indirect(reflect.New(ty))) + } + diags = append(diags, o.decodeBlockToValue(block, ctx, sli.Index(i))...) 
+ } + } + + if sli.Len() > len(blocks) { + sli.SetLen(len(blocks)) + } + + val.Field(fieldIdx).Set(sli) + + default: + block := blocks[0] + if isPtr { + v := val.Field(fieldIdx) + if v.IsNil() { + v = reflect.New(ty) + } + diags = append(diags, o.decodeBlockToValue(block, ctx, v.Elem())...) + val.Field(fieldIdx).Set(v) + } else { + diags = append(diags, o.decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) + } + + } + + } + + return diags +} + +func (o DecodeOptions) decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + attrs, diags := body.JustAttributes() + if attrs == nil { + return diags + } + + mv := reflect.MakeMap(v.Type()) + + for k, attr := range attrs { + switch { + case attrType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr)) + case exprType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr)) + default: + ev := reflect.New(v.Type().Elem()) + diags = append(diags, o.DecodeExpression(attr.Expr, ctx, ev.Interface())...) 
+ mv.SetMapIndex(reflect.ValueOf(k), ev.Elem()) + } + } + + v.Set(mv) + + return diags +} + +func (o DecodeOptions) decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + diags := o.decodeBodyToValue(block.Body, ctx, v) + + if len(block.Labels) > 0 { + blockTags := getFieldTags(v.Type()) + for li, lv := range block.Labels { + lfieldIdx := blockTags.Labels[li].FieldIndex + v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) + } + } + + return diags +} + +func (o DecodeOptions) DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + o = o.withDefaults() + + srcVal, diags := expr.Value(ctx) + + convTy, err := o.ImpliedType(val) + if err != nil { + panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err)) + } + + srcVal, err = o.Convert(srcVal, convTy) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsuitable value type", + Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), + Subject: expr.StartRange().Ptr(), + Context: expr.Range().Ptr(), + }) + return diags + } + + err = gocty.FromCtyValue(srcVal, val) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsuitable value type", + Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()), + Subject: expr.StartRange().Ptr(), + Context: expr.Range().Ptr(), + }) + } + + return diags +} + +// DecodeExpression extracts the value of the given expression into the given +// value. This value must be something that gocty is able to decode into, +// since the final decoding is delegated to that package. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. 
+// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. +func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + return DecodeOptions{}.DecodeExpression(expr, ctx, val) +} + +func (o DecodeOptions) withDefaults() DecodeOptions { + if o.ImpliedType == nil { + o.ImpliedType = gocty.ImpliedType + } + + if o.Convert == nil { + o.Convert = convert.Convert + } + return o +} diff --git a/bake/hclparser/gohcl/decode_test.go b/bake/hclparser/gohcl/decode_test.go new file mode 100644 index 000000000000..1ac5d0496f39 --- /dev/null +++ b/bake/hclparser/gohcl/decode_test.go @@ -0,0 +1,813 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gohcl + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" + hclJSON "github.com/hashicorp/hcl/v2/json" + "github.com/zclconf/go-cty/cty" +) + +func TestDecodeBody(t *testing.T) { + deepEquals := func(other interface{}) func(v interface{}) bool { + return func(v interface{}) bool { + return reflect.DeepEqual(v, other) + } + } + + type withNameExpression struct { + Name hcl.Expression `hcl:"name"` + } + + type withTwoAttributes struct { + A string `hcl:"a,optional"` + B string `hcl:"b,optional"` + } + + type withNestedBlock struct { + Plain string `hcl:"plain,optional"` + Nested *withTwoAttributes `hcl:"nested,block"` + } + + type withListofNestedBlocks struct { + Nested []*withTwoAttributes `hcl:"nested,block"` + } + + type withListofNestedBlocksNoPointers struct { + Nested []withTwoAttributes `hcl:"nested,block"` + } + + tests := []struct { + Body map[string]interface{} + Target func() 
interface{} + Check func(v interface{}) bool + DiagCount int + }{ + { + map[string]interface{}{}, + makeInstantiateType(struct{}{}), + deepEquals(struct{}{}), + 0, + }, + { + map[string]interface{}{}, + makeInstantiateType(struct { + Name string `hcl:"name"` + }{}), + deepEquals(struct { + Name string `hcl:"name"` + }{}), + 1, // name is required + }, + { + map[string]interface{}{}, + makeInstantiateType(struct { + Name *string `hcl:"name"` + }{}), + deepEquals(struct { + Name *string `hcl:"name"` + }{}), + 0, + }, // name nil + { + map[string]interface{}{}, + makeInstantiateType(struct { + Name string `hcl:"name,optional"` + }{}), + deepEquals(struct { + Name string `hcl:"name,optional"` + }{}), + 0, + }, // name optional + { + map[string]interface{}{}, + makeInstantiateType(withNameExpression{}), + func(v interface{}) bool { + if v == nil { + return false + } + + wne, valid := v.(withNameExpression) + if !valid { + return false + } + + if wne.Name == nil { + return false + } + + nameVal, _ := wne.Name.Value(nil) + if !nameVal.IsNull() { + return false + } + + return true + }, + 0, + }, + { + map[string]interface{}{ + "name": "Ermintrude", + }, + makeInstantiateType(withNameExpression{}), + func(v interface{}) bool { + if v == nil { + return false + } + + wne, valid := v.(withNameExpression) + if !valid { + return false + } + + if wne.Name == nil { + return false + } + + nameVal, _ := wne.Name.Value(nil) + if !nameVal.Equals(cty.StringVal("Ermintrude")).True() { + return false + } + + return true + }, + 0, + }, + { + map[string]interface{}{ + "name": "Ermintrude", + }, + makeInstantiateType(struct { + Name string `hcl:"name"` + }{}), + deepEquals(struct { + Name string `hcl:"name"` + }{"Ermintrude"}), + 0, + }, + { + map[string]interface{}{ + "name": "Ermintrude", + "age": 23, + }, + makeInstantiateType(struct { + Name string `hcl:"name"` + }{}), + deepEquals(struct { + Name string `hcl:"name"` + }{"Ermintrude"}), + 1, // Extraneous "age" property + }, + { + 
map[string]interface{}{ + "name": "Ermintrude", + "age": 50, + }, + makeInstantiateType(struct { + Name string `hcl:"name"` + Attrs hcl.Attributes `hcl:",remain"` + }{}), + func(gotI interface{}) bool { + got := gotI.(struct { + Name string `hcl:"name"` + Attrs hcl.Attributes `hcl:",remain"` + }) + return got.Name == "Ermintrude" && len(got.Attrs) == 1 && got.Attrs["age"] != nil + }, + 0, + }, + { + map[string]interface{}{ + "name": "Ermintrude", + "age": 50, + }, + makeInstantiateType(struct { + Name string `hcl:"name"` + Remain hcl.Body `hcl:",remain"` + }{}), + func(gotI interface{}) bool { + got := gotI.(struct { + Name string `hcl:"name"` + Remain hcl.Body `hcl:",remain"` + }) + + attrs, _ := got.Remain.JustAttributes() + + return got.Name == "Ermintrude" && len(attrs) == 1 && attrs["age"] != nil + }, + 0, + }, + { + map[string]interface{}{ + "name": "Ermintrude", + "living": true, + }, + makeInstantiateType(struct { + Name string `hcl:"name"` + Remain map[string]cty.Value `hcl:",remain"` + }{}), + deepEquals(struct { + Name string `hcl:"name"` + Remain map[string]cty.Value `hcl:",remain"` + }{ + Name: "Ermintrude", + Remain: map[string]cty.Value{ + "living": cty.True, + }, + }), + 0, + }, + { + map[string]interface{}{ + "name": "Ermintrude", + "age": 50, + }, + makeInstantiateType(struct { + Name string `hcl:"name"` + Body hcl.Body `hcl:",body"` + Remain hcl.Body `hcl:",remain"` + }{}), + func(gotI interface{}) bool { + got := gotI.(struct { + Name string `hcl:"name"` + Body hcl.Body `hcl:",body"` + Remain hcl.Body `hcl:",remain"` + }) + + attrs, _ := got.Body.JustAttributes() + + return got.Name == "Ermintrude" && len(attrs) == 2 && + attrs["name"] != nil && attrs["age"] != nil + }, + 0, + }, + { + map[string]interface{}{ + "noodle": map[string]interface{}{}, + }, + makeInstantiateType(struct { + Noodle struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + // Generating no diagnostics is good enough for this one. 
+ return true + }, + 0, + }, + { + map[string]interface{}{ + "noodle": []map[string]interface{}{{}}, + }, + makeInstantiateType(struct { + Noodle struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + // Generating no diagnostics is good enough for this one. + return true + }, + 0, + }, + { + map[string]interface{}{ + "noodle": []map[string]interface{}{{}, {}}, + }, + makeInstantiateType(struct { + Noodle struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + // Generating one diagnostic is good enough for this one. + return true + }, + 1, + }, + { + map[string]interface{}{}, + makeInstantiateType(struct { + Noodle struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + // Generating one diagnostic is good enough for this one. + return true + }, + 1, + }, + { + map[string]interface{}{ + "noodle": []map[string]interface{}{}, + }, + makeInstantiateType(struct { + Noodle struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + // Generating one diagnostic is good enough for this one. 
+ return true + }, + 1, + }, + { + map[string]interface{}{ + "noodle": map[string]interface{}{}, + }, + makeInstantiateType(struct { + Noodle *struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + return gotI.(struct { + Noodle *struct{} `hcl:"noodle,block"` + }).Noodle != nil + }, + 0, + }, + { + map[string]interface{}{ + "noodle": []map[string]interface{}{{}}, + }, + makeInstantiateType(struct { + Noodle *struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + return gotI.(struct { + Noodle *struct{} `hcl:"noodle,block"` + }).Noodle != nil + }, + 0, + }, + { + map[string]interface{}{ + "noodle": []map[string]interface{}{}, + }, + makeInstantiateType(struct { + Noodle *struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + return gotI.(struct { + Noodle *struct{} `hcl:"noodle,block"` + }).Noodle == nil + }, + 0, + }, + { + map[string]interface{}{ + "noodle": []map[string]interface{}{{}, {}}, + }, + makeInstantiateType(struct { + Noodle *struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + // Generating one diagnostic is good enough for this one. 
+ return true + }, + 1, + }, + { + map[string]interface{}{ + "noodle": []map[string]interface{}{}, + }, + makeInstantiateType(struct { + Noodle []struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + noodle := gotI.(struct { + Noodle []struct{} `hcl:"noodle,block"` + }).Noodle + return len(noodle) == 0 + }, + 0, + }, + { + map[string]interface{}{ + "noodle": []map[string]interface{}{{}}, + }, + makeInstantiateType(struct { + Noodle []struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + noodle := gotI.(struct { + Noodle []struct{} `hcl:"noodle,block"` + }).Noodle + return len(noodle) == 1 + }, + 0, + }, + { + map[string]interface{}{ + "noodle": []map[string]interface{}{{}, {}}, + }, + makeInstantiateType(struct { + Noodle []struct{} `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + noodle := gotI.(struct { + Noodle []struct{} `hcl:"noodle,block"` + }).Noodle + return len(noodle) == 2 + }, + 0, + }, + { + map[string]interface{}{ + "noodle": map[string]interface{}{}, + }, + makeInstantiateType(struct { + Noodle struct { + Name string `hcl:"name,label"` + } `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + // Generating two diagnostics is good enough for this one. + // (one for the missing noodle block and the other for + // the JSON serialization detecting the missing level of + // hierarchy for the label.)
+ return true + }, + 2, + }, + { + map[string]interface{}{ + "noodle": map[string]interface{}{ + "foo_foo": map[string]interface{}{}, + }, + }, + makeInstantiateType(struct { + Noodle struct { + Name string `hcl:"name,label"` + } `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + noodle := gotI.(struct { + Noodle struct { + Name string `hcl:"name,label"` + } `hcl:"noodle,block"` + }).Noodle + return noodle.Name == "foo_foo" + }, + 0, + }, + { + map[string]interface{}{ + "noodle": map[string]interface{}{ + "foo_foo": map[string]interface{}{}, + "bar_baz": map[string]interface{}{}, + }, + }, + makeInstantiateType(struct { + Noodle struct { + Name string `hcl:"name,label"` + } `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + // One diagnostic is enough for this one. + return true + }, + 1, + }, + { + map[string]interface{}{ + "noodle": map[string]interface{}{ + "foo_foo": map[string]interface{}{}, + "bar_baz": map[string]interface{}{}, + }, + }, + makeInstantiateType(struct { + Noodles []struct { + Name string `hcl:"name,label"` + } `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + noodles := gotI.(struct { + Noodles []struct { + Name string `hcl:"name,label"` + } `hcl:"noodle,block"` + }).Noodles + return len(noodles) == 2 && (noodles[0].Name == "foo_foo" || noodles[0].Name == "bar_baz") && (noodles[1].Name == "foo_foo" || noodles[1].Name == "bar_baz") && noodles[0].Name != noodles[1].Name + }, + 0, + }, + { + map[string]interface{}{ + "noodle": map[string]interface{}{ + "foo_foo": map[string]interface{}{ + "type": "rice", + }, + }, + }, + makeInstantiateType(struct { + Noodle struct { + Name string `hcl:"name,label"` + Type string `hcl:"type"` + } `hcl:"noodle,block"` + }{}), + func(gotI interface{}) bool { + noodle := gotI.(struct { + Noodle struct { + Name string `hcl:"name,label"` + Type string `hcl:"type"` + } `hcl:"noodle,block"` + }).Noodle + return noodle.Name == "foo_foo" && noodle.Type == "rice" + }, + 0, + }, + + { + 
map[string]interface{}{ + "name": "Ermintrude", + "age": 34, + }, + makeInstantiateType(map[string]string(nil)), + deepEquals(map[string]string{ + "name": "Ermintrude", + "age": "34", + }), + 0, + }, + { + map[string]interface{}{ + "name": "Ermintrude", + "age": 89, + }, + makeInstantiateType(map[string]*hcl.Attribute(nil)), + func(gotI interface{}) bool { + got := gotI.(map[string]*hcl.Attribute) + return len(got) == 2 && got["name"] != nil && got["age"] != nil + }, + 0, + }, + { + map[string]interface{}{ + "name": "Ermintrude", + "age": 13, + }, + makeInstantiateType(map[string]hcl.Expression(nil)), + func(gotI interface{}) bool { + got := gotI.(map[string]hcl.Expression) + return len(got) == 2 && got["name"] != nil && got["age"] != nil + }, + 0, + }, + { + map[string]interface{}{ + "name": "Ermintrude", + "living": true, + }, + makeInstantiateType(map[string]cty.Value(nil)), + deepEquals(map[string]cty.Value{ + "name": cty.StringVal("Ermintrude"), + "living": cty.True, + }), + 0, + }, + { + // Retain "nested" block while decoding + map[string]interface{}{ + "plain": "foo", + }, + func() interface{} { + return &withNestedBlock{ + Plain: "bar", + Nested: &withTwoAttributes{ + A: "bar", + }, + } + }, + func(gotI interface{}) bool { + foo := gotI.(withNestedBlock) + return foo.Plain == "foo" && foo.Nested != nil && foo.Nested.A == "bar" + }, + 0, + }, + { + // Retain values in "nested" block while decoding + map[string]interface{}{ + "nested": map[string]interface{}{ + "a": "foo", + }, + }, + func() interface{} { + return &withNestedBlock{ + Nested: &withTwoAttributes{ + B: "bar", + }, + } + }, + func(gotI interface{}) bool { + foo := gotI.(withNestedBlock) + return foo.Nested.A == "foo" && foo.Nested.B == "bar" + }, + 0, + }, + { + // Retain values in "nested" block list while decoding + map[string]interface{}{ + "nested": []map[string]interface{}{ + { + "a": "foo", + }, + }, + }, + func() interface{} { + return &withListofNestedBlocks{ + Nested: 
[]*withTwoAttributes{ + &withTwoAttributes{ + B: "bar", + }, + }, + } + }, + func(gotI interface{}) bool { + n := gotI.(withListofNestedBlocks) + return n.Nested[0].A == "foo" && n.Nested[0].B == "bar" + }, + 0, + }, + { + // Remove additional elements from the list while decoding nested blocks + map[string]interface{}{ + "nested": []map[string]interface{}{ + { + "a": "foo", + }, + }, + }, + func() interface{} { + return &withListofNestedBlocks{ + Nested: []*withTwoAttributes{ + &withTwoAttributes{ + B: "bar", + }, + &withTwoAttributes{ + B: "bar", + }, + }, + } + }, + func(gotI interface{}) bool { + n := gotI.(withListofNestedBlocks) + return len(n.Nested) == 1 + }, + 0, + }, + { + // Make sure decoding value slices works the same as pointer slices. + map[string]interface{}{ + "nested": []map[string]interface{}{ + { + "b": "bar", + }, + { + "b": "baz", + }, + }, + }, + func() interface{} { + return &withListofNestedBlocksNoPointers{ + Nested: []withTwoAttributes{ + { + B: "foo", + }, + }, + } + }, + func(gotI interface{}) bool { + n := gotI.(withListofNestedBlocksNoPointers) + return n.Nested[0].B == "bar" && len(n.Nested) == 2 + }, + 0, + }, + } + + for i, test := range tests { + // For convenience here we're going to use the JSON parser + // to process the given body. 
+ buf, err := json.Marshal(test.Body) + if err != nil { + t.Fatalf("error JSON-encoding body for test %d: %s", i, err) + } + + t.Run(string(buf), func(t *testing.T) { + file, diags := hclJSON.Parse(buf, "test.json") + if len(diags) != 0 { + t.Fatalf("diagnostics while parsing: %s", diags.Error()) + } + + targetVal := reflect.ValueOf(test.Target()) + + diags = DecodeBody(file.Body, nil, targetVal.Interface()) + if len(diags) != test.DiagCount { + t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount) + for _, diag := range diags { + t.Logf(" - %s", diag.Error()) + } + } + got := targetVal.Elem().Interface() + if !test.Check(got) { + t.Errorf("wrong result\ngot: %s", spew.Sdump(got)) + } + }) + } + +} + +func TestDecodeExpression(t *testing.T) { + tests := []struct { + Value cty.Value + Target interface{} + Want interface{} + DiagCount int + }{ + { + cty.StringVal("hello"), + "", + "hello", + 0, + }, + { + cty.StringVal("hello"), + cty.NilVal, + cty.StringVal("hello"), + 0, + }, + { + cty.NumberIntVal(2), + "", + "2", + 0, + }, + { + cty.StringVal("true"), + false, + true, + 0, + }, + { + cty.NullVal(cty.String), + "", + "", + 1, // null value is not allowed + }, + { + cty.UnknownVal(cty.String), + "", + "", + 1, // value must be known + }, + { + cty.ListVal([]cty.Value{cty.True}), + false, + false, + 1, // bool required + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("%02d", i), func(t *testing.T) { + expr := &fixedExpression{test.Value} + + targetVal := reflect.New(reflect.TypeOf(test.Target)) + + diags := DecodeExpression(expr, nil, targetVal.Interface()) + if len(diags) != test.DiagCount { + t.Errorf("wrong number of diagnostics %d; want %d", len(diags), test.DiagCount) + for _, diag := range diags { + t.Logf(" - %s", diag.Error()) + } + } + got := targetVal.Elem().Interface() + if !reflect.DeepEqual(got, test.Want) { + t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.Want) + } + }) + } +} + +type fixedExpression 
struct { + val cty.Value +} + +func (e *fixedExpression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.val, nil +} + +func (e *fixedExpression) Range() (r hcl.Range) { + return +} +func (e *fixedExpression) StartRange() (r hcl.Range) { + return +} + +func (e *fixedExpression) Variables() []hcl.Traversal { + return nil +} + +func makeInstantiateType(target interface{}) func() interface{} { + return func() interface{} { + return reflect.New(reflect.TypeOf(target)).Interface() + } +} diff --git a/bake/hclparser/gohcl/doc.go b/bake/hclparser/gohcl/doc.go new file mode 100644 index 000000000000..cfec2530c193 --- /dev/null +++ b/bake/hclparser/gohcl/doc.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Package gohcl allows decoding HCL configurations into Go data structures. +// +// It provides a convenient and concise way of describing the schema for +// configuration and then accessing the resulting data via native Go +// types. +// +// A struct field tag scheme is used, similar to other decoding and +// unmarshalling libraries. The tags are formatted as in the following example: +// +// ThingType string `hcl:"thing_type,attr"` +// +// Within each tag there are two comma-separated tokens. The first is the +// name of the corresponding construct in configuration, while the second +// is a keyword giving the kind of construct expected. 
The following +// kind keywords are supported: +// +// attr (the default) indicates that the value is to be populated from an attribute + block indicates that the value is to be populated from a block + label indicates that the value is to be populated from a block label + optional is the same as attr, but the field is optional + remain indicates that the value is to be populated from the remaining body after populating other fields +// +// "attr" fields may either be of type *hcl.Expression, in which case the raw +// expression is assigned, or of any type accepted by gocty, in which case +// gocty will be used to assign the value to a native Go type. +// +// "block" fields may be a struct that recursively uses the same tags, or a +// slice of such structs, in which case multiple blocks of the corresponding +// type are decoded into the slice. +// +// "body" can be placed on a single field of type hcl.Body to capture +// the full hcl.Body that was decoded for a block. This does not allow leftover +// values like "remain", so a decoding error will still be returned if leftover +// fields are given. If you want to capture the decoding body PLUS leftover +// fields, you must specify a "remain" field as well to prevent errors. The +// body field and the remain field will both contain the leftover fields. +// +// "label" fields are considered only in a struct used as the type of a field +// marked as "block", and are used sequentially to capture the labels of +// the blocks being decoded. In this case, the name token is used only as +// an identifier for the label in diagnostic messages. +// +// "optional" fields behave like "attr" fields, but they are optional +// and will not give parsing errors if they are missing. +// +// "remain" can be placed on a single field that may be either of type +// hcl.Body or hcl.Attributes, in which case any remaining body content is +// placed into this field for delayed processing.
If no "remain" field is +// present then any attributes or blocks not matched by another valid tag +// will cause an error diagnostic. +// +// Only a subset of this tagging/typing vocabulary is supported for the +// "Encode" family of functions. See the EncodeIntoBody docs for full details +// on the constraints there. +// +// Broadly-speaking this package deals with two types of error. The first is +// errors in the configuration itself, which are returned as diagnostics +// written with the configuration author as the target audience. The second +// is bugs in the calling program, such as invalid struct tags, which are +// surfaced via panics since there can be no useful runtime handling of such +// errors and they should certainly not be returned to the user as diagnostics. +package gohcl diff --git a/bake/hclparser/gohcl/encode.go b/bake/hclparser/gohcl/encode.go new file mode 100644 index 000000000000..64cb2d1ec4bb --- /dev/null +++ b/bake/hclparser/gohcl/encode.go @@ -0,0 +1,194 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gohcl + +import ( + "fmt" + "reflect" + "sort" + + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty/gocty" +) + +// EncodeIntoBody replaces the contents of the given hclwrite Body with +// attributes and blocks derived from the given value, which must be a +// struct value or a pointer to a struct value with the struct tags defined +// in this package. +// +// This function can work only with fully-decoded data. It will ignore any +// fields tagged as "remain", any fields that decode attributes into either +// hcl.Attribute or hcl.Expression values, and any fields that decode blocks +// into hcl.Attributes values. This function does not have enough information +// to complete the decoding of these types. +// +// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock +// to produce a whole hclwrite.Block including block labels. 
+// +// As long as a suitable value is given to encode and the destination body +// is non-nil, this function will always complete. It will panic in case of +// any errors in the calling program, such as passing an inappropriate type +// or a nil body. +// +// The layout of the resulting HCL source is derived from the ordering of +// the struct fields, with blank lines around nested blocks of different types. +// Fields representing attributes should usually precede those representing +// blocks so that the attributes can group together in the result. For more +// control, use the hclwrite API directly. +func EncodeIntoBody(val interface{}, dst *hclwrite.Body) { + rv := reflect.ValueOf(val) + ty := rv.Type() + if ty.Kind() == reflect.Ptr { + rv = rv.Elem() + ty = rv.Type() + } + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) + } + + tags := getFieldTags(ty) + populateBody(rv, ty, tags, dst) +} + +// EncodeAsBlock creates a new hclwrite.Block populated with the data from +// the given value, which must be a struct or pointer to struct with the +// struct tags defined in this package. +// +// If the given struct type has fields tagged with "label" tags then they +// will be used in order to annotate the created block with labels. +// +// This function has the same constraints as EncodeIntoBody and will panic +// if they are violated. +func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block { + rv := reflect.ValueOf(val) + ty := rv.Type() + if ty.Kind() == reflect.Ptr { + rv = rv.Elem() + ty = rv.Type() + } + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) + } + + tags := getFieldTags(ty) + labels := make([]string, len(tags.Labels)) + for i, lf := range tags.Labels { + lv := rv.Field(lf.FieldIndex) + // We just stringify whatever we find. It should always be a string + // but if not then we'll still do something reasonable.
+ labels[i] = fmt.Sprintf("%s", lv.Interface()) + } + + block := hclwrite.NewBlock(blockType, labels) + populateBody(rv, ty, tags, block.Body()) + return block +} + +func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) { + nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks)) + namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks)) + for n, i := range tags.Attributes { + nameIdxs[n] = i + namesOrder = append(namesOrder, n) + } + for n, i := range tags.Blocks { + nameIdxs[n] = i + namesOrder = append(namesOrder, n) + } + sort.SliceStable(namesOrder, func(i, j int) bool { + ni, nj := namesOrder[i], namesOrder[j] + return nameIdxs[ni] < nameIdxs[nj] + }) + + dst.Clear() + + prevWasBlock := false + for _, name := range namesOrder { + fieldIdx := nameIdxs[name] + field := ty.Field(fieldIdx) + fieldTy := field.Type + fieldVal := rv.Field(fieldIdx) + + if fieldTy.Kind() == reflect.Ptr { + fieldTy = fieldTy.Elem() + fieldVal = fieldVal.Elem() + } + + if _, isAttr := tags.Attributes[name]; isAttr { + + if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) { + continue // ignore undecoded fields + } + if !fieldVal.IsValid() { + continue // ignore (field value is nil pointer) + } + if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() { + continue // ignore + } + if prevWasBlock { + dst.AppendNewline() + prevWasBlock = false + } + + valTy, err := gocty.ImpliedType(fieldVal.Interface()) + if err != nil { + panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err)) + } + + val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy) + if err != nil { + // This should never happen, since we should always be able + // to decode into the implied type. 
+ panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err)) + } + + dst.SetAttributeValue(name, val) + + } else { // must be a block, then + elemTy := fieldTy + isSeq := false + if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array { + isSeq = true + elemTy = elemTy.Elem() + } + + if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) { + continue // ignore undecoded fields + } + prevWasBlock = false + + if isSeq { + l := fieldVal.Len() + for i := 0; i < l; i++ { + elemVal := fieldVal.Index(i) + if !elemVal.IsValid() { + continue // ignore (elem value is nil pointer) + } + if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() { + continue // ignore + } + block := EncodeAsBlock(elemVal.Interface(), name) + if !prevWasBlock { + dst.AppendNewline() + prevWasBlock = true + } + dst.AppendBlock(block) + } + } else { + if !fieldVal.IsValid() { + continue // ignore (field value is nil pointer) + } + if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() { + continue // ignore + } + block := EncodeAsBlock(fieldVal.Interface(), name) + if !prevWasBlock { + dst.AppendNewline() + prevWasBlock = true + } + dst.AppendBlock(block) + } + } + } +} diff --git a/bake/hclparser/gohcl/encode_test.go b/bake/hclparser/gohcl/encode_test.go new file mode 100644 index 000000000000..a75bf28ba788 --- /dev/null +++ b/bake/hclparser/gohcl/encode_test.go @@ -0,0 +1,67 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package gohcl_test + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclwrite" +) + +func ExampleEncodeIntoBody() { + type Service struct { + Name string `hcl:"name,label"` + Exe []string `hcl:"executable"` + } + type Constraints struct { + OS string `hcl:"os"` + Arch string `hcl:"arch"` + } + type App struct { + Name string `hcl:"name"` + Desc string `hcl:"description"` + Constraints *Constraints `hcl:"constraints,block"` + Services []Service `hcl:"service,block"` + } + + app := App{ + Name: "awesome-app", + Desc: "Such an awesome application", + Constraints: &Constraints{ + OS: "linux", + Arch: "amd64", + }, + Services: []Service{ + { + Name: "web", + Exe: []string{"./web", "--listen=:8080"}, + }, + { + Name: "worker", + Exe: []string{"./worker"}, + }, + }, + } + + f := hclwrite.NewEmptyFile() + gohcl.EncodeIntoBody(&app, f.Body()) + fmt.Printf("%s", f.Bytes()) + + // Output: + // name = "awesome-app" + // description = "Such an awesome application" + // + // constraints { + // os = "linux" + // arch = "amd64" + // } + // + // service "web" { + // executable = ["./web", "--listen=:8080"] + // } + // service "worker" { + // executable = ["./worker"] + // } +} diff --git a/bake/hclparser/gohcl/schema.go b/bake/hclparser/gohcl/schema.go new file mode 100644 index 000000000000..0cdca271759a --- /dev/null +++ b/bake/hclparser/gohcl/schema.go @@ -0,0 +1,184 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gohcl + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/hcl/v2" +) + +// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the +// given value, which must be a struct value or a pointer to one. If an +// inappropriate value is passed, this function will panic. 
+// +// The second return argument indicates whether the given struct includes +// a "remain" field, and thus the returned schema is non-exhaustive. +// +// This uses the tags on the fields of the struct to discover how each +// field's value should be expressed within configuration. If an invalid +// mapping is attempted, this function will panic. +func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) { + ty := reflect.TypeOf(val) + + if ty.Kind() == reflect.Ptr { + ty = ty.Elem() + } + + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("given value must be struct, not %T", val)) + } + + var attrSchemas []hcl.AttributeSchema + var blockSchemas []hcl.BlockHeaderSchema + + tags := getFieldTags(ty) + + attrNames := make([]string, 0, len(tags.Attributes)) + for n := range tags.Attributes { + attrNames = append(attrNames, n) + } + sort.Strings(attrNames) + for _, n := range attrNames { + idx := tags.Attributes[n] + optional := tags.Optional[n] + field := ty.Field(idx) + + var required bool + + switch { + case field.Type.AssignableTo(exprType): + // If we're decoding to hcl.Expression then absence can be + // indicated via a null value, so we don't specify that + // the field is required during decoding.
+ required = false + case field.Type.Kind() != reflect.Ptr && !optional: + required = true + default: + required = false + } + + attrSchemas = append(attrSchemas, hcl.AttributeSchema{ + Name: n, + Required: required, + }) + } + + blockNames := make([]string, 0, len(tags.Blocks)) + for n := range tags.Blocks { + blockNames = append(blockNames, n) + } + sort.Strings(blockNames) + for _, n := range blockNames { + idx := tags.Blocks[n] + field := ty.Field(idx) + fty := field.Type + if fty.Kind() == reflect.Slice { + fty = fty.Elem() + } + if fty.Kind() == reflect.Ptr { + fty = fty.Elem() + } + if fty.Kind() != reflect.Struct { + panic(fmt.Sprintf( + "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, + )) + } + ftags := getFieldTags(fty) + var labelNames []string + if len(ftags.Labels) > 0 { + labelNames = make([]string, len(ftags.Labels)) + for i, l := range ftags.Labels { + labelNames[i] = l.Name + } + } + + blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ + Type: n, + LabelNames: labelNames, + }) + } + + partial = tags.Remain != nil + schema = &hcl.BodySchema{ + Attributes: attrSchemas, + Blocks: blockSchemas, + } + return schema, partial +} + +type fieldTags struct { + Attributes map[string]int + Blocks map[string]int + Labels []labelField + Remain *int + Body *int + Optional map[string]bool +} + +type labelField struct { + FieldIndex int + Name string +} + +func getFieldTags(ty reflect.Type) *fieldTags { + ret := &fieldTags{ + Attributes: map[string]int{}, + Blocks: map[string]int{}, + Optional: map[string]bool{}, + } + + ct := ty.NumField() + for i := 0; i < ct; i++ { + field := ty.Field(i) + tag := field.Tag.Get("hcl") + if tag == "" { + continue + } + + comma := strings.Index(tag, ",") + var name, kind string + if comma != -1 { + name = tag[:comma] + kind = tag[comma+1:] + } else { + name = tag + kind = "attr" + } + + switch kind { + case "attr": + ret.Attributes[name] = i + case "block": + 
ret.Blocks[name] = i + case "label": + ret.Labels = append(ret.Labels, labelField{ + FieldIndex: i, + Name: name, + }) + case "remain": + if ret.Remain != nil { + panic("only one 'remain' tag is permitted") + } + idx := i // copy, because this loop will continue assigning to i + ret.Remain = &idx + case "body": + if ret.Body != nil { + panic("only one 'body' tag is permitted") + } + idx := i // copy, because this loop will continue assigning to i + ret.Body = &idx + case "optional": + ret.Attributes[name] = i + ret.Optional[name] = true + default: + panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) + } + } + + return ret +} diff --git a/bake/hclparser/gohcl/schema_test.go b/bake/hclparser/gohcl/schema_test.go new file mode 100644 index 000000000000..ca8c0ec1588f --- /dev/null +++ b/bake/hclparser/gohcl/schema_test.go @@ -0,0 +1,233 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gohcl + +import ( + "fmt" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl/v2" +) + +func TestImpliedBodySchema(t *testing.T) { + tests := []struct { + val interface{} + wantSchema *hcl.BodySchema + wantPartial bool + }{ + { + struct{}{}, + &hcl.BodySchema{}, + false, + }, + { + struct { + Ignored bool + }{}, + &hcl.BodySchema{}, + false, + }, + { + struct { + Attr1 bool `hcl:"attr1"` + Attr2 bool `hcl:"attr2"` + }{}, + &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "attr1", + Required: true, + }, + { + Name: "attr2", + Required: true, + }, + }, + }, + false, + }, + { + struct { + Attr *bool `hcl:"attr,attr"` + }{}, + &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "attr", + Required: false, + }, + }, + }, + false, + }, + { + struct { + Thing struct{} `hcl:"thing,block"` + }{}, + &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "thing", + }, + }, + }, + false, + }, + { + struct { + Thing struct { + Type 
string `hcl:"type,label"` + Name string `hcl:"name,label"` + } `hcl:"thing,block"` + }{}, + &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "thing", + LabelNames: []string{"type", "name"}, + }, + }, + }, + false, + }, + { + struct { + Thing []struct { + Type string `hcl:"type,label"` + Name string `hcl:"name,label"` + } `hcl:"thing,block"` + }{}, + &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "thing", + LabelNames: []string{"type", "name"}, + }, + }, + }, + false, + }, + { + struct { + Thing *struct { + Type string `hcl:"type,label"` + Name string `hcl:"name,label"` + } `hcl:"thing,block"` + }{}, + &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "thing", + LabelNames: []string{"type", "name"}, + }, + }, + }, + false, + }, + { + struct { + Thing struct { + Name string `hcl:"name,label"` + Something string `hcl:"something"` + } `hcl:"thing,block"` + }{}, + &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "thing", + LabelNames: []string{"name"}, + }, + }, + }, + false, + }, + { + struct { + Doodad string `hcl:"doodad"` + Thing struct { + Name string `hcl:"name,label"` + } `hcl:"thing,block"` + }{}, + &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "doodad", + Required: true, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "thing", + LabelNames: []string{"name"}, + }, + }, + }, + false, + }, + { + struct { + Doodad string `hcl:"doodad"` + Config string `hcl:",remain"` + }{}, + &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "doodad", + Required: true, + }, + }, + }, + true, + }, + { + struct { + Expr hcl.Expression `hcl:"expr"` + }{}, + &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "expr", + Required: false, + }, + }, + }, + false, + }, + { + struct { + Meh string `hcl:"meh,optional"` + }{}, + &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "meh", + Required: false, + }, + }, + }, + false, + }, + } + + for _, test := range 
tests { + t.Run(fmt.Sprintf("%#v", test.val), func(t *testing.T) { + schema, partial := ImpliedBodySchema(test.val) + if !reflect.DeepEqual(schema, test.wantSchema) { + t.Errorf( + "wrong schema\ngot: %s\nwant: %s", + spew.Sdump(schema), spew.Sdump(test.wantSchema), + ) + } + + if partial != test.wantPartial { + t.Errorf( + "wrong partial flag\ngot: %#v\nwant: %#v", + partial, test.wantPartial, + ) + } + }) + } +} diff --git a/bake/hclparser/gohcl/types.go b/bake/hclparser/gohcl/types.go new file mode 100644 index 000000000000..302e454c238d --- /dev/null +++ b/bake/hclparser/gohcl/types.go @@ -0,0 +1,19 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package gohcl + +import ( + "reflect" + + "github.com/hashicorp/hcl/v2" +) + +var victimExpr hcl.Expression +var victimBody hcl.Body + +var exprType = reflect.TypeOf(&victimExpr).Elem() +var bodyType = reflect.TypeOf(&victimBody).Elem() +var blockType = reflect.TypeOf((*hcl.Block)(nil)) +var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) +var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/bake/hclparser/hclparser.go b/bake/hclparser/hclparser.go index fe7dc772dd78..2de20a67dc43 100644 --- a/bake/hclparser/hclparser.go +++ b/bake/hclparser/hclparser.go @@ -10,9 +10,9 @@ import ( "strconv" "strings" + "github.com/docker/buildx/bake/hclparser/gohcl" "github.com/docker/buildx/util/userfunc" "github.com/hashicorp/hcl/v2" - "github.com/hashicorp/hcl/v2/gohcl" "github.com/pkg/errors" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/gocty" @@ -448,7 +448,7 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err } // decode! 
- diag = gohcl.DecodeBody(body(), ectx, output.Interface()) + diag = decodeBody(body(), ectx, output.Interface()) if diag.HasErrors() { return diag } @@ -470,7 +470,7 @@ func (p *parser) resolveBlock(block *hcl.Block, target *hcl.BodySchema) (err err } // store the result into the evaluation context (so it can be referenced) - outputType, err := gocty.ImpliedType(output.Interface()) + outputType, err := ImpliedType(output.Interface()) if err != nil { return err } @@ -947,3 +947,8 @@ func key(ks ...any) uint64 { } return hash.Sum64() } + +func decodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics { + dec := gohcl.DecodeOptions{ImpliedType: ImpliedType} + return dec.DecodeBody(body, ctx, val) +} diff --git a/bake/hclparser/type_implied.go b/bake/hclparser/type_implied.go new file mode 100644 index 000000000000..e41360ff8fb4 --- /dev/null +++ b/bake/hclparser/type_implied.go @@ -0,0 +1,161 @@ +// MIT License +// +// Copyright (c) 2017-2018 Martin Atkins +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package hclparser + +import ( + "reflect" + + "github.com/zclconf/go-cty/cty" +) + +// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts +// to find a suitable cty.Type instance that could be used for a conversion +// with ToCtyValue. +// +// This allows -- for simple situations at least -- types to be defined just +// once in Go and the cty types derived from the Go types, but in the process +// it makes some assumptions that may be undesirable so applications are +// encouraged to build their cty types directly if exacting control is +// required. +// +// Not all Go types can be represented as cty types, so an error may be +// returned which is usually considered to be a bug in the calling program. +// In particular, ImpliedType will never use capsule types in its returned +// type, because it cannot know the capsule types supported by the calling +// program. 
+func ImpliedType(gv interface{}) (cty.Type, error) { + rt := reflect.TypeOf(gv) + var path cty.Path + return impliedType(rt, path) +} + +func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if ety, err := impliedTypeExt(rt, path); err == nil { + return ety, nil + } + + switch rt.Kind() { + + case reflect.Ptr: + return impliedType(rt.Elem(), path) + + // Primitive types + case reflect.Bool: + return cty.Bool, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.Number, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.Number, nil + case reflect.Float32, reflect.Float64: + return cty.Number, nil + case reflect.String: + return cty.String, nil + + // Collection types + case reflect.Slice: + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.List(ety), nil + case reflect.Map: + if !stringType.AssignableTo(rt.Key()) { + return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt) + } + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.Map(ety), nil + + // Structural types + case reflect.Struct: + return impliedStructType(rt, path) + + default: + return cty.NilType, path.NewErrorf("no cty.Type for %s", rt) + } +} + +func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if valueType.AssignableTo(rt) { + // Special case: cty.Value represents cty.DynamicPseudoType, for + // type conformance checking. 
+ return cty.DynamicPseudoType, nil + } + + fieldIdxs := structTagIndices(rt) + if len(fieldIdxs) == 0 { + return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt) + } + + atys := make(map[string]cty.Type, len(fieldIdxs)) + + { + // Temporary extension of path for attributes + path := append(path, nil) + + for k, fi := range fieldIdxs { + path[len(path)-1] = cty.GetAttrStep{Name: k} + + ft := rt.Field(fi).Type + aty, err := impliedType(ft, path) + if err != nil { + return cty.NilType, err + } + + atys[k] = aty + } + } + + return cty.Object(atys), nil +} + +var ( + valueType = reflect.TypeOf(cty.Value{}) + stringType = reflect.TypeOf("") +) + +// structTagIndices interrogates the fields of the given type (which must +// be a struct type, or we'll panic) and returns a map from the cty +// attribute names declared via struct tags to the indices of the +// fields holding those tags. +// +// This function will panic if two fields within the struct are tagged with +// the same cty attribute name. 
+func structTagIndices(st reflect.Type) map[string]int { + ct := st.NumField() + ret := make(map[string]int, ct) + + for i := 0; i < ct; i++ { + field := st.Field(i) + attrName := field.Tag.Get("cty") + if attrName != "" { + ret[attrName] = i + } + } + + return ret +} diff --git a/bake/hclparser/type_implied_ext.go b/bake/hclparser/type_implied_ext.go new file mode 100644 index 000000000000..ecb73b7a6c60 --- /dev/null +++ b/bake/hclparser/type_implied_ext.go @@ -0,0 +1,49 @@ +package hclparser + +import ( + "reflect" + "sync" + + "github.com/containerd/errdefs" + "github.com/zclconf/go-cty/cty" +) + +type FromCtyValue interface { + FromCtyValue(in cty.Value, path cty.Path) error +} + +func impliedTypeExt(rt reflect.Type, _ cty.Path) (cty.Type, error) { + if rt.AssignableTo(fromCtyValueType) { + return fromCtyValueCapsuleType(rt), nil + } + return cty.NilType, errdefs.ErrNotImplemented +} + +var ( + fromCtyValueType = reflect.TypeFor[FromCtyValue]() + fromCtyValueTypes sync.Map +) + +func fromCtyValueCapsuleType(rt reflect.Type) cty.Type { + if val, loaded := fromCtyValueTypes.Load(rt); loaded { + return val.(cty.Type) + } + + // First time used. + ety := cty.CapsuleWithOps(rt.Name(), rt.Elem(), &cty.CapsuleOps{ + ConversionTo: func(_ cty.Type) func(cty.Value, cty.Path) (interface{}, error) { + return func(in cty.Value, p cty.Path) (interface{}, error) { + rv := reflect.New(rt.Elem()).Interface() + if err := rv.(FromCtyValue).FromCtyValue(in, p); err != nil { + return nil, err + } + return rv, nil + } + }, + }) + + // Attempt to store the new type. Use whichever was loaded first + // in the case of a race condition. 
+ val, _ := fromCtyValueTypes.LoadOrStore(rt, ety) + return val.(cty.Type) +} diff --git a/controller/pb/export.go b/controller/pb/export.go index 3de33eb3fe4e..e8f49bb4fe7e 100644 --- a/controller/pb/export.go +++ b/controller/pb/export.go @@ -45,6 +45,7 @@ func CreateExports(entries []*ExportEntry) ([]client.ExportEntry, error) { supportDir = !tar case "registry": out.Type = client.ExporterImage + out.Attrs["push"] = "true" } if supportDir { diff --git a/go.mod b/go.mod index 8aa93470fd79..faa3458c811c 100644 --- a/go.mod +++ b/go.mod @@ -15,6 +15,7 @@ require ( github.com/containerd/platforms v0.2.1 github.com/containerd/typeurl/v2 v2.2.0 github.com/creack/pty v1.1.21 + github.com/davecgh/go-spew v1.1.1 github.com/distribution/reference v0.6.0 github.com/docker/cli v27.3.1+incompatible github.com/docker/cli-docs-tool v0.8.0 @@ -87,7 +88,6 @@ require ( github.com/containerd/containerd/api v1.7.19 // indirect github.com/containerd/ttrpc v1.2.5 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect diff --git a/util/buildflags/cty.go b/util/buildflags/cty.go new file mode 100644 index 000000000000..1c6e84f6aa14 --- /dev/null +++ b/util/buildflags/cty.go @@ -0,0 +1,44 @@ +package buildflags + +import ( + "encoding" + "encoding/json" + + "github.com/moby/buildkit/errdefs" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +func (e *ExportEntry) FromCtyValue(in cty.Value, p cty.Path) error { + return fromCtyValue(in, p, e) +} + +func fromCtyValue[V encoding.TextUnmarshaler](in cty.Value, p cty.Path, v V) error { + // Attempt to read as a cty.Map(cty.String) first as that's our primary form. 
+ if conv, err := convert.Convert(in, cty.Map(cty.String)); err == nil { + m := make(map[string]string, conv.LengthInt()) + for name, val := range conv.AsValueMap() { + m[name] = val.AsString() + } + + data, err := json.Marshal(m) + if err != nil { + return errdefs.Internal(err) + } + + if err := json.Unmarshal(data, v); err != nil { + return errdefs.Internal(err) + } + return nil + } + + // Also supports a string input. + if in, err := convert.Convert(in, cty.String); err == nil { + return v.UnmarshalText([]byte(in.AsString())) + } + + // Return a type mismatch. We want to use the map[string]string + // type since that's the primary type. + msg := convert.MismatchMessage(in.Type(), cty.Map(cty.String)) + return p.NewErrorf("%s", msg) +} diff --git a/util/buildflags/export.go b/util/buildflags/export.go index 37f3c274c302..a17408d498c3 100644 --- a/util/buildflags/export.go +++ b/util/buildflags/export.go @@ -1,7 +1,10 @@ package buildflags import ( + "encoding/json" + "maps" "regexp" + "sort" "strings" "github.com/containerd/platforms" @@ -13,67 +16,131 @@ import ( "github.com/tonistiigi/go-csvvalue" ) -func ParseExports(inp []string) ([]*controllerapi.ExportEntry, error) { - var outs []*controllerapi.ExportEntry - if len(inp) == 0 { - return nil, nil +type ExportEntry struct { + Type string `json:"type"` + Attrs map[string]string `json:"attrs,omitempty"` + Destination string `json:"dest,omitempty"` +} + +func (e *ExportEntry) Equal(other *ExportEntry) bool { + if e.Type != other.Type || e.Destination != other.Destination { + return false } - for _, s := range inp { - fields, err := csvvalue.Fields(s, nil) - if err != nil { - return nil, err - } + return maps.Equal(e.Attrs, other.Attrs) +} - out := controllerapi.ExportEntry{ - Attrs: map[string]string{}, - } - if len(fields) == 1 && fields[0] == s && !strings.HasPrefix(s, "type=") { - if s != "-" { - outs = append(outs, &controllerapi.ExportEntry{ - Type: client.ExporterLocal, - Destination: s, - }) - 
continue - } - out = controllerapi.ExportEntry{ - Type: client.ExporterTar, - Destination: s, - } - } +func (e *ExportEntry) String() string { + var b csvBuilder + if e.Type != "" { + b.Write("type", e.Type) + } + if e.Destination != "" { + b.Write("dest", e.Destination) + } + if len(e.Attrs) > 0 { + b.WriteAttributes(e.Attrs) + } + return b.String() +} - if out.Type == "" { - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return nil, errors.Errorf("invalid value %s", field) - } - key := strings.TrimSpace(strings.ToLower(parts[0])) - value := parts[1] - switch key { - case "type": - out.Type = value - default: - out.Attrs[key] = value - } - } - } - if out.Type == "" { - return nil, errors.Errorf("type is required for output") +func (e *ExportEntry) ToPB() *controllerapi.ExportEntry { + return &controllerapi.ExportEntry{ + Type: e.Type, + Attrs: maps.Clone(e.Attrs), + Destination: e.Destination, + } +} + +func (e *ExportEntry) MarshalJSON() ([]byte, error) { + m := maps.Clone(e.Attrs) + if m == nil { + m = map[string]string{} + } + m["type"] = e.Type + if e.Destination != "" { + m["dest"] = e.Destination + } + return json.Marshal(m) +} + +func (e *ExportEntry) UnmarshalJSON(data []byte) error { + var m map[string]string + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + e.Type = m["type"] + delete(m, "type") + + e.Destination = m["dest"] + delete(m, "dest") + + e.Attrs = m + return e.Validate() +} + +func (e *ExportEntry) UnmarshalText(text []byte) error { + s := string(text) + fields, err := csvvalue.Fields(s, nil) + if err != nil { + return err + } + + // Clear the target entry. 
+ e.Type = "" + e.Attrs = map[string]string{} + e.Destination = "" + + if len(fields) == 1 && fields[0] == s && !strings.HasPrefix(s, "type=") { + if s != "-" { + e.Type = client.ExporterLocal + e.Destination = s + return nil } - if out.Type == "registry" { - out.Type = client.ExporterImage - if _, ok := out.Attrs["push"]; !ok { - out.Attrs["push"] = "true" + e.Type = client.ExporterTar + e.Destination = s + } + + if e.Type == "" { + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + if len(parts) != 2 { + return errors.Errorf("invalid value %s", field) + } + key := strings.TrimSpace(strings.ToLower(parts[0])) + value := parts[1] + switch key { + case "type": + e.Type = value + case "dest": + e.Destination = value + default: + e.Attrs[key] = value } } + } + return e.Validate() +} - if dest, ok := out.Attrs["dest"]; ok { - out.Destination = dest - delete(out.Attrs, "dest") - } +func (e *ExportEntry) Validate() error { + if e.Type == "" { + return errors.Errorf("type is required for output") + } + return nil +} - outs = append(outs, &out) +func ParseExports(inp []string) ([]*controllerapi.ExportEntry, error) { + var outs []*controllerapi.ExportEntry + if len(inp) == 0 { + return nil, nil + } + for _, s := range inp { + var out ExportEntry + if err := out.UnmarshalText([]byte(s)); err != nil { + return nil, err + } + outs = append(outs, out.ToPB()) } return outs, nil } @@ -142,3 +209,32 @@ func ParseAnnotations(inp []string) (map[exptypes.AnnotationKey]string, error) { } return annotations, nil } + +type csvBuilder struct { + sb strings.Builder +} + +func (w *csvBuilder) Write(key, value string) { + if w.sb.Len() > 0 { + w.sb.WriteByte(',') + } + w.sb.WriteString(key) + w.sb.WriteByte('=') + w.sb.WriteString(value) +} + +func (w *csvBuilder) WriteAttributes(attrs map[string]string) { + keys := make([]string, 0, len(attrs)) + for key := range attrs { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + w.Write(key, 
attrs[key]) + } +} + +func (w *csvBuilder) String() string { + return w.sb.String() +}