containerd.go
package containerd

import (
	"context"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/gc"
	"github.com/containerd/containerd/leases"
	ptypes "github.com/containerd/containerd/protobuf/types"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/metadata"
	"github.com/moby/buildkit/executor/containerdexecutor"
	"github.com/moby/buildkit/executor/oci"
	containerdsnapshot "github.com/moby/buildkit/snapshot/containerd"
	"github.com/moby/buildkit/util/leaseutil"
	"github.com/moby/buildkit/util/network/netproviders"
	"github.com/moby/buildkit/util/winlayers"
	"github.com/moby/buildkit/worker/base"
	wlabel "github.com/moby/buildkit/worker/label"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/semaphore"
)

// NewWorkerOpt creates a WorkerOpt for a worker backed by a containerd daemon
// reachable at the given address, using the given snapshotter and namespace.
func NewWorkerOpt(root string, address, snapshotterName, ns string, rootless bool, labels map[string]string, dns *oci.DNSConfig, nopt netproviders.Opt, apparmorProfile string, selinux bool, parallelismSem *semaphore.Weighted, traceSocket string, opts ...containerd.ClientOpt) (base.WorkerOpt, error) {
	opts = append(opts, containerd.WithDefaultNamespace(ns))
	client, err := containerd.New(address, opts...)
	if err != nil {
		return base.WorkerOpt{}, errors.Wrapf(err, "failed to connect client to %q. make sure containerd is running", address)
	}
	return newContainerd(root, client, snapshotterName, ns, rootless, labels, dns, nopt, apparmorProfile, selinux, parallelismSem, traceSocket)
}
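
// Illustrative sketch of how a caller might build a containerd-backed worker
// with NewWorkerOpt. The root path, socket address, snapshotter, and namespace
// below are hypothetical placeholders, and the call is shown unqualified; an
// importing package would use whatever name it gives this package.
//
//	opt, err := NewWorkerOpt(
//		"/var/lib/buildkit",               // worker state root (placeholder)
//		"/run/containerd/containerd.sock", // containerd GRPC address (placeholder)
//		"overlayfs",                       // snapshotter name (placeholder)
//		"buildkit",                        // containerd namespace (placeholder)
//		false,                             // rootless
//		nil,                               // extra worker labels
//		nil,                               // DNS config
//		netproviders.Opt{},                // network provider options
//		"",                                // AppArmor profile
//		false,                             // SELinux
//		nil,                               // parallelism semaphore
//		"",                                // trace socket
//	)
//	if err != nil {
//		// handle error
//	}
//	// opt is then typically passed to base.NewWorker to construct the worker.
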
func newContainerd(root string, client *containerd.Client, snapshotterName, ns string, rootless bool, labels map[string]string, dns *oci.DNSConfig, nopt netproviders.Opt, apparmorProfile string, selinux bool, parallelismSem *semaphore.Weighted, traceSocket string) (base.WorkerOpt, error) {
	if strings.Contains(snapshotterName, "/") {
		return base.WorkerOpt{}, errors.Errorf("bad snapshotter name: %q", snapshotterName)
	}
	name := "containerd-" + snapshotterName
	root = filepath.Join(root, name)
	if err := os.MkdirAll(root, 0700); err != nil {
		return base.WorkerOpt{}, errors.Wrapf(err, "failed to create %s", root)
	}

	df := client.DiffService()
	// TODO: should use containerd daemon instance ID (containerd/containerd#1862)?
	id, err := base.ID(root)
	if err != nil {
		return base.WorkerOpt{}, err
	}

	serverInfo, err := client.IntrospectionService().Server(context.TODO(), &ptypes.Empty{})
	if err != nil {
		return base.WorkerOpt{}, err
	}

	np, npResolvedMode, err := netproviders.Providers(nopt)
	if err != nil {
		return base.WorkerOpt{}, err
	}

	hostname, err := os.Hostname()
	if err != nil {
		hostname = "unknown"
	}

	// Worker labels describe the executor, snapshotter, host, and security
	// settings; caller-supplied labels are merged in last and may override them.
	xlabels := map[string]string{
		wlabel.Executor:       "containerd",
		wlabel.Snapshotter:    snapshotterName,
		wlabel.Hostname:       hostname,
		wlabel.Network:        npResolvedMode,
		wlabel.SELinuxEnabled: strconv.FormatBool(selinux),
	}
	if apparmorProfile != "" {
		xlabels[wlabel.ApparmorProfile] = apparmorProfile
	}
	xlabels[wlabel.ContainerdNamespace] = ns
	xlabels[wlabel.ContainerdUUID] = serverInfo.UUID
	for k, v := range labels {
		xlabels[k] = v
	}

	lm := leaseutil.WithNamespace(client.LeasesService(), ns)
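	// The GarbageCollect hook below triggers containerd's garbage collector:
	// it creates a throwaway lease and deletes it with SynchronousDelete,
	// which causes containerd to run GC before the Delete call returns. If
	// creating the lease fails, the error is swallowed and GC is skipped.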
	gc := func(ctx context.Context) (gc.Stats, error) {
		l, err := lm.Create(ctx)
		if err != nil {
			return nil, nil
		}
		return nil, lm.Delete(ctx, leases.Lease{ID: l.ID}, leases.SynchronousDelete)
	}

	cs := containerdsnapshot.NewContentStore(client.ContentStore(), ns)

	resp, err := client.IntrospectionService().Plugins(context.TODO(), []string{"type==io.containerd.runtime.v1", "type==io.containerd.runtime.v2"})
	if err != nil {
		return base.WorkerOpt{}, errors.Wrap(err, "failed to list runtime plugins")
	}
	if len(resp.Plugins) == 0 {
		return base.WorkerOpt{}, errors.New("failed to find any runtime plugins")
	}

	// The platforms this worker can execute are derived from the runtime
	// plugins registered with the containerd daemon.
	var platforms []ocispecs.Platform
	for _, plugin := range resp.Plugins {
		for _, p := range plugin.Platforms {
			platforms = append(platforms, ocispecs.Platform{
				OS:           p.OS,
				Architecture: p.Architecture,
				Variant:      p.Variant,
			})
		}
	}

	snap := containerdsnapshot.NewSnapshotter(snapshotterName, client.SnapshotService(snapshotterName), ns, nil)

	// Migrate any metadata written by older versions of the cache store into
	// the v2 database before opening it.
	if err := cache.MigrateV2(
		context.TODO(),
		filepath.Join(root, "metadata.db"),
		filepath.Join(root, "metadata_v2.db"),
		cs,
		snap,
		lm,
	); err != nil {
		return base.WorkerOpt{}, err
	}

	md, err := metadata.NewStore(filepath.Join(root, "metadata_v2.db"))
	if err != nil {
		return base.WorkerOpt{}, err
	}

	opt := base.WorkerOpt{
		ID:               id,
		Labels:           xlabels,
		MetadataStore:    md,
		NetworkProviders: np,
		Executor:         containerdexecutor.New(client, root, "", np, dns, apparmorProfile, selinux, traceSocket, rootless),
		Snapshotter:      snap,
		ContentStore:     cs,
		Applier:          winlayers.NewFileSystemApplierWithWindows(cs, df),
		Differ:           winlayers.NewWalkingDiffWithWindows(cs, df),
		ImageStore:       client.ImageService(),
		Platforms:        platforms,
		LeaseManager:     lm,
		GarbageCollect:   gc,
		ParallelismSem:   parallelismSem,
		MountPoolRoot:    filepath.Join(root, "cachemounts"),
	}
	return opt, nil
}