This repository has been archived by the owner on Nov 1, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
/
Copy pathsync.go
77 lines (67 loc) · 2.4 KB
/
sync.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
package sync
import (
"github.com/go-kit/kit/log"
"github.com/pkg/errors"
"github.com/weaveworks/flux/cluster"
"github.com/weaveworks/flux/policy"
"github.com/weaveworks/flux/resource"
)
// Sync synchronises the cluster to the files in a directory
func Sync(m cluster.Manifests, repoResources map[string]resource.Resource, clus cluster.Cluster, deletes bool, logger log.Logger) error {
// Get a map of resources defined in the cluster
clusterBytes, err := clus.Export()
if err != nil {
return errors.Wrap(err, "exporting resource defs from cluster")
}
clusterResources, err := m.ParseManifests(clusterBytes)
if err != nil {
return errors.Wrap(err, "parsing exported resources")
}
// Everything that's in the cluster but not in the repo, delete;
// everything that's in the repo, apply. This is an approximation
// to figuring out what's changed, and applying that. We're
// relying on Kubernetes to decide for each application if it is a
// no-op.
sync := cluster.SyncDef{}
// DANGER ZONE (tamara) This works and is dangerous. At the moment will delete Flux and
// other pods unless the relevant manifests are part of the user repo. Needs a lot of thought
// before this cleanup cluster feature can be unleashed on the world.
if deletes {
for id, res := range clusterResources {
prepareSyncDelete(logger, repoResources, id, res, &sync)
}
}
for id, res := range repoResources {
prepareSyncApply(logger, clusterResources, id, res, &sync)
}
return clus.Sync(sync)
}
// prepareSyncDelete appends a delete action for the cluster resource res
// when it is absent from repoResources. Resources carrying the ignore
// policy are skipped (and the skip is logged). As a safety net, an empty
// repo never triggers deletions at all.
func prepareSyncDelete(logger log.Logger, repoResources map[string]resource.Resource, id string, res resource.Resource, sync *cluster.SyncDef) {
	// Guard: an empty repo would mean "delete everything"; refuse.
	if len(repoResources) == 0 {
		return
	}
	// Guard: honour the ignore policy on the cluster resource.
	if res.Policy().Contains(policy.Ignore) {
		logger.Log("resource", res.ResourceID(), "ignore", "delete")
		return
	}
	// Still defined in the repo, so it must not be deleted.
	_, inRepo := repoResources[id]
	if inRepo {
		return
	}
	sync.Actions = append(sync.Actions, cluster.SyncAction{
		Delete: res,
	})
}
// prepareSyncApply appends an apply action for the repo resource res,
// unless either the repo copy or the corresponding cluster resource (if
// one exists under the same id) carries the ignore policy; ignored
// resources are logged and skipped.
func prepareSyncApply(logger log.Logger, clusterResources map[string]resource.Resource, id string, res resource.Resource, sync *cluster.SyncDef) {
	// The repo copy may opt out of syncing directly...
	skip := res.Policy().Contains(policy.Ignore)
	if !skip {
		// ...or the running cluster copy may carry the ignore policy.
		if cres, inCluster := clusterResources[id]; inCluster {
			skip = cres.Policy().Contains(policy.Ignore)
		}
	}
	if skip {
		logger.Log("resource", res.ResourceID(), "ignore", "apply")
		return
	}
	sync.Actions = append(sync.Actions, cluster.SyncAction{
		Apply: res,
	})
}