fixing bugs - only nodeup hash is quirky
chrislovecnm committed Oct 18, 2017
1 parent 6e295be commit 8944357
Showing 10 changed files with 77 additions and 70 deletions.
10 changes: 5 additions & 5 deletions pkg/assets/builder.go
@@ -41,8 +41,8 @@ type AssetBuilder struct {
FileAssets []*FileAsset
AssetsLocation *kops.Assets
// yay go cyclic dependency
//Lifecycle *fi.Lifecycle
Lifecycle string
//Phase cloudup.Phase
Phase string
}

type ContainerAsset struct {
@@ -70,10 +70,10 @@ type FileAsset struct {
SHAValue string
}

func NewAssetBuilder(assets *kops.Assets, lifecycle string) *AssetBuilder {
func NewAssetBuilder(assets *kops.Assets, phase string) *AssetBuilder {
return &AssetBuilder{
AssetsLocation: assets,
Lifecycle: lifecycle,
Phase: phase,
}
}

@@ -224,7 +224,7 @@ func (a *AssetBuilder) FindHash(file *FileAsset) (*hashing.Hash, error) {

// FIXME ugly hack because dep loop with lifecycle
// FIXME move lifecycle out of fi??
if a.Lifecycle == string("Sync") {
if a.Phase == "assets" && file.CanonicalLocation != "" {
u = file.CanonicalLocation
}

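For orientation: the hunks above swap the builder's Lifecycle string for a Phase string (kept as a plain string because of the fi/cloudup import cycle the FIXME notes), and FindHash now falls back to file.CanonicalLocation only during the "assets" phase. A minimal caller sketch follows; the import paths, the baseURL parameter, and the helper name are assumptions, while NewAssetBuilder, RemapFileAndSHA, hash.Hex(), and the "assets" phase value come from the diffs in this commit:

package example

import (
	"github.com/golang/glog"

	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/assets"
)

// remapManifest is illustrative only: build the asset builder for the
// asset-staging phase, then remap a file URL together with its published SHA-1.
func remapManifest(spec *kops.Assets, baseURL string) (string, error) {
	// Passing "assets" is what lets FindHash prefer CanonicalLocation when one is set.
	builder := assets.NewAssetBuilder(spec, "assets")

	u := baseURL + "/kubernetes-manifests.tar.gz"
	remapped, hash, err := builder.RemapFileAndSHA(u, u+".sha1")
	if err != nil {
		return "", err
	}
	glog.Infof("remapped %s (sha1 %s)", remapped, hash.Hex())
	return remapped, nil
}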
2 changes: 1 addition & 1 deletion pkg/assets/builder_test.go
@@ -52,7 +52,7 @@ func TestRemap_File(t *testing.T) {
// TODO FIXME
builder := NewAssetBuilder(g.kopsAssets, "")

actual,_, err := builder.RemapFileAndSHA(g.testFile, g.sha)
actual, _, err := builder.RemapFileAndSHA(g.testFile, g.sha)
if err != nil {
t.Errorf("err occurred: %v", err)
}
4 changes: 2 additions & 2 deletions pkg/model/components/kubecontrollermanager_test.go
@@ -42,7 +42,7 @@ func buildCluster() *api.Cluster {

func Test_Build_KCM_Builder_Lower_Version(t *testing.T) {
versions := []string{"v1.4.0", "v1.4.7", "v1.5.0"}
b := assets.NewAssetBuilder(nil,"")
b := assets.NewAssetBuilder(nil, "")

for _, v := range versions {

@@ -103,7 +103,7 @@ func Test_Build_KCM_Builder_Change_Duration(t *testing.T) {
c := buildCluster()
c.Spec.KubernetesVersion = "v1.5.2"

b := assets.NewAssetBuilder(nil,"")
b := assets.NewAssetBuilder(nil, "")
kcm := &KubeControllerManagerOptionsBuilder{
Context: &OptionsContext{
AssetBuilder: b,
2 changes: 1 addition & 1 deletion pkg/model/components/kubelet_test.go
@@ -34,7 +34,7 @@ func buildSpec() *kops.ClusterSpec {
}

func buildOptions(spec *kops.ClusterSpec) error {
ab := assets.NewAssetBuilder(nil,"")
ab := assets.NewAssetBuilder(nil, "")

ver, err := KubernetesVersion(spec)
if err != nil {
6 changes: 3 additions & 3 deletions pkg/model/components/kubescheduler_test.go
@@ -38,7 +38,7 @@ func buildSchedulerConfigMapCluster() *api.Cluster {

func Test_Build_Scheduler_Without_PolicyConfigMap(t *testing.T) {
versions := []string{"v1.6.0", "v1.6.4", "v1.7.0", "v1.7.4"}
b := assets.NewAssetBuilder(nil,"")
b := assets.NewAssetBuilder(nil, "")

for _, v := range versions {

@@ -70,7 +70,7 @@ func Test_Build_Scheduler_Without_PolicyConfigMap(t *testing.T) {
}
func Test_Build_Scheduler_PolicyConfigMap_Unsupported_Version(t *testing.T) {
versions := []string{"v1.6.0", "v1.6.4"}
b := assets.NewAssetBuilder(nil,"")
b := assets.NewAssetBuilder(nil, "")

for _, v := range versions {

@@ -103,7 +103,7 @@ func Test_Build_Scheduler_PolicyConfigMap_Unsupported_Version(t *testing.T) {

func Test_Build_Scheduler_PolicyConfigMap_Supported_Version(t *testing.T) {
versions := []string{"v1.7.0", "v1.7.4", "v1.8.0"}
b := assets.NewAssetBuilder(nil,"")
b := assets.NewAssetBuilder(nil, "")

for _, v := range versions {

112 changes: 58 additions & 54 deletions upup/pkg/fi/cloudup/apply_cluster.go
@@ -154,6 +154,13 @@ func (c *ApplyClusterCmd) Run() error {
}
c.channel = channel

// FIXME this is an ugly hack. Need to move phases out of cloudup
assetBuilder := assets.NewAssetBuilder(c.Cluster.Spec.Assets, string(c.Phase))
err = c.upgradeSpecs(assetBuilder)
if err != nil {
return err
}

err = c.validateKopsVersion()
if err != nil {
return err
@@ -211,53 +218,6 @@ func (c *ApplyClusterCmd) Run() error {
cluster.Spec.KubernetesVersion = versionWithoutV
}

iamLifecycle := lifecyclePointer(fi.LifecycleSync)
networkLifecycle := lifecyclePointer(fi.LifecycleSync)
clusterLifecycle := lifecyclePointer(fi.LifecycleSync)
stageAssetsLifecycle := lifecyclePointer(fi.LifecycleSync)

switch c.Phase {
case Phase(""):
// Everything ... the default
case PhaseStageAssets:
stageAssetsLifecycle = lifecyclePointer(fi.LifecycleSync)
iamLifecycle = lifecyclePointer(fi.LifecycleIgnore)
networkLifecycle = lifecyclePointer(fi.LifecycleIgnore)
clusterLifecycle = lifecyclePointer(fi.LifecycleIgnore)

case PhaseIAM:
stageAssetsLifecycle = lifecyclePointer(fi.LifecycleIgnore)
networkLifecycle = lifecyclePointer(fi.LifecycleIgnore)
clusterLifecycle = lifecyclePointer(fi.LifecycleIgnore)

case PhaseNetwork:
stageAssetsLifecycle = lifecyclePointer(fi.LifecycleIgnore)
iamLifecycle = lifecyclePointer(fi.LifecycleIgnore)
clusterLifecycle = lifecyclePointer(fi.LifecycleIgnore)

case PhaseCluster:
if c.TargetName == TargetDryRun {
stageAssetsLifecycle = lifecyclePointer(fi.LifecycleExistsAndWarnIfChanges)
iamLifecycle = lifecyclePointer(fi.LifecycleExistsAndWarnIfChanges)
networkLifecycle = lifecyclePointer(fi.LifecycleExistsAndWarnIfChanges)
} else {
stageAssetsLifecycle = lifecyclePointer(fi.LifecycleIgnore)
iamLifecycle = lifecyclePointer(fi.LifecycleExistsAndValidates)
networkLifecycle = lifecyclePointer(fi.LifecycleExistsAndValidates)
}
default:
return fmt.Errorf("unknown phase %q", c.Phase)
}

// FIXME this is an ugly hack. Need to move lifecycles out of phases
life := *stageAssetsLifecycle
phase := string(life)
assetBuilder := assets.NewAssetBuilder(c.Cluster.Spec.Assets, phase)
err = c.upgradeSpecs(assetBuilder)
if err != nil {
return err
}

// FIXME files
if len(c.Assets) == 0 {
var baseURL string
@@ -315,7 +275,8 @@ func (c *ApplyClusterCmd) Run() error {
}

if needsKubernetesManifests(cluster, c.InstanceGroups) {
defaultManifestsAsset, hash, err := FileUrl(baseURL + "/kubernetes-manifests.tar.gz", assetBuilder)
defaultManifestsAsset := baseURL + "/kubernetes-manifests.tar.gz"
defaultManifestsAsset, hash, err := assetBuilder.RemapFileAndSHA(defaultManifestsAsset, defaultManifestsAsset+".sha1")
if err != nil {
return err
}
@@ -325,12 +286,22 @@
}

if c.NodeUpSource == "" {
n, hash, err := NodeUpLocation(assetBuilder)
// FIXME why does this break CF test
//n, hash, err := NodeUpLocation(assetBuilder)
n, _, err := NodeUpLocation(assetBuilder)
if err != nil {
return err
}
c.NodeUpSource = n
c.NodeUpHash = hash.Hex()

// FIXME why does this break CF test
//c.NodeUpHash = hash.Hex()
}

// In dry-run this is not being called
_, _, err = ProtokubeImageSource(assetBuilder)
if err != nil {
return err
}

checkExisting := true
@@ -511,7 +482,43 @@ func (c *ApplyClusterCmd) Run() error {
l.WorkDir = c.OutDir
l.ModelStore = modelStore

iamLifecycle := lifecyclePointer(fi.LifecycleSync)
networkLifecycle := lifecyclePointer(fi.LifecycleSync)
clusterLifecycle := lifecyclePointer(fi.LifecycleSync)
stageAssetsLifecycle := lifecyclePointer(fi.LifecycleSync)

switch c.Phase {
case Phase(""):
// Everything ... the default
case PhaseStageAssets:
stageAssetsLifecycle = lifecyclePointer(fi.LifecycleSync)
iamLifecycle = lifecyclePointer(fi.LifecycleIgnore)
networkLifecycle = lifecyclePointer(fi.LifecycleIgnore)
clusterLifecycle = lifecyclePointer(fi.LifecycleIgnore)

case PhaseIAM:
stageAssetsLifecycle = lifecyclePointer(fi.LifecycleIgnore)
networkLifecycle = lifecyclePointer(fi.LifecycleIgnore)
clusterLifecycle = lifecyclePointer(fi.LifecycleIgnore)

case PhaseNetwork:
stageAssetsLifecycle = lifecyclePointer(fi.LifecycleIgnore)
iamLifecycle = lifecyclePointer(fi.LifecycleIgnore)
clusterLifecycle = lifecyclePointer(fi.LifecycleIgnore)

case PhaseCluster:
if c.TargetName == TargetDryRun {
stageAssetsLifecycle = lifecyclePointer(fi.LifecycleExistsAndWarnIfChanges)
iamLifecycle = lifecyclePointer(fi.LifecycleExistsAndWarnIfChanges)
networkLifecycle = lifecyclePointer(fi.LifecycleExistsAndWarnIfChanges)
} else {
stageAssetsLifecycle = lifecyclePointer(fi.LifecycleIgnore)
iamLifecycle = lifecyclePointer(fi.LifecycleExistsAndValidates)
networkLifecycle = lifecyclePointer(fi.LifecycleExistsAndValidates)
}
default:
return fmt.Errorf("unknown phase %q", c.Phase)
}

var fileModels []string
for _, m := range c.Models {
@@ -645,7 +652,7 @@ func (c *ApplyClusterCmd) Run() error {
imagePath := baseURL + "/bin/linux/amd64/" + component + ".tar"
glog.Infof("Adding docker image: %s", imagePath)

imagePath, hash, err := FileUrl(imagePath, assetBuilder)
imagePath, hash, err := assetBuilder.RemapFileAndSHA(imagePath, imagePath+".sha1")
if err != nil {
return nil, err
}
@@ -676,7 +683,6 @@ func (c *ApplyClusterCmd) Run() error {
return config, nil
}

// FIXME nodeup source hash
bootstrapScriptBuilder := &model.BootstrapScript{
NodeUpConfigBuilder: renderNodeUpConfig,
NodeUpSourceHash: c.NodeUpHash,
@@ -853,8 +859,6 @@ func (c *ApplyClusterCmd) Run() error {
return nil
}



// upgradeSpecs ensures that fields are fully populated / defaulted
func (c *ApplyClusterCmd) upgradeSpecs(assetBuilder *assets.AssetBuilder) error {
fullCluster, err := PopulateClusterSpec(c.Clientset, c.Cluster, assetBuilder)
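The "quirky" nodeup hash from the commit message is visible in the -325/+286 hunk above: NodeUpLocation still computes a hash, but c.NodeUpHash is left unset because assigning it broke the CloudFormation test, so the BootstrapScript further down is rendered with an empty NodeUpSourceHash for now. A sketch of the wiring that is currently commented out (reassembled from the removed lines; re-enabling it is what the FIXMEs point at, not something this commit does):

if c.NodeUpSource == "" {
	n, hash, err := NodeUpLocation(assetBuilder)
	if err != nil {
		return err
	}
	c.NodeUpSource = n
	// This is the assignment that currently breaks the CloudFormation test.
	c.NodeUpHash = hash.Hex()
}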
2 changes: 1 addition & 1 deletion upup/pkg/fi/cloudup/awstasks/elastic_ip_test.go
@@ -104,7 +104,7 @@ func TestElasticIPCreate(t *testing.T) {
}

func checkNoChanges(t *testing.T, cloud fi.Cloud, allTasks map[string]fi.Task) {
assetBuilder := assets.NewAssetBuilder(nil)
assetBuilder := assets.NewAssetBuilder(nil, "")
target := fi.NewDryRunTarget(assetBuilder, os.Stderr)
context, err := fi.NewContext(target, cloud, nil, nil, nil, true, allTasks)
if err != nil {
2 changes: 1 addition & 1 deletion upup/pkg/fi/cloudup/bootstrapchannelbuilder_test.go
@@ -78,7 +78,7 @@ func runChannelBuilderTest(t *testing.T, key string) {
bcb := BootstrapChannelBuilder{
cluster: cluster,
templates: templates,
assetBuilder: assets.NewAssetBuilder(nil),
assetBuilder: assets.NewAssetBuilder(nil, ""),
}

context := &fi.ModelBuilderContext{
2 changes: 1 addition & 1 deletion upup/pkg/fi/cloudup/populatecluster_test.go
@@ -104,7 +104,7 @@ func TestPopulateCluster_Default_NoError(t *testing.T) {
func mockedPopulateClusterSpec(c *api.Cluster) (*api.Cluster, error) {
vfs.Context.ResetMemfsContext(true)

assetBuilder := assets.NewAssetBuilder(nil,"")
assetBuilder := assets.NewAssetBuilder(nil, "")
basePath, err := vfs.Context.BuildVfsPath("memfs://tests")
if err != nil {
return nil, fmt.Errorf("error building vfspath: %v", err)
5 changes: 4 additions & 1 deletion upup/pkg/fi/cloudup/urls.go
@@ -57,6 +57,7 @@ var nodeUpHash *hashing.Hash
// NodeUpLocation returns the URL where nodeup should be downloaded
func NodeUpLocation(assetsBuilder *assets.AssetBuilder) (string, *hashing.Hash, error) {
if nodeUpLocation != "" && nodeUpHash != nil {
glog.V(10).Infof("returning saved nodeup values %q, %q", nodeUpLocation, nodeUpHash)
return nodeUpLocation, nodeUpHash, nil
}
var err error
@@ -69,12 +70,13 @@ func NodeUpLocation(assetsBuilder *assets.AssetBuilder) (string, *hashing.Hash, error) {
if err != nil {
return "", nil, err
}
glog.V(4).Infof("Using default nodeup location: %q", nodeUpLocation)
glog.V(4).Infof("Using default nodeup location: %q", n)
}

nodeUpLocation = n
nodeUpHash = h

glog.V(10).Infof("returning values %q, %q", nodeUpLocation, nodeUpHash)
return n, h, nil
}

Expand All @@ -90,6 +92,7 @@ var protokubeImageSHA *hashing.Hash
func ProtokubeImageSource(assetsBuilder *assets.AssetBuilder) (string, *hashing.Hash, error) {
if protokubeImageSource != "" && protokubeImageSHA != nil {
// Avoid repeated logging
glog.V(4).Infof("Using returning saved protokube location: %q", protokubeImageSource)
return protokubeImageSource, protokubeImageSHA, nil
}
protokubeImageSource = os.Getenv("PROTOKUBE_IMAGE")
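The urls.go changes only add logging around the existing memoization: NodeUpLocation and ProtokubeImageSource cache the resolved location and hash in package-level variables and return the saved values on later calls. A stripped-down sketch of that pattern, with the hash reduced to a plain hex string so it stays self-contained; the lookup helper and its values are hypothetical:

package example

import "github.com/golang/glog"

var (
	nodeUpLocation string
	nodeUpHashHex  string
)

// nodeUpLookup is a hypothetical stand-in for the real resolution in urls.go.
func nodeUpLookup() (string, string, error) {
	return "https://example.com/nodeup", "0123abcd", nil
}

// NodeUpLocationSketch mirrors the caching in urls.go: resolve once, then
// return the saved values (logging at high verbosity) on every later call.
func NodeUpLocationSketch() (string, string, error) {
	if nodeUpLocation != "" && nodeUpHashHex != "" {
		glog.V(10).Infof("returning saved nodeup values %q, %q", nodeUpLocation, nodeUpHashHex)
		return nodeUpLocation, nodeUpHashHex, nil
	}

	n, h, err := nodeUpLookup()
	if err != nil {
		return "", "", err
	}

	nodeUpLocation = n
	nodeUpHashHex = h
	return n, h, nil
}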
