From 232cb87c7ac64c91d95130ddf7708ae3ef21ed8b Mon Sep 17 00:00:00 2001
From: Riley Karson
Date: Wed, 26 Jul 2017 13:37:59 -0700
Subject: [PATCH] Add versioned Beta support to google_compute_instance_group_manager (#234)

* Vendor GCP Compute Beta client library.
* Refactor resource_compute_instance_group_manager for multi version support (#129)
* Refactor resource_compute_instance_group_manager for multi version support.
* Minor changes based on review.
* Removed type-specific API version conversion functions.
* Add support for Beta operations.
* Add v0beta support to google_compute_instance_group_manager.
* Renamed Key to Feature, added comments & updated some parameter names.
* Fix code and tests for version finder to match fields that don't have a change.
* Store non-v1 resources' self links as v1 so that dependent single-version resources don't see diffs.
* Fix weird change to vendor.json from merge.
* Add a note that Convert loses ForceSendFields, fix failing test.
* Moved nil type to a switch case in compute_shared_operation.go.
* Move base api version declaration above schema.
---
 google/api_versions.go                        | 100 +++++
 google/api_versions_test.go                   | 105 +++++
 google/compute_beta_operation.go              | 167 ++++++++
 google/compute_shared_operation.go            |  23 ++
 google/config.go                              |   9 +
 google/provider.go                            |  25 ++
 google/resource_compute_instance_group.go     |  12 +
 ...resource_compute_instance_group_manager.go | 387 ++++++++++++++----
 google/self_link_helpers.go                   |  49 +++
 9 files changed, 799 insertions(+), 78 deletions(-)
 create mode 100644 google/api_versions.go
 create mode 100644 google/api_versions_test.go
 create mode 100644 google/compute_beta_operation.go
 create mode 100644 google/compute_shared_operation.go
 create mode 100644 google/self_link_helpers.go

diff --git a/google/api_versions.go b/google/api_versions.go
new file mode 100644
index 00000000000..8cc07f4f093
--- /dev/null
+++ b/google/api_versions.go
@@ -0,0 +1,100 @@
+package google
+
+import (
+	"encoding/json"
+)
+
+type ComputeApiVersion uint8
+
+const (
+	v1 ComputeApiVersion = iota
+	v0beta
+)
+
+var OrderedComputeApiVersions = []ComputeApiVersion{
+	v0beta,
+	v1,
+}
+
+// Convert between two types by converting to/from JSON. Intended to switch
+// between multiple API versions, as they are strict supersets of one another.
+// Convert loses information about ForceSendFields and NullFields.
+func Convert(item, out interface{}) error {
+	bytes, err := json.Marshal(item)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(bytes, out)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type TerraformResourceData interface {
+	HasChange(string) bool
+	GetOk(string) (interface{}, bool)
+}
+
+// Compare the fields set in schema against a list of features and their versions to determine
+// what version of the API is required in order to manage the resource.
+func getComputeApiVersion(d TerraformResourceData, resourceVersion ComputeApiVersion, features []Feature) ComputeApiVersion {
+	versions := map[ComputeApiVersion]struct{}{resourceVersion: struct{}{}}
+	for _, feature := range features {
+		if feature.InUseBy(d) {
+			versions[feature.Version] = struct{}{}
+		}
+	}
+
+	return maxVersion(versions)
+}
+
+// Compare the fields set in schema against a list of features and their versions, and a
+// list of features that exist at the base resource version but can only be updated at some
+// other version, to determine what version of the API is required in order to update the resource.
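+//
+// Illustrative sketch (the field name "beta_only_field" is hypothetical, not part of
+// this change): a resource whose base version is v1 is updated through v0beta as soon
+// as a beta-only field is set or has a pending change, and through v1 otherwise:
+//
+//	features := []Feature{{Version: v0beta, Item: "beta_only_field"}}
+//	version := getComputeApiVersionUpdate(d, v1, features, []Feature{})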
+func getComputeApiVersionUpdate(d TerraformResourceData, resourceVersion ComputeApiVersion, features, updateOnlyFields []Feature) ComputeApiVersion { + versions := map[ComputeApiVersion]struct{}{resourceVersion: struct{}{}} + schemaVersion := getComputeApiVersion(d, resourceVersion, features) + versions[schemaVersion] = struct{}{} + + for _, feature := range updateOnlyFields { + if feature.HasChangeBy(d) { + versions[feature.Version] = struct{}{} + } + } + + return maxVersion(versions) +} + +// A field of a resource and the version of the Compute API required to use it. +type Feature struct { + Version ComputeApiVersion + Item string +} + +// Returns true when a feature has been modified. +// This is most important when updating a resource to remove versioned feature usage; if the +// resource is reverting to its base version, it needs to perform a final update at the higher +// version in order to remove high version features. +func (s Feature) HasChangeBy(d TerraformResourceData) bool { + return d.HasChange(s.Item) +} + +// Return true when a feature appears in schema or has been modified. +func (s Feature) InUseBy(d TerraformResourceData) bool { + _, ok := d.GetOk(s.Item) + return ok || s.HasChangeBy(d) +} + +func maxVersion(versionsInUse map[ComputeApiVersion]struct{}) ComputeApiVersion { + for _, version := range OrderedComputeApiVersions { + if _, ok := versionsInUse[version]; ok { + return version + } + } + + // Fallback to the final, most stable version + return OrderedComputeApiVersions[len(OrderedComputeApiVersions)-1] +} diff --git a/google/api_versions_test.go b/google/api_versions_test.go new file mode 100644 index 00000000000..fe6f7724ad6 --- /dev/null +++ b/google/api_versions_test.go @@ -0,0 +1,105 @@ +package google + +import "testing" + +func TestResourceWithOnlyBaseVersionFields(t *testing.T) { + d := &ResourceDataMock{ + FieldsInSchema: []string{"normal_field"}, + } + + resourceVersion := v1 + computeApiVersion := getComputeApiVersion(d, resourceVersion, []Feature{}) + if computeApiVersion != resourceVersion { + t.Errorf("Expected to see version: %v. Saw version: %v.", resourceVersion, computeApiVersion) + } + + computeApiVersion = getComputeApiVersionUpdate(d, resourceVersion, []Feature{}, []Feature{}) + if computeApiVersion != resourceVersion { + t.Errorf("Expected to see version: %v. Saw version: %v.", resourceVersion, computeApiVersion) + } +} + +func TestResourceWithBetaFields(t *testing.T) { + resourceVersion := v1 + d := &ResourceDataMock{ + FieldsInSchema: []string{"normal_field", "beta_field"}, + } + + expectedVersion := v0beta + computeApiVersion := getComputeApiVersion(d, resourceVersion, []Feature{{Version: expectedVersion, Item: "beta_field"}}) + if computeApiVersion != expectedVersion { + t.Errorf("Expected to see version: %v. Saw version: %v.", expectedVersion, computeApiVersion) + } + + computeApiVersion = getComputeApiVersionUpdate(d, resourceVersion, []Feature{{Version: expectedVersion, Item: "beta_field"}}, []Feature{}) + if computeApiVersion != expectedVersion { + t.Errorf("Expected to see version: %v. Saw version: %v.", expectedVersion, computeApiVersion) + } +} + +func TestResourceWithBetaFieldsNotInSchema(t *testing.T) { + resourceVersion := v1 + d := &ResourceDataMock{ + FieldsInSchema: []string{"normal_field"}, + } + + expectedVersion := v1 + computeApiVersion := getComputeApiVersion(d, resourceVersion, []Feature{{Version: expectedVersion, Item: "beta_field"}}) + if computeApiVersion != expectedVersion { + t.Errorf("Expected to see version: %v. 
Saw version: %v.", expectedVersion, computeApiVersion) + } + + computeApiVersion = getComputeApiVersionUpdate(d, resourceVersion, []Feature{{Version: expectedVersion, Item: "beta_field"}}, []Feature{}) + if computeApiVersion != expectedVersion { + t.Errorf("Expected to see version: %v. Saw version: %v.", expectedVersion, computeApiVersion) + } +} + +func TestResourceWithBetaUpdateFields(t *testing.T) { + resourceVersion := v1 + d := &ResourceDataMock{ + FieldsInSchema: []string{"normal_field", "beta_update_field"}, + FieldsWithHasChange: []string{"beta_update_field"}, + } + + expectedVersion := v1 + computeApiVersion := getComputeApiVersion(d, resourceVersion, []Feature{}) + if computeApiVersion != expectedVersion { + t.Errorf("Expected to see version: %v. Saw version: %v.", expectedVersion, computeApiVersion) + } + + expectedVersion = v0beta + computeApiVersion = getComputeApiVersionUpdate(d, resourceVersion, []Feature{}, []Feature{{Version: expectedVersion, Item: "beta_update_field"}}) + if computeApiVersion != expectedVersion { + t.Errorf("Expected to see version: %v. Saw version: %v.", expectedVersion, computeApiVersion) + } + +} + +type ResourceDataMock struct { + FieldsInSchema []string + FieldsWithHasChange []string +} + +func (d *ResourceDataMock) HasChange(key string) bool { + exists := false + for _, val := range d.FieldsWithHasChange { + if key == val { + exists = true + } + } + + return exists +} + +func (d *ResourceDataMock) GetOk(key string) (interface{}, bool) { + exists := false + for _, val := range d.FieldsInSchema { + if key == val { + exists = true + } + + } + + return nil, exists +} diff --git a/google/compute_beta_operation.go b/google/compute_beta_operation.go new file mode 100644 index 00000000000..fc3b94da0a9 --- /dev/null +++ b/google/compute_beta_operation.go @@ -0,0 +1,167 @@ +package google + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + + computeBeta "google.golang.org/api/compute/v0.beta" +) + +// OperationBetaWaitType is an enum specifying what type of operation +// we're waiting on from the beta API. 
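+//
+// For example, ComputeBetaOperationWaitZone selects zonal polling; a waiter is then
+// configured and driven roughly as follows (a sketch only; the helpers further down
+// also set Delay, Timeout and MinTimeout):
+//
+//	w := &ComputeBetaOperationWaiter{
+//		Service: config.clientComputeBeta,
+//		Op:      op,
+//		Project: project,
+//		Zone:    zone,
+//		Type:    ComputeBetaOperationWaitZone,
+//	}
+//	opRaw, err := w.Conf().WaitForState()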
+type ComputeBetaOperationWaitType byte + +const ( + ComputeBetaOperationWaitInvalid ComputeBetaOperationWaitType = iota + ComputeBetaOperationWaitGlobal + ComputeBetaOperationWaitRegion + ComputeBetaOperationWaitZone +) + +type ComputeBetaOperationWaiter struct { + Service *computeBeta.Service + Op *computeBeta.Operation + Project string + Region string + Type ComputeBetaOperationWaitType + Zone string +} + +func (w *ComputeBetaOperationWaiter) RefreshFunc() resource.StateRefreshFunc { + return func() (interface{}, string, error) { + var op *computeBeta.Operation + var err error + + switch w.Type { + case ComputeBetaOperationWaitGlobal: + op, err = w.Service.GlobalOperations.Get( + w.Project, w.Op.Name).Do() + case ComputeBetaOperationWaitRegion: + op, err = w.Service.RegionOperations.Get( + w.Project, w.Region, w.Op.Name).Do() + case ComputeBetaOperationWaitZone: + op, err = w.Service.ZoneOperations.Get( + w.Project, w.Zone, w.Op.Name).Do() + default: + return nil, "bad-type", fmt.Errorf( + "Invalid wait type: %#v", w.Type) + } + + if err != nil { + return nil, "", err + } + + log.Printf("[DEBUG] Got %q when asking for operation %q", op.Status, w.Op.Name) + + return op, op.Status, nil + } +} + +func (w *ComputeBetaOperationWaiter) Conf() *resource.StateChangeConf { + return &resource.StateChangeConf{ + Pending: []string{"PENDING", "RUNNING"}, + Target: []string{"DONE"}, + Refresh: w.RefreshFunc(), + } +} + +// ComputeBetaOperationError wraps computeBeta.OperationError and implements the +// error interface so it can be returned. +type ComputeBetaOperationError computeBeta.OperationError + +func (e ComputeBetaOperationError) Error() string { + var buf bytes.Buffer + + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + + return buf.String() +} + +func computeBetaOperationWaitGlobal(config *Config, op *computeBeta.Operation, project string, activity string) error { + return computeBetaOperationWaitGlobalTime(config, op, project, activity, 4) +} + +func computeBetaOperationWaitGlobalTime(config *Config, op *computeBeta.Operation, project string, activity string, timeoutMin int) error { + w := &ComputeBetaOperationWaiter{ + Service: config.clientComputeBeta, + Op: op, + Project: project, + Type: ComputeBetaOperationWaitGlobal, + } + + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = time.Duration(timeoutMin) * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*computeBeta.Operation) + if op.Error != nil { + return ComputeBetaOperationError(*op.Error) + } + + return nil +} + +func computeBetaOperationWaitRegion(config *Config, op *computeBeta.Operation, project string, region, activity string) error { + w := &ComputeBetaOperationWaiter{ + Service: config.clientComputeBeta, + Op: op, + Project: project, + Type: ComputeBetaOperationWaitRegion, + Region: region, + } + + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = 4 * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + + op = opRaw.(*computeBeta.Operation) + if op.Error != nil { + return ComputeBetaOperationError(*op.Error) + } + + return nil +} + +func computeBetaOperationWaitZone(config *Config, op *computeBeta.Operation, project string, zone, activity string) error { + return computeBetaOperationWaitZoneTime(config, op, 
project, zone, 4, activity) +} + +func computeBetaOperationWaitZoneTime(config *Config, op *computeBeta.Operation, project string, zone string, minutes int, activity string) error { + w := &ComputeBetaOperationWaiter{ + Service: config.clientComputeBeta, + Op: op, + Project: project, + Zone: zone, + Type: ComputeBetaOperationWaitZone, + } + state := w.Conf() + state.Delay = 10 * time.Second + state.Timeout = time.Duration(minutes) * time.Minute + state.MinTimeout = 2 * time.Second + opRaw, err := state.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for %s: %s", activity, err) + } + op = opRaw.(*computeBeta.Operation) + if op.Error != nil { + // Return the error + return ComputeBetaOperationError(*op.Error) + } + return nil +} diff --git a/google/compute_shared_operation.go b/google/compute_shared_operation.go new file mode 100644 index 00000000000..86d46072b3f --- /dev/null +++ b/google/compute_shared_operation.go @@ -0,0 +1,23 @@ +package google + +import ( + computeBeta "google.golang.org/api/compute/v0.beta" + "google.golang.org/api/compute/v1" +) + +func computeSharedOperationWaitZone(config *Config, op interface{}, project string, zone, activity string) error { + return computeSharedOperationWaitZoneTime(config, op, project, zone, 4, activity) +} + +func computeSharedOperationWaitZoneTime(config *Config, op interface{}, project string, zone string, minutes int, activity string) error { + switch op.(type) { + case *compute.Operation: + return computeOperationWaitZoneTime(config, op.(*compute.Operation), project, zone, minutes, activity) + case *computeBeta.Operation: + return computeBetaOperationWaitZoneTime(config, op.(*computeBeta.Operation), project, zone, minutes, activity) + case nil: + panic("Attempted to wait on an Operation that was nil.") + default: + panic("Attempted to wait on an Operation of unknown type.") + } +} diff --git a/google/config.go b/google/config.go index 0e382f14876..f161b615426 100644 --- a/google/config.go +++ b/google/config.go @@ -19,6 +19,7 @@ import ( "google.golang.org/api/bigquery/v2" "google.golang.org/api/cloudbilling/v1" "google.golang.org/api/cloudresourcemanager/v1" + computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" "google.golang.org/api/container/v1" "google.golang.org/api/dns/v1" @@ -38,6 +39,7 @@ type Config struct { clientBilling *cloudbilling.Service clientCompute *compute.Service + clientComputeBeta *computeBeta.Service clientContainer *container.Service clientDns *dns.Service clientPubsub *pubsub.Service @@ -122,6 +124,13 @@ func (c *Config) loadAndValidate() error { } c.clientCompute.UserAgent = userAgent + log.Printf("[INFO] Instantiating GCE Beta client...") + c.clientComputeBeta, err = computeBeta.New(client) + if err != nil { + return err + } + c.clientComputeBeta.UserAgent = userAgent + log.Printf("[INFO] Instantiating GKE client...") c.clientContainer, err = container.New(client) if err != nil { diff --git a/google/provider.go b/google/provider.go index 5bff44c89b9..f24dd5e01f2 100644 --- a/google/provider.go +++ b/google/provider.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform/helper/mutexkv" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" + computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" ) @@ -212,6 +213,30 @@ func getZonalResourceFromRegion(getResource func(string) (interface{}, error), r return nil, nil } +func 
getZonalBetaResourceFromRegion(getResource func(string) (interface{}, error), region string, compute *computeBeta.Service, project string) (interface{}, error) { + zoneList, err := compute.Zones.List(project).Do() + if err != nil { + return nil, err + } + var resource interface{} + for _, zone := range zoneList.Items { + if strings.Contains(zone.Name, region) { + resource, err = getResource(zone.Name) + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // Resource was not found in this zone + continue + } + return nil, fmt.Errorf("Error reading Resource: %s", err) + } + // Resource was found + return resource, nil + } + } + // Resource does not exist in this region + return nil, nil +} + // getNetworkLink reads the "network" field from the given resource data and if the value: // - is a resource URL, returns the string unchanged // - is the network name only, then looks up the resource URL using the google client diff --git a/google/resource_compute_instance_group.go b/google/resource_compute_instance_group.go index 11803f49f72..5f86d020ae2 100644 --- a/google/resource_compute_instance_group.go +++ b/google/resource_compute_instance_group.go @@ -372,3 +372,15 @@ func resourceComputeInstanceGroupImportState(d *schema.ResourceData, meta interf return []*schema.ResourceData{d}, nil } + +func flattenNamedPorts(namedPorts []*compute.NamedPort) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(namedPorts)) + for _, namedPort := range namedPorts { + namedPortMap := make(map[string]interface{}) + namedPortMap["name"] = namedPort.Name + namedPortMap["port"] = namedPort.Port + result = append(result, namedPortMap) + } + return result + +} diff --git a/google/resource_compute_instance_group_manager.go b/google/resource_compute_instance_group_manager.go index 4478521fb90..9fdd0122f8a 100644 --- a/google/resource_compute_instance_group_manager.go +++ b/google/resource_compute_instance_group_manager.go @@ -7,9 +7,13 @@ import ( "time" "github.com/hashicorp/terraform/helper/schema" + + computeBeta "google.golang.org/api/compute/v0.beta" "google.golang.org/api/compute/v1" ) +var InstanceGroupManagerBaseApiVersion = v1 + func resourceComputeInstanceGroupManager() *schema.Resource { return &schema.Resource{ Create: resourceComputeInstanceGroupManagerCreate, @@ -28,8 +32,9 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, "instance_template": &schema.Schema{ - Type: schema.TypeString, - Required: true, + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, }, "name": &schema.Schema{ @@ -97,10 +102,13 @@ func resourceComputeInstanceGroupManager() *schema.Resource { }, "target_pools": &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, + Type: schema.TypeSet, + Optional: true, + DiffSuppressFunc: compareSelfLinkRelativePaths, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: selfLinkRelativePathHash, }, "target_size": &schema.Schema{ @@ -121,10 +129,25 @@ func getNamedPorts(nps []interface{}) []*compute.NamedPort { Port: int64(np["port"].(int)), }) } + + return namedPorts +} + +func getNamedPortsBeta(nps []interface{}) []*computeBeta.NamedPort { + namedPorts := make([]*computeBeta.NamedPort, 0, len(nps)) + for _, v := range nps { + np := v.(map[string]interface{}) + namedPorts = append(namedPorts, &computeBeta.NamedPort{ + Name: np["name"].(string), + Port: int64(np["port"].(int)), + }) + } + 
return namedPorts } func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { + computeApiVersion := getComputeApiVersion(d, InstanceGroupManagerBaseApiVersion, []Feature{}) config := meta.(*Config) project, err := getProject(d, config) @@ -138,7 +161,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte } // Build the parameter - manager := &compute.InstanceGroupManager{ + manager := &computeBeta.InstanceGroupManager{ Name: d.Get("name").(string), BaseInstanceName: d.Get("base_instance_name").(string), InstanceTemplate: d.Get("instance_template").(string), @@ -153,7 +176,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte } if v, ok := d.GetOk("named_port"); ok { - manager.NamedPorts = getNamedPorts(v.([]interface{})) + manager.NamedPorts = getNamedPortsBeta(v.([]interface{})) } if attr := d.Get("target_pools").(*schema.Set); attr.Len() > 0 { @@ -170,8 +193,30 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte } log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) - op, err := config.clientCompute.InstanceGroupManagers.Insert( - project, d.Get("zone").(string), manager).Do() + var op interface{} + switch computeApiVersion { + case v1: + managerV1 := &compute.InstanceGroupManager{} + err := Convert(manager, managerV1) + if err != nil { + return err + } + + managerV1.ForceSendFields = manager.ForceSendFields + op, err = config.clientCompute.InstanceGroupManagers.Insert( + project, d.Get("zone").(string), managerV1).Do() + case v0beta: + managerV0beta := &computeBeta.InstanceGroupManager{} + err := Convert(manager, managerV0beta) + if err != nil { + return err + } + + managerV0beta.ForceSendFields = manager.ForceSendFields + op, err = config.clientComputeBeta.InstanceGroupManagers.Insert( + project, d.Get("zone").(string), managerV0beta).Do() + } + if err != nil { return fmt.Errorf("Error creating InstanceGroupManager: %s", err) } @@ -180,7 +225,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte d.SetId(manager.Name) // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroupManager") + err = computeSharedOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroupManager") if err != nil { return err } @@ -188,7 +233,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte return resourceComputeInstanceGroupManagerRead(d, meta) } -func flattenNamedPorts(namedPorts []*compute.NamedPort) []map[string]interface{} { +func flattenNamedPortsBeta(namedPorts []*computeBeta.NamedPort) []map[string]interface{} { result := make([]map[string]interface{}, 0, len(namedPorts)) for _, namedPort := range namedPorts { namedPortMap := make(map[string]interface{}) @@ -201,6 +246,7 @@ func flattenNamedPorts(namedPorts []*compute.NamedPort) []map[string]interface{} } func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + computeApiVersion := getComputeApiVersion(d, InstanceGroupManagerBaseApiVersion, []Feature{}) config := meta.(*Config) project, err := getProject(d, config) @@ -213,36 +259,81 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf return err } - getInstanceGroupManager := func(zone string) (interface{}, error) { - return config.clientCompute.InstanceGroupManagers.Get(project, zone, d.Id()).Do() - } + 
manager := &computeBeta.InstanceGroupManager{} + switch computeApiVersion { + case v1: + getInstanceGroupManager := func(zone string) (interface{}, error) { + return config.clientCompute.InstanceGroupManagers.Get(project, zone, d.Id()).Do() + } - var manager *compute.InstanceGroupManager - var e error - if zone, ok := d.GetOk("zone"); ok { - manager, e = config.clientCompute.InstanceGroupManagers.Get(project, zone.(string), d.Id()).Do() + var v1Manager *compute.InstanceGroupManager + var e error + if zone, ok := d.GetOk("zone"); ok { + v1Manager, e = config.clientCompute.InstanceGroupManagers.Get(project, zone.(string), d.Id()).Do() + + if e != nil { + return handleNotFoundError(e, d, fmt.Sprintf("Instance Group Manager %q", d.Get("name").(string))) + } + } else { + // If the resource was imported, the only info we have is the ID. Try to find the resource + // by searching in the region of the project. + var resource interface{} + resource, e = getZonalResourceFromRegion(getInstanceGroupManager, region, config.clientCompute, project) + + if e != nil { + return e + } - if e != nil { - return handleNotFoundError(e, d, fmt.Sprintf("Instance Group Manager %q", d.Get("name").(string))) + v1Manager = resource.(*compute.InstanceGroupManager) } - } else { - // If the resource was imported, the only info we have is the ID. Try to find the resource - // by searching in the region of the project. - var resource interface{} - resource, e = getZonalResourceFromRegion(getInstanceGroupManager, region, config.clientCompute, project) - if e != nil { - return e + if v1Manager == nil { + log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) + + // The resource doesn't exist anymore + d.SetId("") + return nil } - manager = resource.(*compute.InstanceGroupManager) - } + err = Convert(v1Manager, manager) + if err != nil { + return err + } - if manager == nil { - log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) - // The resource doesn't exist anymore - d.SetId("") - return nil + case v0beta: + getInstanceGroupManager := func(zone string) (interface{}, error) { + return config.clientComputeBeta.InstanceGroupManagers.Get(project, zone, d.Id()).Do() + } + + var v0betaManager *computeBeta.InstanceGroupManager + var e error + if zone, ok := d.GetOk("zone"); ok { + v0betaManager, e = config.clientComputeBeta.InstanceGroupManagers.Get(project, zone.(string), d.Id()).Do() + + if e != nil { + return handleNotFoundError(e, d, fmt.Sprintf("Instance Group Manager %q", d.Get("name").(string))) + } + } else { + // If the resource was imported, the only info we have is the ID. Try to find the resource + // by searching in the region of the project. 
+ var resource interface{} + resource, e = getZonalBetaResourceFromRegion(getInstanceGroupManager, region, config.clientComputeBeta, project) + if e != nil { + return e + } + + v0betaManager = resource.(*computeBeta.InstanceGroupManager) + } + + if v0betaManager == nil { + log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) + + // The resource doesn't exist anymore + d.SetId("") + return nil + } + + manager = v0betaManager } zoneUrl := strings.Split(manager.Zone, "/") @@ -254,10 +345,10 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf d.Set("project", project) d.Set("target_size", manager.TargetSize) d.Set("target_pools", manager.TargetPools) - d.Set("named_port", flattenNamedPorts(manager.NamedPorts)) + d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)) d.Set("fingerprint", manager.Fingerprint) d.Set("instance_group", manager.InstanceGroup) - d.Set("self_link", manager.SelfLink) + d.Set("self_link", ConvertSelfLinkToV1(manager.SelfLink)) update_strategy, ok := d.GetOk("update_strategy") if !ok { update_strategy = "RESTART" @@ -266,7 +357,9 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf return nil } + func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { + computeApiVersion := getComputeApiVersionUpdate(d, InstanceGroupManagerBaseApiVersion, []Feature{}, []Feature{}) config := meta.(*Config) project, err := getProject(d, config) @@ -286,19 +379,39 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Build the parameter - setTargetPools := &compute.InstanceGroupManagersSetTargetPoolsRequest{ + setTargetPools := &computeBeta.InstanceGroupManagersSetTargetPoolsRequest{ Fingerprint: d.Get("fingerprint").(string), TargetPools: targetPools, } - op, err := config.clientCompute.InstanceGroupManagers.SetTargetPools( - project, d.Get("zone").(string), d.Id(), setTargetPools).Do() + var op interface{} + switch computeApiVersion { + case v1: + setTargetPoolsV1 := &compute.InstanceGroupManagersSetTargetPoolsRequest{} + err := Convert(setTargetPools, setTargetPoolsV1) + if err != nil { + return err + } + + op, err = config.clientCompute.InstanceGroupManagers.SetTargetPools( + project, d.Get("zone").(string), d.Id(), setTargetPoolsV1).Do() + case v0beta: + setTargetPoolsV0beta := &computeBeta.InstanceGroupManagersSetTargetPoolsRequest{} + err := Convert(setTargetPools, setTargetPoolsV0beta) + if err != nil { + return err + } + + op, err = config.clientComputeBeta.InstanceGroupManagers.SetTargetPools( + project, d.Get("zone").(string), d.Id(), setTargetPoolsV0beta).Do() + } + if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeSharedOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } @@ -309,25 +422,68 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte // If instance_template changes then update if d.HasChange("instance_template") { // Build the parameter - setInstanceTemplate := &compute.InstanceGroupManagersSetInstanceTemplateRequest{ + setInstanceTemplate := &computeBeta.InstanceGroupManagersSetInstanceTemplateRequest{ InstanceTemplate: d.Get("instance_template").(string), } - op, err := 
config.clientCompute.InstanceGroupManagers.SetInstanceTemplate( - project, d.Get("zone").(string), d.Id(), setInstanceTemplate).Do() + var op interface{} + switch computeApiVersion { + case v1: + setInstanceTemplateV1 := &compute.InstanceGroupManagersSetInstanceTemplateRequest{} + err := Convert(setInstanceTemplate, setInstanceTemplateV1) + if err != nil { + return err + } + + op, err = config.clientCompute.InstanceGroupManagers.SetInstanceTemplate( + project, d.Get("zone").(string), d.Id(), setInstanceTemplateV1).Do() + case v0beta: + setInstanceTemplateV0beta := &computeBeta.InstanceGroupManagersSetInstanceTemplateRequest{} + err := Convert(setInstanceTemplate, setInstanceTemplateV0beta) + if err != nil { + return err + } + + op, err = config.clientComputeBeta.InstanceGroupManagers.SetInstanceTemplate( + project, d.Get("zone").(string), d.Id(), setInstanceTemplateV0beta).Do() + } + if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeSharedOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } if d.Get("update_strategy").(string) == "RESTART" { - managedInstances, err := config.clientCompute.InstanceGroupManagers.ListManagedInstances( - project, d.Get("zone").(string), d.Id()).Do() + managedInstances := &computeBeta.InstanceGroupManagersListManagedInstancesResponse{} + switch computeApiVersion { + case v1: + managedInstancesV1, err := config.clientCompute.InstanceGroupManagers.ListManagedInstances( + project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + return fmt.Errorf("Error getting instance group managers instances: %s", err) + } + + err = Convert(managedInstancesV1, managedInstances) + if err != nil { + return err + } + case v0beta: + managedInstancesV0beta, err := config.clientComputeBeta.InstanceGroupManagers.ListManagedInstances( + project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + return fmt.Errorf("Error getting instance group managers instances: %s", err) + } + + err = Convert(managedInstancesV0beta, managedInstances) + if err != nil { + return err + } + } managedInstanceCount := len(managedInstances.ManagedInstances) instances := make([]string, managedInstanceCount) @@ -335,19 +491,40 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte instances[i] = v.Instance } - recreateInstances := &compute.InstanceGroupManagersRecreateInstancesRequest{ + recreateInstances := &computeBeta.InstanceGroupManagersRecreateInstancesRequest{ Instances: instances, } - op, err = config.clientCompute.InstanceGroupManagers.RecreateInstances( - project, d.Get("zone").(string), d.Id(), recreateInstances).Do() - - if err != nil { - return fmt.Errorf("Error restarting instance group managers instances: %s", err) + var op interface{} + switch computeApiVersion { + case v1: + recreateInstancesV1 := &compute.InstanceGroupManagersRecreateInstancesRequest{} + err := Convert(recreateInstances, recreateInstancesV1) + if err != nil { + return err + } + + op, err = config.clientCompute.InstanceGroupManagers.RecreateInstances( + project, d.Get("zone").(string), d.Id(), recreateInstancesV1).Do() + if err != nil { + return fmt.Errorf("Error restarting instance group managers instances: %s", err) + } + case v0beta: + recreateInstancesV0beta := 
&computeBeta.InstanceGroupManagersRecreateInstancesRequest{} + err := Convert(recreateInstances, recreateInstancesV0beta) + if err != nil { + return err + } + + op, err = config.clientComputeBeta.InstanceGroupManagers.RecreateInstances( + project, d.Get("zone").(string), d.Id(), recreateInstancesV0beta).Do() + if err != nil { + return fmt.Errorf("Error restarting instance group managers instances: %s", err) + } } // Wait for the operation to complete - err = computeOperationWaitZoneTime(config, op, project, d.Get("zone").(string), + err = computeSharedOperationWaitZoneTime(config, op, project, d.Get("zone").(string), managedInstanceCount*4, "Restarting InstanceGroupManagers instances") if err != nil { return err @@ -361,20 +538,40 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte if d.HasChange("named_port") { // Build the parameters for a "SetNamedPorts" request: - namedPorts := getNamedPorts(d.Get("named_port").([]interface{})) - setNamedPorts := &compute.InstanceGroupsSetNamedPortsRequest{ + namedPorts := getNamedPortsBeta(d.Get("named_port").([]interface{})) + setNamedPorts := &computeBeta.InstanceGroupsSetNamedPortsRequest{ NamedPorts: namedPorts, } // Make the request: - op, err := config.clientCompute.InstanceGroups.SetNamedPorts( - project, d.Get("zone").(string), d.Id(), setNamedPorts).Do() + var op interface{} + switch computeApiVersion { + case v1: + setNamedPortsV1 := &compute.InstanceGroupsSetNamedPortsRequest{} + err := Convert(setNamedPorts, setNamedPortsV1) + if err != nil { + return err + } + + op, err = config.clientCompute.InstanceGroups.SetNamedPorts( + project, d.Get("zone").(string), d.Id(), setNamedPortsV1).Do() + case v0beta: + setNamedPortsV0beta := &computeBeta.InstanceGroupsSetNamedPortsRequest{} + err := Convert(setNamedPorts, setNamedPortsV0beta) + if err != nil { + return err + } + + op, err = config.clientComputeBeta.InstanceGroups.SetNamedPorts( + project, d.Get("zone").(string), d.Id(), setNamedPortsV0beta).Do() + } + if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) } // Wait for the operation to complete: - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeSharedOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } @@ -384,14 +581,22 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte if d.HasChange("target_size") { targetSize := int64(d.Get("target_size").(int)) - op, err := config.clientCompute.InstanceGroupManagers.Resize( - project, d.Get("zone").(string), d.Id(), targetSize).Do() + var op interface{} + switch computeApiVersion { + case v1: + op, err = config.clientCompute.InstanceGroupManagers.Resize( + project, d.Get("zone").(string), d.Id(), targetSize).Do() + case v0beta: + op, err = config.clientComputeBeta.InstanceGroupManagers.Resize( + project, d.Get("zone").(string), d.Id(), targetSize).Do() + } + if err != nil { return fmt.Errorf("Error updating InstanceGroupManager: %s", err) } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeSharedOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } @@ -405,6 +610,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } func 
resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { + computeApiVersion := getComputeApiVersion(d, InstanceGroupManagerBaseApiVersion, []Feature{}) config := meta.(*Config) project, err := getProject(d, config) @@ -413,13 +619,27 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte } zone := d.Get("zone").(string) - op, err := config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() - attempt := 0 - for err != nil && attempt < 20 { - attempt++ - time.Sleep(2000 * time.Millisecond) + + var op interface{} + switch computeApiVersion { + case v1: op, err = config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() + attempt := 0 + for err != nil && attempt < 20 { + attempt++ + time.Sleep(2000 * time.Millisecond) + op, err = config.clientCompute.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() + } + case v0beta: + op, err = config.clientComputeBeta.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() + attempt := 0 + for err != nil && attempt < 20 { + attempt++ + time.Sleep(2000 * time.Millisecond) + op, err = config.clientComputeBeta.InstanceGroupManagers.Delete(project, zone, d.Id()).Do() + } } + if err != nil { return fmt.Errorf("Error deleting instance group manager: %s", err) } @@ -427,29 +647,40 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte currentSize := int64(d.Get("target_size").(int)) // Wait for the operation to complete - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager") + err = computeSharedOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager") for err != nil && currentSize > 0 { if !strings.Contains(err.Error(), "timeout") { return err } - instanceGroup, err := config.clientCompute.InstanceGroups.Get( - project, d.Get("zone").(string), d.Id()).Do() + var instanceGroupSize int64 + switch computeApiVersion { + case v1: + instanceGroup, err := config.clientCompute.InstanceGroups.Get( + project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + return fmt.Errorf("Error getting instance group size: %s", err) + } - if err != nil { - return fmt.Errorf("Error getting instance group size: %s", err) + instanceGroupSize = instanceGroup.Size + case v0beta: + instanceGroup, err := config.clientComputeBeta.InstanceGroups.Get( + project, d.Get("zone").(string), d.Id()).Do() + if err != nil { + return fmt.Errorf("Error getting instance group size: %s", err) + } + + instanceGroupSize = instanceGroup.Size } - if instanceGroup.Size >= currentSize { + if instanceGroupSize >= currentSize { return fmt.Errorf("Error, instance group isn't shrinking during delete") } - log.Printf("[INFO] timeout occured, but instance group is shrinking (%d < %d)", instanceGroup.Size, currentSize) - - currentSize = instanceGroup.Size - - err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager") + log.Printf("[INFO] timeout occured, but instance group is shrinking (%d < %d)", instanceGroupSize, currentSize) + currentSize = instanceGroupSize + err = computeSharedOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager") } d.SetId("") diff --git a/google/self_link_helpers.go b/google/self_link_helpers.go new file mode 100644 index 00000000000..bda74d4bb2a --- /dev/null +++ b/google/self_link_helpers.go @@ -0,0 +1,49 @@ +package google + +import ( + "fmt" + "regexp" + "strings" 
+ + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +// Compare only the relative path of two self links. +func compareSelfLinkRelativePaths(k, old, new string, d *schema.ResourceData) bool { + oldStripped, err := getRelativePath(old) + if err != nil { + return false + } + + newStripped, err := getRelativePath(new) + if err != nil { + return false + } + + if oldStripped == newStripped { + return true + } + + return false +} + +// Hash the relative path of a self link. +func selfLinkRelativePathHash(selfLink interface{}) int { + path, _ := getRelativePath(selfLink.(string)) + return hashcode.String(path) +} + +func getRelativePath(selfLink string) (string, error) { + stringParts := strings.SplitAfterN(selfLink, "projects/", 2) + if len(stringParts) != 2 { + return "", fmt.Errorf("String was not a self link: %s", selfLink) + } + + return "projects/" + stringParts[1], nil +} + +func ConvertSelfLinkToV1(link string) string { + reg := regexp.MustCompile("/compute/[a-zA-Z0-9]*/projects/") + return reg.ReplaceAllString(link, "/compute/v1/projects/") +}
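
As a closing illustration of the self-link helpers, here is a minimal sketch of an example test that could sit alongside self_link_helpers.go in package google. The project, zone and resource names are hypothetical, and the snippet assumes only the standard fmt import.

	func ExampleConvertSelfLinkToV1() {
		betaLink := "https://www.googleapis.com/compute/beta/projects/my-project/zones/us-central1-a/instanceGroupManagers/my-igm"
		v1Link := "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a/instanceGroupManagers/my-igm"

		// ConvertSelfLinkToV1 rewrites only the API-version segment, which is why the
		// Read function stores self_link through it: dependent single-version resources
		// never see a beta URL.
		fmt.Println(ConvertSelfLinkToV1(betaLink) == v1Link)

		// compareSelfLinkRelativePaths ignores everything before "projects/", so a v1
		// link and a beta link to the same resource do not produce a spurious diff on
		// fields such as instance_template or target_pools.
		fmt.Println(compareSelfLinkRelativePaths("", v1Link, betaLink, nil))

		// Output:
		// true
		// true
	}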