Add beta scaffolding to compute instance and compute instance template
Note that these resources don't currently use beta features - this is future-proofing.
Nic Cope committed Nov 22, 2017
1 parent 72b2c5d commit 701ac6f
Showing 4 changed files with 142 additions and 72 deletions.
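
Both resources follow the same scaffolding pattern: build the request against the v0.beta structs, pick an API version from a (currently empty) list of versioned features, and convert to the v1 structs when no beta feature is in use. The standalone sketch below shows the conversion half of that idea; it assumes the provider's Convert helper works by round-tripping through JSON, and the helper name convert plus the example values are illustrative rather than code from this commit.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	computeBeta "google.golang.org/api/compute/v0.beta"
	"google.golang.org/api/compute/v1"
)

// convert copies src into dst by marshalling to JSON and back. The v1 and
// v0.beta API structs share field names and JSON tags, so any field present
// in both versions survives the round trip.
func convert(src, dst interface{}) error {
	b, err := json.Marshal(src)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, dst)
}

func main() {
	// A GA (v1) instance, roughly as the provider would build it today.
	v1Instance := &compute.Instance{Name: "example-instance", Description: "GA instance"}

	// Converting up to the beta representation loses nothing...
	betaInstance := &computeBeta.Instance{}
	if err := convert(v1Instance, betaInstance); err != nil {
		log.Fatal(err)
	}

	// ...and converting back down only drops beta-only fields.
	roundTripped := &compute.Instance{}
	if err := convert(betaInstance, roundTripped); err != nil {
		log.Fatal(err)
	}
	fmt.Println(betaInstance.Name, roundTripped.Name)
}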
82 changes: 56 additions & 26 deletions google/resource_compute_instance.go
@@ -11,10 +11,14 @@ import (
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
"github.com/mitchellh/hashstructure"
computeBeta "google.golang.org/api/compute/v0.beta"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)

var InstanceBaseApiVersion = v1
var InstanceVersionedFeatures = []Feature{}

func resourceComputeInstance() *schema.Resource {
return &schema.Resource{
Create: resourceComputeInstanceCreate,
@@ -537,15 +541,27 @@ func resourceComputeInstance() *schema.Resource {
}
}

func getInstance(config *Config, d *schema.ResourceData) (*compute.Instance, error) {
func getInstance(config *Config, d *schema.ResourceData) (*computeBeta.Instance, error) {
project, err := getProject(d, config)
if err != nil {
return nil, err
}
zone := d.Get("zone").(string)
instance, err := config.clientCompute.Instances.Get(project, zone, d.Id()).Do()
if err != nil {
return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string)))
instance := &computeBeta.Instance{}
switch getComputeApiVersion(d, InstanceBaseApiVersion, InstanceVersionedFeatures) {
case v1:
instanceV1, err := config.clientCompute.Instances.Get(project, zone, d.Id()).Do()
if err != nil {
return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string)))
}
if err := Convert(instanceV1, instance); err != nil {
return nil, err
}
case v0beta:
instance, err = config.clientComputeBeta.Instances.Get(project, zone, d.Id()).Do()
if err != nil {
return nil, handleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string)))
}
}
return instance, nil
}
@@ -579,7 +595,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err

// Build up the list of disks

disks := []*compute.AttachedDisk{}
disks := []*computeBeta.AttachedDisk{}
bootDisk, err := expandBootDisk(d, config, zone, project)
if err != nil {
return err
@@ -607,7 +623,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}

prefix := "scheduling.0"
scheduling := &compute.Scheduling{}
scheduling := &computeBeta.Scheduling{}

if val, ok := d.GetOk(prefix + ".automatic_restart"); ok {
scheduling.AutomaticRestart = googleapi.Bool(val.(bool))
@@ -644,7 +660,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}

// Create the instance information
instance := &compute.Instance{
instance := &computeBeta.Instance{
CanIpForward: d.Get("can_ip_forward").(bool),
Description: d.Get("description").(string),
Disks: disks,
@@ -661,7 +677,17 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
}

log.Printf("[INFO] Requesting instance creation")
op, err := config.clientCompute.Instances.Insert(project, zone.Name, instance).Do()
var op interface{}
switch getComputeApiVersion(d, InstanceBaseApiVersion, InstanceVersionedFeatures) {
case v1:
instanceV1 := &compute.Instance{}
if err := Convert(instance, instanceV1); err != nil {
return err
}
op, err = config.clientCompute.Instances.Insert(project, zone.Name, instanceV1).Do()
case v0beta:
op, err = config.clientComputeBeta.Instances.Insert(project, zone.Name, instance).Do()
}
if err != nil {
return fmt.Errorf("Error creating instance: %s", err)
}
@@ -850,7 +876,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err

md := instance.Metadata

MetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md)
BetaMetadataUpdate(o.(map[string]interface{}), n.(map[string]interface{}), md)

if err != nil {
return fmt.Errorf("Error updating metadata: %s", err)
@@ -882,8 +908,12 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err

if d.HasChange("tags") {
tags := resourceInstanceTags(d)
tagsV1 := &compute.Tags{}
if err := Convert(tags, tagsV1); err != nil {
return err
}
op, err := config.clientCompute.Instances.SetTags(
project, zone, d.Id(), tags).Do()
project, zone, d.Id(), tagsV1).Do()
if err != nil {
return fmt.Errorf("Error updating tags: %s", err)
}
@@ -1104,15 +1134,15 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err
return resourceComputeInstanceRead(d, meta)
}

func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceData, meta interface{}) (*compute.AttachedDisk, error) {
func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceData, meta interface{}) (*computeBeta.AttachedDisk, error) {
config := meta.(*Config)

source, err := ParseDiskFieldValue(diskConfig["source"].(string), d, config)
if err != nil {
return nil, err
}

disk := &compute.AttachedDisk{
disk := &computeBeta.AttachedDisk{
Source: source.RelativeLink(),
}

@@ -1121,7 +1151,7 @@ func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceDat
}

if v, ok := diskConfig["disk_encryption_key_raw"]; ok {
disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{
disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{
RawKey: v.(string),
}
}
@@ -1130,20 +1160,20 @@ func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceDat

// See comment on expandInstanceTemplateGuestAccelerators regarding why this
// code is duplicated.
func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([]*compute.AcceleratorConfig, error) {
func expandInstanceGuestAccelerators(d TerraformResourceData, config *Config) ([]*computeBeta.AcceleratorConfig, error) {
configs, ok := d.GetOk("guest_accelerator")
if !ok {
return nil, nil
}
accels := configs.([]interface{})
guestAccelerators := make([]*compute.AcceleratorConfig, len(accels))
guestAccelerators := make([]*computeBeta.AcceleratorConfig, len(accels))
for i, raw := range accels {
data := raw.(map[string]interface{})
at, err := ParseAcceleratorFieldValue(data["type"].(string), d, config)
if err != nil {
return nil, fmt.Errorf("cannot parse accelerator type: %v", err)
}
guestAccelerators[i] = &compute.AcceleratorConfig{
guestAccelerators[i] = &computeBeta.AcceleratorConfig{
AcceleratorCount: int64(data["count"].(int)),
AcceleratorType: at.RelativeLink(),
}
@@ -1177,8 +1207,8 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err
return nil
}

func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone, project string) (*compute.AttachedDisk, error) {
disk := &compute.AttachedDisk{
func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone, project string) (*computeBeta.AttachedDisk, error) {
disk := &computeBeta.AttachedDisk{
AutoDelete: d.Get("boot_disk.0.auto_delete").(bool),
Boot: true,
}
@@ -1188,7 +1218,7 @@ func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone,
}

if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok {
disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{
disk.DiskEncryptionKey = &computeBeta.CustomerEncryptionKey{
RawKey: v.(string),
}
}
@@ -1202,7 +1232,7 @@ func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone,
}

if _, ok := d.GetOk("boot_disk.0.initialize_params"); ok {
disk.InitializeParams = &compute.AttachedDiskInitializeParams{}
disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{}

if v, ok := d.GetOk("boot_disk.0.initialize_params.0.size"); ok {
disk.InitializeParams.DiskSizeGb = int64(v.(int))
@@ -1231,7 +1261,7 @@ func expandBootDisk(d *schema.ResourceData, config *Config, zone *compute.Zone,
return disk, nil
}

func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk) []map[string]interface{} {
func flattenBootDisk(d *schema.ResourceData, disk *computeBeta.AttachedDisk) []map[string]interface{} {
result := map[string]interface{}{
"auto_delete": disk.AutoDelete,
"device_name": disk.DeviceName,
@@ -1253,20 +1283,20 @@ func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk) []map[s
return []map[string]interface{}{result}
}

func expandScratchDisks(d *schema.ResourceData, config *Config, zone *compute.Zone, project string) ([]*compute.AttachedDisk, error) {
func expandScratchDisks(d *schema.ResourceData, config *Config, zone *compute.Zone, project string) ([]*computeBeta.AttachedDisk, error) {
diskType, err := readDiskType(config, zone, project, "local-ssd")
if err != nil {
return nil, fmt.Errorf("Error loading disk type 'local-ssd': %s", err)
}

n := d.Get("scratch_disk.#").(int)
scratchDisks := make([]*compute.AttachedDisk, 0, n)
scratchDisks := make([]*computeBeta.AttachedDisk, 0, n)
for i := 0; i < n; i++ {
scratchDisks = append(scratchDisks, &compute.AttachedDisk{
scratchDisks = append(scratchDisks, &computeBeta.AttachedDisk{
AutoDelete: true,
Type: "SCRATCH",
Interface: d.Get(fmt.Sprintf("scratch_disk.%d.interface", i)).(string),
InitializeParams: &compute.AttachedDiskInitializeParams{
InitializeParams: &computeBeta.AttachedDiskInitializeParams{
DiskType: diskType.SelfLink,
},
})
@@ -1275,7 +1305,7 @@ func expandScratchDisks(d *schema.ResourceData, config *Config, zone *compute.Zo
return scratchDisks, nil
}

func flattenScratchDisk(disk *compute.AttachedDisk) map[string]interface{} {
func flattenScratchDisk(disk *computeBeta.AttachedDisk) map[string]interface{} {
result := map[string]interface{}{
"interface": disk.Interface,
}
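
Because the create path now calls Insert through either the v1 or the v0.beta client, the result is held in an untyped op interface{} and passed to a shared wait helper. Below is a minimal sketch of what such a helper has to do, assuming it simply type-switches on the two possible Operation types; the function name waitForOperation is hypothetical and is not the provider's computeSharedOperationWait.

package main

import (
	"fmt"
	"log"

	computeBeta "google.golang.org/api/compute/v0.beta"
	"google.golang.org/api/compute/v1"
)

// waitForOperation accepts whichever Operation type the caller received and
// dispatches on it. A real helper would poll the matching operations service
// until the operation reaches DONE; this sketch only logs what it was given.
func waitForOperation(op interface{}) error {
	switch o := op.(type) {
	case *compute.Operation:
		log.Printf("[DEBUG] waiting on GA operation %q (status %s)", o.Name, o.Status)
	case *computeBeta.Operation:
		log.Printf("[DEBUG] waiting on beta operation %q (status %s)", o.Name, o.Status)
	case nil:
		return fmt.Errorf("no operation to wait on")
	default:
		return fmt.Errorf("unexpected operation type %T", op)
	}
	return nil
}

func main() {
	// Either branch of the version switch can feed the same helper.
	if err := waitForOperation(&compute.Operation{Name: "op-1", Status: "DONE"}); err != nil {
		log.Fatal(err)
	}
	if err := waitForOperation(&computeBeta.Operation{Name: "op-2", Status: "DONE"}); err != nil {
		log.Fatal(err)
	}
}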
63 changes: 44 additions & 19 deletions google/resource_compute_instance_template.go
@@ -6,10 +6,14 @@ import (

"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
computeBeta "google.golang.org/api/compute/v0.beta"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
)

var InstanceTemplateBaseApiVersion = v1
var InstanceTemplateVersionedFeatures = []Feature{}

func resourceComputeInstanceTemplate() *schema.Resource {
return &schema.Resource{
Create: resourceComputeInstanceTemplateCreate,
@@ -404,20 +408,20 @@ func resourceComputeInstanceTemplate() *schema.Resource {
}
}

func buildDisks(d *schema.ResourceData, config *Config) ([]*compute.AttachedDisk, error) {
func buildDisks(d *schema.ResourceData, config *Config) ([]*computeBeta.AttachedDisk, error) {
project, err := getProject(d, config)
if err != nil {
return nil, err
}

disksCount := d.Get("disk.#").(int)

disks := make([]*compute.AttachedDisk, 0, disksCount)
disks := make([]*computeBeta.AttachedDisk, 0, disksCount)
for i := 0; i < disksCount; i++ {
prefix := fmt.Sprintf("disk.%d", i)

// Build the disk
var disk compute.AttachedDisk
var disk computeBeta.AttachedDisk
disk.Type = "PERSISTENT"
disk.Mode = "READ_WRITE"
disk.Interface = "SCSI"
@@ -435,7 +439,7 @@ func buildDisks(d *schema.ResourceData, config *Config) ([]*compute.AttachedDisk
if v, ok := d.GetOk(prefix + ".source"); ok {
disk.Source = v.(string)
} else {
disk.InitializeParams = &compute.AttachedDiskInitializeParams{}
disk.InitializeParams = &computeBeta.AttachedDiskInitializeParams{}

if v, ok := d.GetOk(prefix + ".disk_name"); ok {
disk.InitializeParams.DiskName = v.(string)
@@ -487,16 +491,16 @@ func buildDisks(d *schema.ResourceData, config *Config) ([]*compute.AttachedDisk
// 'zones/us-east1-b/acceleratorTypes/nvidia-tesla-k80'.
// Accelerator type 'zones/us-east1-b/acceleratorTypes/nvidia-tesla-k80'
// must be a valid resource name (not an url).
func expandInstanceTemplateGuestAccelerators(d TerraformResourceData, config *Config) []*compute.AcceleratorConfig {
func expandInstanceTemplateGuestAccelerators(d TerraformResourceData, config *Config) []*computeBeta.AcceleratorConfig {
configs, ok := d.GetOk("guest_accelerator")
if !ok {
return nil
}
accels := configs.([]interface{})
guestAccelerators := make([]*compute.AcceleratorConfig, len(accels))
guestAccelerators := make([]*computeBeta.AcceleratorConfig, len(accels))
for i, raw := range accels {
data := raw.(map[string]interface{})
guestAccelerators[i] = &compute.AcceleratorConfig{
guestAccelerators[i] = &computeBeta.AcceleratorConfig{
AcceleratorCount: int64(data["count"].(int)),
// We can't use ParseAcceleratorFieldValue here because an instance
// template does not have a zone we can use.
@@ -515,7 +519,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac
return err
}

instanceProperties := &compute.InstanceProperties{}
instanceProperties := &computeBeta.InstanceProperties{}

instanceProperties.CanIpForward = d.Get("can_ip_forward").(bool)
instanceProperties.Description = d.Get("instance_description").(string)
@@ -537,7 +541,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac
}
instanceProperties.NetworkInterfaces = networks

instanceProperties.Scheduling = &compute.Scheduling{}
instanceProperties.Scheduling = &computeBeta.Scheduling{}
instanceProperties.Scheduling.OnHostMaintenance = "MIGRATE"

forceSendFieldsScheduling := make([]string, 0, 3)
@@ -589,30 +593,39 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac
} else {
itName = resource.UniqueId()
}
instanceTemplate := compute.InstanceTemplate{
instanceTemplate := &computeBeta.InstanceTemplate{
Description: d.Get("description").(string),
Properties: instanceProperties,
Name: itName,
}

op, err := config.clientCompute.InstanceTemplates.Insert(
project, &instanceTemplate).Do()
var op interface{}
switch getComputeApiVersion(d, InstanceTemplateBaseApiVersion, InstanceTemplateVersionedFeatures) {
case v1:
instanceTemplateV1 := &compute.InstanceTemplate{}
if err := Convert(instanceTemplate, instanceTemplateV1); err != nil {
return err
}
op, err = config.clientCompute.InstanceTemplates.Insert(project, instanceTemplateV1).Do()
case v0beta:
op, err = config.clientComputeBeta.InstanceTemplates.Insert(project, instanceTemplate).Do()
}
if err != nil {
return fmt.Errorf("Error creating instance: %s", err)
return fmt.Errorf("Error creating instance template: %s", err)
}

// Store the ID now
d.SetId(instanceTemplate.Name)

err = computeOperationWait(config.clientCompute, op, project, "Creating Instance Template")
err = computeSharedOperationWait(config.clientCompute, op, project, "Creating Instance Template")
if err != nil {
return err
}

return resourceComputeInstanceTemplateRead(d, meta)
}

func flattenDisks(disks []*compute.AttachedDisk, d *schema.ResourceData) []map[string]interface{} {
func flattenDisks(disks []*computeBeta.AttachedDisk, d *schema.ResourceData) []map[string]interface{} {
result := make([]map[string]interface{}, 0, len(disks))
for i, disk := range disks {
diskMap := make(map[string]interface{})
@@ -647,10 +660,22 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{
return err
}

instanceTemplate, err := config.clientCompute.InstanceTemplates.Get(
project, d.Id()).Do()
if err != nil {
return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string)))
instanceTemplate := &computeBeta.InstanceTemplate{}
switch getComputeApiVersion(d, InstanceTemplateBaseApiVersion, InstanceTemplateVersionedFeatures) {
case v1:
instanceTemplateV1, err := config.clientCompute.InstanceTemplates.Get(project, d.Id()).Do()
if err != nil {
return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string)))
}
if err := Convert(instanceTemplateV1, instanceTemplate); err != nil {
return err
}
case v0beta:
var err error
instanceTemplate, err = config.clientComputeBeta.InstanceTemplates.Get(project, d.Id()).Do()
if err != nil {
return handleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string)))
}
}

// Set the metadata fingerprint if there is one.