Add support for kubernetes_replication_controller
Commit 895062e (1 parent: faf2440)
Showing 6 changed files with 1,723 additions and 0 deletions.
kubernetes/resource_kubernetes_replication_controller.go (258 additions, 0 deletions)
@@ -0,0 +1,258 @@
package kubernetes

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	pkgApi "k8s.io/apimachinery/pkg/types"
	api "k8s.io/kubernetes/pkg/api/v1"
	kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

func resourceKubernetesReplicationController() *schema.Resource {
	return &schema.Resource{
		Create: resourceKubernetesReplicationControllerCreate,
		Read:   resourceKubernetesReplicationControllerRead,
		Exists: resourceKubernetesReplicationControllerExists,
		Update: resourceKubernetesReplicationControllerUpdate,
		Delete: resourceKubernetesReplicationControllerDelete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(10 * time.Minute),
			Update: schema.DefaultTimeout(10 * time.Minute),
			Delete: schema.DefaultTimeout(10 * time.Minute),
		},

		Schema: map[string]*schema.Schema{
			"metadata": namespacedMetadataSchema("replication controller", true),
			"spec": {
				Type:        schema.TypeList,
				Description: "Spec defines the specification of the desired behavior of the replication controller. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status",
				Required:    true,
				MaxItems:    1,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"min_ready_seconds": {
							Type:        schema.TypeInt,
							Description: "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
							Optional:    true,
							Default:     0,
						},
						"replicas": {
							Type:        schema.TypeInt,
							Description: "The number of desired replicas. Defaults to 1. More info: http://kubernetes.io/docs/user-guide/replication-controller#what-is-a-replication-controller",
							Optional:    true,
							Default:     1,
						},
						"selector": {
							Type:        schema.TypeMap,
							Description: "A label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors",
							Required:    true,
						},
						"template": {
							Type:        schema.TypeList,
							Description: "Describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: http://kubernetes.io/docs/user-guide/replication-controller#pod-template",
							Required:    true,
							MaxItems:    1,
							Elem: &schema.Resource{
								Schema: podSpecFields(),
							},
						},
					},
				},
			},
		},
	}
}
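
// For illustration only (not part of this commit): a hypothetical Terraform
// configuration exercising the schema above. All values are invented, and the
// template body is assumed to carry container blocks via podSpecFields():
//
//	resource "kubernetes_replication_controller" "example" {
//	  metadata {
//	    name = "example"
//	  }
//	  spec {
//	    replicas = 2
//	    selector {
//	      app = "example"
//	    }
//	    template {
//	      container {
//	        name  = "example"
//	        image = "nginx:1.7.9"
//	      }
//	    }
//	  }
//	}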

func resourceKubernetesReplicationControllerCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*kubernetes.Clientset)

	metadata := expandMetadata(d.Get("metadata").([]interface{}))
	spec, err := expandReplicationControllerSpec(d.Get("spec").([]interface{}))
	if err != nil {
		return err
	}

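	// Automounting the default service account token makes the API server
	// inject a token volume and mount into every pod it creates, which would
	// drift from the pod spec declared in Terraform; presumably that is why
	// automounting is pinned to false here.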
	spec.Template.Spec.AutomountServiceAccountToken = ptrToBool(false)

	rc := api.ReplicationController{
		ObjectMeta: metadata,
		Spec:       spec,
	}

	log.Printf("[INFO] Creating new replication controller: %#v", rc)
	out, err := conn.CoreV1().ReplicationControllers(metadata.Namespace).Create(&rc)
	if err != nil {
		return fmt.Errorf("Failed to create replication controller: %s", err)
	}

	d.SetId(buildId(out.ObjectMeta))

	log.Printf("[DEBUG] Waiting for replication controller %s to schedule %d replicas",
		d.Id(), *out.Spec.Replicas)
	// 10 mins should be sufficient for scheduling ~10k replicas
	err = resource.Retry(d.Timeout(schema.TimeoutCreate),
		waitForDesiredReplicasFunc(conn, out.GetNamespace(), out.GetName()))
	if err != nil {
		return err
	}
	// We could wait for all pods to actually reach Ready state,
	// but that means checking each pod status separately (which can be
	// expensive at scale) as there's no aggregate data available from the API.

	log.Printf("[INFO] Submitted new replication controller: %#v", out)

	return resourceKubernetesReplicationControllerRead(d, meta)
}

func resourceKubernetesReplicationControllerRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*kubernetes.Clientset)

	namespace, name := idParts(d.Id())
	log.Printf("[INFO] Reading replication controller %s", name)
	rc, err := conn.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		log.Printf("[DEBUG] Received error: %#v", err)
		return err
	}
	log.Printf("[INFO] Received replication controller: %#v", rc)

	err = d.Set("metadata", flattenMetadata(rc.ObjectMeta))
	if err != nil {
		return err
	}

	spec, err := flattenReplicationControllerSpec(rc.Spec)
	if err != nil {
		return err
	}

	err = d.Set("spec", spec)
	if err != nil {
		return err
	}

	return nil
}

func resourceKubernetesReplicationControllerUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*kubernetes.Clientset)

	namespace, name := idParts(d.Id())

	ops := patchMetadata("metadata.0.", "/metadata/", d)

	if d.HasChange("spec") {
		spec, err := expandReplicationControllerSpec(d.Get("spec").([]interface{}))
		if err != nil {
			return err
		}

		ops = append(ops, &ReplaceOperation{
			Path:  "/spec",
			Value: spec,
		})
	}
	data, err := ops.MarshalJSON()
	if err != nil {
		return fmt.Errorf("Failed to marshal update operations: %s", err)
	}
	log.Printf("[INFO] Updating replication controller %q: %v", name, string(data))
	out, err := conn.CoreV1().ReplicationControllers(namespace).Patch(name, pkgApi.JSONPatchType, data)
	if err != nil {
		return fmt.Errorf("Failed to update replication controller: %s", err)
	}
	log.Printf("[INFO] Submitted updated replication controller: %#v", out)
	d.SetId(buildId(out.ObjectMeta))

	err = resource.Retry(d.Timeout(schema.TimeoutUpdate),
		waitForDesiredReplicasFunc(conn, namespace, name))
	if err != nil {
		return err
	}

	return resourceKubernetesReplicationControllerRead(d, meta)
}

func resourceKubernetesReplicationControllerDelete(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*kubernetes.Clientset)

	namespace, name := idParts(d.Id())
	log.Printf("[INFO] Deleting replication controller: %#v", name)

	// Drain all replicas before deleting
	var ops PatchOperations
	ops = append(ops, &ReplaceOperation{
		Path:  "/spec/replicas",
		Value: 0,
	})
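	// Assuming ReplaceOperation marshals to standard RFC 6902 JSON Patch,
	// the request body produced below is:
	//   [{"op":"replace","path":"/spec/replicas","value":0}]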
	data, err := ops.MarshalJSON()
	if err != nil {
		return err
	}
	_, err = conn.CoreV1().ReplicationControllers(namespace).Patch(name, pkgApi.JSONPatchType, data)
	if err != nil {
		return err
	}

	// Wait until all replicas are gone
	err = resource.Retry(d.Timeout(schema.TimeoutDelete),
		waitForDesiredReplicasFunc(conn, namespace, name))
	if err != nil {
		return err
	}

	err = conn.CoreV1().ReplicationControllers(namespace).Delete(name, &metav1.DeleteOptions{})
	if err != nil {
		return err
	}

	log.Printf("[INFO] Replication controller %s deleted", name)

	d.SetId("")
	return nil
}

func resourceKubernetesReplicationControllerExists(d *schema.ResourceData, meta interface{}) (bool, error) {
	conn := meta.(*kubernetes.Clientset)

	namespace, name := idParts(d.Id())
	log.Printf("[INFO] Checking replication controller %s", name)
	_, err := conn.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 {
			return false, nil
		}
		log.Printf("[DEBUG] Received error: %#v", err)
	}
	return true, err
}

func waitForDesiredReplicasFunc(conn *kubernetes.Clientset, ns, name string) resource.RetryFunc {
	return func() *resource.RetryError {
		rc, err := conn.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return resource.NonRetryableError(err)
		}

		desiredReplicas := *rc.Spec.Replicas
		log.Printf("[DEBUG] Current number of labelled replicas of %q: %d (of %d)\n",
			rc.GetName(), rc.Status.FullyLabeledReplicas, desiredReplicas)

		if rc.Status.FullyLabeledReplicas == desiredReplicas {
			return nil
		}

		return resource.RetryableError(fmt.Errorf("Waiting for %d replicas of %q to be scheduled (%d)",
			desiredReplicas, rc.GetName(), rc.Status.FullyLabeledReplicas))
	}
}
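
The helpers referenced above (expandReplicationControllerSpec, flattenReplicationControllerSpec, podSpecFields, ptrToBool, and the PatchOperations machinery) are defined in the other files of this commit and are not shown on this page. As a rough illustration of the expected shape, here is a minimal sketch of an expander for the "spec" block; the field handling is inferred from the schema above, and every helper it calls (ptrToInt32, expandStringMap, expandPodSpec) is assumed rather than taken from the commit:

	// Hypothetical sketch only; the real implementation lives in another
	// file of this commit. It converts the single-element "spec" list from
	// the Terraform schema into the client type used above.
	func expandReplicationControllerSpecSketch(l []interface{}) (api.ReplicationControllerSpec, error) {
		obj := api.ReplicationControllerSpec{}
		if len(l) == 0 || l[0] == nil {
			// An empty spec block falls back to API defaults.
			return obj, nil
		}
		in := l[0].(map[string]interface{})

		// Scalar fields map directly onto the v1 API types.
		obj.MinReadySeconds = int32(in["min_ready_seconds"].(int))
		obj.Replicas = ptrToInt32(int32(in["replicas"].(int))) // assumed sibling of ptrToBool

		// The selector is a plain map[string]string label query.
		obj.Selector = expandStringMap(in["selector"].(map[string]interface{})) // assumed helper

		// The pod template carries the fields defined by podSpecFields().
		podSpec, err := expandPodSpec(in["template"].([]interface{})) // assumed helper
		if err != nil {
			return obj, err
		}
		obj.Template = &api.PodTemplateSpec{Spec: podSpec}

		return obj, nil
	}

A matching flatten helper would invert this mapping so that Read can store the live object back into state.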