Automate Qase 222, 210, 261, 209, 217, 195, 230
Signed-off-by: Parthvi Vala <[email protected]>
valaparthvi committed Sep 24, 2024
1 parent dd0eaa5 commit 572b194
Showing 4 changed files with 236 additions and 3 deletions.
14 changes: 12 additions & 2 deletions hosted/aks/helper/helper_cluster.go
@@ -199,11 +199,21 @@ func AddNodePool(cluster *management.Cluster, increaseBy int, client *rancher.Cl

for i := 1; i <= increaseBy; i++ {
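// Build the new node pool from the template so it inherits the full configuration of the existing pool (autoscaling, labels, taints, and OS disk settings), with a fresh random name and a count of 1.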
newNodepool := management.AKSNodePool{
AvailabilityZones: npTemplate.AvailabilityZones,
Count: pointer.Int64(1),
VMSize: npTemplate.VMSize,
Mode: npTemplate.Mode,
EnableAutoScaling: npTemplate.EnableAutoScaling,
MaxCount: npTemplate.MaxCount,
MaxPods: npTemplate.MaxPods,
MaxSurge: npTemplate.MaxSurge,
MinCount: npTemplate.MinCount,
Mode: npTemplate.Mode,
Name: pointer.String(namegen.RandStringLower(5)),
NodeLabels: npTemplate.NodeLabels,
NodeTaints: npTemplate.NodeTaints,
OsDiskSizeGB: npTemplate.OsDiskSizeGB,
OsDiskType: npTemplate.OsDiskType,
OsType: npTemplate.OsType,
VMSize: npTemplate.VMSize,
}
updateNodePoolsList = append(updateNodePoolsList, newNodepool)

5 changes: 5 additions & 0 deletions hosted/aks/p1/p1_import_test.go
@@ -182,6 +182,11 @@ var _ = Describe("P1Import", func() {
updateSystemNodePoolCheck(cluster, ctx.RancherAdminClient)
})

It("should successfully edit mode of the nodepool", func() {
testCaseID = 291
updateNodePoolModeCheck(cluster, ctx.RancherAdminClient)
})

})

When("a cluster is created and imported for upgrade", func() {
179 changes: 178 additions & 1 deletion hosted/aks/p1/p1_provisioning_test.go
@@ -3,6 +3,8 @@ package p1_test
import (
"fmt"
"os"
"os/user"
"reflect"
"strconv"
"strings"
"sync"
@@ -11,6 +13,7 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
"github.com/rancher/shepherd/extensions/clusters"
"github.com/rancher/shepherd/extensions/clusters/aks"
namegen "github.com/rancher/shepherd/pkg/namegenerator"
"k8s.io/utils/pointer"
@@ -118,7 +121,143 @@ var _ = Describe("P1Provisioning", func() {
Expect(*cluster.AKSStatus.UpstreamSpec.Monitoring).To(BeTrue())
})

// TODO: Discuss why only one nodepool is taken into account
XIt("updating a cluster while it is still provisioning", func() {
// Blocked by: https://github.com/rancher/aks-operator/issues/667
testCaseID = 222
var err error
k8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCredID, location, true)
Expect(err).NotTo(HaveOccurred())
GinkgoLogr.Info(fmt.Sprintf("Using K8s version %s for cluster %s", k8sVersion, clusterName))

cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, location, nil)
Expect(err).To(BeNil())

Eventually(func() string {
cluster, err = ctx.RancherAdminClient.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
return cluster.State
}, "1m", "1s").Should(ContainSubstring("provisioning"))

// Wait until the cluster appears on Azure before updating it
Eventually(func() bool {
var existsOnCloud bool
existsOnCloud, err = helper.ClusterExistsOnAzure(clusterName, clusterName)
if err != nil && strings.Contains(err.Error(), "NotFound") {
return false
}
return existsOnCloud
}, "1m", "2s").Should(BeTrue())

Expect(*cluster.AKSConfig.KubernetesVersion).To(Equal(k8sVersion))

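// Add node pools and upgrade Kubernetes while the cluster is still provisioning.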
initialNPCount := len(cluster.AKSConfig.NodePools)
cluster, err = helper.AddNodePool(cluster, 3, ctx.RancherAdminClient, false, false)
Expect(err).To(BeNil())
Expect(cluster.AKSConfig.NodePools).To(HaveLen(initialNPCount + 3))

var upgradeK8sVersion string
upgradeK8sVersion, err = helper.GetK8sVersion(ctx.RancherAdminClient, ctx.CloudCredID, location, false)
Expect(err).NotTo(HaveOccurred())
GinkgoLogr.Info(fmt.Sprintf("Using K8s version %s for cluster %s", k8sVersion, clusterName))

cluster, err = helper.UpgradeClusterKubernetesVersion(cluster, upgradeK8sVersion, ctx.RancherAdminClient, false)
Expect(err).To(BeNil())
Expect(*cluster.AKSConfig.KubernetesVersion).To(Equal(upgradeK8sVersion))

cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())

err = clusters.WaitClusterToBeUpgraded(ctx.RancherAdminClient, cluster.ID)
Expect(err).To(BeNil())

helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)

cluster, err = ctx.RancherAdminClient.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
Expect(cluster.AKSStatus.UpstreamSpec.NodePools).To(HaveLen(initialNPCount + 3))
Expect(cluster.AKSStatus.UpstreamSpec.KubernetesVersion).To(Equal(upgradeK8sVersion))
})

It("create cluster with network policy: calico and plugin: kubenet", func() {
testCaseID = 210
updateFunc := func(aksConfig *aks.ClusterConfig) {
aksConfig.NetworkPolicy = pointer.String("calico")
aksConfig.NetworkPlugin = pointer.String("kubenet")
}
var err error
cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, location, updateFunc)
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())

Expect(*cluster.AKSConfig.NetworkPolicy).To(Equal("calico"))
Expect(*cluster.AKSConfig.NetworkPlugin).To(Equal("kubenet"))
Expect(*cluster.AKSStatus.UpstreamSpec.NetworkPolicy).To(Equal("calico"))
Expect(*cluster.AKSStatus.UpstreamSpec.NetworkPlugin).To(Equal("kubenet"))

helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)
})

XIt("should successfully create cluster with underscore in the name", func() {
// Blocked by https://github.com/rancher/dashboard/issues/9416
testCaseID = 261
if ctx.ClusterCleanup {
clusterName = namegen.AppendRandomString(fmt.Sprintf("%s_hp_ci", helpers.Provider))
} else {
testuser, _ := user.Current()
clusterName = namegen.AppendRandomString(fmt.Sprintf("%s_%s_hp_ci", helpers.Provider, testuser.Username))
}
var err error
cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, location, nil)
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())
helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)
})

It("should successfully create cluster with custom nodepool parameters", func() {
testCaseID = 209
updateFunc := func(aksConfig *aks.ClusterConfig) {
nodepools := *aksConfig.NodePools
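// Customize every pool in the template: availability zone, OS disk, autoscaling bounds, VM size, pod limit, surge, and node labels.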
for i := range nodepools {
az := []string{"3"}
nodepools[i].AvailabilityZones = &az
nodepools[i].OsDiskSizeGB = pointer.Int64(64)
nodepools[i].NodeCount = pointer.Int64(3)
nodepools[i].OsDiskType = "Ephemeral"
nodepools[i].EnableAutoScaling = pointer.Bool(true)
nodepools[i].MinCount = pointer.Int64(2)
nodepools[i].MaxCount = pointer.Int64(6)
nodepools[i].VMSize = "Standard_DS3_v2"
nodepools[i].MaxPods = pointer.Int64(20)
nodepools[i].MaxSurge = "2"
nodepools[i].NodeLabels = map[string]string{"custom": "true"}
}
}
var err error
cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, location, updateFunc)
Expect(err).To(BeNil())
cluster, err = helpers.WaitUntilClusterIsReady(cluster, ctx.RancherAdminClient)
Expect(err).To(BeNil())
helpers.ClusterIsReadyChecks(cluster, ctx.RancherAdminClient, clusterName)
})

When("a cluster with invalid config is created", func() {
It("should fail to create 2 clusters with same name in 2 different resource groups", func() {
testCaseID = 217
var err error
cluster, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, location, nil)
Expect(err).To(BeNil())
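// A second cluster with the same name, created in a different resource group, should be rejected.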
resourceGroup2 := namegen.AppendRandomString(helpers.ClusterNamePrefix)
updateFunc := func(aksConfig *aks.ClusterConfig) {
aksConfig.ResourceGroup = resourceGroup2
}
_, err = helper.CreateAKSHostedCluster(ctx.RancherAdminClient, clusterName, ctx.CloudCredID, k8sVersion, location, updateFunc)
Expect(err).ToNot(BeNil())
Expect(err.Error()).To(ContainSubstring("cluster already exists"))
})

It("should fail to create a cluster with 0 nodecount", func() {
testCaseID = 186
updateFunc := func(aksConfig *aks.ClusterConfig) {
@@ -184,6 +323,37 @@ var _ = Describe("P1Provisioning", func() {
Expect(err).NotTo(HaveOccurred())
})

It("should not be able to edit availability zone of a nodepool", func() {
// Refer: https://github.com/rancher/aks-operator/issues/669
testCaseID = 195
originalNPMap := make(map[string][]string)
newAZ := []string{"3"}
updateFunc := func(cluster *management.Cluster) {
nodepools := cluster.AKSConfig.NodePools
for i := range nodepools {
originalNPMap[*nodepools[i].Name] = *nodepools[i].AvailabilityZones
nodepools[i].AvailabilityZones = &newAZ
}
}
var err error
cluster, err = helper.UpdateCluster(cluster, ctx.RancherAdminClient, updateFunc)
Expect(err).To(BeNil())
for _, np := range cluster.AKSConfig.NodePools {
Expect(*np.AvailabilityZones).To(Equal(newAZ))
}

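// The availability zone change is expected to be rejected; wait for AKSConfig to be restored to the original values.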
Eventually(func() bool {
cluster, err = ctx.RancherAdminClient.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
for _, np := range cluster.AKSConfig.NodePools {
if !reflect.DeepEqual(*np.AvailabilityZones, originalNPMap[*np.Name]) {
return false
}
}
return true
}, "5m", "5s").Should(BeTrue(), "Timed out while waiting for config to be restored")
})

It("should not delete the resource group when cluster is deleted", func() {
testCaseID = 207
err := helper.DeleteAKSHostCluster(cluster, ctx.RancherAdminClient)
@@ -434,8 +604,10 @@ var _ = Describe("P1Provisioning", func() {
Expect(len(cluster.AKSStatus.UpstreamSpec.NodePools)).To(Equal(2))
})

It("should to able to delete a nodepool and add a new one", func() {
XIt("should to able to delete a nodepool and add a new one with different availability zone", func() {
// Blocked by: https://github.com/rancher/aks-operator/issues/667#issuecomment-2370798904
testCaseID = 190
// also covers testCaseID = 194
deleteAndAddNpCheck(cluster, ctx.RancherAdminClient)
})

@@ -448,5 +620,10 @@ var _ = Describe("P1Provisioning", func() {
testCaseID = 204
updateSystemNodePoolCheck(cluster, ctx.RancherAdminClient)
})

It("should successfully edit mode of the nodepool", func() {
testCaseID = 230
updateNodePoolModeCheck(cluster, ctx.RancherAdminClient)
})
})
})
41 changes: 41 additions & 0 deletions hosted/aks/p1/p1_suite_test.go
@@ -90,6 +90,8 @@ func deleteAndAddNpCheck(cluster *management.Cluster, client *rancher.Client) {
originalLen := len(cluster.AKSConfig.NodePools)
var npToBeDeleted management.AKSNodePool
newPoolName := fmt.Sprintf("newpool%s", namegen.RandStringLower(3))
newPoolAZ := []string{"2", "3"}

updateFunc := func(cluster *management.Cluster) {
var updatedNodePools []management.AKSNodePool
for _, np := range cluster.AKSConfig.NodePools {
@@ -102,6 +104,8 @@ func deleteAndAddNpCheck(cluster *management.Cluster, client *rancher.Client) {
}
newNodePool := npToBeDeleted
newNodePool.Name = &newPoolName
newNodePool.AvailabilityZones = &newPoolAZ
// testCaseID = 194
updatedNodePools = append(updatedNodePools, newNodePool)
cluster.AKSConfig.NodePools = updatedNodePools
}
@@ -118,6 +122,7 @@ func deleteAndAddNpCheck(cluster *management.Cluster, client *rancher.Client) {
}
if *np.Name == newPoolName {
npAdded = true
Expect(*np.AvailabilityZones).To(Equal(newPoolAZ))
}
}
Expect(npAdded).To(BeTrue())
@@ -139,6 +144,8 @@ func deleteAndAddNpCheck(cluster *management.Cluster, client *rancher.Client) {
for _, np := range cluster.AKSConfig.NodePools {
if *np.Name == newPoolName {
npAddedToUpstream = true
// testCaseID = 194
Expect(*np.AvailabilityZones).To(Equal(newPoolAZ))
}
if *np.Name == *npToBeDeleted.Name {
npDeletedFromUpstream = false
@@ -320,3 +327,37 @@ func updateSystemNodePoolCheck(cluster *management.Cluster, client *rancher.Clie
return true
}, "5m", "5s").Should(BeTrue(), "Failed while upstream nodepool update")
}

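// updateNodePoolModeCheck flips each node pool's mode between "System" and "User" via AKSConfig and verifies the change is propagated to the upstream spec.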
func updateNodePoolModeCheck(cluster *management.Cluster, client *rancher.Client) {
var originalModeMap = make(map[string]string)
updateFunc := func(cluster *management.Cluster) {
nodepools := cluster.AKSConfig.NodePools
for i := range nodepools {
originalModeMap[*nodepools[i].Name] = nodepools[i].Mode
if nodepools[i].Mode == "User" {
nodepools[i].Mode = "System"
} else if nodepools[i].Mode == "System" {
nodepools[i].Mode = "User"
}
}
}
var err error
cluster, err = helper.UpdateCluster(cluster, client, updateFunc)
Expect(err).To(BeNil())
for _, np := range cluster.AKSConfig.NodePools {
Expect(np.Mode).ToNot(Equal(originalModeMap[*np.Name]))
}
err = clusters.WaitClusterToBeUpgraded(client, cluster.ID)
Expect(err).To(BeNil())

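// Wait until the upstream spec reports the flipped mode for every node pool.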
Eventually(func() bool {
cluster, err = client.Management.Cluster.ByID(cluster.ID)
Expect(err).NotTo(HaveOccurred())
for _, np := range cluster.AKSStatus.UpstreamSpec.NodePools {
if np.Mode == originalModeMap[*np.Name] {
return false
}
}
return true
}, "5m", "5s").Should(BeTrue(), "Failed while upstream nodepool mode update")
}
