diff --git a/docs/upgrade_notes.rst b/docs/upgrade_notes.rst
index 27c9c3e0ec..230850a4d5 100644
--- a/docs/upgrade_notes.rst
+++ b/docs/upgrade_notes.rst
@@ -5,6 +5,17 @@
 This page describes how to upgrade from a previous version to a new version
 which contains backward incompatible or semi-incompatible changes and how to
 preserve the old behavior when this is possible.
 
+Libcloud 3.9.0
+--------------
+
+* [AZURE ARM] Added a new argument to ``destroy_node()`` to also delete the
+  node's managed OS disk as part of the node's deletion. Defaults to ``True``.
+  This can be reverted by setting the argument to ``False`` in the call:
+
+  .. sourcecode:: python
+
+      destroy_node(..., ex_destroy_os_disk=False)
+
 Libcloud 3.8.0
 --------------
 
diff --git a/libcloud/compute/drivers/azure_arm.py b/libcloud/compute/drivers/azure_arm.py
index 75e1b70200..9c540fec73 100644
--- a/libcloud/compute/drivers/azure_arm.py
+++ b/libcloud/compute/drivers/azure_arm.py
@@ -808,6 +808,7 @@ def destroy_node(
         ex_destroy_vhd=True,
         ex_poll_qty=10,
         ex_poll_wait=10,
+        ex_destroy_os_disk=True,
     ):
         """
         Destroy a node.
@@ -830,12 +831,16 @@ def destroy_node(
         :param ex_poll_wait: Delay in seconds between retries (default 10).
         :type node: ``int``
 
+        :param ex_destroy_os_disk: Destroy the OS disk associated with
+            this node, but only if it is a managed disk (default True).
+        :type ex_destroy_os_disk: ``bool``
+
         :return: True if the destroy was successful, raises exception
             otherwise.
         :rtype: ``bool``
         """
-        do_node_polling = ex_destroy_nic or ex_destroy_vhd
+        do_node_polling = ex_destroy_nic or ex_destroy_vhd or ex_destroy_os_disk
 
         # This returns a 202 (Accepted) which means that the delete happens
         # asynchronously.
 
@@ -856,7 +861,7 @@ def destroy_node(
             raise
 
         # Poll until the node actually goes away (otherwise attempt to delete
-        # NIC and VHD will fail with "resource in use" errors).
+        # NIC, VHD and OS Disk will fail with "resource in use" errors).
         retries = ex_poll_qty
         while do_node_polling and retries > 0:
             try:
@@ -912,6 +917,24 @@ def destroy_node(
                 raise
             time.sleep(10)
 
+        # Optionally destroy the OS managed disk
+        managed_disk = (
+            node.extra.get("properties", {})
+            .get("storageProfile", {})
+            .get("osDisk", {})
+            .get("managedDisk")
+        )
+        if ex_destroy_os_disk and managed_disk is not None:
+            managed_disk_id = managed_disk["id"]
+            try:
+                self.connection.request(
+                    managed_disk_id, params={"api-version": DISK_API_VERSION}, method="DELETE"
+                )
+            except BaseHTTPError as h:
+                if h.code not in (202, 204):
+                    # 202 means the delete was accepted (deferred); 204 means already deleted
+                    raise
+
         return True
 
     def create_volume(
diff --git a/libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_resourceGroups_000000_providers_Microsoft_Compute_disks_test_node_disk_1.json b/libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_resourceGroups_000000_providers_Microsoft_Compute_disks_test_node_disk_1.json
new file mode 100644
index 0000000000..985c0d3aa1
--- /dev/null
+++ b/libcloud/test/compute/fixtures/azure_arm/_subscriptions_99999999_resourceGroups_000000_providers_Microsoft_Compute_disks_test_node_disk_1.json
@@ -0,0 +1,51 @@
+{
+  "properties": {
+    "vmId": "99999999-9999-9999-9999-999999999999",
+    "hardwareProfile": {
+      "vmSize": "Standard_A1"
+    },
+    "storageProfile": {
+      "osDisk": {
+        "osType": "Linux",
+        "name": "test-disk-1",
+        "createOption": "FromImage",
+        "caching": "ReadWrite",
+        "managedDisk": {
+          "storageAccountType": "Standard_LRS",
+          "id": "/subscriptions/99999999-9999-9999-9999-999999999999/resourceGroups/000000/providers/Microsoft.Compute/disks/test-disk-1"
+        }
+      },
+      "dataDisks": [
+        {
+          "lun": 0,
+          "name": "DD0-9999",
+          "createOption": "Attach",
+          "caching": "None",
+          "managedDisk": {
+            "storageAccountType": "Standard_LRS",
+            "id": "/subscriptions/99999999-9999-9999-9999-999999999999/resourceGroups/000000/providers/Microsoft.Compute/disks/DD0-9999"
+          },
+          "diskSizeGB": 8
+        }
+      ]
+    },
+    "networkProfile": {
+      "networkInterfaces": [
+        {
+          "id": "/subscriptions/99999999-9999-9999-9999-999999999999/resourceGroups/0000/providers/Microsoft.Network/networkInterfaces/test-nic",
+          "properties": {
+            "primary": true
+          }
+        }
+      ]
+    },
+    "provisioningState": "Succeeded"
+  },
+  "type": "Microsoft.Compute/virtualMachines",
+  "location": "eastus",
+  "tags": {
+    "tag_key1": "tag_val1"
+  },
+  "id": "/subscriptions/99999999-9999-9999-9999-999999999999/resourceGroups/0000/providers/Microsoft.Compute/virtualMachines/test_vm",
+  "name": "test_vm"
+}
diff --git a/libcloud/test/compute/test_azure_arm.py b/libcloud/test/compute/test_azure_arm.py
index ed9dfbf033..038850c466 100644
--- a/libcloud/test/compute/test_azure_arm.py
+++ b/libcloud/test/compute/test_azure_arm.py
@@ -376,6 +376,49 @@ def error(e, **kwargs):
         self.assertTrue(ret)
         self.assertEqual(4, time_sleep_mock.call_count)  # Retries
 
+    @mock.patch("time.sleep", return_value=None)
+    def test_destroy_node__os_managed_disk(self, time_sleep_mock):
+        def error(e, **kwargs):
+            raise e(**kwargs)
+
+        node = self.driver.list_nodes()[0]
+
+        # OK responses
+        for response in (200, 202, 204):
+            AzureMockHttp.responses = [
+                # OK to the DELETE request
+                lambda f: (httplib.OK, None, {}, "OK"),
+                # 404 - Node destroyed
+                lambda f: error(BaseHTTPError, code=404, message="Not found"),
+                # 200 - NIC destroyed
+                lambda f: (httplib.OK, None, {}, "OK"),
+                # 200, 202 or 204 - managed OS disk destroyed, deferred or already gone
+                lambda f: (httplib.OK, None, {}, "OK")
+                if response == 200
+                else error(BaseHTTPError, code=response, message="Deleted or deferred"),
+            ]
+            ret = self.driver.destroy_node(node, ex_destroy_os_disk=True)
+            self.assertTrue(ret)
+
+    @mock.patch("time.sleep", return_value=None)
+    def test_destroy_node__os_managed_disk_error(self, time_sleep_mock):
+        def error(e, **kwargs):
+            raise e(**kwargs)
+
+        node = self.driver.list_nodes()[0]
+        AzureMockHttp.responses = [
+            # OK to the DELETE request
+            lambda f: (httplib.OK, None, {}, "OK"),
+            # 404 - Node destroyed
+            lambda f: error(BaseHTTPError, code=404, message="Not found"),
+            # 200 - NIC destroyed
+            lambda f: (httplib.OK, None, {}, "OK"),
+            # 500 - transient error when trying to destroy the OS disk
+            lambda f: error(BaseHTTPError, code=500, message="Cloud weather"),
+        ]
+        with self.assertRaises(BaseHTTPError):
+            self.driver.destroy_node(node, ex_destroy_os_disk=True)
+
     @mock.patch("time.sleep", return_value=None)
     def test_destroy_node__destroy_nic_retries(self, time_sleep_mock):
         def error(e, **kwargs):