diff --git a/cookbooks/block_device/libraries/block_device.rb b/cookbooks/block_device/libraries/block_device.rb index a9b2a8807..f1ee78381 100644 --- a/cookbooks/block_device/libraries/block_device.rb +++ b/cookbooks/block_device/libraries/block_device.rb @@ -6,12 +6,7 @@ # http://www.rightscale.com/terms.php and, if applicable, other agreements # such as a RightScale Master Subscription Agreement. -begin - require 'rightscale_tools' -rescue LoadError - Chef::Log.warn("Missing gem 'rightscale_tools'") -end - +require 'rightscale_tools' module RightScale module BlockDeviceHelper diff --git a/cookbooks/block_device/recipes/default.rb b/cookbooks/block_device/recipes/default.rb index ff0b1b390..5a3743c70 100644 --- a/cookbooks/block_device/recipes/default.rb +++ b/cookbooks/block_device/recipes/default.rb @@ -8,6 +8,8 @@ rightscale_marker +include_recipe "rightscale::install_tools" + class Chef::Recipe include RightScale::BlockDeviceHelper end diff --git a/cookbooks/db/attributes/default.rb b/cookbooks/db/attributes/default.rb index 0b3cba6e5..0a31c2100 100644 --- a/cookbooks/db/attributes/default.rb +++ b/cookbooks/db/attributes/default.rb @@ -53,6 +53,9 @@ # Database driver class to be used based on the type of driver default[:db][:client][:driver] = "" +# Database import/export dump temp location +default[:db][:dump][:location] = '/tmp' + # Server state variables # # Default value for DB status diff --git a/cookbooks/db/metadata.json b/cookbooks/db/metadata.json index 5222ef63f..6156f1cab 100644 --- a/cookbooks/db/metadata.json +++ b/cookbooks/db/metadata.json @@ -1,159 +1,220 @@ { - "groupings": { + "name": "db", + "description": "This cookbook provides a set of database recipes used by the RightScale Database Manager ServerTemplates. This cookbook does not contain a specific database implementation, but generic recipes that use the Lightweight Resource Provider (LWRP) interface.", + "long_description": "# RightScale Database Cookbook\n\n## DESCRIPTION:\n\nThis cookbook is available at [https://github.com/rightscale/rightscale_cookbooks](https://github.com/rightscale/rightscale_cookbooks).\n\nThis cookbook provides a set of database recipes used by the RightScale\nDatabase Manager ServerTemplates.\n\nThis cookbook does not contain a specific database implementation, rather\nit provides an interface for general database actions and parameters.\n\n## REQUIREMENTS:\n\n* Must be used with a 'db' provider in the cookbook path.\n* Depends on a `block_device` resource for backup and restore recipes.\n* Requires a virtual machine launched from a RightScale-managed RightImage.\n\n## COOKBOOKS DEPENDENCIES:\n\nPlease see `metadata.rb` file for the latest dependencies.\n\n## KNOWN LIMITATIONS:\n\n* Only one db provider should be present in your cookbook path.\n\n## SETUP:\n\n* To setup only the database client, place `db::default` recipe into\n your runlist. This will pull in generic client inputs, provide provider\n selection input and install client. Set db/provider_type input in\n RightScale ServerTemplate to set provider and version for 'db' resource.\n Packages specific to the database for application servers will be installed by\n the `install_client_driver` action of the db_ based on the type\n of driver. The driver type should be set by the application servers and\n passed to the db_ cookbook. 
This action also sets the\n `node[:db][:client][:driver]` attribute which is used to perform\n database specific actions.\n* To setup a database client and server, place the following recipes\n in order to your runlist:\n\n db_::setup_server_\n loads the database provider, tuning parameters, as well as other\n provider-specific attributes into the node as inputs.\n\n db::install_server\n sets up generic server and client inputs. This will also include\n db::default recipe which installs the client.\n\n For example: To set up and install MySQL 5.5 client and server\n\n db_mysql::setup_server_5_5\n db::install_server\n\n## USAGE:\n\n### Initialize a master database:\n\n1. Once your server is operational, run the:\n\n \"db::do_init_and_become_master\"\n\n recipe, which initializes your database onto a block device\n that supports backup and restore operations.\n2. Initialize your database from previous dump file or other source.\n3. Register your database with a DNS provider that supports dynamic DNS using:\n\n \"sys_dns::do_set_private\"\n\n to allow your application servers to start making connections.\n4. Backup your database using:\n\n \"db:do_backup\"\n\n so that you can restore the master database in the event \n of a failure or planned termination.\n\n### Restore a master database:\n\n1. Once your server is operational, run the:\n\n \"db::do_restore\"\n\n recipe, which restores your database from a backup previously saved to\n persistent cloud storage.\n2. Register your database with a DNS provider that supports dynamic DNS using:\n\n \"sys_dns::do_set_private\"\n\n to allow your application servers to start making connections.\n\n### Setup database client:\n\n1. Put \"db::default\" into database client ServerTemplate runlist.\n Use db/provider_type input to select from existing clients or override this\n input to add custom type of database client\n db/provider_type Input selects your database provider cookbook\n (e.g. db_mysql, db_postgres, db_oracle, etc.) and what database version the\n client will connect to. (e.g. 5.1, 5.5, 9.1). This affects what connector\n package to install. Syntax for this input is\n _ (i.e. db_mydatabase_1.0)\n2. Fill `db/application/password` , `db/application/user` and\n `db/dns/master/fqdn` inputs which are necessary to connect client to\n Database Manager.\n\n## DETAILS:\n\n### General\n\nThe 'db' interface is defined by a Lightweight Resource, which can be found in\nthe 'resources/default.rb' file.\n\nThis cookbook is intended to be used in conjunction with cookbooks that contain\nLightweight Providers which implement the 'db' interface. See RightScale's\n'db_mysql' cookbook for an example.\n\nFor more information about Lightweight Resources and Providers (LWRPs), please\nsee: [Lightweight Resources and Providers][LWRP]\n\n[LWRP]: http://support.rightscale.com/12-Guides/Chef_Cookbooks_Developer_Guide/04-Developer/06-Development_Resources/Lightweight_Resources_and_Providers_(LWRP)\n\n### Backup/Restore\n\nThis cookbook depends on the block_device LWRP for backup and restore actions.\nSee `db::do_backup` and `db::do_restore` recipes for examples. 
The\n'block_device' cookbook provides primary and secondary persistence solutions for\nmultiple clouds.\n\nHowever, using LWRPs one can provide their own block device implementation\ninstead.\n\nPlease see the 'block_device' cookbook for the list of available actions,\nattributes and usage.\n\n### Providers:\n\nWhen writing your own database Lightweight Provider:\n\n* The database provider to use is defined by the `node[:db][:provider]`\n attribute. You will need to override this attribute by adding the following\n code in the attributes file of your provider cookbook.\n\n set[:db][:provider] = \"db_myprovider\"\n\n* Any database-specific attributes that you wish to make into user-configurable\n inputs should be added to the cookbook metadata with the default recipe included in\n the attribute's 'recipes' array. For more about Chef metadata, please see:\n [Chef Metadata][Guide]\n* Your provider cookbook metadata should depend on this cookbook by adding a\n 'depends' line to its metadata. For example:\n\n depends \"db\"\n\n[Guide]: http://support.rightscale.com/12-Guides/Chef_Cookbooks_Developer_Guide/02-End_User/04-RightScale_Support_of_Chef/Chef_Metadata\n\n## LICENSE:\n\nCopyright RightScale, Inc. All rights reserved.\nAll access and use subject to the RightScale Terms of Service available at\nhttp://www.rightscale.com/terms.php and, if applicable, other agreements\nsuch as a RightScale Master Subscription Agreement.\n", + "maintainer": "RightScale, Inc.", + "maintainer_email": "support@rightscale.com", + "license": "Copyright RightScale, Inc. All rights reserved.", + "platforms": { + "centos": ">= 0.0.0", + "redhat": ">= 0.0.0", + "ubuntu": ">= 0.0.0" }, - "providing": { + "dependencies": { + "rightscale": ">= 0.0.0", + "block_device": ">= 0.0.0", + "sys_firewall": ">= 0.0.0", + "db_mysql": ">= 0.0.0", + "db_postgres": ">= 0.0.0" }, - "long_description": "# RightScale Database Cookbook\n\n## DESCRIPTION:\n\nThis cookbook provides a set of database recipes used by the RightScale\nDatabase Manager ServerTemplates.\n\nThis cookbook does not contain a specific database implementation, rather\nit provides an interface for general database actions and parameters.\n\n## REQUIREMENTS:\n\n* Must be used with a 'db' provider in the cookbook path.\n* Depends on a `block_device` resource for backup and restore recipes.\n* Requires a virtual machine launched from a RightScale-managed RightImage.\n\n## COOKBOOKS DEPENDENCIES:\n\nPlease see `metadata.rb` file for the latest dependencies.\n\n## KNOWN LIMITATIONS:\n\n* Only one db provider should be present in your cookbook path.\n\n## SETUP:\n\n* To setup only the database client, place `db::default` recipe into\n your runlist. This will pull in generic client inputs, provide provider\n selection input and install client. Set db/provider_type input in\n RightScale ServerTemplate to set provider and version for 'db' resource.\n Packages specific to the database for application servers will be installed by\n the `install_client_driver` action of the db_ based on the type\n of driver. The driver type should be set by the application servers and\n passed to the db_ cookbook. 
This action also sets the\n `node[:db][:client][:driver]` attribute which is used to perform\n database specific actions.\n* To setup a database client and server, place the following recipes\n in order to your runlist:\n\n db_::setup_server_\n loads the database provider, tuning parameters, as well as other\n provider-specific attributes into the node as inputs.\n\n db::install_server\n sets up generic server and client inputs. This will also include\n db::default recipe which installs the client.\n\n For example: To set up and install MySQL 5.5 client and server\n\n db_mysql::setup_server_5_5\n db::install_server\n\n## USAGE:\n\n### Initialize a master database:\n\n1. Once your server is operational, run the:\n\n \"db::do_init_and_become_master\"\n\n recipe, which initializes your database onto a block device\n that supports backup and restore operations.\n2. Initialize your database from previous dump file or other source.\n3. Register your database with a DNS provider that supports dynamic DNS using:\n\n \"sys_dns::do_set_private\"\n\n to allow your application servers to start making connections.\n4. Backup your database using:\n\n \"db:do_backup\"\n\n so that you can restore the master database in the event \n of a failure or planned termination.\n\n### Restore a master database:\n\n1. Once your server is operational, run the:\n\n \"db::do_restore\"\n\n recipe, which restores your database from a backup previously saved to\n persistent cloud storage.\n2. Register your database with a DNS provider that supports dynamic DNS using:\n\n \"sys_dns::do_set_private\"\n\n to allow your application servers to start making connections.\n\n### Setup database client:\n\n1. Put \"db::default\" into database client ServerTemplate runlist.\n Use db/provider_type input to select from existing clients or override this\n input to add custom type of database client\n db/provider_type Input selects your database provider cookbook\n (e.g. db_mysql, db_postgres, db_oracle, etc.) and what database version the\n client will connect to. (e.g. 5.1, 5.5, 9.1). This affects what connector\n package to install. Syntax for this input is\n _ (i.e. db_mydatabase_1.0)\n2. Fill `db/application/password` , `db/application/user` and\n `db/dns/master/fqdn` inputs which are necessary to connect client to\n Database Manager.\n\n## DETAILS:\n\n### General\n\nThe 'db' interface is defined by a Lightweight Resource, which can be found in\nthe 'resources/default.rb' file.\n\nThis cookbook is intended to be used in conjunction with cookbooks that contain\nLightweight Providers which implement the 'db' interface. See RightScale's\n'db_mysql' cookbook for an example.\n\nFor more information about Lightweight Resources and Providers (LWRPs), please\nsee: [Lightweight Resources and Providers][LWRP]\n\n[LWRP]: http://support.rightscale.com/12-Guides/Chef_Cookbooks_Developer_Guide/04-Developer/06-Development_Resources/Lightweight_Resources_and_Providers_(LWRP)\n\n### Backup/Restore\n\nThis cookbook depends on the block_device LWRP for backup and restore actions.\nSee `db::do_backup` and `db::do_restore` recipes for examples. 
The\n'block_device' cookbook provides primary and secondary persistence solutions for\nmultiple clouds.\n\nHowever, using LWRPs one can provide their own block device implementation\ninstead.\n\nPlease see the 'block_device' cookbook for the list of available actions,\nattributes and usage.\n\n### Providers:\n\nWhen writing your own database Lightweight Provider:\n\n* The database provider to use is defined by the `node[:db][:provider]`\n attribute. You will need to override this attribute by adding the following\n code in the attributes file of your provider cookbook.\n\n set[:db][:provider] = \"db_myprovider\"\n\n* Any database-specific attributes that you wish to make into user-configurable\n inputs should be added to the cookbook metadata with the default recipe included in\n the attribute's 'recipes' array. For more about Chef metadata, please see:\n [Chef Metadata][Guide]\n* Your provider cookbook metadata should depend on this cookbook by adding a\n 'depends' line to its metadata. For example:\n\n depends \"db\"\n\n[Guide]: http://support.rightscale.com/12-Guides/Chef_Cookbooks_Developer_Guide/02-End_User/04-RightScale_Support_of_Chef/Chef_Metadata\n\n## LICENSE:\n\nCopyright RightScale, Inc. All rights reserved.\nAll access and use subject to the RightScale Terms of Service available at\nhttp://www.rightscale.com/terms.php and, if applicable, other agreements\nsuch as a RightScale Master Subscription Agreement.\n", "recommendations": { }, + "suggestions": { + }, + "conflicting": { + }, + "providing": { + }, + "replacing": { + }, "attributes": { - "db/dump/storage_account_endpoint": { - "required": "optional", - "type": "string", - "calculated": false, - "description": "The endpoint URL for the storage cloud. This is used to override the default endpoint or for generic storage clouds such as Swift. Example: http://endpoint_ip:5000/v2.0/tokens", - "default": "", + "db": { + "display_name": "General Database Options", + "type": "hash", "choice": [ ], + "calculated": false, + "required": "optional", "recipes": [ - "db::do_dump_import", - "db::do_dump_export", - "db::do_dump_schedule_enable" - ], - "display_name": "Dump Storage Endpoint URL" + + ] }, - "db/dump": { - "required": "optional", - "type": "hash", - "calculated": false, + "db/dns/master/fqdn": { + "display_name": "Database Master FQDN", + "description": "The fully qualified domain name for the master database server. Example: db-master.example.com", + "required": "required", + "recipes": [ + "db::default", + "db::install_server" + ], "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/dns/master/id": { + "display_name": "Database Master DNS Record ID", + "description": "The unique identifier that is associated with the DNS A record of the master database server.The unique identifier is assigned by the DNS provider when you create a dynamic DNS A record. This ID is used to update the associated A record with the private IP address of the master server when this recipe is run. If you are using DNS Made Easy as your DNS provider, a 7-digit number is used (e.g., 4403234).Example:4403234", + "required": "required", "recipes": [ - + "db::do_primary_restore_and_become_master", + "db::do_secondary_restore_and_become_master", + "db::do_init_and_become_master", + "db::do_promote_to_master" ], - "display_name": "Import/export settings for database dump file management." 
- }, - "db/backup/primary/slave/cron/minute": { - "required": "optional", - "type": "string", - "calculated": false, - "description": "Defines the minute of the hour when the backup EBS snapshot will be taken of the slave database. Backups of the slave are taken hourly. By default, a minute will be randomly chosen at launch time. Uses standard crontab format (e.g., 30 for minute 30 of the hour). Example 30", "choice": [ ], - "recipes": [ - "db::do_primary_backup_schedule_enable" - ], - "display_name": "Slave Backup Cron Minute" + "calculated": false, + "type": "string" }, - "db/force_promote": { + "db/dns/slave/fqdn": { + "display_name": "Database Slave FQDN", + "description": "The fully qualified domain name for a slave database server. Example: db-slave.example.com", "required": "optional", - "type": "string", - "calculated": false, - "description": "If true, when promoting a slave to master, ignores making checks and changes to any current master. WARNING: setting this will promote a slave to a master with no replication until a new slave is brought up. Make sure you understand what you are doing before changing this value. Default: false", - "default": "false", - "choice": [ - "true", - "false" - ], "recipes": [ - "db::do_promote_to_master" + "db::do_set_dns_slave" ], - "display_name": "Force Promote to Master" - }, - "db/terminate_safety": { - "required": "optional", - "type": "string", - "calculated": false, - "description": "Prevents the accidental running of the 'db::do_teminate_server' recipe. This recipe will only run if this input variable is overridden and set to \"off\". Example: text:off", - "default": "Override the dropdown and set to \"off\" to really run this recipe", "choice": [ - "Override the dropdown and set to \"off\" to really run this recipe" - ], - "recipes": [ - "db::do_delete_volumes_and_terminate_server" + ], - "display_name": "Terminate Safety" + "calculated": false, + "type": "string" }, - "db/dump/database_name": { + "db/dns/slave/id": { + "display_name": "Database Slave DNS Record ID", + "description": "The unique identifier that is associated with the DNS A record of a slave server. The unique identifier is assigned by the DNS provider when you create a dynamic DNS A record. This ID is used to update the associated A record with the private IP address of a slave server when this recipe is run. If you are using DNS Made Easy as your DNS provider, a 7-digit number is used (e.g., 4403234). Example:4403234", "required": "required", - "type": "string", - "calculated": false, - "description": "Enter the name of the database name/schema to create/restore a dump from/for. Example: mydbschema", + "recipes": [ + "db::do_set_dns_slave" + ], "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/admin/user": { + "display_name": "Database Admin Username", + "description": "The username of the database user with 'admin' privileges. Example: cred:DBADMIN_USER.", + "required": "required", "recipes": [ - "db::do_dump_import", - "db::do_dump_export", - "db::do_dump_schedule_enable" + "db::install_server", + "db::setup_privileges_admin", + "db::do_primary_restore", + "db::do_primary_restore_and_become_master", + "db::do_secondary_restore", + "db::do_secondary_restore_and_become_master" ], - "display_name": "Database Schema Name" - }, - "db/backup/secondary/master/cron/minute": { - "required": "optional", - "type": "string", - "calculated": false, - "description": "Defines the minute of the hour when the secondary backup will be taken of the master database. 
Backups of the master are taken daily. By default, a minute will be randomly chosen at launch time. Otherwise, the time of the backup is defined by 'Master Secondary Backup Cron Hour' and 'Master Secondary Backup Cron Minute'. Uses standard crontab format (e.g., 30 for minute 30 of the hour).", "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/admin/password": { + "display_name": "Database Admin Password", + "description": "The password of the database user with 'admin' privileges. Example: cred:DBADMIN_PASSWORD.", + "required": "required", "recipes": [ - "db::do_secondary_backup_schedule_enable" + "db::install_server", + "db::setup_privileges_admin", + "db::do_primary_restore", + "db::do_primary_restore_and_become_master", + "db::do_secondary_restore", + "db::do_secondary_restore_and_become_master" ], - "display_name": "Master Secondary Backup Cron Minute" - }, - "db/backup/primary/master/cron/minute": { - "required": "optional", - "type": "string", - "calculated": false, - "description": "Defines the minute of the hour when the backup of the master database will be taken. Backups of the master are taken daily. By default, a minute will be randomly chosen at launch time. Otherwise, the time of the backup is defined by 'Master Backup Cron Hour' and 'Master Backup Cron Minute'. Uses standard crontab format. Example: 30", "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/replication/user": { + "display_name": "Database Replication Username", + "description": "The username of the database user that has 'replication' privileges. Example: cred:DBREPLICATION_USER.", + "required": "required", "recipes": [ - "db::do_primary_backup_schedule_enable" + "db::setup_replication_privileges", + "db::do_primary_restore_and_become_master", + "db::do_secondary_restore_and_become_master", + "db::do_init_and_become_master", + "db::do_promote_to_master", + "db::do_primary_init_slave", + "db::do_secondary_init_slave", + "db::do_init_slave_at_boot" ], - "display_name": "Master Backup Cron Minute" - }, - "db/backup/primary/master/cron/hour": { - "required": "optional", - "type": "string", - "calculated": false, - "description": "Defines the hour of the day when the primary backup will be taken of the master database. Backups of the master are taken daily. By default, an hour will be randomly chosen at launch time. Otherwise, the time of the backup is defined by 'Master Backup Cron Hour' and 'Master Backup Cron Minute'. However, if you specify a value in this input (e.g., 23 for 11:00 PM), then backups will occur once per day at the specified hour, rather than hourly. Uses standard crontab format. Example: 23 ", "choice": [ ], - "recipes": [ - "db::do_primary_backup_schedule_enable" - ], - "display_name": "Master Backup Cron Hour" + "calculated": false, + "type": "string" }, - "db/provider_type": { + "db/replication/password": { + "display_name": "Database Replication Password", + "description": "The password of the database user that has 'replication' privileges. Example: cred:DBREPLICATION_PASSWORD.", "required": "required", - "type": "string", + "recipes": [ + "db::setup_replication_privileges", + "db::do_primary_restore_and_become_master", + "db::do_secondary_restore_and_become_master", + "db::do_init_and_become_master", + "db::do_promote_to_master", + "db::do_primary_init_slave", + "db::do_secondary_init_slave", + "db::do_init_slave_at_boot" + ], + "choice": [ + + ], "calculated": false, - "description": "Database provider type to use on client side. 
This must be a string containing the provider cookbook name and (optionally) the version of the database. Example: db_mydatabase_1.0, db_mysql_5.1, db_mysql_5.5, db_postgres_9.1", + "type": "string" + }, + "db/replication/network_interface": { + "display_name": "Database Replication Network Interface", + "description": "The network interface used for replication. WARNING: when selecting 'public' we highly recommend enabling SSL encryption, otherwise data could travel over insecure connections. Make sure you understand what you are doing before changing this value. Default: private", + "required": "optional", "choice": [ - "db_mysql_5.1", - "db_mysql_5.5", - "db_postgres_9.1" + "private", + "public", + "vpn" ], + "default": "private", "recipes": [ - "db::default" + "db::install_server", + "db::do_promote_to_master", + "db::request_master_allow", + "db::request_master_deny", + "db::do_set_dns_slave" ], - "display_name": "Database Provider type" + "calculated": false, + "type": "string" }, "db/application/user": { - "required": "required", - "type": "string", - "calculated": false, + "display_name": "Database Application Username", "description": "The username of the database user that has 'user' privileges. Example: cred:DBAPPLICATION_USER.", + "required": "required", + "recipes": [ + "db::default", + "db::setup_privileges_application", + "db::install_server", + "db::do_primary_restore", + "db::do_primary_restore_and_become_master", + "db::do_secondary_restore", + "db::do_secondary_restore_and_become_master" + ], "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/application/password": { + "display_name": "Database Application Password", + "description": "The password of the database user that has 'user' privileges. Example: cred:DBAPPLICATION_PASSWORD.", + "required": "required", "recipes": [ "db::default", "db::setup_privileges_application", @@ -163,12 +224,14 @@ "db::do_secondary_restore", "db::do_secondary_restore_and_become_master" ], - "display_name": "Database Application Username" + "choice": [ + + ], + "calculated": false, + "type": "string" }, "db/init_slave_at_boot": { - "required": "optional", - "type": "string", - "calculated": false, + "display_name": "Init Slave at Boot", "description": "Set to 'True' to have the instance initialize the database server as a slave on boot. Set to 'False' if there is no master database server running. Example: false", "default": "false", "choice": [ @@ -178,60 +241,14 @@ "recipes": [ "db::do_init_slave_at_boot" ], - "display_name": "Init Slave at Boot" - }, - "db/dns/master/id": { - "required": "required", - "type": "string", - "calculated": false, - "description": "The unique identifier that is associated with the DNS A record of the master database server.The unique identifier is assigned by the DNS provider when you create a dynamic DNS A record. This ID is used to update the associated A record with the private IP address of the master server when this recipe is run. 
If you are using DNS Made Easy as your DNS provider, a 7-digit number is used (e.g., 4403234).Example:4403234", - "choice": [ - - ], - "recipes": [ - "db::do_primary_restore_and_become_master", - "db::do_secondary_restore_and_become_master", - "db::do_init_and_become_master", - "db::do_promote_to_master" - ], - "display_name": "Database Master DNS Record ID" - }, - "db/backup/secondary/slave/cron/minute": { - "required": "optional", - "type": "string", "calculated": false, - "description": "Defines the minute of the hour when the secondary backup will be taken of the slave database. Backups of the slave are taken hourly. By default, a minute will be randomly chosen at launch time. Uses standard crontab format (e.g., 30 for minute 30 of the hour). Uses standard crontab format (e.g., 30 for minute 30 of the hour).", - "choice": [ - - ], - "recipes": [ - "db::do_secondary_backup_schedule_enable" - ], - "display_name": "Slave Secondary Backup Cron Minute" - }, - "db/backup/timestamp_override": { - "required": "optional", "type": "string", - "calculated": false, - "description": "An optional variable to restore a database backup with a specific timestamp rather than the most recent backup in the lineage. You must specify a string that matches the timestamp tag on the volume snapshot. You will need to specify the timestamp that is defined by the snapshot's tag (not the name). For example, if the snapshot's tag is 'rs_backup:timestamp=1303613371' you would specify '1303613371' for this input. Example: 1303613371", - "choice": [ - - ], - "recipes": [ - "db::do_primary_restore_and_become_master", - "db::do_primary_restore", - "db::do_primary_init_slave", - "db::do_secondary_restore_and_become_master", - "db::do_secondary_restore", - "db::do_secondary_init_slave" - ], - "display_name": "Database Restore Timestamp Override" + "required": "optional" }, "db/dns/ttl": { - "required": "optional", - "type": "string", - "calculated": false, + "display_name": "Database DNS TTL Limit", "description": "The upper limit for the TTL of the master DB DNS record in seconds. This value should be kept low in the event of Master DB failure so that the DNS record updates in a timely manner. When installing the DB server, this value is checked in the DNS records. Input should be set for 300 when using CloudDNS. Example: 60", + "required": "optional", "default": "60", "choice": [ "60", @@ -240,250 +257,226 @@ "recipes": [ "db::install_server" ], - "display_name": "Database DNS TTL Limit" + "calculated": false, + "type": "string" }, - "db/replication/user": { + "db/provider_type": { + "display_name": "Database Provider type", + "description": "Database provider type to use on client side. This must be a string containing the provider cookbook name and (optionally) the version of the database. Example: db_mydatabase_1.0, db_mysql_5.1, db_mysql_5.5, db_postgres_9.1", "required": "required", - "type": "string", - "calculated": false, - "description": "The username of the database user that has 'replication' privileges. 
Example: cred:DBREPLICATION_USER.", "choice": [ - + "db_mysql_5.1", + "db_mysql_5.5", + "db_postgres_9.1" ], "recipes": [ - "db::setup_replication_privileges", + "db::default" + ], + "calculated": false, + "type": "string" + }, + "db/backup/lineage": { + "display_name": "Database Backup Lineage", + "description": "The prefix that will be used to name/locate the backup of a particular database.Note: For servers running on Rackspace, this value also indicates the Cloud Files container to use for storing primary backups.If a Cloud Files container with this name does not already exist,the setup process creates one. Example: text:prod_db_lineage", + "required": "required", + "recipes": [ + "db::do_primary_init_slave", + "db::do_secondary_init_slave", + "db::do_init_slave_at_boot", + "db::do_promote_to_master", "db::do_primary_restore_and_become_master", "db::do_secondary_restore_and_become_master", "db::do_init_and_become_master", - "db::do_promote_to_master", - "db::do_primary_init_slave", - "db::do_secondary_init_slave", - "db::do_init_slave_at_boot" + "db::do_primary_backup", + "db::do_primary_restore", + "db::do_primary_backup_schedule_enable", + "db::do_primary_backup_schedule_disable", + "db::do_force_reset", + "db::do_secondary_backup", + "db::do_secondary_restore", + "db::do_secondary_backup_schedule_enable", + "db::do_secondary_backup_schedule_disable" ], - "display_name": "Database Replication Username" - }, - "db/dns/slave/id": { - "required": "required", - "type": "string", - "calculated": false, - "description": "The unique identifier that is associated with the DNS A record of a slave server. The unique identifier is assigned by the DNS provider when you create a dynamic DNS A record. This ID is used to update the associated A record with the private IP address of a slave server when this recipe is run. If you are using DNS Made Easy as your DNS provider, a 7-digit number is used (e.g., 4403234). Example:4403234", "choice": [ ], - "recipes": [ - "db::do_set_dns_slave" - ], - "display_name": "Database Slave DNS Record ID" + "calculated": false, + "type": "string" }, - "db/backup/secondary/slave/cron/hour": { + "db/backup/lineage_override": { + "display_name": "Database Restore Lineage Override", + "description": "If defined, this will override the input defined for 'Backup Lineage' (db/backup/lineage) so that you can restore the database from another backup that has as a different lineage name. The most recently completed snapshots will be used unless a specific timestamp value is specified for 'Restore Timestamp Override' (db/backup/timestamp_override). Although this input allows you to restore from a different set of snapshots, subsequent backups will use 'Backup Lineage' to name the snapshots. Be sure to remove the 'Backup Lineage Override' input after the new master is operational. Example: text:new_db_lineage", "required": "optional", - "type": "string", - "calculated": false, - "description": "By default, secondary backups of the slave database are taken hourly. However, if you specify a value in this input (e.g., 23 for 11:00 PM), then backups will occur once per day at the specified hour, rather than hourly. 
Uses standard crontab format (e.g., 23 for 11:00 PM).", + "recipes": [ + "db::do_init_slave_at_boot", + "db::do_primary_restore_and_become_master", + "db::do_primary_restore", + "db::do_primary_init_slave", + "db::do_secondary_restore_and_become_master", + "db::do_secondary_restore", + "db::do_secondary_init_slave" + ], "choice": [ ], - "recipes": [ - "db::do_secondary_backup_schedule_enable" - ], - "display_name": "Slave Secondary Backup Cron Hour" + "calculated": false, + "type": "string" }, - "db/replication/network_interface": { + "db/backup/timestamp_override": { + "display_name": "Database Restore Timestamp Override", + "description": "An optional variable to restore a database backup with a specific timestamp rather than the most recent backup in the lineage. You must specify a string that matches the timestamp tag on the volume snapshot. You will need to specify the timestamp that is defined by the snapshot's tag (not the name). For example, if the snapshot's tag is 'rs_backup:timestamp=1303613371' you would specify '1303613371' for this input. Example: 1303613371", "required": "optional", - "type": "string", - "calculated": false, - "description": "The network interface used for replication. WARNING: when selecting 'public' we highly recommend enabling SSL encryption, otherwise data could travel over insecure connections. Make sure you understand what you are doing before changing this value. Default: private", - "default": "private", - "choice": [ - "private", - "public", - "vpn" - ], "recipes": [ - "db::install_server", - "db::do_promote_to_master", - "db::request_master_allow", - "db::request_master_deny", - "db::do_set_dns_slave" + "db::do_primary_restore_and_become_master", + "db::do_primary_restore", + "db::do_primary_init_slave", + "db::do_secondary_restore_and_become_master", + "db::do_secondary_restore", + "db::do_secondary_init_slave" ], - "display_name": "Database Replication Network Interface" - }, - "db/dns/master/fqdn": { - "required": "required", - "type": "string", - "calculated": false, - "description": "The fully qualified domain name for the master database server. Example: db-master.example.com", "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/backup/primary/master/cron/hour": { + "display_name": "Master Backup Cron Hour", + "description": "Defines the hour of the day when the primary backup will be taken of the master database. Backups of the master are taken daily. By default, an hour will be randomly chosen at launch time. Otherwise, the time of the backup is defined by 'Master Backup Cron Hour' and 'Master Backup Cron Minute'. However, if you specify a value in this input (e.g., 23 for 11:00 PM), then backups will occur once per day at the specified hour, rather than hourly. Uses standard crontab format. Example: 23 ", + "required": "optional", "recipes": [ - "db::default", - "db::install_server" + "db::do_primary_backup_schedule_enable" ], - "display_name": "Database Master FQDN" - }, - "db/dump/storage_account_id": { - "required": "required", - "type": "string", - "calculated": false, - "description": "In order to write the dump file to the specified cloud storage location, you need to provide cloud authentication credentials. For Amazon S3, use your Amazon access key ID (e.g., cred:AWS_ACCESS_KEY_ID). For Rackspace Cloud Files, use your Rackspace login username (e.g., cred:RACKSPACE_USERNAME). For OpenStack Swift the format is: 'tenantID:username'. 
Example: cred:AWS_ACCESS_KEY_ID", "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/backup/primary/slave/cron/hour": { + "display_name": "Slave Backup Cron Hour", + "description": "By default, primary backups of the slave database are taken hourly. However, if you specify a value in this input (e.g., 23 for 11:00 PM), then backups will occur once per day at the specified hour, rather than hourly. Example: 23.", + "required": "optional", "recipes": [ - "db::do_dump_import", - "db::do_dump_export", - "db::do_dump_schedule_enable" + "db::do_primary_backup_schedule_enable" ], - "display_name": "Dump Storage Account ID" - }, - "db/application/password": { - "required": "required", - "type": "string", - "calculated": false, - "description": "The password of the database user that has 'user' privileges. Example: cred:DBAPPLICATION_PASSWORD.", "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/backup/primary/master/cron/minute": { + "display_name": "Master Backup Cron Minute", + "description": "Defines the minute of the hour when the backup of the master database will be taken. Backups of the master are taken daily. By default, a minute will be randomly chosen at launch time. Otherwise, the time of the backup is defined by 'Master Backup Cron Hour' and 'Master Backup Cron Minute'. Uses standard crontab format. Example: 30", + "required": "optional", "recipes": [ - "db::default", - "db::setup_privileges_application", - "db::install_server", - "db::do_primary_restore", - "db::do_primary_restore_and_become_master", - "db::do_secondary_restore", - "db::do_secondary_restore_and_become_master" + "db::do_primary_backup_schedule_enable" ], - "display_name": "Database Application Password" - }, - "db/replication/password": { - "required": "required", - "type": "string", - "calculated": false, - "description": "The password of the database user that has 'replication' privileges. Example: cred:DBREPLICATION_PASSWORD.", "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/backup/primary/slave/cron/minute": { + "display_name": "Slave Backup Cron Minute", + "description": "Defines the minute of the hour when the backup EBS snapshot will be taken of the slave database. Backups of the slave are taken hourly. By default, a minute will be randomly chosen at launch time. Uses standard crontab format (e.g., 30 for minute 30 of the hour). Example 30", + "required": "optional", "recipes": [ - "db::setup_replication_privileges", - "db::do_primary_restore_and_become_master", - "db::do_secondary_restore_and_become_master", - "db::do_init_and_become_master", - "db::do_promote_to_master", - "db::do_primary_init_slave", - "db::do_secondary_init_slave", - "db::do_init_slave_at_boot" + "db::do_primary_backup_schedule_enable" ], - "display_name": "Database Replication Password" - }, - "db/dump/storage_account_secret": { - "required": "required", - "type": "string", - "calculated": false, - "description": "In order to write the dump file to the specified cloud storage location, you need to provide cloud authentication credentials. For Amazon S3, use your AWS secret access key (e.g., cred:AWS_SECRET_ACCESS_KEY). For Rackspace Cloud Files, use your Rackspace account API key (e.g., cred:RACKSPACE_AUTH_KEY). 
Example: cred:AWS_SECRET_ACCESS_KEY", "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/backup/secondary/master/cron/hour": { + "display_name": "Master Secondary Backup Cron Hour", + "description": "Defines the hour of the day when the secondary backup will be taken of the master database. Backups of the master are taken daily. By default, an hour will be randomly chosen at launch time. Otherwise, the time of the backup is defined by 'Master Secondary Backup Cron Hour' and 'Master Secondary Backup Cron Minute'. Uses standard crontab format (e.g., 23 for 11:00 PM).", + "required": "optional", "recipes": [ - "db::do_dump_import", - "db::do_dump_export", - "db::do_dump_schedule_enable" + "db::do_secondary_backup_schedule_enable" ], - "display_name": "Dump Storage Account Secret" - }, - "db/admin/password": { - "required": "required", - "type": "string", - "calculated": false, - "description": "The password of the database user with 'admin' privileges. Example: cred:DBADMIN_PASSWORD.", "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/backup/secondary/slave/cron/hour": { + "display_name": "Slave Secondary Backup Cron Hour", + "description": "By default, secondary backups of the slave database are taken hourly. However, if you specify a value in this input (e.g., 23 for 11:00 PM), then backups will occur once per day at the specified hour, rather than hourly. Uses standard crontab format (e.g., 23 for 11:00 PM).", + "required": "optional", "recipes": [ - "db::install_server", - "db::setup_privileges_admin", - "db::do_primary_restore", - "db::do_primary_restore_and_become_master", - "db::do_secondary_restore", - "db::do_secondary_restore_and_become_master" + "db::do_secondary_backup_schedule_enable" ], - "display_name": "Database Admin Password" - }, - "db/dump/prefix": { - "required": "required", - "type": "string", - "calculated": false, - "description": "The prefix that will be used to name/locate the backup of a particular database dump. Defines the prefix of the dump file name that will be used to name the backup database dump file, along with a timestamp. Example: prod_db_backup", "choice": [ ], - "recipes": [ - "db::do_dump_import", - "db::do_dump_export", - "db::do_dump_schedule_enable" - ], - "display_name": "Dump Prefix" + "calculated": false, + "type": "string" }, - "db/backup/primary/slave/cron/hour": { + "db/backup/secondary/master/cron/minute": { + "display_name": "Master Secondary Backup Cron Minute", + "description": "Defines the minute of the hour when the secondary backup will be taken of the master database. Backups of the master are taken daily. By default, a minute will be randomly chosen at launch time. Otherwise, the time of the backup is defined by 'Master Secondary Backup Cron Hour' and 'Master Secondary Backup Cron Minute'. Uses standard crontab format (e.g., 30 for minute 30 of the hour).", "required": "optional", - "type": "string", - "calculated": false, - "description": "By default, primary backups of the slave database are taken hourly. However, if you specify a value in this input (e.g., 23 for 11:00 PM), then backups will occur once per day at the specified hour, rather than hourly. 
Example: 23.", + "recipes": [ + "db::do_secondary_backup_schedule_enable" + ], "choice": [ ], - "recipes": [ - "db::do_primary_backup_schedule_enable" - ], - "display_name": "Slave Backup Cron Hour" + "calculated": false, + "type": "string" }, - "db/dns/slave/fqdn": { + "db/backup/secondary/slave/cron/minute": { + "display_name": "Slave Secondary Backup Cron Minute", + "description": "Defines the minute of the hour when the secondary backup will be taken of the slave database. Backups of the slave are taken hourly. By default, a minute will be randomly chosen at launch time. Uses standard crontab format (e.g., 30 for minute 30 of the hour). Uses standard crontab format (e.g., 30 for minute 30 of the hour).", "required": "optional", - "type": "string", - "calculated": false, - "description": "The fully qualified domain name for a slave database server. Example: db-slave.example.com", + "recipes": [ + "db::do_secondary_backup_schedule_enable" + ], "choice": [ ], - "recipes": [ - "db::do_set_dns_slave" - ], - "display_name": "Database Slave FQDN" - }, - "db/force_safety": { - "required": "optional", - "type": "string", "calculated": false, - "description": "Prevents the accidental running of the db::do_force_reset recipe. This recipe will only run if the input variable is overridden and set to \"off\". Example: text:off", - "default": "Override the dropdown and set to \"off\" to really run this recipe", + "type": "string" + }, + "db/dump": { + "display_name": "Import/export settings for database dump file management.", + "type": "hash", "choice": [ - "Override the dropdown and set to \"off\" to really run this recipe" + ], + "calculated": false, + "required": "optional", "recipes": [ - "db::do_force_reset" - ], - "display_name": "Force Reset Safety" + + ] }, - "db/dump/container": { - "required": "required", - "type": "string", - "calculated": false, - "description": "The cloud storage location where the dump file will be saved to or restored from. For Amazon S3, use the bucket name. For Rackspace Cloud Files, use the container name. Example: db_dump_bucket", + "db/dump/location": { + "display_name": "Database Dump Location", + "description": "The location where database dumps for import and export willbe stored temporarily. Set to the desired location, ensuring the filesystemthe folder resides in has enough space.", + "required": "optional", "choice": [ - + "/tmp", + "/mnt/ephemeral", + "/mnt/storage" ], + "default": "/tmp", "recipes": [ "db::do_dump_import", - "db::do_dump_export", - "db::do_dump_schedule_enable" + "db::do_dump_export" ], - "display_name": "Dump Container" + "calculated": false, + "type": "string" }, "db/dump/storage_account_provider": { - "required": "required", - "type": "string", - "calculated": false, + "display_name": "Dump Storage Account Provider", "description": "Location where the dump file will be saved. Used by dump recipes to back up to remote object storage (complete list of supported storage locations is in input dropdown). Example: s3", + "required": "required", "choice": [ "s3", "Cloud_Files", @@ -500,164 +493,188 @@ "db::do_dump_export", "db::do_dump_schedule_enable" ], - "display_name": "Dump Storage Account Provider" - }, - "db": { - "required": "optional", - "type": "hash", "calculated": false, + "type": "string" + }, + "db/dump/storage_account_id": { + "display_name": "Dump Storage Account ID", + "description": "In order to write the dump file to the specified cloud storage location, you need to provide cloud authentication credentials. 
For Amazon S3, use your Amazon access key ID (e.g., cred:AWS_ACCESS_KEY_ID). For Rackspace Cloud Files, use your Rackspace login username (e.g., cred:RACKSPACE_USERNAME). For OpenStack Swift the format is: 'tenantID:username'. Example: cred:AWS_ACCESS_KEY_ID", + "required": "required", + "recipes": [ + "db::do_dump_import", + "db::do_dump_export", + "db::do_dump_schedule_enable" + ], "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/dump/storage_account_secret": { + "display_name": "Dump Storage Account Secret", + "description": "In order to write the dump file to the specified cloud storage location, you need to provide cloud authentication credentials. For Amazon S3, use your AWS secret access key (e.g., cred:AWS_SECRET_ACCESS_KEY). For Rackspace Cloud Files, use your Rackspace account API key (e.g., cred:RACKSPACE_AUTH_KEY). Example: cred:AWS_SECRET_ACCESS_KEY", + "required": "required", "recipes": [ + "db::do_dump_import", + "db::do_dump_export", + "db::do_dump_schedule_enable" + ], + "choice": [ ], - "display_name": "General Database Options" + "calculated": false, + "type": "string" }, - "db/backup/secondary/master/cron/hour": { + "db/dump/storage_account_endpoint": { + "display_name": "Dump Storage Endpoint URL", + "description": "The endpoint URL for the storage cloud. This is used to override the default endpoint or for generic storage clouds such as Swift. Example: http://endpoint_ip:5000/v2.0/tokens", "required": "optional", - "type": "string", - "calculated": false, - "description": "Defines the hour of the day when the secondary backup will be taken of the master database. Backups of the master are taken daily. By default, an hour will be randomly chosen at launch time. Otherwise, the time of the backup is defined by 'Master Secondary Backup Cron Hour' and 'Master Secondary Backup Cron Minute'. Uses standard crontab format (e.g., 23 for 11:00 PM).", + "default": "", + "recipes": [ + "db::do_dump_import", + "db::do_dump_export", + "db::do_dump_schedule_enable" + ], "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/dump/container": { + "display_name": "Dump Container", + "description": "The cloud storage location where the dump file will be saved to or restored from. For Amazon S3, use the bucket name. For Rackspace Cloud Files, use the container name. Example: db_dump_bucket", + "required": "required", "recipes": [ - "db::do_secondary_backup_schedule_enable" + "db::do_dump_import", + "db::do_dump_export", + "db::do_dump_schedule_enable" ], - "display_name": "Master Secondary Backup Cron Hour" - }, - "db/backup/lineage_override": { - "required": "optional", - "type": "string", - "calculated": false, - "description": "If defined, this will override the input defined for 'Backup Lineage' (db/backup/lineage) so that you can restore the database from another backup that has as a different lineage name. The most recently completed snapshots will be used unless a specific timestamp value is specified for 'Restore Timestamp Override' (db/backup/timestamp_override). Although this input allows you to restore from a different set of snapshots, subsequent backups will use 'Backup Lineage' to name the snapshots. Be sure to remove the 'Backup Lineage Override' input after the new master is operational. Example: text:new_db_lineage", "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/dump/prefix": { + "display_name": "Dump Prefix", + "description": "The prefix that will be used to name/locate the backup of a particular database dump. 
Defines the prefix of the dump file name that will be used to name the backup database dump file, along with a timestamp. Example: prod_db_backup", + "required": "required", "recipes": [ - "db::do_init_slave_at_boot", - "db::do_primary_restore_and_become_master", - "db::do_primary_restore", - "db::do_primary_init_slave", - "db::do_secondary_restore_and_become_master", - "db::do_secondary_restore", - "db::do_secondary_init_slave" + "db::do_dump_import", + "db::do_dump_export", + "db::do_dump_schedule_enable" ], - "display_name": "Database Restore Lineage Override" + "choice": [ + + ], + "calculated": false, + "type": "string" }, - "db/backup/lineage": { + "db/dump/database_name": { + "display_name": "Database Schema Name", + "description": "Enter the name of the database name/schema to create/restore a dump from/for. Example: mydbschema", "required": "required", - "type": "string", - "calculated": false, - "description": "The prefix that will be used to name/locate the backup of a particular database.Note: For servers running on Rackspace, this value also indicates the Cloud Files container to use for storing primary backups.If a Cloud Files container with this name does not already exist,the setup process creates one. Example: text:prod_db_lineage", + "recipes": [ + "db::do_dump_import", + "db::do_dump_export", + "db::do_dump_schedule_enable" + ], "choice": [ ], + "calculated": false, + "type": "string" + }, + "db/terminate_safety": { + "display_name": "Terminate Safety", + "description": "Prevents the accidental running of the 'db::do_teminate_server' recipe. This recipe will only run if this input variable is overridden and set to \"off\". Example: text:off", + "type": "string", + "choice": [ + "Override the dropdown and set to \"off\" to really run this recipe" + ], + "default": "Override the dropdown and set to \"off\" to really run this recipe", + "required": "optional", "recipes": [ - "db::do_primary_init_slave", - "db::do_secondary_init_slave", - "db::do_init_slave_at_boot", - "db::do_promote_to_master", - "db::do_primary_restore_and_become_master", - "db::do_secondary_restore_and_become_master", - "db::do_init_and_become_master", - "db::do_primary_backup", - "db::do_primary_restore", - "db::do_primary_backup_schedule_enable", - "db::do_primary_backup_schedule_disable", - "db::do_force_reset", - "db::do_secondary_backup", - "db::do_secondary_restore", - "db::do_secondary_backup_schedule_enable", - "db::do_secondary_backup_schedule_disable" + "db::do_delete_volumes_and_terminate_server" ], - "display_name": "Database Backup Lineage" + "calculated": false }, - "db/admin/user": { - "required": "required", + "db/force_safety": { + "display_name": "Force Reset Safety", + "description": "Prevents the accidental running of the db::do_force_reset recipe. This recipe will only run if the input variable is overridden and set to \"off\". Example: text:off", "type": "string", - "calculated": false, - "description": "The username of the database user with 'admin' privileges. 
Example: cred:DBADMIN_USER.", "choice": [ - + "Override the dropdown and set to \"off\" to really run this recipe" ], + "default": "Override the dropdown and set to \"off\" to really run this recipe", + "required": "optional", "recipes": [ - "db::install_server", - "db::setup_privileges_admin", - "db::do_primary_restore", - "db::do_primary_restore_and_become_master", - "db::do_secondary_restore", - "db::do_secondary_restore_and_become_master" + "db::do_force_reset" + ], + "calculated": false + }, + "db/force_promote": { + "display_name": "Force Promote to Master", + "description": "If true, when promoting a slave to master, ignores making checks and changes to any current master. WARNING: setting this will promote a slave to a master with no replication until a new slave is brought up. Make sure you understand what you are doing before changing this value. Default: false", + "required": "optional", + "default": "false", + "choice": [ + "true", + "false" ], - "display_name": "Database Admin Username" + "recipes": [ + "db::do_promote_to_master" + ], + "calculated": false, + "type": "string" } }, - "maintainer": "RightScale, Inc.", - "description": "This cookbook provides a set of database recipes used by the RightScale Database Manager ServerTemplates. This cookbook does not contain a specific database implementation, but generic recipes that use the Lightweight Resource Provider (LWRP) interface.", - "replacing": { - }, - "platforms": { - "centos": ">= 0.0.0", - "ubuntu": ">= 0.0.0", - "redhat": ">= 0.0.0" - }, - "license": "Copyright RightScale, Inc. All rights reserved.", - "maintainer_email": "support@rightscale.com", - "name": "db", - "version": "13.5.0", - "dependencies": { - "rightscale": ">= 0.0.0", - "sys_firewall": ">= 0.0.0", - "block_device": ">= 0.0.0", - "db_postgres": ">= 0.0.0", - "db_mysql": ">= 0.0.0" + "groupings": { }, "recipes": { - "db::request_appserver_allow": "Sends a request to allow connections from the caller's private IP address to all database servers in the deployment that are tagged with the database:active=true tag. This should be run on an application server before attempting a database connection.", - "db::do_dump_schedule_disable": "Disables the daily run of do_dump_export.", - "db::do_set_dns_slave": "Sets the slave DNS record to the network interface IP.", - "db::do_dump_export": "Creates a dump file and uploads it to a remote object storage (e.g., Amazon S3, Google Cloud Storage, Azure, Softlayer or Rackspace Cloud Files).", - "db::setup_privileges_application": "Adds the username and password for application privileges.", - "db::request_master_allow": "Sends a request to the master database server tagged with rs_dbrepl:master_instance_uuid= to allow connections from the server's private IP address. This script should be run on a slave before it sets up replication.", + "db::default": "Selects and installs database client. It also sets up the provider and version for 'db' resource.", + "db::install_server": "Installs and sets up the packages that are required for database servers. Adds the database:active=true tag to your server, which identifies it as a database server. The tag is used by application servers to identify active databases.", + "db::setup_monitoring": "Installs the collectd plugin for database monitoring support, which is required to enable monitoring and alerting functionality for your servers.", "db::do_primary_backup": { "description": "Creates a primary backup of the database using persistent storage inthe current cloud. 
Backup type depends on cloud and hypervisor type. For clouds with volume snapshots support available, volume backup will be used only if hypervisor is different than KVM. For the clouds without volume snapshots support and for KVM based instances backups are uploaded to ROS container.", "thread": "db_backup" }, - "db::do_secondary_backup_schedule_disable": "Disables db::do_secondary_backup from being run periodically.", - "db::do_primary_backup_schedule_disable": "Disables db::do_primary_backup from being run periodically.", "db::do_primary_restore": "Restores the database from the most recently completed primary backup available in persistent storage of the current cloud.", - "db::do_delete_volumes_and_terminate_server": "Deletes any currently attached volumes from the instance and then terminates the machine.", - "db::request_master_deny": "Sends a request to the master database server tagged with rs_dbrepl:master_instance_uuid= to deny connections from the server's private IP address. This script should be run on a slave when it stops replicating.", - "db::do_init_slave_at_boot": "Initializes the slave server at boot.", - "db::do_primary_init_slave": "Initializes the slave server from the primary backup location. Authentication information provided by inputs is ignored for slave servers.", - "db::do_init_and_become_master": "Initializes the database and tags it as the master database server. Sets DNS. Starts a fresh backup from this master.", - "db::do_appservers_deny": "Denies connections from all application servers in the deployment that are tagged with appserver:active=true tag. This script can be run on a database server to deny connections from all application servers in the deployment.", - "db::do_secondary_restore": "Restores the database from the most recently completed backup available in a secondary location.", "db::do_primary_backup_schedule_enable": "Enables db::do_primary_backup to be run periodically.", - "db::setup_monitoring": "Installs the collectd plugin for database monitoring support, which is required to enable monitoring and alerting functionality for your servers.", - "db::do_promote_to_master": "Promotes a replicating slave to master.", - "db::do_secondary_restore_and_become_master": "Restores the database from a secondary backup location and tags it as the master database server. Sets DNS. Starts a fresh backup from this master.", - "db::request_appserver_deny": "Sends a request to deny connections from the caller's private IP address to all database servers in the deployment that are tagged with the database:active=true tag. 
This should be run on an application server upon decommissioning.",
-    "db::do_dump_import": "Retrieves a dump file from remote object storage (e.g., Amazon S3 Google Cloud Storage, Azure, Softlayer or Rackspace Cloud Files) and imports it to the database server.",
-    "db::do_secondary_backup_schedule_enable": "Enables db::do_secondary_backup to be run periodically.",
-    "db::setup_replication_privileges": "Sets up privileges for replication slave servers.",
-    "db::do_dump_schedule_enable": "Schedules the daily run of do_dump_export.",
+    "db::do_primary_backup_schedule_disable": "Disables db::do_primary_backup from being run periodically.",
+    "db::setup_privileges_admin": "Adds the username and password for 'superuser' privileges.",
+    "db::setup_privileges_application": "Adds the username and password for application privileges.",
     "db::do_secondary_backup": {
       "description": "Creates a backup of the database and uploads it to a secondary cloud storage location, which can be used to migrate your database to a different cloud. For example, you can save a secondary backup to an Amazon S3 bucket or a Rackspace Cloud Files container.",
       "thread": "db_backup"
     },
-    "db::install_server": "Installs and sets up the packages that are required for database servers. Adds the database:active=true tag to your server, which identifies it as a database server. The tag is used by application servers to identify active databases.",
-    "db::default": "Selects and installs database client. It also sets up the provider and version for 'db' resource.",
-    "db::do_primary_restore_and_become_master": "Restores the database and tags it as the master database server. Sets DNS. Starts a fresh backup from this master.",
-    "db::setup_privileges_admin": "Adds the username and password for 'superuser' privileges.",
-    "db::do_appservers_allow": "Allows connections from all application servers in the deployment that are tagged with appserver:active=true tag. This script should be run on a database server so that it will accept connections from related application servers.",
+    "db::do_secondary_restore": "Restores the database from the most recently completed backup available in a secondary location.",
+    "db::do_secondary_backup_schedule_enable": "Enables db::do_secondary_backup to be run periodically.",
+    "db::do_secondary_backup_schedule_disable": "Disables db::do_secondary_backup from being run periodically.",
     "db::do_force_reset": "Resets the database back to a pristine state. WARNING: Execution of this script will delete any data in your database!",
+    "db::do_dump_export": "Creates a dump file and uploads it to a remote object storage (e.g., Amazon S3, Google Cloud Storage, Azure, Softlayer or Rackspace Cloud Files).",
+    "db::do_dump_import": "Retrieves a dump file from remote object storage (e.g., Amazon S3, Google Cloud Storage, Azure, Softlayer or Rackspace Cloud Files) and imports it to the database server.",
+    "db::do_dump_schedule_enable": "Schedules the daily run of do_dump_export.",
+    "db::do_dump_schedule_disable": "Disables the daily run of do_dump_export.",
+    "db::do_appservers_allow": "Allows connections from all application servers in the deployment that are tagged with appserver:active=true tag. This script should be run on a database server so that it will accept connections from related application servers.",
+    "db::do_appservers_deny": "Denies connections from all application servers in the deployment that are tagged with appserver:active=true tag. 
This script can be run on a database server to deny connections from all application servers in the deployment.",
+    "db::request_appserver_allow": "Sends a request to allow connections from the caller's private IP address to all database servers in the deployment that are tagged with the database:active=true tag. This should be run on an application server before attempting a database connection.",
+    "db::request_appserver_deny": "Sends a request to deny connections from the caller's private IP address to all database servers in the deployment that are tagged with the database:active=true tag. This should be run on an application server upon decommissioning.",
+    "db::do_init_and_become_master": "Initializes the database and tags it as the master database server. Sets DNS. Starts a fresh backup from this master.",
+    "db::do_primary_restore_and_become_master": "Restores the database and tags it as the master database server. Sets DNS. Starts a fresh backup from this master.",
+    "db::do_secondary_restore_and_become_master": "Restores the database from a secondary backup location and tags it as the master database server. Sets DNS. Starts a fresh backup from this master.",
+    "db::do_primary_init_slave": "Initializes the slave server from the primary backup location. Authentication information provided by inputs is ignored for slave servers.",
+    "db::do_secondary_init_slave": "Initializes the slave server from the secondary backup location. Authentication information provided by inputs is ignored for slave servers.",
+    "db::do_init_slave_at_boot": "Initializes the slave server at boot.",
+    "db::do_set_dns_slave": "Sets the slave DNS record to the network interface IP.",
+    "db::do_promote_to_master": "Promotes a replicating slave to master.",
+    "db::setup_replication_privileges": "Sets up privileges for replication slave servers.",
+    "db::request_master_allow": "Sends a request to the master database server tagged with rs_dbrepl:master_instance_uuid= to allow connections from the server's private IP address. This script should be run on a slave before it sets up replication.",
+    "db::request_master_deny": "Sends a request to the master database server tagged with rs_dbrepl:master_instance_uuid= to deny connections from the server's private IP address. This script should be run on a slave when it stops replicating.",
     "db::handle_demote_master": "Remote recipe executed by do_promote_to_master. DO NOT RUN.",
-    "db::do_secondary_init_slave": "Initializes the slave server from the secondary backup location. Authentication information provided by inputs is ignored for slave servers."
-  },
-  "suggestions": {
-  },
-  "conflicting": {
-  }
+    "db::do_delete_volumes_and_terminate_server": "Deletes any currently attached volumes from the instance and then terminates the machine."
+  },
+  "version": "13.5.0"
 }
\ No newline at end of file
diff --git a/cookbooks/db/metadata.rb b/cookbooks/db/metadata.rb
index ed6fa4126..2c3488ade 100644
--- a/cookbooks/db/metadata.rb
+++ b/cookbooks/db/metadata.rb
@@ -554,6 +554,23 @@
   :display_name => "Import/export settings for database dump file management.",
   :type => "hash"
 
+attribute 'db/dump/location',
+  :display_name => 'Database Dump Location',
+  :description => 'The location where database dumps for import and export will ' +
+    'be stored temporarily. Set to the desired location, ensuring the filesystem ' +
+    'the folder resides in has enough space.',
+  :required => 'optional',
+  :choice => [
+    '/tmp',
+    '/mnt/ephemeral',
+    '/mnt/storage'
+  ],
+  :default => '/tmp',
+  :recipes => [
+    "db::do_dump_import",
+    "db::do_dump_export"
+  ]
+
 attribute "db/dump/storage_account_provider",
   :display_name => "Dump Storage Account Provider",
   :description =>
diff --git a/cookbooks/db/recipes/do_dump_export.rb b/cookbooks/db/recipes/do_dump_export.rb
index 8140a309b..5aa323174 100644
--- a/cookbooks/db/recipes/do_dump_export.rb
+++ b/cookbooks/db/recipes/do_dump_export.rb
@@ -10,7 +10,7 @@
 
 # Set up all db/dump/* attributes
 dumpfilename = node[:db][:dump][:prefix] + "-" + Time.now.strftime("%Y%m%d%H%M") + ".gz"
-dumpfilepath = "/tmp/#{dumpfilename}"
+dumpfilepath = "#{node[:db][:dump][:location]}/#{dumpfilename}"
 
 databasename = node[:db][:dump][:database_name]
diff --git a/cookbooks/db/recipes/do_dump_import.rb b/cookbooks/db/recipes/do_dump_import.rb
index 6f83f9fbc..f3468d043 100644
--- a/cookbooks/db/recipes/do_dump_import.rb
+++ b/cookbooks/db/recipes/do_dump_import.rb
@@ -28,7 +28,7 @@ class Chef::Resource::RubyBlock
 
   db_name = node[:db][:dump][:database_name]
   prefix = node[:db][:dump][:prefix]
-  dumpfilepath_without_extension = "/tmp/" + prefix
+  dumpfilepath_without_extension = "#{node[:db][:dump][:location]}/" + prefix
   container = node[:db][:dump][:container]
   cloud = node[:db][:dump][:storage_account_provider]
   command_to_execute = "/opt/rightscale/sandbox/bin/ros_util get" +