diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index d0ee22b625..2ea1da6acb 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -573,6 +573,12 @@ namespace eosio { namespace chain { 3170011, "The signer returned no valid block signatures" ) FC_DECLARE_DERIVED_EXCEPTION( unsupported_multiple_block_signatures, producer_exception, 3170012, "The signer returned multiple signatures but that is not supported" ) + FC_DECLARE_DERIVED_EXCEPTION( duplicate_snapshot_request, producer_exception, + 3170013, "Snapshot has been already scheduled with specified parameters" ) + FC_DECLARE_DERIVED_EXCEPTION( snapshot_request_not_found, producer_exception, + 3170014, "Snapshot request not found" ) + FC_DECLARE_DERIVED_EXCEPTION( invalid_snapshot_request, producer_exception, + 3170015, "Invalid snapshot request" ) FC_DECLARE_DERIVED_EXCEPTION( reversible_blocks_exception, chain_exception, 3180000, "Reversible Blocks exception" ) diff --git a/plugins/producer_api_plugin/producer.swagger.yaml b/plugins/producer_api_plugin/producer.swagger.yaml index c76d4c95d0..de6d334b38 100644 --- a/plugins/producer_api_plugin/producer.swagger.yaml +++ b/plugins/producer_api_plugin/producer.swagger.yaml @@ -37,14 +37,13 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/OK' + $ref: "#/components/schemas/OK" "400": description: client error content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/resume: post: summary: resume @@ -56,14 +55,13 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/OK' + $ref: "#/components/schemas/OK" "400": description: client error content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/paused: post: summary: paused @@ -83,8 +81,7 @@ paths: content: 
application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/get_runtime_options: post: summary: get_runtime_options @@ -96,14 +93,13 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Runtime_Options' + $ref: "#/components/schemas/Runtime_Options" "400": description: client error content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/update_runtime_options: post: summary: update_runtime_options @@ -113,21 +109,20 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Runtime_Options' + $ref: "#/components/schemas/Runtime_Options" responses: "201": description: OK content: application/json: schema: - $ref: '#/components/schemas/OK' + $ref: "#/components/schemas/OK" "400": description: client error content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/get_greylist: post: summary: get_greylist @@ -151,8 +146,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/add_greylist_accounts: post: summary: add_greylist_accounts @@ -164,25 +158,24 @@ paths: schema: type: object properties: - accounts: - type: array - description: List of account names to add - items: - $ref: "https://docs.eosnetwork.com/openapi/v2.0/Name.yaml" + accounts: + type: array + description: List of account names to add + items: + $ref: "https://docs.eosnetwork.com/openapi/v2.0/Name.yaml" responses: "201": description: OK content: application/json: schema: - $ref: '#/components/schemas/OK' + $ref: "#/components/schemas/OK" "400": description: client error content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/remove_greylist_accounts: post: summary: remove_greylist_accounts @@ -205,14 +198,13 @@ paths: content: application/json: 
schema: - $ref: '#/components/schemas/OK' + $ref: "#/components/schemas/OK" "400": description: client error content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/get_whitelist_blacklist: post: summary: get_whitelist_blacklist @@ -260,8 +252,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/set_whitelist_blacklist: post: summary: set_whitelist_blacklist @@ -308,14 +299,13 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/OK' + $ref: "#/components/schemas/OK" "400": description: client error content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/create_snapshot: post: summary: create_snapshot @@ -352,8 +342,180 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Error' + $ref: "#/components/schemas/Error" + /producer/schedule_snapshot: + post: + summary: schedule_snapshot + description: Submits a request to generate a schedule for automated snapshot with given parameters. If request body is empty, tries to execute snapshot immediately. Returns error when unable to create snapshot. 
+ operationId: schedule_snapshot + requestBody: + content: + application/json: + schema: + type: object + properties: + block_spacing: + type: integer + description: Generate snapshot every block_spacing blocks + start_block_num: + type: integer + description: Block number at which schedule starts + example: 5102 + end_block_num: + type: integer + description: Block number at which schedule ends + example: 15102 + snapshot_description: + type: string + description: Description of the snapshot + example: Daily snapshot + responses: + "201": + description: OK + content: + application/json: + schema: + type: object + properties: + snapshot_request_id: + type: integer + description: Unique id identifying current snapshot request + block_spacing: + type: integer + description: Generate snapshot every block_spacing blocks + start_block_num: + type: integer + description: Block number at which schedule starts + example: 5102 + end_block_num: + type: integer + description: Block number at which schedule ends + example: 15102 + snapshot_description: + type: string + description: Description of the snapshot + example: Daily snapshot + "400": + description: client error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + /producer/get_snapshot_requests: + post: + summary: get_snapshot_requests + description: Returns a list of scheduled snapshots + operationId: get_snapshot_requests + responses: + "201": + description: OK + content: + application/json: + schema: + type: object + properties: + snapshot_requests: + type: array + description: An array of scheduled snapshots + items: + type: object + properties: + snapshot_request_id: + type: integer + description: Unique id identifying current snapshot request + block_spacing: + type: integer + description: Generate snapshot every block_spacing blocks + start_block_num: + type: integer + description: Block number at which schedule starts + example: 5102 + end_block_num: + type: integer + description: Block 
+ number at which schedule ends + example: 15102 + snapshot_description: + type: string + description: Description of the snapshot + example: Daily snapshot + pending_snapshots: + type: array + description: List of pending snapshots + items: + type: object + properties: + head_block_id: + $ref: "https://docs.eosnetwork.com/openapi/v2.0/Sha256.yaml" + head_block_num: + type: integer + description: Highest block number on the chain + example: 5102 + head_block_time: + type: string + description: Highest block unix timestamp + example: 2020-11-16T00:00:00.000 + version: + type: integer + description: version number + example: 6 + snapshot_name: + type: string + description: The path and file name of the snapshot + example: /home/me/nodes/node-name/snapshots/snapshot-0000999f99999f9f999f99f99ff9999f999f9fff99ff99ffff9f9f9fff9f9999.bin + "400": + description: client error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" + /producer/unschedule_snapshot: + post: + summary: unschedule_snapshot + description: Submits a request to remove the recurring snapshot identified by id from the schedule. Returns error when unable to unschedule. 
+ operationId: unschedule_snapshot + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + snapshot_request_id: + type: integer + description: snapshot id + responses: + "201": + description: OK + content: + application/json: + schema: + type: object + properties: + snapshot_request_id: + type: integer + description: Unique id identifying current snapshot request + block_spacing: + type: integer + description: Generate snapshot every block_spacing blocks + start_block_num: + type: integer + description: Block number at which schedule starts + example: 5102 + end_block_num: + type: integer + description: Block number at which schedule ends + example: 15102 + snapshot_description: + type: string + description: Description of the snapshot + example: Daily snapshot + "400": + description: client error + content: + application/json: + schema: + $ref: "#/components/schemas/Error" /producer/get_integrity_hash: post: summary: get_integrity_hash @@ -377,8 +539,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/schedule_protocol_feature_activations: post: summary: schedule_protocol_feature_activations @@ -401,14 +562,13 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/OK' + $ref: "#/components/schemas/OK" "400": description: client error content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" /producer/get_supported_protocol_features: post: summary: get_supported_protocol_features @@ -478,9 +638,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Error' - - + $ref: "#/components/schemas/Error" /producer/get_account_ram_corrections: post: summary: get_account_ram_corrections @@ -531,8 +689,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" 
/producer/get_unapplied_transactions: post: summary: get_unapplied_transactions @@ -612,8 +769,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Error' - + $ref: "#/components/schemas/Error" components: securitySchemes: {} schemas: diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index 96fdaaf025..14b6b19006 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -82,6 +82,11 @@ struct async_result_visitor : public fc::visitor { api_handle.call_name(std::move(params)); \ eosio::detail::producer_api_plugin_response result{"ok"}; +#define INVOKE_V_R_II(api_handle, call_name, in_param) \ + auto params = parse_params(body);\ + api_handle.call_name(std::move(params)); \ + eosio::detail::producer_api_plugin_response result{"ok"}; + #define INVOKE_V_V(api_handle, call_name) \ body = parse_params(body); \ api_handle.call_name(); \ @@ -131,6 +136,12 @@ void producer_api_plugin::plugin_startup() { INVOKE_V_R(producer, set_whitelist_blacklist, producer_plugin::whitelist_blacklist), 201), CALL_ASYNC(producer, producer, create_snapshot, producer_plugin::snapshot_information, INVOKE_R_V_ASYNC(producer, create_snapshot), 201), + CALL_WITH_400(producer, producer, schedule_snapshot, + INVOKE_V_R_II(producer, schedule_snapshot, producer_plugin::snapshot_request_information), 201), + CALL_WITH_400(producer, producer, get_snapshot_requests, + INVOKE_R_V(producer, get_snapshot_requests), 201), + CALL_WITH_400(producer, producer, unschedule_snapshot, + INVOKE_V_R(producer, unschedule_snapshot, producer_plugin::snapshot_request_id_information), 201), CALL_WITH_400(producer, producer, get_integrity_hash, INVOKE_R_V(producer, get_integrity_hash), 201), CALL_WITH_400(producer, producer, schedule_protocol_feature_activations, diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp 
b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index aed1587064..d59a7c4643 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -82,6 +82,26 @@ class producer_plugin : public appbase::plugin { std::string snapshot_name; }; + struct snapshot_request_information { + uint32_t block_spacing = 0; + uint32_t start_block_num = 0; + uint32_t end_block_num = 0; + std::string snapshot_description = ""; + + }; + + struct snapshot_request_id_information { + uint32_t snapshot_request_id = 0; + }; + + struct snapshot_schedule_information : public snapshot_request_id_information, public snapshot_request_information { + std::optional> pending_snapshots; + }; + + struct get_snapshot_requests_result { + std::vector snapshot_requests; + }; + struct scheduled_protocol_feature_activations { std::vector protocol_features_to_activate; }; @@ -137,7 +157,11 @@ class producer_plugin : public appbase::plugin { void set_whitelist_blacklist(const whitelist_blacklist& params); integrity_hash_information get_integrity_hash() const; + void create_snapshot(next_function next); + void schedule_snapshot(const snapshot_request_information& schedule); + void unschedule_snapshot(const snapshot_request_id_information& schedule); + get_snapshot_requests_result get_snapshot_requests() const; scheduled_protocol_feature_activations get_scheduled_protocol_feature_activations() const; void schedule_protocol_feature_activations(const scheduled_protocol_feature_activations& schedule); @@ -193,6 +217,10 @@ FC_REFLECT(eosio::producer_plugin::greylist_params, (accounts)); FC_REFLECT(eosio::producer_plugin::whitelist_blacklist, (actor_whitelist)(actor_blacklist)(contract_whitelist)(contract_blacklist)(action_blacklist)(key_blacklist) ) FC_REFLECT(eosio::producer_plugin::integrity_hash_information, (head_block_id)(integrity_hash)) 
FC_REFLECT(eosio::producer_plugin::snapshot_information, (head_block_id)(head_block_num)(head_block_time)(version)(snapshot_name)) +FC_REFLECT(eosio::producer_plugin::snapshot_request_information, (block_spacing)(start_block_num)(end_block_num)(snapshot_description)) +FC_REFLECT(eosio::producer_plugin::snapshot_request_id_information, (snapshot_request_id)) +FC_REFLECT(eosio::producer_plugin::get_snapshot_requests_result, (snapshot_requests)) +FC_REFLECT_DERIVED(eosio::producer_plugin::snapshot_schedule_information, (eosio::producer_plugin::snapshot_request_id_information)(eosio::producer_plugin::snapshot_request_information), (pending_snapshots)) FC_REFLECT(eosio::producer_plugin::scheduled_protocol_feature_activations, (protocol_features_to_activate)) FC_REFLECT(eosio::producer_plugin::get_supported_protocol_features_params, (exclude_disabled)(exclude_unactivatable)) FC_REFLECT(eosio::producer_plugin::get_account_ram_corrections_params, (lower_bound)(upper_bound)(limit)(reverse)) diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/snapshot_db_json.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/snapshot_db_json.hpp new file mode 100644 index 0000000000..db9e84837a --- /dev/null +++ b/plugins/producer_plugin/include/eosio/producer_plugin/snapshot_db_json.hpp @@ -0,0 +1,90 @@ +#pragma once + +#include +#include + +#include + +#include +#include +#include + +namespace eosio { + +/// this class designed to serialize/deserialize snapshot schedule to a filesystem so it can be restored after restart +class snapshot_db_json { +public: + snapshot_db_json() = default; + ~snapshot_db_json() = default; + + void set_path(bfs::path path) { + db_path = std::move(path); + } + + bfs::path get_json_path() const { + return db_path / "snapshot-schedule.json"; + } + + const snapshot_db_json& operator>>(std::vector& sr) { + boost::property_tree::ptree root; + + try { + std::ifstream file(get_json_path().string()); + 
file.exceptions(std::istream::failbit|std::istream::eofbit); + boost::property_tree::read_json(file, root); + + // parse ptree + for(boost::property_tree::ptree::value_type& req: root.get_child("snapshot_requests")) { + producer_plugin::snapshot_schedule_information ssi; + ssi.snapshot_request_id = req.second.get("snapshot_request_id"); + ssi.snapshot_description = req.second.get("snapshot_description"); + ssi.block_spacing = req.second.get("block_spacing"); + ssi.start_block_num = req.second.get("start_block_num"); + ssi.end_block_num = req.second.get("end_block_num"); + sr.push_back(ssi); + } + } + catch (std::ifstream::failure & e) { + elog( "unable to restore snapshots schedule from filesystem ${jsonpath}, details: ${details}", + ("jsonpath", get_json_path().string()) ("details",e.what()) ); + appbase::app().quit(); + } + + return *this; + } + + const snapshot_db_json& operator<<(std::vector& sr) const { + boost::property_tree::ptree root; + boost::property_tree::ptree node_srs; + + for(const auto& key: sr) { + boost::property_tree::ptree node; + node.put("snapshot_request_id", key.snapshot_request_id); + node.put("snapshot_description", key.snapshot_description); + node.put("block_spacing", key.block_spacing); + node.put("start_block_num", key.start_block_num); + node.put("end_block_num", key.end_block_num); + node_srs.push_back(std::make_pair("", node)); + } + + root.push_back(std::make_pair("snapshot_requests", node_srs)); + + try { + std::ofstream file(get_json_path().string()); + file.exceptions(std::istream::failbit|std::istream::eofbit); + boost::property_tree::write_json(file, root); + } + catch (std::ofstream::failure & e) { + elog( "unable to store snapshots schedule to filesystem to ${jsonpath}, details: ${details}", + ("jsonpath", get_json_path().string()) ("details", e.what()) ); + appbase::app().quit(); + } + + return *this; + } + +private: + bfs::path db_path; +}; + +}// namespace eosio diff --git 
a/plugins/producer_plugin/include/eosio/producer_plugin/snapshot_scheduler.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/snapshot_scheduler.hpp new file mode 100644 index 0000000000..53f17d5e0f --- /dev/null +++ b/plugins/producer_plugin/include/eosio/producer_plugin/snapshot_scheduler.hpp @@ -0,0 +1,205 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace eosio { + +namespace bmi = boost::multi_index; + +class snapshot_scheduler { +private: + struct by_snapshot_id; + struct by_snapshot_value; + struct as_vector; + + using snapshot_requests = bmi::multi_index_container< + producer_plugin::snapshot_schedule_information, + indexed_by< + bmi::hashed_unique, BOOST_MULTI_INDEX_MEMBER(producer_plugin::snapshot_request_id_information, uint32_t, snapshot_request_id)>, + bmi::random_access>, + bmi::ordered_unique, + composite_key>>>; + snapshot_requests _snapshot_requests; + snapshot_db_json _snapshot_db; + uint32_t _snapshot_id = 0; + uint32_t _inflight_sid = 0; + std::function)> _create_snapshot; + + void x_serialize() { + auto& vec = _snapshot_requests.get(); + std::vector sr(vec.begin(), vec.end()); + _snapshot_db << sr; + } + +public: + snapshot_scheduler() = default; + + // snapshot_scheduler_listener + void on_start_block(uint32_t height) { + bool serialize_needed = false; + bool snapshot_executed = false; + + auto execute_snapshot_with_log = [this, height, &snapshot_executed](const auto & req) { + // one snapshot per height + if (!snapshot_executed) { + dlog("snapshot scheduler creating a snapshot from the request [start_block_num:${start_block_num}, end_block_num=${end_block_num}, block_spacing=${block_spacing}], height=${height}", + ("start_block_num", req.start_block_num) + ("end_block_num", req.end_block_num) + ("block_spacing", req.block_spacing) + ("height", height)); + + execute_snapshot(req.snapshot_request_id); + snapshot_executed = 
true; + } + }; + + for(const auto& req: _snapshot_requests.get<0>()) { + // -1 since it's called from start block + bool recurring_snapshot = req.block_spacing && (height >= req.start_block_num + 1) && (!((height - req.start_block_num - 1) % req.block_spacing)); + bool onetime_snapshot = (!req.block_spacing) && (height == req.start_block_num + 1); + + // assume "asap" for snapshot with missed/zero start, it can have spacing + if(!req.start_block_num) { + // update start_block_num with current height only if this is recurring + // if non recurring, will be executed and unscheduled + if (req.block_spacing && height) { + auto& snapshot_by_id = _snapshot_requests.get(); + auto it = snapshot_by_id.find(req.snapshot_request_id); + _snapshot_requests.modify(it, [&height](auto& p) { p.start_block_num = height - 1; }); + serialize_needed = true; + } + execute_snapshot_with_log(req); + } else if(recurring_snapshot || onetime_snapshot) { + execute_snapshot_with_log(req); + } + + // cleanup - remove expired (or invalid) request + if((!req.start_block_num && !req.block_spacing) || + (!req.block_spacing && height >= (req.start_block_num + 1)) || + (req.end_block_num > 0 && height >= (req.end_block_num + 1))) { + unschedule_snapshot(req.snapshot_request_id); + } + } + + // store db to filesystem + if (serialize_needed) x_serialize(); + } + + // snapshot_scheduler_handler + void schedule_snapshot(const producer_plugin::snapshot_request_information& sri) { + auto& snapshot_by_value = _snapshot_requests.get(); + auto existing = snapshot_by_value.find(std::make_tuple(sri.block_spacing, sri.start_block_num, sri.end_block_num)); + EOS_ASSERT(existing == snapshot_by_value.end(), chain::duplicate_snapshot_request, "Duplicate snapshot request"); + + if(sri.end_block_num > 0) { + // if "end" is specified, it should be greater than start + EOS_ASSERT(sri.start_block_num <= sri.end_block_num, chain::invalid_snapshot_request, "End block number should be greater or equal to start block 
number"); + // if also block_spacing specified, check it + if(sri.block_spacing > 0) { + EOS_ASSERT(sri.start_block_num + sri.block_spacing <= sri.end_block_num, chain::invalid_snapshot_request, "Block spacing exceeds defined by start and end range"); + } + } + + _snapshot_requests.emplace(producer_plugin::snapshot_schedule_information {{_snapshot_id++},{sri.block_spacing, sri.start_block_num, sri.end_block_num, sri.snapshot_description},{}}); + x_serialize(); + } + + virtual void unschedule_snapshot(uint32_t sri) { + auto& snapshot_by_id = _snapshot_requests.get(); + auto existing = snapshot_by_id.find(sri); + EOS_ASSERT(existing != snapshot_by_id.end(), chain::snapshot_request_not_found, "Snapshot request not found"); + _snapshot_requests.erase(existing); + x_serialize(); + } + + virtual producer_plugin::get_snapshot_requests_result get_snapshot_requests() { + producer_plugin::get_snapshot_requests_result result; + auto& asvector = _snapshot_requests.get(); + result.snapshot_requests.reserve(asvector.size()); + result.snapshot_requests.insert(result.snapshot_requests.begin(), asvector.begin(), asvector.end()); + return result; + } + + // initialize with storage + void set_db_path(bfs::path db_path) { + _snapshot_db.set_path(std::move(db_path)); + // init from db + if(fc::exists(_snapshot_db.get_json_path())) { + std::vector sr; + _snapshot_db >> sr; + // if db read succeeded, clear/load + _snapshot_requests.get().clear(); + _snapshot_requests.insert(sr.begin(), sr.end()); + } + } + + // add pending snapshot info to inflight snapshot request + void add_pending_snapshot_info(const producer_plugin::snapshot_information & si) { + auto& snapshot_by_id = _snapshot_requests.get(); + auto snapshot_req = snapshot_by_id.find(_inflight_sid); + if (snapshot_req != snapshot_by_id.end()) { + _snapshot_requests.modify(snapshot_req, [&si](auto& p) { + if (!p.pending_snapshots) { + p.pending_snapshots = std::vector(); + } + p.pending_snapshots->emplace_back(si); + }); + } + } + + 
// snapshot executor + void set_create_snapshot_fn(std::function)> fn) { + _create_snapshot = std::move(fn); + } + + void execute_snapshot(uint32_t srid) { + _inflight_sid = srid; + auto next = [srid, this](const std::variant& result) { + if(std::holds_alternative(result)) { + try { + std::get(result)->dynamic_rethrow_exception(); + } catch(const fc::exception& e) { + elog( "snapshot creation error: ${details}", ("details",e.to_detail_string()) ); + appbase::app().quit(); + } catch(const std::exception& e) { + elog( "snapshot creation error: ${details}", ("details",e.what()) ); + appbase::app().quit(); + } + } else { + // success, snapshot finalized + auto snapshot_info = std::get(result); + auto& snapshot_by_id = _snapshot_requests.get(); + auto snapshot_req = snapshot_by_id.find(srid); + + if (snapshot_req != snapshot_by_id.end()) { + if (auto pending = snapshot_req->pending_snapshots; pending) { + auto it = std::remove_if(pending->begin(), pending->end(), [&snapshot_info](const producer_plugin::snapshot_information & s){ return s.head_block_num <= snapshot_info.head_block_num; }); + pending->erase(it, pending->end()); + _snapshot_requests.modify(snapshot_req, [&pending](auto& p) { + p.pending_snapshots = std::move(pending); + }); + } + } + } + }; + _create_snapshot(next); + } +}; +}// namespace eosio diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index f0195bc0f6..fc4b7370f8 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include #include #include @@ -26,6 +27,7 @@ #include #include #include +#include #include namespace bmi = boost::multi_index; @@ -385,6 +387,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _accepted_block_connection; std::optional _accepted_block_header_connection; std::optional _irreversible_block_connection; + std::optional _block_start_connection; 
producer_plugin_metrics _metrics; @@ -406,6 +409,9 @@ class producer_plugin_impl : public std::enable_shared_from_this next, const fc::time_point& read_window_start_time); - + void consider_new_watermark( account_name producer, uint32_t block_num, block_timestamp_type timestamp) { auto itr = _producer_watermarks.find( producer ); if( itr != _producer_watermarks.end() ) { @@ -816,7 +822,8 @@ void new_chain_banner(const eosio::chain::controller& db) } producer_plugin::producer_plugin() - : my(new producer_plugin_impl(app().get_io_service())){ + : my(new producer_plugin_impl(app().get_io_service())) + { } producer_plugin::~producer_plugin() {} @@ -1171,6 +1178,8 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ } } + my->_snapshot_scheduler.set_db_path(my->_snapshots_dir); + my->_snapshot_scheduler.set_create_snapshot_fn([this](producer_plugin::next_function next){create_snapshot(next);}); } FC_LOG_AND_RETHROW() } void producer_plugin::plugin_startup() @@ -1197,6 +1206,7 @@ void producer_plugin::plugin_startup() my->_accepted_block_connection.emplace(chain.accepted_block.connect( [this]( const auto& bsp ){ my->on_block( bsp ); } )); my->_accepted_block_header_connection.emplace(chain.accepted_block_header.connect( [this]( const auto& bsp ){ my->on_block_header( bsp ); } )); my->_irreversible_block_connection.emplace(chain.irreversible_block.connect( [this]( const auto& bsp ){ my->on_irreversible_block( bsp->block ); } )); + my->_block_start_connection.emplace(chain.block_start.connect( [this]( uint32_t bs ){ my->_snapshot_scheduler.on_start_block(bs); } )); const auto lib_num = chain.last_irreversible_block_num(); const auto lib = chain.fetch_block_by_number(lib_num); @@ -1508,12 +1518,27 @@ void producer_plugin::create_snapshot(producer_plugin::next_function_pending_snapshot_index.emplace(head_id, next, pending_path.generic_string(), snapshot_path.generic_string()); + my->_snapshot_scheduler.add_pending_snapshot_info( 
producer_plugin::snapshot_information{head_id, head_block_num, head_block_time, chain_snapshot_header::current_version, pending_path.generic_string()} ); } CATCH_AND_CALL (next); } } +void producer_plugin::schedule_snapshot(const snapshot_request_information& sri) +{ + my->_snapshot_scheduler.schedule_snapshot(sri); +} + +void producer_plugin::unschedule_snapshot(const snapshot_request_id_information& sri) +{ + my->_snapshot_scheduler.unschedule_snapshot(sri.snapshot_request_id); +} + +producer_plugin::get_snapshot_requests_result producer_plugin::get_snapshot_requests() const +{ + return my->_snapshot_scheduler.get_snapshot_requests(); +} + producer_plugin::scheduled_protocol_feature_activations producer_plugin::get_scheduled_protocol_feature_activations()const { return {my->_protocol_features_to_activate}; diff --git a/plugins/producer_plugin/test/CMakeLists.txt b/plugins/producer_plugin/test/CMakeLists.txt index eee8bd488d..e44370f19d 100644 --- a/plugins/producer_plugin/test/CMakeLists.txt +++ b/plugins/producer_plugin/test/CMakeLists.txt @@ -13,6 +13,12 @@ target_link_libraries( test_options producer_plugin eosio_testing ) add_test(NAME test_options COMMAND plugins/producer_plugin/test/test_options WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_executable( test_snapshot_scheduler test_snapshot_scheduler.cpp ) +target_link_libraries( test_snapshot_scheduler producer_plugin eosio_testing ) + +add_test(NAME test_snapshot_scheduler COMMAND plugins/producer_plugin/test/test_snapshot_scheduler WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + add_executable( test_read_only_trx test_read_only_trx.cpp ) target_link_libraries( test_read_only_trx producer_plugin eosio_testing ) -add_test(NAME test_read_only_trx COMMAND plugins/producer_plugin/test/test_read_only_trx WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + +add_test(NAME test_read_only_trx COMMAND plugins/producer_plugin/test/test_read_only_trx WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) \ No newline at end of file diff --git 
a/plugins/producer_plugin/test/test_snapshot_scheduler.cpp b/plugins/producer_plugin/test/test_snapshot_scheduler.cpp new file mode 100644 index 0000000000..eab3995a32 --- /dev/null +++ b/plugins/producer_plugin/test/test_snapshot_scheduler.cpp @@ -0,0 +1,143 @@ +#define BOOST_TEST_MODULE snapshot_scheduler +#include + +#include +#include +#include +#include +namespace { + +using namespace eosio; +using namespace eosio::chain; + +BOOST_AUTO_TEST_SUITE(snapshot_scheduler_test) + +BOOST_AUTO_TEST_CASE(snapshot_scheduler_test) { + + fc::logger log; + producer_plugin scheduler; + + { + // add/remove test + producer_plugin::snapshot_request_information sri1 = {.block_spacing = 100, .start_block_num = 5000, .end_block_num = 10000, .snapshot_description = "Example of recurring snapshot"}; + producer_plugin::snapshot_request_information sri2 = {.block_spacing = 0, .start_block_num = 5200, .end_block_num = 5200, .snapshot_description = "Example of one-time snapshot"}; + + scheduler.schedule_snapshot(sri1); + scheduler.schedule_snapshot(sri2); + + BOOST_CHECK_EQUAL(2, scheduler.get_snapshot_requests().snapshot_requests.size()); + + BOOST_CHECK_EXCEPTION(scheduler.schedule_snapshot(sri1), duplicate_snapshot_request, [](const fc::assert_exception& e) { + return e.to_detail_string().find("Duplicate snapshot request") != std::string::npos; + }); + + producer_plugin::snapshot_request_id_information sri_delete_1 = {.snapshot_request_id = 0}; + scheduler.unschedule_snapshot(sri_delete_1); + + BOOST_CHECK_EQUAL(1, scheduler.get_snapshot_requests().snapshot_requests.size()); + + producer_plugin::snapshot_request_id_information sri_delete_none = {.snapshot_request_id = 2}; + BOOST_CHECK_EXCEPTION(scheduler.unschedule_snapshot(sri_delete_none), snapshot_request_not_found, [](const fc::assert_exception& e) { + return e.to_detail_string().find("Snapshot request not found") != std::string::npos; + }); + + producer_plugin::snapshot_request_id_information sri_delete_2 = 
{.snapshot_request_id = 1}; + scheduler.unschedule_snapshot(sri_delete_2); + + BOOST_CHECK_EQUAL(0, scheduler.get_snapshot_requests().snapshot_requests.size()); + + producer_plugin::snapshot_request_information sri_large_spacing = {.block_spacing = 1000, .start_block_num = 5000, .end_block_num = 5010}; + BOOST_CHECK_EXCEPTION(scheduler.schedule_snapshot(sri_large_spacing), invalid_snapshot_request, [](const fc::assert_exception& e) { + return e.to_detail_string().find("Block spacing exceeds defined by start and end range") != std::string::npos; + }); + + producer_plugin::snapshot_request_information sri_start_end = {.block_spacing = 1000, .start_block_num = 50000, .end_block_num = 5000}; + BOOST_CHECK_EXCEPTION(scheduler.schedule_snapshot(sri_start_end), invalid_snapshot_request, [](const fc::assert_exception& e) { + return e.to_detail_string().find("End block number should be greater or equal to start block number") != std::string::npos; + }); + } + { + boost::filesystem::path temp = boost::filesystem::temp_directory_path() / boost::filesystem::unique_path(); + + try { + std::promise> plugin_promise; + std::future> plugin_fut = plugin_promise.get_future(); + + std::thread app_thread([&]() { + fc::logger::get(DEFAULT_LOGGER).set_log_level(fc::log_level::debug); + std::vector argv = + {"test", "--data-dir", temp.c_str(), "--config-dir", temp.c_str(), + "-p", "eosio", "-e", "--max-transaction-time", "475", "--disable-subjective-billing=true"}; + appbase::app().initialize(argv.size(), (char**) &argv[0]); + appbase::app().startup(); + plugin_promise.set_value( + {appbase::app().find_plugin(), appbase::app().find_plugin()}); + appbase::app().exec(); + }); + + auto [prod_plug, chain_plug] = plugin_fut.get(); + std::deque all_blocks; + std::promise empty_blocks_promise; + std::future empty_blocks_fut = empty_blocks_promise.get_future(); + auto ab = chain_plug->chain().accepted_block.connect([&](const block_state_ptr& bsp) { + static int num_empty = 
std::numeric_limits::max(); + all_blocks.push_back(bsp); + if(bsp->block->transactions.empty()) { + --num_empty; + if(num_empty == 0) empty_blocks_promise.set_value(); + } else {// we want a few empty blocks after we have some non-empty blocks + num_empty = 10; + } + }); + auto pp = appbase::app().find_plugin(); + auto bs = chain_plug->chain().block_start.connect([&pp](uint32_t bn) { + // catching pending snapshot + if (!pp->get_snapshot_requests().snapshot_requests.empty()) { + auto& pending = pp->get_snapshot_requests().snapshot_requests.begin()->pending_snapshots; + if (pending && pending->size()==1) { + // lets check the head block num of it, it should be 8 + 1 = 9 + // this means we are getting a snapshot for correct block # as well + BOOST_CHECK_EQUAL(9, pending->begin()->head_block_num); + } + } + }); + + producer_plugin::snapshot_request_information sri1 = {.block_spacing = 8, .start_block_num = 1, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 2"}; + producer_plugin::snapshot_request_information sri2 = {.block_spacing = 5000, .start_block_num = 100000, .end_block_num = 300000, .snapshot_description = "Example of recurring snapshot 2"}; + producer_plugin::snapshot_request_information sri3 = {.block_spacing = 2, .start_block_num = 0, .end_block_num = 3, .snapshot_description = "Example of recurring snapshot 1"}; + + pp->schedule_snapshot(sri1); + pp->schedule_snapshot(sri2); + pp->schedule_snapshot(sri3); + + // all three snapshot requests should be present now + BOOST_CHECK_EQUAL(3, pp->get_snapshot_requests().snapshot_requests.size()); + + empty_blocks_fut.wait_for(std::chrono::seconds(5)); + + // one of the snapshots is done here and request, corresponding to it should be deleted + BOOST_CHECK_EQUAL(2, pp->get_snapshot_requests().snapshot_requests.size()); + + // check whether no pending snapshots present + BOOST_CHECK_EQUAL(0, pp->get_snapshot_requests().snapshot_requests.begin()->pending_snapshots->size()); + + // quit 
app + appbase::app().quit(); + app_thread.join(); + + // lets check whether schedule can be read back after restart + snapshot_db_json db; + std::vector ssi; + db.set_path(temp / "snapshots"); + db >> ssi; + BOOST_CHECK_EQUAL(2, ssi.size()); + BOOST_CHECK_EQUAL(ssi.begin()->block_spacing, sri1.block_spacing); + } catch(...) { + bfs::remove_all(temp); + throw; + } + bfs::remove_all(temp); + } +} + BOOST_AUTO_TEST_SUITE_END() +}// namespace diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 6f4f9f874d..8ded158a12 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -25,6 +25,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CUR configure_file(${CMAKE_CURRENT_SOURCE_DIR}/terminate-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/terminate-scenarios-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_startup_catchup.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_startup_catchup.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_snapshot_diff_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_snapshot_diff_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_snapshot_forked_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_snapshot_forked_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_forked_chain_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_forked_chain_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_short_fork_take_over_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_short_fork_take_over_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_test.py COPYONLY) @@ -154,6 +155,8 @@ add_test(NAME keosd_auto_launch_test COMMAND tests/keosd_auto_launch_test.py WOR set_property(TEST keosd_auto_launch_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_snapshot_diff_test COMMAND tests/nodeos_snapshot_diff_test.py -v --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST 
nodeos_snapshot_diff_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME nodeos_snapshot_forked_test COMMAND tests/nodeos_snapshot_forked_test.py -v --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_snapshot_forked_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME trx_finality_status_test COMMAND tests/trx_finality_status_test.py -v --clean-run WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST trx_finality_status_test PROPERTY LABELS nonparallelizable_tests) diff --git a/tests/TestHarness/Node.py b/tests/TestHarness/Node.py index 7833fbadb2..f21d3de58c 100644 --- a/tests/TestHarness/Node.py +++ b/tests/TestHarness/Node.py @@ -1581,6 +1581,9 @@ def modifyBuiltinPFSubjRestrictions(self, featureCodename, subjectiveRestriction def createSnapshot(self): return self.processUrllibRequest("producer", "create_snapshot") + def scheduleSnapshot(self): + return self.processUrllibRequest("producer", "schedule_snapshot") + # kill all existing nodeos in case lingering from previous test @staticmethod def killAllNodeos(): diff --git a/tests/nodeos_snapshot_diff_test.py b/tests/nodeos_snapshot_diff_test.py index 2d025968ba..9c953b4756 100755 --- a/tests/nodeos_snapshot_diff_test.py +++ b/tests/nodeos_snapshot_diff_test.py @@ -40,7 +40,7 @@ pnodes=1 testAccounts = 2 trxGeneratorCnt=2 -startedNonProdNodes = 2 +startedNonProdNodes = 3 cluster=Cluster(walletd=True) dumpErrorDetails=args.dump_error_details keepLogs=args.keep_logs @@ -60,10 +60,14 @@ trxGenLauncher=None +snapshotScheduleDB = "snapshot-schedule.json" + def getLatestSnapshot(nodeId): snapshotDir = os.path.join(Utils.getNodeDataDir(nodeId), "snapshots") snapshotDirContents = os.listdir(snapshotDir) assert len(snapshotDirContents) > 0 + # disregard snapshot schedule config in same folder + if snapshotScheduleDB in snapshotDirContents: snapshotDirContents.remove(snapshotScheduleDB) snapshotDirContents.sort() return os.path.join(snapshotDir, snapshotDirContents[-1]) @@ 
-111,9 +115,11 @@ def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportI snapshotNodeId = 0 node0=cluster.getNode(snapshotNodeId) irrNodeId = snapshotNodeId+1 + progNodeId = irrNodeId+1 nodeSnap=cluster.getNode(snapshotNodeId) nodeIrr=cluster.getNode(irrNodeId) + nodeProg=cluster.getNode(progNodeId) Print("Wait for account creation to be irreversible") blockNum=node0.getBlockNum(BlockType.head) @@ -152,6 +158,9 @@ def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportI assert steadyStateAvg>=minRequiredTransactions, "Expected to at least receive %s transactions per block, but only getting %s" % (minRequiredTransactions, steadyStateAvg) Print("Create snapshot") + ret = nodeProg.scheduleSnapshot() + assert ret is not None, "Snapshot creation failed" + ret = nodeSnap.createSnapshot() assert ret is not None, "Snapshot creation failed" ret_head_block_num = ret["payload"]["head_block_num"] @@ -195,6 +204,16 @@ def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportI irrSnapshotFile = irrSnapshotFile + ".json" assert Utils.compareFiles(snapshotFile, irrSnapshotFile), f"Snapshot files differ {snapshotFile} != {irrSnapshotFile}" + + Print("Kill programmable node") + nodeProg.kill(signal.SIGTERM) + + Print("Convert snapshot to JSON") + progSnapshotFile = getLatestSnapshot(progNodeId) + Utils.processLeapUtilCmd("snapshot to-json --input-file {}".format(progSnapshotFile), "snapshot to-json", silentErrors=False) + progSnapshotFile = progSnapshotFile + ".json" + + assert Utils.compareFiles(progSnapshotFile, irrSnapshotFile), f"Snapshot files differ {progSnapshotFile} != {irrSnapshotFile}" testSuccessful=True diff --git a/tests/nodeos_snapshot_forked_test.py b/tests/nodeos_snapshot_forked_test.py new file mode 100755 index 0000000000..c35af7cd81 --- /dev/null +++ b/tests/nodeos_snapshot_forked_test.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 + +import os +import time +import decimal +import json +import 
math +import re +import signal + +from TestHarness import Account, Cluster, Node, TestHelper, Utils, WalletMgr +from TestHarness.Node import BlockType + +############################################################### +# nodeos_snapshot_forked +# +# Test to verify that programmable snapshot functionality is +# working appropriately when forks occur +# +# Setup involves constructing bridge-mode node topology and +# killing a "bridge" node +# +############################################################### +Print=Utils.Print +errorExit=Utils.errorExit + +from core_symbol import CORE_SYMBOL + + +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", + "--wallet-port"}) +Utils.Debug=args.v +totalProducerNodes=2 +totalNonProducerNodes=1 +totalNodes=totalProducerNodes+totalNonProducerNodes +maxActiveProducers=3 +totalProducers=maxActiveProducers +cluster=Cluster(walletd=True) +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontKill=args.leave_running +killAll=args.clean_run +walletPort=args.wallet_port + +walletMgr=WalletMgr(True, port=walletPort) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName=Utils.EosWalletName +ClientName="cleos" + +snapshotScheduleDB = "snapshot-schedule.json" + +EOSIO_ACCT_PRIVATE_DEFAULT_KEY = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3" +EOSIO_ACCT_PUBLIC_DEFAULT_KEY = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" + +def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportInterval=20): + if not node.waitForBlock(blockNum, timeout=timeout, blockType=blockType, reportInterval=reportInterval): + info=node.getInfo() + headBlockNum=info["head_block_num"] + libBlockNum=info["last_irreversible_block_num"] + Utils.errorExit("Failed to get to %s block number %d. 
Last had head block number %d and lib %d" % (blockType, blockNum, headBlockNum, libBlockNum)) + +def getSnapshotsCount(nodeId): + snapshotDir = os.path.join(Utils.getNodeDataDir(nodeId), "snapshots") + snapshotDirContents = os.listdir(snapshotDir) + assert len(snapshotDirContents) > 0 + # disregard snapshot schedule config in same folder + if snapshotScheduleDB in snapshotDirContents: snapshotDirContents.remove(snapshotScheduleDB) + return len(snapshotDirContents) + +try: + TestHelper.printSystemInfo("BEGIN") + + cluster.setWalletMgr(walletMgr) + cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + specificExtraNodeosArgs={} + # producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node + specificExtraNodeosArgs[totalProducerNodes]="--plugin eosio::test_control_api_plugin" + + # ensure that transactions don't get cleaned up too early + successDuration = 360 + failure_duration = 360 + extraNodeosArgs=" --transaction-finality-status-max-storage-size-gb 1 " + \ + f"--transaction-finality-status-success-duration-sec {successDuration} --transaction-finality-status-failure-duration-sec {failure_duration}" + extraNodeosArgs+=" --http-max-response-time-ms 990000" + + + # *** setup topogrophy *** + + # "bridge" shape connects defprocera through defproducerb (in node0) to each other and defproducerc is alone (in node01) + # and the only connection between those 2 groups is through the bridge node + if cluster.launch(prodCount=2, topo="bridge", pnodes=totalProducerNodes, + totalNodes=totalNodes, totalProducers=totalProducers, + useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs, + extraNodeosArgs=extraNodeosArgs) is False: + Utils.cmdError("launcher") + Utils.errorExit("Failed to stand up eos cluster.") + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + # *** identify each node (producers and non-producing node) 
*** + + nonProdNode=None + prodNodes=[] + producers=[] + for i, node in enumerate(cluster.getNodes()): + node.producers=Cluster.parseProducers(node.nodeId) + numProducers=len(node.producers) + Print(f"node {i} has producers={node.producers}") + if numProducers==0: + if nonProdNode is None: + nonProdNode=node + else: + Utils.errorExit("More than one non-producing nodes") + else: + prodNodes.append(node) + producers.extend(node.producers) + + prodAB=prodNodes[0] # defproducera, defproducerb + prodC=prodNodes[1] # defproducerc + + # *** Identify a block where production is stable *** + + #verify nodes are in sync and advancing + cluster.biosNode.kill(signal.SIGTERM) + cluster.waitOnClusterSync(blockAdvancing=5) + + Print("Creating account1") + account1 = Account('account1') + account1.ownerPublicKey = EOSIO_ACCT_PUBLIC_DEFAULT_KEY + account1.activePublicKey = EOSIO_ACCT_PUBLIC_DEFAULT_KEY + cluster.createAccountAndVerify(account1, cluster.eosioAccount, stakedDeposit=1000) + + Print("Validating accounts after bootstrap") + cluster.validateAccounts([account1]) + + # *** Schedule snapshot, it should become pending + prodAB.scheduleSnapshot() + + # *** Killing the "bridge" node *** + Print("Sending command to kill \"bridge\" node to separate the 2 producer groups.") + # kill at the beginning of the production window for defproducera, so there is time for the fork for + # defproducerc to grow before it would overtake the fork for defproducera and defproducerb + killAtProducer="defproducera" + nonProdNode.killNodeOnProducer(producer=killAtProducer, whereInSequence=1) + + #verify that the non producing node is not alive (and populate the producer nodes with current getInfo data to report if + #an error occurs) + numPasses = 2 + blocksPerProducer = 12 + blocksPerRound = totalProducers * blocksPerProducer + count = blocksPerRound * numPasses + while nonProdNode.verifyAlive() and count > 0: + # wait on prodNode 0 since it will continue to advance, since defproducera and 
defproducerb are its producers + Print("Wait for next block") + assert prodAB.waitForNextBlock(timeout=6), "Production node AB should continue to advance, even after bridge node is killed" + count -= 1 + + # schedule a snapshot that should get finalized + prodC.scheduleSnapshot() + + assert not nonProdNode.verifyAlive(), "Bridge node should have been killed if test was functioning correctly." + + def getState(status): + assert status is not None, "ERROR: getTransactionStatus failed to return any status" + assert "state" in status, \ + f"ERROR: getTransactionStatus returned a status object that didn't have a \"state\" field. state: {json.dumps(status, indent=1)}" + return status["state"] + + assert prodC.waitForNextBlock(), "Production node C should continue to advance, even after bridge node is killed" + + Print("Relaunching the non-producing bridge node to connect the nodes") + if not nonProdNode.relaunch(): + errorExit(f"Failure - (non-production) node {nonProdNode.nodeNum} should have restarted") + + Print("Wait for LIB to move, which indicates prodC has forked out the branch") + assert prodC.waitForLibToAdvance(), \ + "ERROR: Network did not reach consensus after bridge node was restarted." + + for prodNode in prodNodes: + info=prodNode.getInfo() + Print(f"node info: {json.dumps(info, indent=1)}") + + assert prodC.waitForProducer("defproducerc"), \ + f"Waiting for prodC to produce, but it never happened" + \ + f"\n\nprod AB info: {json.dumps(prodAB.getInfo(), indent=1)}\n\nprod C info: {json.dumps(prodC.getInfo(), indent=1)}" + + blockNum=prodC.getBlockNum(BlockType.head) + 1 + waitForBlock(prodC, blockNum + 1, blockType=BlockType.lib) + + # AB & C compare counts, should be same + assert getSnapshotsCount(0) == getSnapshotsCount(1), \ + "ERROR: Pre-fork and post-fork snapshots failed to finalize." 
+ + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + +errorCode = 0 if testSuccessful else 1 +exit(errorCode) \ No newline at end of file