OC tierup compile in order #1342
@@ -8,6 +8,7 @@
 #include <eosio/chain/webassembly/eos-vm-oc/intrinsic.hpp>
 #include <eosio/chain/webassembly/eos-vm-oc/compile_monitor.hpp>
 #include <eosio/chain/exceptions.hpp>
+#include <eosio/chain/config.hpp>

 #include <unistd.h>
 #include <sys/syscall.h>
@@ -38,7 +39,7 @@ static constexpr size_t descriptor_ptr_from_file_start = header_offset + offseto

 static_assert(sizeof(code_cache_header) <= header_size, "code_cache_header too big");

-code_cache_async::code_cache_async(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) :
+code_cache_async::code_cache_async(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) :
    code_cache_base(data_dir, eosvmoc_config, db),
    _result_queue(eosvmoc_config.threads * 2),
    _threads(eosvmoc_config.threads)
@@ -106,7 +107,7 @@ std::tuple<size_t, size_t> code_cache_async::consume_compile_thread_queue() {
 }


-const code_descriptor* const code_cache_async::get_descriptor_for_code(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) {
+const code_descriptor* const code_cache_async::get_descriptor_for_code(const account_name& receiver, const digest_type& code_id, const uint8_t& vm_version, bool is_write_window, get_cd_failure& failure) {
I think it's fine to leave as is, but for increased abstraction maybe this just takes

I like that. Didn't feel right including
    //if there are any outstanding compiles, process the result queue now
    //When app is in write window, all tasks are running sequentially and read-only threads
    //are not running. Safe to update cache entries.
@@ -156,13 +157,16 @@ const code_descriptor* const code_cache_async::get_descriptor_for_code(const dig
       it->second = false;
       return nullptr;
    }
-   if(_queued_compiles.find(ct) != _queued_compiles.end()) {
+   if(std::find(_queued_compiles.cbegin(), _queued_compiles.cend(), ct) != _queued_compiles.end()) {
       failure = get_cd_failure::temporary; // Compile might not be done yet
       return nullptr;
    }

    if(_outstanding_compiles_and_poison.size() >= _threads) {
-      _queued_compiles.emplace(ct);
+      if (receiver.prefix() == chain::config::system_account_name)
+         _queued_compiles.push_front(ct);
+      else
+         _queued_compiles.push_back(ct);
       failure = get_cd_failure::temporary; // Compile might not be done yet
       return nullptr;
    }
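The queueing policy above reduces to a front-or-back insert keyed on the receiver's name prefix, so contracts under the system account are compiled before everything else that is waiting. Below is a minimal standalone sketch of that ordering behaviour, using a plain std::deque and simplified stand-ins for code_tuple and config::system_account_name rather than the PR's actual types:

#include <algorithm>
#include <cstdint>
#include <deque>
#include <iostream>
#include <string>

// Simplified stand-in for the real code_tuple (digest_type code_id + vm_version).
struct code_tuple {
   std::string code_id;
   uint8_t     vm_version;
   bool operator==(const code_tuple& o) const {
      return code_id == o.code_id && vm_version == o.vm_version;
   }
};

int main() {
   std::deque<code_tuple> queued_compiles;     // assumed queue type for the sketch
   const std::string system_prefix = "eosio";  // stands in for config::system_account_name

   auto enqueue = [&](const std::string& receiver_prefix, const code_tuple& ct) {
      // Already queued? Linear scan, mirroring the std::find in the PR.
      if (std::find(queued_compiles.cbegin(), queued_compiles.cend(), ct) != queued_compiles.cend())
         return;
      if (receiver_prefix == system_prefix)
         queued_compiles.push_front(ct);       // system contracts jump to the front
      else
         queued_compiles.push_back(ct);        // everyone else keeps arrival order
   };

   enqueue("alice", {"code-A", 0});
   enqueue("bob",   {"code-B", 0});
   enqueue("eosio", {"code-token", 0});        // queued ahead of code-A and code-B

   for (const auto& ct : queued_compiles)
      std::cout << ct.code_id << '\n';         // prints: code-token, code-A, code-B
}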
@@ -221,7 +225,7 @@ const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(const
    return &*_cache_index.push_front(std::move(std::get<code_descriptor>(result.result))).first;
 }

-code_cache_base::code_cache_base(const std::filesystem::path data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) :
+code_cache_base::code_cache_base(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db) :
    _db(db),
    _cache_file_path(data_dir/"code_cache.bin")
 {
@@ -383,7 +387,9 @@ void code_cache_base::free_code(const digest_type& code_id, const uint8_t& vm_ve
    }

    //if it's in the queued list, erase it
-   _queued_compiles.erase({code_id, vm_version});
+   auto i = std::find(_queued_compiles.cbegin(), _queued_compiles.cend(), code_tuple{code_id, vm_version});
+   if (i != _queued_compiles.cend())
+      _queued_compiles.erase(i);

    //however, if it's currently being compiled there is no way to cancel the compile,
    //so instead set a poison boolean that indicates not to insert the code in to the cache
I'm worried about the performance ramification of switching this from constant/log(n) time lookup to linear time lookup. The lookup occurs every time an action is executed, and when someone starts fresh there could be hundreds or thousands of entries that need to be searched through for every action (until compilation settles down). I think maybe instead we should have a second

One issue I saw was that compiling was according to code_hash sort order instead of the order requests came in. This preserves the order. If worried about performance, maybe we should use a multi_index instead for this, which would allow keeping track of order, tracking last use, prioritizing eosio.*, and providing fast lookup.

yeah if you want to maintain compilation order, a multi_index sounds good

Done
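As a rough illustration of the multi_index suggestion above (not the PR's final implementation), the queue could combine a sequenced index, which preserves arrival order and still allows push_front for system contracts, with an ordered index over the (code_id, vm_version) pair, which restores logarithmic lookup and erase. The code_tuple fields here are simplified stand-ins, with std::string in place of digest_type:

#include <boost/multi_index_container.hpp>
#include <boost/multi_index/sequenced_index.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/composite_key.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/tuple/tuple.hpp>
#include <cstdint>
#include <string>

namespace bmi = boost::multi_index;

// Simplified stand-in for the real code_tuple (digest_type code_id + vm_version).
struct code_tuple {
   std::string code_id;
   uint8_t     vm_version;
};

// Index 0: sequenced -- keeps the order compile requests arrived in and supports
//          push_front for prioritized (eosio.*) entries.
// Index 1: ordered_unique over (code_id, vm_version) -- O(log n) membership test
//          and erase, replacing the linear std::find.
using queued_compiles_t = bmi::multi_index_container<
   code_tuple,
   bmi::indexed_by<
      bmi::sequenced<>,
      bmi::ordered_unique<
         bmi::composite_key<code_tuple,
            bmi::member<code_tuple, std::string, &code_tuple::code_id>,
            bmi::member<code_tuple, uint8_t,     &code_tuple::vm_version>
         >
      >
   >
>;

int main() {
   queued_compiles_t queued_compiles;
   queued_compiles.push_back({"code-A", 0});       // normal contract: back of the queue
   queued_compiles.push_front({"code-eosio", 0});  // system contract: front of the queue

   // Fast lookup/erase by key, as free_code() would need.
   auto& by_key = queued_compiles.get<1>();
   auto it = by_key.find(boost::make_tuple(std::string{"code-A"}, uint8_t{0}));
   if (it != by_key.end())
      by_key.erase(it);

   // Compile threads would still pop work from the front of the sequenced index.
   return queued_compiles.empty() ? 1 : 0;
}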