diff --git a/db_stress_tool/db_stress_common.h b/db_stress_tool/db_stress_common.h
index 768e90b9fb5a..b22f01f65b19 100644
--- a/db_stress_tool/db_stress_common.h
+++ b/db_stress_tool/db_stress_common.h
@@ -352,6 +352,45 @@ DECLARE_uint64(initial_auto_readahead_size);
 DECLARE_uint64(max_auto_readahead_size);
 DECLARE_uint64(num_file_reads_for_auto_readahead);
 DECLARE_bool(auto_readahead_size);
+DECLARE_bool(allow_fallocate);
+DECLARE_int32(table_cache_numshardbits);
+DECLARE_bool(enable_write_thread_adaptive_yield);
+DECLARE_uint64(log_readahead_size);
+DECLARE_uint64(bgerror_resume_retry_interval);
+DECLARE_uint64(delete_obsolete_files_period_micros);
+DECLARE_uint64(max_log_file_size);
+DECLARE_uint64(log_file_time_to_roll);
+DECLARE_bool(use_adaptive_mutex);
+DECLARE_bool(advise_random_on_open);
+DECLARE_uint64(WAL_ttl_seconds);
+DECLARE_uint64(WAL_size_limit_MB);
+DECLARE_bool(strict_bytes_per_sync);
+DECLARE_bool(avoid_flush_during_shutdown);
+DECLARE_bool(fill_cache);
+DECLARE_bool(optimize_multiget_for_io);
+DECLARE_bool(memtable_insert_hint_per_batch);
+DECLARE_bool(dump_malloc_stats);
+DECLARE_uint32(stats_history_buffer_size);
+DECLARE_bool(skip_stats_update_on_db_open);
+DECLARE_bool(optimize_filters_for_hits);
+DECLARE_uint64(sample_for_compression);
+DECLARE_bool(report_bg_io_stats);
+DECLARE_bool(cache_index_and_filter_blocks_with_high_priority);
+DECLARE_bool(use_delta_encoding);
+DECLARE_bool(verify_compression);
+DECLARE_uint32(read_amp_bytes_per_bit);
+DECLARE_bool(enable_index_compression);
+DECLARE_uint32(index_shortening);
+DECLARE_uint32(metadata_charge_policy);
+DECLARE_bool(use_adaptive_mutex_lru);
+DECLARE_uint32(compress_format_version);
+DECLARE_uint64(manifest_preallocation_size);
+DECLARE_bool(checksum_handoff_file);
+DECLARE_uint64(max_total_wal_size);
+DECLARE_double(high_pri_pool_ratio);
+DECLARE_double(low_pri_pool_ratio);
+DECLARE_uint64(soft_pending_compaction_bytes_limit);
+DECLARE_uint64(hard_pending_compaction_bytes_limit);
 
 constexpr long KB = 1024;
 constexpr int kRandomValueMaxFactor = 3;
diff --git a/db_stress_tool/db_stress_gflags.cc b/db_stress_tool/db_stress_gflags.cc
index 3a00594250d6..a251575a246a 100644
--- a/db_stress_tool/db_stress_gflags.cc
+++ b/db_stress_tool/db_stress_gflags.cc
@@ -7,6 +7,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "rocksdb/cache.h" +#include "rocksdb/options.h" +#include "rocksdb/utilities/backup_engine.h" #ifdef GFLAGS #include "db_stress_tool/db_stress_common.h" @@ -402,7 +405,8 @@ DEFINE_double(experimental_mempurge_threshold, 0.0, "Maximum estimated useful payload that triggers a " "mempurge process to collect memtable garbage bytes."); -DEFINE_bool(enable_write_thread_adaptive_yield, true, +DEFINE_bool(enable_write_thread_adaptive_yield, + ROCKSDB_NAMESPACE::Options().enable_write_thread_adaptive_yield, "Use a yielding spin loop for brief writer thread waits."); // Options for StackableDB-based BlobDB @@ -1144,4 +1148,155 @@ DEFINE_uint32(bottommost_file_compaction_delay, 0, DEFINE_bool(auto_readahead_size, false, "Does auto tuning of readahead_size when enabled during scans."); +DEFINE_bool(allow_fallocate, ROCKSDB_NAMESPACE::Options().allow_fallocate, + "Options.allow_fallocate"); + +DEFINE_int32(table_cache_numshardbits, + ROCKSDB_NAMESPACE::Options().table_cache_numshardbits, + "Options.table_cache_numshardbits"); + +DEFINE_uint64(log_readahead_size, + ROCKSDB_NAMESPACE::Options().log_readahead_size, + "Options.log_readahead_size"); + +DEFINE_uint64(bgerror_resume_retry_interval, + ROCKSDB_NAMESPACE::Options().bgerror_resume_retry_interval, + "Options.bgerror_resume_retry_interval"); + +DEFINE_uint64(delete_obsolete_files_period_micros, + ROCKSDB_NAMESPACE::Options().delete_obsolete_files_period_micros, + "Options.delete_obsolete_files_period_micros"); + +DEFINE_uint64(max_log_file_size, ROCKSDB_NAMESPACE::Options().max_log_file_size, + "Options.max_log_file_sizes"); + +DEFINE_uint64(log_file_time_to_roll, + ROCKSDB_NAMESPACE::Options().log_file_time_to_roll, + "Options.log_file_time_to_roll"); + +DEFINE_bool(use_adaptive_mutex, ROCKSDB_NAMESPACE::Options().use_adaptive_mutex, + "Options.use_adaptive_mutex"); + +DEFINE_bool(advise_random_on_open, + ROCKSDB_NAMESPACE::Options().advise_random_on_open, + "Options.advise_random_on_open"); + +DEFINE_uint64(WAL_ttl_seconds, ROCKSDB_NAMESPACE::Options().WAL_ttl_seconds, + "Options.WAL_ttl_seconds"); + +DEFINE_uint64(WAL_size_limit_MB, ROCKSDB_NAMESPACE::Options().WAL_size_limit_MB, + "Options.WAL_size_limit_MB"); + +DEFINE_bool(strict_bytes_per_sync, + ROCKSDB_NAMESPACE::Options().strict_bytes_per_sync, + "Options.strict_bytes_per_sync"); + +DEFINE_bool(avoid_flush_during_shutdown, + ROCKSDB_NAMESPACE::Options().avoid_flush_during_shutdown, + "Options.avoid_flush_during_shutdown"); + +DEFINE_bool(fill_cache, ROCKSDB_NAMESPACE::ReadOptions().fill_cache, + "ReadOptions.fill_cache"); + +DEFINE_bool(optimize_multiget_for_io, + ROCKSDB_NAMESPACE::ReadOptions().optimize_multiget_for_io, + "ReadOptions.optimize_multiget_for_io"); + +DEFINE_bool(memtable_insert_hint_per_batch, + ROCKSDB_NAMESPACE::WriteOptions().memtable_insert_hint_per_batch, + "WriteOptions.memtable_insert_hint_per_batch"); + +DEFINE_bool(dump_malloc_stats, ROCKSDB_NAMESPACE::Options().dump_malloc_stats, + "Options.dump_malloc_stats"); + +DEFINE_uint32(stats_history_buffer_size, + ROCKSDB_NAMESPACE::Options().stats_history_buffer_size, + "Options.stats_history_buffer_size"); + +DEFINE_bool(skip_stats_update_on_db_open, + ROCKSDB_NAMESPACE::Options().skip_stats_update_on_db_open, + "Options.skip_stats_update_on_db_open"); + +DEFINE_bool(optimize_filters_for_hits, + ROCKSDB_NAMESPACE::Options().optimize_filters_for_hits, + "Options.optimize_filters_for_hits"); + +DEFINE_uint64(sample_for_compression, + ROCKSDB_NAMESPACE::Options().sample_for_compression, + 
"Options.sample_for_compression"); + +DEFINE_bool(report_bg_io_stats, ROCKSDB_NAMESPACE::Options().report_bg_io_stats, + "Options.report_bg_io_stats"); + +DEFINE_bool( + cache_index_and_filter_blocks_with_high_priority, + ROCKSDB_NAMESPACE::BlockBasedTableOptions() + .cache_index_and_filter_blocks_with_high_priority, + "BlockBasedTableOptions.cache_index_and_filter_blocks_with_high_priority"); + +DEFINE_bool(use_delta_encoding, + ROCKSDB_NAMESPACE::BlockBasedTableOptions().use_delta_encoding, + "BlockBasedTableOptions.use_delta_encoding"); + +DEFINE_bool(verify_compression, + ROCKSDB_NAMESPACE::BlockBasedTableOptions().verify_compression, + "BlockBasedTableOptions.verify_compression"); + +DEFINE_uint32( + read_amp_bytes_per_bit, + ROCKSDB_NAMESPACE::BlockBasedTableOptions().read_amp_bytes_per_bit, + "Options.read_amp_bytes_per_bit"); + +DEFINE_bool( + enable_index_compression, + ROCKSDB_NAMESPACE::BlockBasedTableOptions().enable_index_compression, + "BlockBasedTableOptions.enable_index_compression"); + +DEFINE_uint32(index_shortening, + static_cast( + ROCKSDB_NAMESPACE::BlockBasedTableOptions().index_shortening), + "BlockBasedTableOptions.index_shortening"); + +DEFINE_uint32(metadata_charge_policy, + static_cast(ROCKSDB_NAMESPACE::ShardedCacheOptions() + .metadata_charge_policy), + "ShardedCacheOptions.metadata_charge_policy"); + +DEFINE_bool(use_adaptive_mutex_lru, + ROCKSDB_NAMESPACE::LRUCacheOptions().use_adaptive_mutex, + "LRUCacheOptions.use_adaptive_mutex"); + +DEFINE_uint32( + compress_format_version, + static_cast(ROCKSDB_NAMESPACE::CompressedSecondaryCacheOptions() + .compress_format_version), + "CompressedSecondaryCacheOptions.compress_format_version"); + +DEFINE_uint64(manifest_preallocation_size, + ROCKSDB_NAMESPACE::Options().manifest_preallocation_size, + "Options.manifest_preallocation_size"); + +DEFINE_uint64(max_total_wal_size, + ROCKSDB_NAMESPACE::Options().max_total_wal_size, + "Options.max_total_wal_size"); + +DEFINE_bool(checksum_handoff_file, false, + "If true, include all the supported files in " + "Options.checksum_handoff_file. 
Otherwise include no files."); + +DEFINE_double(high_pri_pool_ratio, + ROCKSDB_NAMESPACE::LRUCacheOptions().high_pri_pool_ratio, + "LRUCacheOptions.high_pri_pool_ratio"); + +DEFINE_double(low_pri_pool_ratio, + ROCKSDB_NAMESPACE::LRUCacheOptions().low_pri_pool_ratio, + "LRUCacheOptions.low_pri_pool_ratio"); + +DEFINE_uint64(soft_pending_compaction_bytes_limit, + ROCKSDB_NAMESPACE::Options().soft_pending_compaction_bytes_limit, + "Options.soft_pending_compaction_bytes_limit"); + +DEFINE_uint64(hard_pending_compaction_bytes_limit, + ROCKSDB_NAMESPACE::Options().hard_pending_compaction_bytes_limit, + "Options.hard_pending_compaction_bytes_limit"); #endif // GFLAGS diff --git a/db_stress_tool/db_stress_test_base.cc b/db_stress_tool/db_stress_test_base.cc index feec339d9797..63c6357aca8e 100644 --- a/db_stress_tool/db_stress_test_base.cc +++ b/db_stress_tool/db_stress_test_base.cc @@ -141,6 +141,7 @@ std::shared_ptr StressTest::NewCache(size_t capacity, } CompressedSecondaryCacheOptions opts; opts.capacity = FLAGS_compressed_secondary_cache_size; + opts.compress_format_version = FLAGS_compress_format_version; secondary_cache = NewCompressedSecondaryCache(opts); if (secondary_cache == nullptr) { fprintf(stderr, "Failed to allocate compressed secondary cache\n"); @@ -191,6 +192,11 @@ std::shared_ptr StressTest::NewCache(size_t capacity, LRUCacheOptions opts; opts.capacity = capacity; opts.num_shard_bits = num_shard_bits; + opts.metadata_charge_policy = + static_cast(FLAGS_metadata_charge_policy); + opts.use_adaptive_mutex = FLAGS_use_adaptive_mutex_lru; + opts.high_pri_pool_ratio = FLAGS_high_pri_pool_ratio; + opts.low_pri_pool_ratio = FLAGS_low_pri_pool_ratio; if (tiered) { TieredCacheOptions tiered_opts; tiered_opts.cache_opts = &opts; @@ -830,10 +836,14 @@ void StressTest::OperateDb(ThreadState* thread) { read_opts.adaptive_readahead = FLAGS_adaptive_readahead; read_opts.readahead_size = FLAGS_readahead_size; read_opts.auto_readahead_size = FLAGS_auto_readahead_size; + read_opts.fill_cache = FLAGS_fill_cache; + read_opts.optimize_multiget_for_io = FLAGS_optimize_multiget_for_io; WriteOptions write_opts; if (FLAGS_rate_limit_auto_wal_flush) { write_opts.rate_limiter_priority = Env::IO_USER; } + write_opts.memtable_insert_hint_per_batch = + FLAGS_memtable_insert_hint_per_batch; auto shared = thread->shared; char value[100]; std::string from_db; @@ -1264,6 +1274,19 @@ Status StressTest::TestIterate(ThreadState* thread, Slice read_ts_slice; MaybeUseOlderTimestampForRangeScan(thread, read_ts_str, read_ts_slice, ro); + std::string op_logs; + if (ro.tailing) { + op_logs += "ReadOptions.tailing = true "; + } + ro.pin_data = thread->rand.OneIn(2); + if (ro.pin_data) { + op_logs += "ReadOptions.pin_data = true "; + } + ro.background_purge_on_iterator_cleanup = thread->rand.OneIn(2); + if (ro.background_purge_on_iterator_cleanup) { + op_logs += "ReadOptions.background_purge_on_iterator_cleanup = true "; + } + bool expect_total_order = false; if (thread->rand.OneIn(16)) { // When prefix extractor is used, it's useful to cover total order seek. 
@@ -1315,7 +1338,6 @@ Status StressTest::TestIterate(ThreadState* thread,
     }
   }
 
-  std::string op_logs;
   constexpr size_t kOpLogsLimit = 10000;
 
   for (const std::string& key_str : key_strs) {
@@ -1681,6 +1703,19 @@ Status StressTest::TestBackupRestore(
   } else {
     backup_opts.schema_version = 2;
   }
+  if (thread->rand.OneIn(3)) {
+    backup_opts.max_background_operations = 16;
+  } else {
+    backup_opts.max_background_operations = 1;
+  }
+  if (thread->rand.OneIn(2)) {
+    backup_opts.backup_rate_limiter.reset(NewGenericRateLimiter(
+        1024 * 1024 /* rate_bytes_per_sec */, 10 /* refill_period_us */));
+  }
+  if (thread->rand.OneIn(2)) {
+    backup_opts.restore_rate_limiter.reset(NewGenericRateLimiter(
+        1024 * 1024 /* rate_bytes_per_sec */, 10 /* refill_period_us */));
+  }
   BackupEngine* backup_engine = nullptr;
   std::string from = "a backup/restore operation";
   Status s = BackupEngine::Open(db_stress_env, backup_opts, &backup_engine);
@@ -1707,6 +1742,9 @@ Status StressTest::TestBackupRestore(
     // lock and wait on a background operation (flush).
     create_opts.flush_before_backup = true;
   }
+  create_opts.decrease_background_thread_cpu_priority = thread->rand.OneIn(2);
+  create_opts.background_thread_cpu_priority = static_cast<CpuPriority>(
+      thread->rand.Next() % (static_cast<int>(CpuPriority::kHigh) + 1));
   s = backup_engine->CreateNewBackup(create_opts, db_);
   if (!s.ok()) {
     from = "BackupEngine::CreateNewBackup";
@@ -1744,19 +1782,20 @@ Status StressTest::TestBackupRestore(
   const bool allow_persistent = thread->tid == 0;  // not too many
   bool from_latest = false;
   int count = static_cast<int>(backup_info.size());
+  RestoreOptions restore_options;
+  restore_options.keep_log_files = thread->rand.OneIn(2);
   if (s.ok() && !inplace_not_restore) {
     if (count > 1) {
       s = backup_engine->RestoreDBFromBackup(
-          RestoreOptions(), backup_info[thread->rand.Uniform(count)].backup_id,
+          restore_options, backup_info[thread->rand.Uniform(count)].backup_id,
           restore_dir /* db_dir */, restore_dir /* wal_dir */);
       if (!s.ok()) {
         from = "BackupEngine::RestoreDBFromBackup";
       }
     } else {
       from_latest = true;
-      s = backup_engine->RestoreDBFromLatestBackup(RestoreOptions(),
-                                                   restore_dir /* db_dir */,
-                                                   restore_dir /* wal_dir */);
+      s = backup_engine->RestoreDBFromLatestBackup(
+          restore_options, restore_dir /* db_dir */, restore_dir /* wal_dir */);
       if (!s.ok()) {
         from = "BackupEngine::RestoreDBFromLatestBackup";
       }
@@ -1779,9 +1818,9 @@ Status StressTest::TestBackupRestore(
   std::vector<ColumnFamilyHandle*> restored_cf_handles;
 
   // Not yet implemented: opening restored BlobDB or TransactionDB
-  Options restore_options;
+  Options db_opt;
   if (s.ok() && !FLAGS_use_txn && !FLAGS_use_blob_db) {
-    s = PrepareOptionsForRestoredDB(&restore_options);
+    s = PrepareOptionsForRestoredDB(&db_opt);
     if (!s.ok()) {
       from = "PrepareRestoredDBOptions in backup/restore";
     }
@@ -1794,19 +1833,19 @@ Status StressTest::TestBackupRestore(
     // the same order as `column_family_names_`.
     assert(FLAGS_clear_column_family_one_in == 0);
     for (const auto& name : column_family_names_) {
-      cf_descriptors.emplace_back(name, ColumnFamilyOptions(restore_options));
+      cf_descriptors.emplace_back(name, ColumnFamilyOptions(db_opt));
     }
   if (inplace_not_restore) {
     BackupInfo& info = backup_info[thread->rand.Uniform(count)];
-    restore_options.env = info.env_for_open.get();
-    s = DB::OpenForReadOnly(DBOptions(restore_options), info.name_for_open,
+    db_opt.env = info.env_for_open.get();
+    s = DB::OpenForReadOnly(DBOptions(db_opt), info.name_for_open,
                             cf_descriptors, &restored_cf_handles, &restored_db);
     if (!s.ok()) {
       from = "DB::OpenForReadOnly in backup/restore";
     }
   } else {
-    s = DB::Open(DBOptions(restore_options), restore_dir, cf_descriptors,
+    s = DB::Open(DBOptions(db_opt), restore_dir, cf_descriptors,
                  &restored_cf_handles, &restored_db);
     if (!s.ok()) {
       from = "DB::Open in backup/restore";
@@ -2248,8 +2287,12 @@ void StressTest::TestCompactFiles(ThreadState* thread,
   size_t output_level =
       std::min(random_level + 1, cf_meta_data.levels.size() - 1);
-  auto s = db_->CompactFiles(CompactionOptions(), column_family,
-                             input_files, static_cast<int>(output_level));
+  CompactionOptions compact_options;
+  if (thread->rand.OneIn(2)) {
+    compact_options.output_file_size_limit = FLAGS_target_file_size_base;
+  }
+  auto s = db_->CompactFiles(compact_options, column_family, input_files,
+                             static_cast<int>(output_level));
   if (!s.ok()) {
     fprintf(stdout, "Unable to perform CompactFiles(): %s\n",
             s.ToString().c_str());
@@ -2396,6 +2439,9 @@ void StressTest::TestCompactRange(ThreadState* thread, int64_t rand_key,
   CompactRangeOptions cro;
   cro.exclusive_manual_compaction = static_cast<bool>(thread->rand.Next() % 2);
   cro.change_level = static_cast<bool>(thread->rand.Next() % 2);
+  if (thread->rand.OneIn(2)) {
+    cro.target_level = thread->rand.Next() % options_.num_levels;
+  }
   std::vector<BottommostLevelCompaction> bottom_level_styles = {
       BottommostLevelCompaction::kSkip,
       BottommostLevelCompaction::kIfHaveCompactionFilter,
@@ -3286,6 +3332,15 @@ void InitializeOptionsFromFlags(
   block_based_options.max_auto_readahead_size = FLAGS_max_auto_readahead_size;
   block_based_options.num_file_reads_for_auto_readahead =
       FLAGS_num_file_reads_for_auto_readahead;
+  block_based_options.cache_index_and_filter_blocks_with_high_priority =
+      FLAGS_cache_index_and_filter_blocks_with_high_priority;
+  block_based_options.use_delta_encoding = FLAGS_use_delta_encoding;
+  block_based_options.verify_compression = FLAGS_verify_compression;
+  block_based_options.read_amp_bytes_per_bit = FLAGS_read_amp_bytes_per_bit;
+  block_based_options.enable_index_compression = FLAGS_enable_index_compression;
+  block_based_options.index_shortening =
+      static_cast<BlockBasedTableOptions::IndexShorteningMode>(
+          FLAGS_index_shortening);
   options.table_factory.reset(NewBlockBasedTableFactory(block_based_options));
   options.db_write_buffer_size = FLAGS_db_write_buffer_size;
   options.write_buffer_size = FLAGS_write_buffer_size;
@@ -3486,6 +3541,42 @@ void InitializeOptionsFromFlags(
   options.bottommost_file_compaction_delay =
       FLAGS_bottommost_file_compaction_delay;
+
+  options.allow_fallocate = FLAGS_allow_fallocate;
+  options.table_cache_numshardbits = FLAGS_table_cache_numshardbits;
+  options.enable_write_thread_adaptive_yield =
+      FLAGS_enable_write_thread_adaptive_yield;
+  options.log_readahead_size = FLAGS_log_readahead_size;
+  options.bgerror_resume_retry_interval = FLAGS_bgerror_resume_retry_interval;
+  options.delete_obsolete_files_period_micros =
+      FLAGS_delete_obsolete_files_period_micros;
+  options.max_log_file_size = FLAGS_max_log_file_size;
+  options.log_file_time_to_roll = FLAGS_log_file_time_to_roll;
+  options.use_adaptive_mutex = FLAGS_use_adaptive_mutex;
+  options.advise_random_on_open = FLAGS_advise_random_on_open;
+  options.WAL_ttl_seconds = FLAGS_WAL_ttl_seconds;
+  options.WAL_size_limit_MB = FLAGS_WAL_size_limit_MB;
+  options.wal_bytes_per_sync = FLAGS_wal_bytes_per_sync;
+  options.strict_bytes_per_sync = FLAGS_strict_bytes_per_sync;
+  options.avoid_flush_during_shutdown = FLAGS_avoid_flush_during_shutdown;
+  options.dump_malloc_stats = FLAGS_dump_malloc_stats;
+  options.stats_history_buffer_size = FLAGS_stats_history_buffer_size;
+  options.skip_stats_update_on_db_open = FLAGS_skip_stats_update_on_db_open;
+  options.optimize_filters_for_hits = FLAGS_optimize_filters_for_hits;
+  options.sample_for_compression = FLAGS_sample_for_compression;
+  options.report_bg_io_stats = FLAGS_report_bg_io_stats;
+  options.manifest_preallocation_size = FLAGS_manifest_preallocation_size;
+  if (FLAGS_checksum_handoff_file) {
+    options.checksum_handoff_file_types = {
+        FileType::kWalFile, FileType::kTableFile, FileType::kDescriptorFile};
+  } else {
+    options.checksum_handoff_file_types = {};
+  }
+  options.max_total_wal_size = FLAGS_max_total_wal_size;
+  options.soft_pending_compaction_bytes_limit =
+      FLAGS_soft_pending_compaction_bytes_limit;
+  options.hard_pending_compaction_bytes_limit =
+      FLAGS_hard_pending_compaction_bytes_limit;
 }
 
 void InitializeOptionsGeneral(
diff --git a/db_stress_tool/no_batched_ops_stress.cc b/db_stress_tool/no_batched_ops_stress.cc
index 7b0c6d2eb9ec..b95cee3183a8 100644
--- a/db_stress_tool/no_batched_ops_stress.cc
+++ b/db_stress_tool/no_batched_ops_stress.cc
@@ -1594,8 +1594,13 @@ class NonBatchedOpsStressTest : public StressTest {
       s = sst_file_writer.Finish();
     }
     if (s.ok()) {
+      IngestExternalFileOptions ingest_options;
+      ingest_options.move_files = thread->rand.OneInOpt(2);
+      ingest_options.verify_checksums_before_ingest = thread->rand.OneInOpt(2);
+      ingest_options.verify_checksums_readahead_size =
+          thread->rand.OneInOpt(2) ? 1024 * 1024 : 0;
       s = db_->IngestExternalFile(column_families_[column_family],
-                                  {sst_filename}, IngestExternalFileOptions());
+                                  {sst_filename}, ingest_options);
     }
     if (!s.ok()) {
       for (PendingExpectedValue& pending_expected_value :
diff --git a/tools/db_crashtest.py b/tools/db_crashtest.py
index b1916381005b..4acd85228732 100644
--- a/tools/db_crashtest.py
+++ b/tools/db_crashtest.py
@@ -227,8 +227,47 @@
     ),
     "auto_readahead_size" : lambda: random.choice([0, 1]),
     "verify_iterator_with_expected_state_one_in": 5,
+    "allow_fallocate": lambda: random.choice([0, 1]),
+    "table_cache_numshardbits": lambda: random.choice([6] * 3 + [-1] * 2 + [0]),
+    "enable_write_thread_adaptive_yield": lambda: random.choice([0, 1]),
+    "log_readahead_size": lambda: random.choice([0, 16 * 1024 * 1024]),
+    "bgerror_resume_retry_interval": lambda: random.choice([10000, 1000000]),
+    "delete_obsolete_files_period_micros": lambda: random.choice([6 * 60 * 60 * 1000000, 30 * 1000000]),
+    "max_log_file_size": lambda: random.choice([0, 1024 * 1024]),
+    "log_file_time_to_roll": lambda: random.choice([0, 60]),
+    "use_adaptive_mutex": lambda: random.choice([0, 1]),
+    "advise_random_on_open": lambda: random.choice([0] + [1] * 3),
+    "WAL_ttl_seconds": lambda: random.choice([0, 60]),
+    "WAL_size_limit_MB": lambda: random.choice([0, 1]),
+    "wal_bytes_per_sync": lambda: random.choice([0, 1024 * 1024]),
+    "strict_bytes_per_sync": lambda: random.choice([0, 1]),
+    "avoid_flush_during_shutdown": lambda: random.choice([0, 1]),
+    "fill_cache": lambda: random.choice([0, 1]),
+    "optimize_multiget_for_io": lambda: random.choice([0, 1]),
+    "memtable_insert_hint_per_batch": lambda: random.choice([0, 1]),
+    "dump_malloc_stats": lambda: random.choice([0, 1]),
+    "stats_history_buffer_size": lambda: random.choice([0, 1024 * 1024]),
+    "skip_stats_update_on_db_open": lambda: random.choice([0, 1]),
+    "optimize_filters_for_hits": lambda: random.choice([0, 1]),
+    "sample_for_compression": lambda: random.choice([0, 5]),
+    "report_bg_io_stats": lambda: random.choice([0, 1]),
+    "cache_index_and_filter_blocks_with_high_priority": lambda: random.choice([0, 1]),
+    "use_delta_encoding": lambda: random.choice([0, 1]),
+    "verify_compression": lambda: random.choice([0, 1]),
+    "read_amp_bytes_per_bit": lambda: random.choice([0, 32]),
+    "enable_index_compression": lambda: random.choice([0, 1]),
+    "index_shortening": lambda: random.choice([0, 1, 2]),
+    "metadata_charge_policy": lambda: random.choice([0, 1]),
+    "use_adaptive_mutex_lru": lambda: random.choice([0, 1]),
+    "compress_format_version": lambda: random.choice([1, 2]),
+    "manifest_preallocation_size": lambda: random.choice([0, 4 * 1024 * 1024]),
+    "checksum_handoff_file": lambda: random.choice([0, 1]),
+    "max_total_wal_size": lambda: random.choice([0] * 4 + [64 * 1024 * 1024]),
+    "high_pri_pool_ratio": lambda: random.choice([0, 0.5]),
+    "low_pri_pool_ratio": lambda: random.choice([0, 0.5]),
+    "soft_pending_compaction_bytes_limit" : lambda: random.choice([1024 * 1024] + [64 * 1073741824] * 4),
+    "hard_pending_compaction_bytes_limit" : lambda: random.choice([2 * 1024 * 1024] + [256 * 1073741824] * 4),
 }
-
 _TEST_DIR_ENV_VAR = "TEST_TMPDIR"
 # If TEST_TMPDIR_EXPECTED is not specified, default value will be TEST_TMPDIR
 _TEST_EXPECTED_DIR_ENV_VAR = "TEST_TMPDIR_EXPECTED"
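Not part of the patch: the `db_crashtest.py` entries above only pick values; they reach RocksDB through the gflags defined earlier and the assignments added to `InitializeOptionsFromFlags()`. As a rough end-to-end illustration, this standalone sketch (with hypothetical literals in place of the randomized `FLAGS_*` values) shows where a few of the new knobs finally take effect when the DB is opened:

```cpp
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/types.h"

using ROCKSDB_NAMESPACE::DB;
using ROCKSDB_NAMESPACE::FileType;
using ROCKSDB_NAMESPACE::Options;
using ROCKSDB_NAMESPACE::Status;

// Hypothetical stand-in for the flag-driven option setup in db_stress.
Status OpenWithStressedOptions(const std::string& db_path, DB** db) {
  Options options;  // each new gflag defaults to these library defaults
  options.create_if_missing = true;
  options.soft_pending_compaction_bytes_limit = 1024 * 1024;       // FLAGS_soft_pending_compaction_bytes_limit
  options.hard_pending_compaction_bytes_limit = 2 * 1024 * 1024;   // FLAGS_hard_pending_compaction_bytes_limit
  options.max_total_wal_size = 64 * 1024 * 1024;                   // FLAGS_max_total_wal_size
  options.manifest_preallocation_size = 4 * 1024 * 1024;           // FLAGS_manifest_preallocation_size
  // Same all-or-nothing conditional as InitializeOptionsFromFlags(): hand off
  // checksums for all supported file types, or (in the else branch) for none.
  options.checksum_handoff_file_types = {
      FileType::kWalFile, FileType::kTableFile, FileType::kDescriptorFile};
  return DB::Open(options, db_path, db);
}
```

Everything else added by the patch follows the same shape: a plain field copy from a `FLAGS_*` value onto `Options`, `BlockBasedTableOptions`, or one of the per-operation option structs.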