remove big blob file type in ps v3 (#5544)
* support config blob file limit size

* add some comment

* Update dbms/src/Storages/Page/V3/BlobStore.cpp

Co-authored-by: JaySon <[email protected]>

* Update dbms/src/TestUtils/MockDiskDelegator.h

Co-authored-by: JaySon <[email protected]>

* Update dbms/src/Storages/Page/V3/BlobStore.cpp

Co-authored-by: JaySon <[email protected]>

* Update dbms/src/Storages/Page/V3/BlobStore.cpp

Co-authored-by: JaySon <[email protected]>

* improve code comment

* set BlobStat to read only type if its capacity is larger than config.file_limit_size

* add metric

* Update dbms/src/Storages/Page/V3/BlobStore.cpp

Co-authored-by: JaySon <[email protected]>

* small fix for gtest

* fix unit test

* fix metrics

* add some comment about reloading config.file_limit_size

* Update metrics/grafana/tiflash_summary.json

Co-authored-by: JaySon <[email protected]>

* Update metrics/grafana/tiflash_summary.json

Co-authored-by: JaySon <[email protected]>

Co-authored-by: JaySon <[email protected]>
Co-authored-by: Ti Chi Robot <[email protected]>
3 people authored Aug 23, 2022
1 parent 8e92ffb commit c9656c9
Showing 11 changed files with 327 additions and 330 deletions.
2 changes: 2 additions & 0 deletions dbms/src/Common/TiFlashMetrics.h
@@ -150,6 +150,8 @@ namespace DB
F(type_exec, {{"type", "exec"}}, ExpBuckets{0.0005, 2, 20}), \
F(type_migrate, {{"type", "migrate"}}, ExpBuckets{0.0005, 2, 20}), \
F(type_v3, {{"type", "v3"}}, ExpBuckets{0.0005, 2, 20})) \
M(tiflash_storage_page_write_batch_size, "The size of each write batch in bytes", Histogram, \
F(type_v3, {{"type", "v3"}}, ExpBuckets{4 * 1024, 4, 10})) \
M(tiflash_storage_logical_throughput_bytes, "The logical throughput of read tasks of storage in bytes", Histogram, \
F(type_read, {{"type", "read"}}, EqualWidthBuckets{1 * 1024 * 1024, 60, 50 * 1024 * 1024})) \
M(tiflash_storage_io_limiter, "Storage I/O limiter metrics", Counter, F(type_fg_read_req_bytes, {"type", "fg_read_req_bytes"}), \
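
As a rough illustration of the new histogram, assuming ExpBuckets{start, factor, count} generates count exponentially growing bucket bounds (an assumption about the macro, not spelled out in this diff), the write-batch-size buckets would run from 4 KiB up to about 1 GiB:

    #include <cstdio>

    // Hypothetical reconstruction of the bucket bounds implied by
    // ExpBuckets{4 * 1024, 4, 10}: start at 4 KiB and multiply by 4, ten times.
    int main()
    {
        double bound = 4.0 * 1024;
        for (int i = 0; i < 10; ++i)
        {
            std::printf("bucket %d upper bound: %.0f bytes\n", i, bound);
            bound *= 4;
        }
        return 0;
    }
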
6 changes: 3 additions & 3 deletions dbms/src/Interpreters/Settings.h
@@ -316,9 +316,9 @@ struct Settings
M(SettingUInt64, dt_checksum_frame_size, DBMS_DEFAULT_BUFFER_SIZE, "Frame size for delta tree stable storage") \
\
M(SettingDouble, dt_page_gc_threshold, 0.5, "Max valid rate of deciding to do a GC in PageStorage") \
M(SettingBool, dt_enable_read_thread, false, "Enable storage read thread or not") \
M(SettingDouble, dt_block_slots_scale, 1.0, "Block slots limit of a read request") \
M(SettingDouble, dt_active_segments_scale, 1.0, "Acitve segments limit of a read request") \
M(SettingBool, dt_enable_read_thread, false, "Enable storage read thread or not") \
M(SettingDouble, dt_block_slots_scale, 1.0, "Block slots limit of a read request") \
M(SettingDouble, dt_active_segments_scale, 1.0, "Acitve segments limit of a read request") \
\
M(SettingChecksumAlgorithm, dt_checksum_algorithm, ChecksumAlgo::XXH3, "Checksum algorithm for delta tree stable storage") \
M(SettingCompressionMethod, dt_compression_method, CompressionMethod::LZ4, "The method of data compression when writing.") \
177 changes: 73 additions & 104 deletions dbms/src/Storages/Page/V3/BlobStore.cpp

Large diffs are not rendered by default.

55 changes: 18 additions & 37 deletions dbms/src/Storages/Page/V3/BlobStore.h
@@ -72,11 +72,7 @@ class BlobStore : private Allocator<false>
// Read Only.
// Only after heavy GC, BlobFile will change to READ_ONLY type.
// After GC remove, empty files will be removed.
READ_ONLY = 2,

// Big Blob file
// Only used to page size > config.file_limit_size
BIG_BLOB = 3
READ_ONLY = 2
};
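
Pieced together from the hunk above, only two states survive. A sketch of the resulting enum (the NORMAL value and the enum-class keyword are inferred from context rather than shown in this diff):

    // Sketch of BlobStatType after the removal of BIG_BLOB.
    enum class BlobStatType
    {
        NORMAL = 1,

        // Read Only.
        // Only after heavy GC, BlobFile will change to READ_ONLY type.
        // After GC remove, empty files will be removed.
        READ_ONLY = 2
    };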

static String blobTypeToString(BlobStatType type)
@@ -87,8 +83,6 @@ class BlobStore : private Allocator<false>
return "normal";
case BlobStatType::READ_ONLY:
return "read only";
case BlobStatType::BIG_BLOB:
return "big blob";
}
return "Invalid";
}
@@ -100,31 +94,26 @@ class BlobStore : private Allocator<false>

std::mutex sm_lock;
const SpaceMapPtr smap;
/**
* If no any data inside. It shoule be same as space map `biggest_cap`,
* It is a hint for choosing quickly, should use `recalculateCapacity`
* to update it after some space are free in the spacemap.
*/

// The max capacity hint of all available slots in SpaceMap
// A hint means that it is not an absolutely accurate value after inserting data,
// but is useful for quickly choosing BlobFile.
// Should call `recalculateCapacity` to get an accurate value after removing data.
UInt64 sm_max_caps = 0;
// The current file size of the BlobFile
UInt64 sm_total_size = 0;
// The sum of the size of all valid data in the BlobFile
UInt64 sm_valid_size = 0;
// sm_valid_size / sm_total_size
double sm_valid_rate = 0.0;

public:
BlobStat(BlobFileId id_, SpaceMap::SpaceMapType sm_type, UInt64 sm_max_caps_)
BlobStat(BlobFileId id_, SpaceMap::SpaceMapType sm_type, UInt64 sm_max_caps_, BlobStatType type_)
: id(id_)
, type(BlobStatType::NORMAL)
, type(type_)
, smap(SpaceMap::createSpaceMap(sm_type, 0, sm_max_caps_))
, sm_max_caps(sm_max_caps_)
{
if (sm_type == SpaceMap::SpaceMapType::SMAP64_BIG)
{
type = BlobStatType::BIG_BLOB;
}

// Won't create read-only blob by default.
assert(type != BlobStatType::READ_ONLY);
}
{}
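
With the type now passed in explicitly, the caller decides up front whether a stat is writable. Per the commit message, a stat whose capacity exceeds config.file_limit_size is created read-only instead of getting a BIG_BLOB type. A minimal sketch of that decision with illustrative names (the real logic lives in BlobStore.cpp and is not reproduced here):

    // Hypothetical helper, reusing the BlobStatType sketched earlier: pick the
    // initial stat type from the capacity and the configured file_limit_size.
    inline BlobStatType chooseInitialStatType(UInt64 max_caps, UInt64 file_limit_size)
    {
        return max_caps > file_limit_size ? BlobStatType::READ_ONLY
                                          : BlobStatType::NORMAL;
    }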

[[nodiscard]] std::lock_guard<std::mutex> lock()
{
@@ -146,14 +135,12 @@ class BlobStore : private Allocator<false>
type.store(BlobStatType::READ_ONLY);
}

bool isBigBlob() const
{
return type.load() == BlobStatType::BIG_BLOB;
}

BlobFileOffset getPosFromStat(size_t buf_size, const std::lock_guard<std::mutex> &);

bool removePosFromStat(BlobFileOffset offset, size_t buf_size, const std::lock_guard<std::mutex> &);
/**
* The return value is the valid data size remained in the BlobFile after the remove
*/
size_t removePosFromStat(BlobFileOffset offset, size_t buf_size, const std::lock_guard<std::mutex> &);
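
The changed return value lets the caller learn, in the same locked call, how much valid data is left in the file. A hypothetical caller-side sketch (the cleanup step itself stays in BlobStore.cpp and is only hinted at here):

    // Hypothetical usage: remove one entry and detect an emptied BlobFile
    // without re-scanning the space map.
    void removeEntryAndMaybeReclaim(const BlobStatPtr & stat, BlobFileOffset offset, size_t buf_size)
    {
        auto guard = stat->lock();
        if (stat->removePosFromStat(offset, buf_size, guard) == 0)
        {
            // No valid data left in this BlobFile: it is now a candidate for
            // removal from disk.
        }
    }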

/**
* This method is only used when blobstore restore
@@ -191,13 +178,9 @@ class BlobStore : private Allocator<false>
//
[[nodiscard]] std::lock_guard<std::mutex> lock() const;

BlobStatPtr createStatNotChecking(BlobFileId blob_file_id, const std::lock_guard<std::mutex> &);
BlobStatPtr createStatNotChecking(BlobFileId blob_file_id, UInt64 max_caps, const std::lock_guard<std::mutex> &);

BlobStatPtr createStat(BlobFileId blob_file_id, const std::lock_guard<std::mutex> & guard);

BlobStatPtr createBigPageStatNotChecking(BlobFileId blob_file_id, const std::lock_guard<std::mutex> &);

BlobStatPtr createBigStat(BlobFileId blob_file_id, const std::lock_guard<std::mutex> & guard);
BlobStatPtr createStat(BlobFileId blob_file_id, UInt64 max_caps, const std::lock_guard<std::mutex> & guard);

void eraseStat(const BlobStatPtr && stat, const std::lock_guard<std::mutex> &);

@@ -219,8 +202,6 @@
*/
std::pair<BlobStatPtr, BlobFileId> chooseStat(size_t buf_size, const std::lock_guard<std::mutex> &);

BlobFileId chooseBigStat(const std::lock_guard<std::mutex> &) const;

BlobStatPtr blobIdToStat(BlobFileId file_id, bool ignore_not_exist = false);

using StatsMap = std::map<String, std::list<BlobStatPtr>>;
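
After dropping chooseBigStat and createBigStat, every allocation goes through chooseStat and the capacity-aware createStat above. A hedged, non-compilable fragment of how a caller might combine them (the null-stat convention and the sizing choice are assumptions for illustration, not copied from BlobStore.cpp):

    // Hypothetical allocation path: a null BlobStatPtr from chooseStat is
    // assumed to mean "no existing BlobFile has room", with the returned
    // BlobFileId naming the file to create next.
    auto guard = blob_stats.lock();
    auto [stat, blob_file_id] = blob_stats.chooseStat(buf_size, guard);
    if (stat == nullptr)
    {
        // Illustrative sizing: large enough for the request, and at least the
        // configured file_limit_size.
        stat = blob_stats.createStat(blob_file_id, std::max(buf_size, file_limit_size), guard);
    }
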
9 changes: 0 additions & 9 deletions dbms/src/Storages/Page/V3/spacemap/SpaceMap.cpp
@@ -15,7 +15,6 @@
#include <Core/Types.h>
#include <IO/WriteHelpers.h>
#include <Storages/Page/V3/spacemap/SpaceMap.h>
#include <Storages/Page/V3/spacemap/SpaceMapBig.h>
#include <Storages/Page/V3/spacemap/SpaceMapRBTree.h>
#include <Storages/Page/V3/spacemap/SpaceMapSTDMap.h>
#include <common/likely.h>
@@ -43,9 +42,6 @@ SpaceMapPtr SpaceMap::createSpaceMap(SpaceMapType type, UInt64 start, UInt64 end
case SMAP64_STD_MAP:
smap = STDMapSpaceMap::create(start, end);
break;
case SMAP64_BIG:
smap = BigSpaceMap::create(start, end);
break;
default:
throw Exception(fmt::format("Invalid [type={}] to create spaceMap", static_cast<UInt8>(type)), ErrorCodes::LOGICAL_ERROR);
}
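
With the SMAP64_BIG branch gone, the factory only accepts the RB-tree and std::map flavours. A minimal usage sketch (the numeric range is an arbitrary example):

    // Hypothetical call: build a std::map based space map covering the first
    // 256 MiB of a BlobFile.
    auto smap = SpaceMap::createSpaceMap(SpaceMap::SpaceMapType::SMAP64_STD_MAP,
                                         /*start*/ 0,
                                         /*end*/ 256 * 1024 * 1024);
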
Expand All @@ -60,11 +56,6 @@ SpaceMapPtr SpaceMap::createSpaceMap(SpaceMapType type, UInt64 start, UInt64 end

bool SpaceMap::checkSpace(UInt64 offset, size_t size) const
{
// If we used `SMAP64_BIG`, we won't check the space.
if (type == SMAP64_BIG)
{
return false;
}
return (offset < start) || (offset > end) || (offset + size - 1 > end);
}

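A standalone restatement of the bounds check that remains above, with a couple of worked values (the function name is illustrative; a true result means the requested range falls outside the map):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Same expression as SpaceMap::checkSpace after this change, for a map
    // covering [start, end].
    bool rangeOutOfBounds(uint64_t start, uint64_t end, uint64_t offset, size_t size)
    {
        return (offset < start) || (offset > end) || (offset + size - 1 > end);
    }

    int main()
    {
        assert(!rangeOutOfBounds(0, 64, 60, 4)); // 60..63 fits within [0, 64]
        assert(rangeOutOfBounds(0, 64, 60, 8)); // 60..67 runs past the end
        return 0;
    }
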
3 changes: 0 additions & 3 deletions dbms/src/Storages/Page/V3/spacemap/SpaceMap.h
@@ -39,7 +39,6 @@ class SpaceMap
SMAP64_INVALID = 0,
SMAP64_RBTREE = 1,
SMAP64_STD_MAP = 2,
SMAP64_BIG = 3 // support for writebatch bigger than blobstore.config.file_limit_size
};

/**
@@ -143,8 +142,6 @@ class SpaceMap
return "RB-Tree";
case SMAP64_STD_MAP:
return "STD Map";
case SMAP64_BIG:
return "STD Big";
default:
return "Invalid";
}
151 changes: 0 additions & 151 deletions dbms/src/Storages/Page/V3/spacemap/SpaceMapBig.h

This file was deleted.
