Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support cancel when writing fap snapshots #9415

Merged
merged 58 commits into from
Oct 12, 2024
Merged
Show file tree
Hide file tree
Changes from 23 commits
Commits
Show all changes
58 commits
Select commit Hold shift + click to select a range
4c9873e
a
CalvinNeo Sep 6, 2024
6ce1cf3
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Sep 9, 2024
c3f53a2
f
CalvinNeo Sep 10, 2024
4d036d3
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 10, 2024
93e7dbb
a
CalvinNeo Sep 10, 2024
21e7d84
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Sep 10, 2024
e0fa695
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/FastAddPeerCache.cpp
CalvinNeo Sep 11, 2024
036d841
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/FastAddPeerCache.cpp
CalvinNeo Sep 11, 2024
8a12d16
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/FastAddPeerCache.h
CalvinNeo Sep 11, 2024
0e015b3
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/FastAddPeerCache.cpp
CalvinNeo Sep 11, 2024
5120d57
a
CalvinNeo Sep 11, 2024
de48102
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 11, 2024
2f27cb9
addr
CalvinNeo Sep 11, 2024
ea82123
a
CalvinNeo Sep 11, 2024
0cf4235
support cancel when building
CalvinNeo Sep 11, 2024
2792a48
fmt
CalvinNeo Sep 11, 2024
392bedf
a
CalvinNeo Sep 12, 2024
779cb79
a
CalvinNeo Sep 12, 2024
7bbca76
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Sep 13, 2024
824c3d8
a
CalvinNeo Sep 14, 2024
199013e
address cmt
CalvinNeo Sep 14, 2024
33aa19e
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 14, 2024
cd95445
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Sep 14, 2024
866910d
Update dbms/src/Storages/DeltaMerge/DeltaMergeStore.h
CalvinNeo Sep 18, 2024
945584d
Update dbms/src/Storages/DeltaMerge/DeltaMergeStore_Ingest.cpp
CalvinNeo Sep 18, 2024
8ccfc30
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 18, 2024
deb6b1d
Update dbms/src/Storages/DeltaMerge/DeltaMergeStore.h
CalvinNeo Sep 18, 2024
2cd8986
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 18, 2024
736d57e
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 18, 2024
509fd6e
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/FastAddPeer.cpp
CalvinNeo Sep 18, 2024
e379e28
add
CalvinNeo Sep 18, 2024
de011da
fix sig
CalvinNeo Sep 19, 2024
7c416ac
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 19, 2024
17c1369
reject too many raft log
CalvinNeo Sep 20, 2024
9739723
change all log
CalvinNeo Sep 20, 2024
82e1284
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Sep 20, 2024
9e40d9d
fir log format
CalvinNeo Sep 20, 2024
327340c
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 20, 2024
1bc1b9b
Merge remote-tracking branch 'upstream/master' into fix-lock-segment-…
CalvinNeo Sep 20, 2024
9b457d4
Update dbms/src/Storages/KVStore/MultiRaft/Disagg/CheckpointIngestInf…
CalvinNeo Sep 20, 2024
fce5d52
clear codes createTargetSegmentsFromCheckpoint
CalvinNeo Sep 20, 2024
e259a0b
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 20, 2024
d1f22bd
fix lock contention
CalvinNeo Sep 20, 2024
ced81e4
a
CalvinNeo Sep 23, 2024
9a1588e
make it run
CalvinNeo Sep 23, 2024
17403c8
rewrite readAllSegmentsMetaInfoInRange
CalvinNeo Sep 23, 2024
258aec3
trace
CalvinNeo Sep 23, 2024
bed9de6
first
CalvinNeo Sep 24, 2024
9af68be
Revert "first"
CalvinNeo Sep 24, 2024
8a12b2e
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 25, 2024
9c38da8
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 25, 2024
f23fb31
remove
CalvinNeo Sep 25, 2024
533e6df
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 25, 2024
e7ae2de
Update dbms/src/Storages/DeltaMerge/Segment.cpp
CalvinNeo Sep 25, 2024
a9dfbd6
Merge branch 'fix-lock-segment-cache' of github.com:CalvinNeo/tiflash…
CalvinNeo Sep 26, 2024
a19faf4
Merge branch 'master' into fix-lock-segment-cache
JaySon-Huang Oct 9, 2024
6fa4b03
Merge branch 'master' into fix-lock-segment-cache
CalvinNeo Oct 12, 2024
f16aae5
Apply suggestions from code review
JaySon-Huang Oct 12, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions dbms/src/Common/FailPoint.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -142,6 +142,7 @@ namespace DB
M(pause_before_prehandle_subtask) \
M(pause_when_persist_region) \
M(pause_before_wn_establish_task) \
M(pause_when_building_fap_segments) \
M(pause_passive_flush_before_persist_region)

#define APPLY_FOR_RANDOM_FAILPOINTS(M) \
Expand Down
2 changes: 2 additions & 0 deletions dbms/src/Common/ProfileEvents.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,8 @@
M(S3CopyObject) \
M(S3GetObjectRetry) \
M(S3PutObjectRetry) \
M(S3IORead) \
M(S3IOSeek) \
M(FileCacheHit) \
M(FileCacheMiss) \
M(FileCacheEvict) \
Expand Down
1 change: 1 addition & 0 deletions dbms/src/Common/TiFlashMetrics.h
Original file line number Diff line number Diff line change
Expand Up @@ -432,6 +432,7 @@ static_assert(RAFT_REGION_BIG_WRITE_THRES * 4 < RAFT_REGION_BIG_WRITE_MAX, "Inva
F(type_ingest_stage, {{"type", "ingest_stage"}}, ExpBucketsWithRange{0.2, 2, 30}), \
F(type_total, {{"type", "total"}}, ExpBucketsWithRange{0.2, 4, 300}), \
F(type_queue_stage, {{"type", "queue_stage"}}, ExpBucketsWithRange{0.2, 4, 300}), \
F(type_write_stage_read_segment, {{"type", "write_stage_read_segment"}}, ExpBucketsWithRange{0.2, 4, 120}), \
F(type_phase1_total, {{"type", "phase1_total"}}, ExpBucketsWithRange{0.2, 4, 300})) \
M(tiflash_raft_command_throughput, \
"", \
Expand Down
6 changes: 4 additions & 2 deletions dbms/src/Storages/DeltaMerge/DeltaMergeStore.h
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@

namespace DB
{

struct GeneralCancelHandle;
struct Settings;

class Logger;
Expand Down Expand Up @@ -388,17 +388,19 @@ class DeltaMergeStore : private boost::noncopyable

Segments buildSegmentsFromCheckpointInfo(
const DMContextPtr & dm_context,
std::shared_ptr<GeneralCancelHandle> cancel_handle,
CalvinNeo marked this conversation as resolved.
Show resolved Hide resolved
const DM::RowKeyRange & range,
const CheckpointInfoPtr & checkpoint_info) const;

Segments buildSegmentsFromCheckpointInfo(
const Context & db_context,
std::shared_ptr<GeneralCancelHandle> cancel_handle,
CalvinNeo marked this conversation as resolved.
Show resolved Hide resolved
const DB::Settings & db_settings,
const DM::RowKeyRange & range,
const CheckpointInfoPtr & checkpoint_info)
{
auto dm_context = newDMContext(db_context, db_settings);
return buildSegmentsFromCheckpointInfo(dm_context, range, checkpoint_info);
return buildSegmentsFromCheckpointInfo(dm_context, cancel_handle, range, checkpoint_info);
}

UInt64 ingestSegmentsFromCheckpointInfo(
Expand Down
15 changes: 12 additions & 3 deletions dbms/src/Storages/DeltaMerge/DeltaMergeStore_Ingest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1133,6 +1133,7 @@ bool DeltaMergeStore::ingestSegmentDataIntoSegmentUsingSplit(

Segments DeltaMergeStore::buildSegmentsFromCheckpointInfo(
const DMContextPtr & dm_context,
std::shared_ptr<GeneralCancelHandle> cancel_handle,
CalvinNeo marked this conversation as resolved.
Show resolved Hide resolved
const DM::RowKeyRange & range,
const CheckpointInfoPtr & checkpoint_info) const
{
Expand All @@ -1142,15 +1143,23 @@ Segments DeltaMergeStore::buildSegmentsFromCheckpointInfo(
}
LOG_INFO(
log,
"Build checkpoint from remote, store_id={} region_id={}",
"Build checkpoint from remote, store_id={} region_id={} range={}",
checkpoint_info->remote_store_id,
checkpoint_info->region_id);
checkpoint_info->region_id,
range.toDebugString());
WriteBatches wbs{*dm_context->storage_pool};
try
{
auto segment_meta_infos = Segment::readAllSegmentsMetaInfoInRange(*dm_context, range, checkpoint_info);
auto segment_meta_infos
= Segment::readAllSegmentsMetaInfoInRange(*dm_context, cancel_handle, range, checkpoint_info);
LOG_INFO(
log,
"Finish read all segments meta info in range, region_id={} segments_num={}",
checkpoint_info->region_id,
segment_meta_infos.size());
auto restored_segments = Segment::createTargetSegmentsFromCheckpoint( //
log,
checkpoint_info->region_id,
*dm_context,
checkpoint_info->remote_store_id,
segment_meta_infos,
Expand Down
189 changes: 137 additions & 52 deletions dbms/src/Storages/DeltaMerge/Segment.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@
#include <Storages/KVStore/KVStore.h>
#include <Storages/KVStore/MultiRaft/Disagg/FastAddPeerCache.h>
#include <Storages/KVStore/TMTContext.h>
#include <Storages/KVStore/Utils/AsyncTasks.h>
#include <Storages/Page/V3/PageEntryCheckpointInfo.h>
#include <Storages/Page/V3/Universal/UniversalPageIdFormatImpl.h>
#include <Storages/Page/V3/Universal/UniversalPageStorage.h>
Expand Down Expand Up @@ -121,7 +122,10 @@ namespace ErrorCodes
extern const int LOGICAL_ERROR;
extern const int UNKNOWN_FORMAT_VERSION;
} // namespace ErrorCodes

namespace FailPoints
{
extern const char pause_when_building_fap_segments[];
} // namespace FailPoints
namespace DM
{
String SegmentSnapshot::detailInfo() const
Expand Down Expand Up @@ -432,79 +436,154 @@ SegmentPtr Segment::restoreSegment( //

Segment::SegmentMetaInfos Segment::readAllSegmentsMetaInfoInRange( //
DMContext & context,
std::shared_ptr<GeneralCancelHandle> cancel_handle,
CalvinNeo marked this conversation as resolved.
Show resolved Hide resolved
const RowKeyRange & target_range,
const CheckpointInfoPtr & checkpoint_info)
{
auto fap_context = context.global_context.getSharedContextDisagg()->fap_context;

auto log = DB::Logger::get();
Stopwatch sw;
SCOPE_EXIT(
{ GET_METRIC(tiflash_fap_task_duration_seconds, type_write_stage_read_segment).Observe(sw.elapsedSeconds()); });
static constexpr UInt64 WAIT_TIME_THRESHOLD = 20;
JinheLin marked this conversation as resolved.
Show resolved Hide resolved
// We have a cache that records all segments which map to a certain table identified by (keyspace_id, physical_table_id).
// We can thus avoid reading from the very beginning for every different regions in this table.
// If cache is empty, we read from DELTA_MERGE_FIRST_SEGMENT_ID to the end and build the cache.
// Otherwise, we just read the segment that cover the range.
PageIdU64 current_segment_id = DELTA_MERGE_FIRST_SEGMENT_ID;
auto end_to_segment_id_cache = checkpoint_info->checkpoint_data_holder->getEndToSegmentIdCache(
KeyspaceTableID{context.keyspace_id, context.physical_table_id});
auto lock = end_to_segment_id_cache->lock();
bool is_cache_ready = end_to_segment_id_cache->isReady(lock);
if (is_cache_ready)
{
current_segment_id
= end_to_segment_id_cache->getSegmentIdContainingKey(lock, target_range.getStart().toRowKeyValue());
bool is_cache_ready = false;
// If there is a table building cache, then other table may block to read the built cache.
// If the remote reader causes much time to retrieve data, then these tasks could block here.
// However, when the exclusive holder is canceled due to timeout, the readers could eventually get the lock.
FAIL_POINT_PAUSE(FailPoints::pause_when_building_fap_segments);
{
auto sec = sw.elapsedSecondsFromLastTime();
// Lock acquires when:
// 1. No writer.
// 2. The writer finishes.
// 3. The writer is canceled.
auto lock = end_to_segment_id_cache->readLock();
is_cache_ready = end_to_segment_id_cache->isReady(lock);
auto el = sw.elapsedSecondsFromLastTime() - sec;
JaySon-Huang marked this conversation as resolved.
Show resolved Hide resolved
if (el > WAIT_TIME_THRESHOLD)
JinheLin marked this conversation as resolved.
Show resolved Hide resolved
{
LOG_INFO(
log,
"Wait building segmend id cache for {:.3f}s, current_segment_id={}, region_id={}",
el,
current_segment_id,
JinheLin marked this conversation as resolved.
Show resolved Hide resolved
checkpoint_info->region_id);
}
}
LOG_DEBUG(Logger::get(), "Read segment meta info from segment {}", current_segment_id);
std::vector<std::pair<DM::RowKeyValue, UInt64>> end_key_and_segment_ids;
SegmentMetaInfos segment_infos;
while (current_segment_id != 0)
{
Segment::SegmentMetaInfo segment_info;
auto target_id = UniversalPageIdFormat::toFullPageId(
UniversalPageIdFormat::toFullPrefix(context.keyspace_id, StorageType::Meta, context.physical_table_id),
current_segment_id);
auto page = checkpoint_info->temp_ps->read(target_id, nullptr, {}, false);
if unlikely (!page.isValid())
using GenericLock = std::variant<std::shared_lock<std::shared_mutex>, std::unique_lock<std::shared_mutex>>;
GenericLock lock;
if (is_cache_ready)
{
// After #7642, DELTA_MERGE_FIRST_SEGMENT_ID may not exist, however, such checkpoint won't be selected.
// If it were to be selected, the FAP task could fallback to regular snapshot.
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Can't find page id {}, keyspace={} table_id={} current_segment_id={} range={}",
target_id,
context.keyspace_id,
context.physical_table_id,
current_segment_id,
target_range.toDebugString());
// If the cache is ready, requires a read lock.
lock = end_to_segment_id_cache->readLock();
CalvinNeo marked this conversation as resolved.
Show resolved Hide resolved
current_segment_id = end_to_segment_id_cache->getSegmentIdContainingKey(
std::get<std::shared_lock<std::shared_mutex>>(lock),
target_range.getStart().toRowKeyValue());
}
segment_info.segment_id = current_segment_id;
ReadBufferFromMemory buf(page.data.begin(), page.data.size());
readSegmentMetaInfo(buf, segment_info);
if (!is_cache_ready)
else
{
end_key_and_segment_ids.emplace_back(segment_info.range.getEnd().toRowKeyValue(), segment_info.segment_id);
// Otherwise, requires a write lock to build cache.
lock = end_to_segment_id_cache->writeLock();
}
current_segment_id = segment_info.next_segment_id;
if (!(segment_info.range.shrink(target_range).none()))
LOG_DEBUG(
log,
"Read segment meta info from segment {}, region_id={}",
current_segment_id,
checkpoint_info->region_id);
std::vector<std::pair<DM::RowKeyValue, UInt64>> end_key_and_segment_ids;
SegmentMetaInfos segment_infos;
while (current_segment_id != 0)
{
segment_infos.emplace_back(segment_info);
if (cancel_handle->isCanceled())
{
LOG_INFO(
log,
"FAP is canceled when building segments, region_id={} keyspace={} table_id={}",
JinheLin marked this conversation as resolved.
Show resolved Hide resolved
checkpoint_info->region_id,
context.keyspace_id,
context.physical_table_id);
// FAP task would be cleaned in FastAddPeerImplWrite. So returning an incomplete result could be OK.
end_key_and_segment_ids.clear();
JinheLin marked this conversation as resolved.
Show resolved Hide resolved
return segment_infos;
}
Segment::SegmentMetaInfo segment_info;
auto target_id = UniversalPageIdFormat::toFullPageId(
UniversalPageIdFormat::toFullPrefix(context.keyspace_id, StorageType::Meta, context.physical_table_id),
current_segment_id);
auto page = checkpoint_info->temp_ps->read(target_id, nullptr, {}, false);
if unlikely (!page.isValid())
{
// After #7642, DELTA_MERGE_FIRST_SEGMENT_ID may not exist, however, such checkpoint won't be selected.
// If it were to be selected, the FAP task could fallback to regular snapshot.
throw Exception(
ErrorCodes::LOGICAL_ERROR,
"Can't find page id {}, keyspace={} table_id={} current_segment_id={} range={} region_id={}",
target_id,
context.keyspace_id,
context.physical_table_id,
current_segment_id,
target_range.toDebugString(),
checkpoint_info->region_id);
}
segment_info.segment_id = current_segment_id;
ReadBufferFromMemory buf(page.data.begin(), page.data.size());
readSegmentMetaInfo(buf, segment_info);
if (!is_cache_ready)
{
end_key_and_segment_ids.emplace_back(
segment_info.range.getEnd().toRowKeyValue(),
segment_info.segment_id);
}
current_segment_id = segment_info.next_segment_id;
if (!(segment_info.range.shrink(target_range).none()))
{
segment_infos.emplace_back(segment_info);
}
// if not build cache, stop as early as possible.
if (is_cache_ready && segment_info.range.end.value->compare(*target_range.end.value) >= 0)
{
break;
}
}
// if not build cache, stop as early as possible.
if (is_cache_ready && segment_info.range.end.value->compare(*target_range.end.value) >= 0)
if (!is_cache_ready)
{
break;
LOG_DEBUG(
log,
"Build cache for keyspace {} table {} with {} segments, region_id={}",
context.keyspace_id,
context.physical_table_id,
end_key_and_segment_ids.size(),
checkpoint_info->region_id);
end_to_segment_id_cache->build(
std::get<std::unique_lock<std::shared_mutex>>(lock),
std::move(end_key_and_segment_ids));
}
return segment_infos;
}
if (!is_cache_ready)

if (cancel_handle->isCanceled())
{
LOG_DEBUG(
Logger::get(),
"Build cache for keyspace {} table {} with {} segments",
LOG_INFO(
log,
"FAP is canceled when building segments, region_id={} keyspace={} table_id={}",
checkpoint_info->region_id,
context.keyspace_id,
context.physical_table_id,
end_key_and_segment_ids.size());
end_to_segment_id_cache->build(lock, std::move(end_key_and_segment_ids));
context.physical_table_id);
// FAP task would be cleaned in FastAddPeerImplWrite. So returning an incomplete result could be OK.
return {};
}
return segment_infos;
}

Segments Segment::createTargetSegmentsFromCheckpoint( //
const LoggerPtr & parent_log,
UInt64 region_id,
DMContext & context,
StoreID remote_store_id,
const SegmentMetaInfos & meta_infos,
Expand All @@ -518,12 +597,15 @@ Segments Segment::createTargetSegmentsFromCheckpoint( //
{
LOG_DEBUG(
parent_log,
"Create segment begin. Delta id {} stable id {} range {} epoch {} next_segment_id {}",
"Create segment begin. Delta id {} stable id {} range {} epoch {} next_segment_id {}, remote_store_id={}, "
CalvinNeo marked this conversation as resolved.
Show resolved Hide resolved
"region_id={}",
segment_info.delta_id,
segment_info.stable_id,
segment_info.range.toDebugString(),
segment_info.epoch,
segment_info.next_segment_id);
segment_info.next_segment_id,
remote_store_id,
region_id);
auto stable = StableValueSpace::createFromCheckpoint(parent_log, context, temp_ps, segment_info.stable_id, wbs);
auto delta = DeltaValueSpace::createFromCheckpoint(
parent_log,
Expand All @@ -543,12 +625,15 @@ Segments Segment::createTargetSegmentsFromCheckpoint( //
segments.push_back(segment);
LOG_DEBUG(
parent_log,
"Create segment end. Delta id {} stable id {} range {} epoch {} next_segment_id {}",
"Create segment end. Delta id {} stable id {} range {} epoch {} next_segment_id {}, remote_store_id={}, "
CalvinNeo marked this conversation as resolved.
Show resolved Hide resolved
"region_id={}",
segment_info.delta_id,
segment_info.stable_id,
segment_info.range.toDebugString(),
segment_info.epoch,
segment_info.next_segment_id);
segment_info.next_segment_id,
remote_store_id,
region_id);
}
return segments;
}
Expand Down
12 changes: 9 additions & 3 deletions dbms/src/Storages/DeltaMerge/Segment.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,10 @@
#include <Storages/KVStore/MultiRaft/Disagg/fast_add_peer.pb.h>
#include <Storages/Page/PageDefinesBase.h>

namespace DB::DM
namespace DB
{
struct GeneralCancelHandle;
namespace DM
{
struct SegmentSnapshot;
using SegmentSnapshotPtr = std::shared_ptr<SegmentSnapshot>;
Expand Down Expand Up @@ -174,13 +177,15 @@ class Segment
using SegmentMetaInfos = std::vector<SegmentMetaInfo>;
static SegmentMetaInfos readAllSegmentsMetaInfoInRange( //
DMContext & context,
std::shared_ptr<GeneralCancelHandle> cancel_handle,
const RowKeyRange & target_range,
const CheckpointInfoPtr & checkpoint_info);

// Create a list of temp segments from checkpoint.
// The data of these temp segments will be included in `wbs`.
static Segments createTargetSegmentsFromCheckpoint( //
const LoggerPtr & parent_log,
UInt64 region_id,
DMContext & context,
StoreID remote_store_id,
const SegmentMetaInfos & meta_infos,
Expand Down Expand Up @@ -527,7 +532,7 @@ class Segment

PageIdU64 segmentId() const { return segment_id; }
PageIdU64 nextSegmentId() const { return next_segment_id; }
UInt64 segmentEpoch() const { return epoch; };
UInt64 segmentEpoch() const { return epoch; }

void check(DMContext & dm_context, const String & when) const;

Expand Down Expand Up @@ -767,4 +772,5 @@ class Segment
};

void readSegmentMetaInfo(ReadBuffer & buf, Segment::SegmentMetaInfo & segment_info);
} // namespace DB::DM
} // namespace DM
} // namespace DB
Loading