Skip to content

Commit

Permalink
add metric (WIP)
Browse files Browse the repository at this point in the history
  • Loading branch information
lidezhu committed Aug 23, 2022
1 parent af71d61 commit 79d951b
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 7 deletions.
2 changes: 2 additions & 0 deletions dbms/src/Common/TiFlashMetrics.h
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,8 @@ namespace DB
F(type_exec, {{"type", "exec"}}, ExpBuckets{0.0005, 2, 20}), \
F(type_migrate, {{"type", "migrate"}}, ExpBuckets{0.0005, 2, 20}), \
F(type_v3, {{"type", "v3"}}, ExpBuckets{0.0005, 2, 20})) \
M(tiflash_storage_page_write_batch_size, "The size of each write batch in bytes", Histogram, \
F(type_v3, {{"type", "v3"}}, EqualWidthBuckets{1 * 1024 * 1024, 100, 10 * 1024 * 1024})) \
M(tiflash_storage_logical_throughput_bytes, "The logical throughput of read tasks of storage in bytes", Histogram, \
F(type_read, {{"type", "read"}}, EqualWidthBuckets{1 * 1024 * 1024, 60, 50 * 1024 * 1024})) \
M(tiflash_storage_io_limiter, "Storage I/O limiter metrics", Counter, F(type_fg_read_req_bytes, {"type", "fg_read_req_bytes"}), \
Expand Down
16 changes: 9 additions & 7 deletions dbms/src/Storages/Page/V3/BlobStore.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -240,13 +240,6 @@ PageEntriesEdit BlobStore::write(DB::WriteBatch & wb, const WriteLimiterPtr & wr

const size_t all_page_data_size = wb.getTotalDataSize();

// If the WriteBatch is too big, we will split the Writes in the WriteBatch to different `BlobFile`.
// This can avoid allocating a big buffer for writing data and can smooth memory usage.
if (all_page_data_size > config.file_limit_size)
{
return handleLargeWrite(wb, write_limiter);
}

PageEntriesEdit edit;

auto ns_id = wb.getNamespaceId();
Expand Down Expand Up @@ -282,6 +275,15 @@ PageEntriesEdit BlobStore::write(DB::WriteBatch & wb, const WriteLimiterPtr & wr
return edit;
}

GET_METRIC(tiflash_storage_page_write_batch_size).Observe(all_page_data_size);

// If the WriteBatch is too big, we will split the Writes in the WriteBatch to different `BlobFile`.
// This can avoid allocating a big buffer for writing data and can smooth memory usage.
if (all_page_data_size > config.file_limit_size)
{
return handleLargeWrite(wb, write_limiter);
}

char * buffer = static_cast<char *>(alloc(all_page_data_size));
SCOPE_EXIT({
free(buffer, all_page_data_size);
Expand Down

0 comments on commit 79d951b

Please sign in to comment.