Skip to content

Commit

Permalink
SimpleCache: fix unwanted re-entry when size update...
Browse files Browse the repository at this point in the history
...triggers entry doom and thus ends up running the operation queue from the wrong spot in the creation (and entry-op) completion handler. This can, among other things, result in this madness:

#6 disk_cache::SimpleEntryImpl::CloseInternal()
#7 disk_cache::SimpleEntryImpl::RunNextOperationIfNeeded()
#8 SimpleEntryImpl::ScopedOperationRunner::~ScopedOperationRunner()
#9 disk_cache::SimpleEntryImpl::WriteDataInternal()
#10 disk_cache::SimpleEntryImpl::RunNextOperationIfNeeded()
#11 SimpleEntryImpl::ScopedOperationRunner::~ScopedOperationRunner()
#12 disk_cache::SimpleEntryImpl::WriteDataInternal()
#13 disk_cache::SimpleEntryImpl::RunNextOperationIfNeeded()
#14 disk_cache::SimpleEntryImpl::DoomEntry()
#15 disk_cache::SimpleBackendImpl::DoomEntryFromHash()
#16 disk_cache::SimpleBackendImpl::DoomEntries()
#17 disk_cache::SimpleIndex::StartEvictionIfNeeded()
#18 disk_cache::SimpleIndex::UpdateEntrySize()
#19 disk_cache::SimpleEntryImpl::UpdateDataFromEntryStat()
#20 disk_cache::SimpleEntryImpl::CreationOperationComplete()
(namespace elided twice to avoid wrapping).

... which means we end up at the `in_results = nullptr` line near the bottom of CreationOperationComplete with a null `synchronous_entry_`(!) (and a dangling `in_results->sync_entry`, where one would expect the two to be aliases). I *think* we won't actually deliver a callback from this state, since we likely needed to be on the optimistic path to get this far, but I am not certain.

Similarly, when this sort of thing happens from within read/write ops, it could potentially cause callbacks to be delivered in wrong order if the queued op ends up being a stream 0 operation, which can be executed without a round trip to a worker thread.

Change-Id: Iac8058f0d18225677e361c6cdddf92d28fb4833f
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/5054619
Reviewed-by: Adam Rice <[email protected]>
Reviewed-by: Kenichi Ishibashi <[email protected]>
Commit-Queue: Maks Orlovich <[email protected]>
Cr-Commit-Position: refs/heads/main@{#1248862}
  • Loading branch information
Maks Orlovich authored and Chromium LUCI CQ committed Jan 18, 2024
1 parent 5edd4ed commit 4a68fc8
Show file tree
Hide file tree
Showing 6 changed files with 192 additions and 43 deletions.
126 changes: 126 additions & 0 deletions net/disk_cache/backend_unittest.cc
Original file line number Diff line number Diff line change
Expand Up @@ -5651,3 +5651,129 @@ TEST_F(DiskCacheBackendTest, BlockFileImmediateCloseNoDangle) {
EXPECT_EQ(result.net_error(), net::ERR_IO_PENDING);
run_loop.Run();
}

// Regression test: a write that triggers a doom must not cause completion
// callbacks to be delivered out of order via re-entrant execution of the
// operation queue.
TEST_F(DiskCacheBackendTest, SimpleWriteOrderEviction) {
  SetSimpleCacheMode();
  SetMaxSize(4096);
  InitCache();

  // Writing 1 + 2 + ... + kMaxSize bytes to each of two streams comfortably
  // exceeds the 4096-byte cache limit: (1 + 80) * 80 / 2 * 2 = 6480, so
  // eviction is guaranteed to trigger.
  constexpr int kMaxSize = 80;

  scoped_refptr<net::IOBufferWithSize> buffer =
      CacheTestCreateAndFillBuffer(kMaxSize, /*no_nulls=*/false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_TRUE(entry);

  // Callbacks must strictly alternate: stream 1 first, then stream 0, for
  // each successive write length.
  bool awaiting_stream_1_callback = true;
  int pending_write_len = 1;
  int write_offset = 0;
  base::RunLoop run_loop;
  for (int len = 1; len <= kMaxSize; ++len) {
    entry->WriteData(/*index=*/1, /*offset = */ write_offset, buffer.get(),
                     /*buf_len=*/len,
                     base::BindLambdaForTesting([&](int result) {
                       EXPECT_TRUE(awaiting_stream_1_callback);
                       EXPECT_EQ(result, pending_write_len);
                       awaiting_stream_1_callback = false;
                     }),
                     /*truncate=*/true);
    // Stream 0 writes matter here because, unlike stream 1 ones,
    // WriteDataInternal can succeed and queue the response callback
    // immediately.
    entry->WriteData(/*index=*/0, /*offset = */ write_offset, buffer.get(),
                     /*buf_len=*/len,
                     base::BindLambdaForTesting([&](int result) {
                       EXPECT_FALSE(awaiting_stream_1_callback);
                       EXPECT_EQ(result, pending_write_len);
                       awaiting_stream_1_callback = true;
                       ++pending_write_len;
                       if (pending_write_len > kMaxSize) {
                         run_loop.Quit();
                       }
                     }),
                     /*truncate=*/true);
    write_offset += len;
  }

  entry->Close();
  run_loop.Run();
}

// Regression test: a write that triggers a doom must not cause completion
// callbacks to be delivered out of order via re-entrant operation execution.
// Variant restricted to stream 0 operations.
TEST_F(DiskCacheBackendTest, SimpleWriteOrderEvictionStream0) {
  SetSimpleCacheMode();
  SetMaxSize(4096);
  InitCache();

  // Total bytes written are 1 + 2 + ... + 120 = (1 + 120) * 120 / 2 = 7260,
  // exceeding the 4096-byte cache limit, so eviction is guaranteed.
  constexpr int kMaxSize = 120;

  scoped_refptr<net::IOBufferWithSize> buffer =
      CacheTestCreateAndFillBuffer(kMaxSize, /*no_nulls=*/false);

  disk_cache::Entry* entry = nullptr;
  ASSERT_THAT(CreateEntry("key", &entry), IsOk());
  ASSERT_TRUE(entry);

  int pending_write_len = 1;
  int write_offset = 0;
  base::RunLoop run_loop;
  for (int len = 1; len <= kMaxSize; ++len) {
    // Stream 0 is used here because, unlike with stream 1, WriteDataInternal
    // can succeed and queue the response callback immediately.
    entry->WriteData(/*index=*/0, /*offset = */ write_offset, buffer.get(),
                     /*buf_len=*/len,
                     base::BindLambdaForTesting([&](int result) {
                       EXPECT_EQ(result, pending_write_len);
                       ++pending_write_len;
                       if (pending_write_len > kMaxSize) {
                         run_loop.Quit();
                       }
                     }),
                     /*truncate=*/true);
    write_offset += len;
  }

  entry->Close();
  run_loop.Run();
}

// Ensure that when entry creation triggers eviction, a Close that was queued
// up behind it (possible with optimistic operations) does not run re-entrantly
// from inside the creation completion handler. The failure mode is indirectly
// detected as a dangling pointer.
TEST_F(DiskCacheBackendTest, SimpleNoCloseFromWithinCreate) {
  SetSimpleCacheMode();
  SetMaxSize(4096);
  InitCache();

  // Each entry alone exceeds the cache size, forcing eviction on creation.
  constexpr int kDataSize = 4097;

  scoped_refptr<net::IOBufferWithSize> buffer =
      CacheTestCreateAndFillBuffer(kDataSize, /*no_nulls=*/false);

  for (int i = 0; i < 100; ++i) {
    std::string key = base::NumberToString(i);
    EntryResult entry_result =
        cache_->CreateEntry(key, net::HIGHEST, base::DoNothing());
    ASSERT_EQ(entry_result.net_error(), net::OK);
    disk_cache::Entry* entry = entry_result.ReleaseEntry();
    // Write to stream 0 to avoid needing worker-thread round-trips for the
    // write to take effect if SimpleEntryImpl runs it.
    entry->WriteData(/*index=*/0, /*offset = */ 0, buffer.get(),
                     /*buf_len=*/kDataSize,
                     base::BindLambdaForTesting(
                         [&](int result) { EXPECT_EQ(kDataSize, result); }),
                     /*truncate=*/true);
    entry->Close();
  }
  RunUntilIdle();
}
8 changes: 8 additions & 0 deletions net/disk_cache/disk_cache_test_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,14 @@ void CacheTestFillBuffer(char* buffer, size_t len, bool no_nulls) {
buffer[0] = 'g';
}

scoped_refptr<net::IOBufferWithSize> CacheTestCreateAndFillBuffer(
    size_t len,
    bool no_nulls) {
  // Allocate the buffer, then delegate the random fill (optionally
  // null-free) to CacheTestFillBuffer.
  scoped_refptr<net::IOBufferWithSize> buffer =
      base::MakeRefCounted<net::IOBufferWithSize>(len);
  CacheTestFillBuffer(buffer->data(), len, no_nulls);
  return buffer;
}

bool CreateCacheTestFile(const base::FilePath& name) {
int flags = base::File::FLAG_CREATE_ALWAYS | base::File::FLAG_READ |
base::File::FLAG_WRITE;
Expand Down
11 changes: 11 additions & 0 deletions net/disk_cache/disk_cache_test_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,17 @@

#include "base/files/file_path.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/scoped_refptr.h"
#include "base/run_loop.h"
#include "base/timer/timer.h"
#include "build/build_config.h"
#include "net/base/test_completion_callback.h"
#include "net/disk_cache/disk_cache.h"

namespace net {
class IOBufferWithSize;
} // namespace net

// Re-creates a given test file inside the cache test folder.
bool CreateCacheTestFile(const base::FilePath& name);

Expand All @@ -27,6 +32,12 @@ bool DeleteCache(const base::FilePath& path);
// Fills buffer with random values (may contain nulls unless no_nulls is true).
void CacheTestFillBuffer(char* buffer, size_t len, bool no_nulls);

// Creates a buffer of size `len`, and fills in with random values, which
// may contain 0 unless `no_nulls` is true.
scoped_refptr<net::IOBufferWithSize> CacheTestCreateAndFillBuffer(
size_t len,
bool no_nulls);

// Generates a random key of up to 200 bytes.
std::string GenerateKey(bool same_length);

Expand Down
56 changes: 34 additions & 22 deletions net/disk_cache/simple/simple_entry_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -455,8 +455,12 @@ int SimpleEntryImpl::WriteData(int stream_index,
// Stream 0 data is kept in memory, so can be written immediately if there are
// no IO operations pending.
if (stream_index == 0 && state_ == STATE_READY &&
pending_operations_.size() == 0)
return SetStream0Data(buf, offset, buf_len, truncate);
pending_operations_.size() == 0) {
state_ = STATE_IO_PENDING;
SetStream0Data(buf, offset, buf_len, truncate);
state_ = STATE_READY;
return buf_len;
}

// We can only do optimistic Write if there is no pending operations, so
// that we are sure that the next call to RunNextOperationIfNeeded will
Expand Down Expand Up @@ -1012,16 +1016,20 @@ int SimpleEntryImpl::ReadDataInternal(bool sync_possible,

// Since stream 0 data is kept in memory, it is read immediately.
if (stream_index == 0) {
int rv = ReadFromBuffer(stream_0_data_.get(), offset, buf_len, buf);
return PostToCallbackIfNeeded(sync_possible, std::move(callback), rv);
state_ = STATE_IO_PENDING;
ReadFromBuffer(stream_0_data_.get(), offset, buf_len, buf);
state_ = STATE_READY;
return PostToCallbackIfNeeded(sync_possible, std::move(callback), buf_len);
}

// Sometimes we can read in-ram prefetched stream 1 data immediately, too.
if (stream_index == 1) {
if (stream_1_prefetch_data_) {
int rv =
ReadFromBuffer(stream_1_prefetch_data_.get(), offset, buf_len, buf);
return PostToCallbackIfNeeded(sync_possible, std::move(callback), rv);
state_ = STATE_IO_PENDING;
ReadFromBuffer(stream_1_prefetch_data_.get(), offset, buf_len, buf);
state_ = STATE_READY;
return PostToCallbackIfNeeded(sync_possible, std::move(callback),
buf_len);
}
}

Expand Down Expand Up @@ -1090,10 +1098,12 @@ void SimpleEntryImpl::WriteDataInternal(int stream_index,

// Since stream 0 data is kept in memory, it will be written immediately.
if (stream_index == 0) {
int ret_value = SetStream0Data(buf, offset, buf_len, truncate);
state_ = STATE_IO_PENDING;
SetStream0Data(buf, offset, buf_len, truncate);
state_ = STATE_READY;
if (!callback.is_null()) {
base::SequencedTaskRunner::GetCurrentDefault()->PostTask(
FROM_HERE, base::BindOnce(std::move(callback), ret_value));
FROM_HERE, base::BindOnce(std::move(callback), buf_len));
}
return;
}
Expand Down Expand Up @@ -1407,7 +1417,6 @@ void SimpleEntryImpl::CreationOperationComplete(
if (backend_ && doom_state_ == DOOM_NONE)
backend_->index()->Insert(entry_hash_);

state_ = STATE_READY;
synchronous_entry_ = in_results->sync_entry;

// Copy over any pre-fetched data and its CRCs.
Expand Down Expand Up @@ -1459,6 +1468,8 @@ void SimpleEntryImpl::CreationOperationComplete(
// ultimately release `in_results->sync_entry`, and thus leading to having a
// dangling pointer here.
in_results = nullptr;

state_ = STATE_READY;
if (result_state == SimpleEntryOperation::ENTRY_NEEDS_CALLBACK) {
ReturnEntryToCallerAsync(!created, std::move(completion_callback));
}
Expand All @@ -1474,8 +1485,8 @@ void SimpleEntryImpl::UpdateStateAfterOperationComplete(
state_ = STATE_FAILURE;
MarkAsDoomed(DOOM_COMPLETED);
} else {
state_ = STATE_READY;
UpdateDataFromEntryStat(entry_stat);
state_ = STATE_READY;
}
}

Expand Down Expand Up @@ -1637,7 +1648,10 @@ void SimpleEntryImpl::UpdateDataFromEntryStat(
const SimpleEntryStat& entry_stat) {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
DCHECK(synchronous_entry_);
DCHECK_EQ(STATE_READY, state_);
// We want to only be called in STATE_IO_PENDING so that if call to
// SimpleIndex::UpdateEntrySize() ends up triggering eviction and queuing
// Dooms it doesn't also run any queued operations.
CHECK_EQ(state_, STATE_IO_PENDING);

last_used_ = entry_stat.last_used();
last_modified_ = entry_stat.last_modified();
Expand All @@ -1662,23 +1676,22 @@ int64_t SimpleEntryImpl::GetDiskUsage() const {
return file_size;
}

int SimpleEntryImpl::ReadFromBuffer(net::GrowableIOBuffer* in_buf,
int offset,
int buf_len,
net::IOBuffer* out_buf) {
void SimpleEntryImpl::ReadFromBuffer(net::GrowableIOBuffer* in_buf,
int offset,
int buf_len,
net::IOBuffer* out_buf) {
DCHECK_GE(buf_len, 0);

std::copy(in_buf->data() + offset, in_buf->data() + offset + buf_len,
out_buf->data());
UpdateDataFromEntryStat(SimpleEntryStat(base::Time::Now(), last_modified_,
data_size_, sparse_data_size_));
return buf_len;
}

int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
int offset,
int buf_len,
bool truncate) {
void SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
int offset,
int buf_len,
bool truncate) {
// Currently, stream 0 is only used for HTTP headers, and always writes them
// with a single, truncating write. Detect these writes and record the size
// changes of the headers. Also, support writes to stream 0 that have
Expand Down Expand Up @@ -1717,7 +1730,6 @@ int SimpleEntryImpl::SetStream0Data(net::IOBuffer* buf,
UpdateDataFromEntryStat(
SimpleEntryStat(modification_time, modification_time, data_size_,
sparse_data_size_));
return buf_len;
}

} // namespace disk_cache
28 changes: 12 additions & 16 deletions net/disk_cache/simple/simple_entry_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -343,20 +343,21 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
int64_t GetDiskUsage() const;

// Completes a read from the stream data kept in memory, logging metrics
// and updating metadata. Returns the # of bytes read successfully.
// This asumes the caller has already range-checked offset and buf_len
// appropriately.
int ReadFromBuffer(net::GrowableIOBuffer* in_buf,
int offset,
int buf_len,
net::IOBuffer* out_buf);
// and updating metadata. This assumes the caller has already range-checked
// offset and buf_len appropriately, and therefore always reads `buf_len`
// bytes.
void ReadFromBuffer(net::GrowableIOBuffer* in_buf,
int offset,
int buf_len,
net::IOBuffer* out_buf);

// Copies data from |buf| to the internal in-memory buffer for stream 0. If
// |truncate| is set to true, the target buffer will be truncated at |offset|
// + |buf_len| before being written.
int SetStream0Data(net::IOBuffer* buf,
int offset, int buf_len,
bool truncate);
void SetStream0Data(net::IOBuffer* buf,
int offset,
int buf_len,
bool truncate);

// We want all async I/O on entries to complete before recycling the dir.
scoped_refptr<BackendCleanupTracker> cleanup_tracker_;
Expand Down Expand Up @@ -421,12 +422,7 @@ class NET_EXPORT_PRIVATE SimpleEntryImpl : public Entry,
// an operation is being executed no one owns the synchronous entry. Therefore
// SimpleEntryImpl should not be deleted while an operation is running as that
// would leak the SimpleSynchronousEntry.
// This dangling raw_ptr occurred in:
// content_unittests:
// GeneratedCodeCacheTest/GeneratedCodeCacheTest.StressVeryLargeEntries/1
// https://ci.chromium.org/ui/p/chromium/builders/try/linux-rel/1425125/test-results?q=ExactID%3Aninja%3A%2F%2Fcontent%2Ftest%3Acontent_unittests%2FGeneratedCodeCacheTest.StressVeryLargeEntries%2FGeneratedCodeCacheTest.1+VHash%3Ab3ba0803668e9981&sortby=&groupby=
raw_ptr<SimpleSynchronousEntry, FlakyDanglingUntriaged> synchronous_entry_ =
nullptr;
raw_ptr<SimpleSynchronousEntry> synchronous_entry_ = nullptr;

scoped_refptr<net::PrioritizedTaskRunner> prioritized_task_runner_;

Expand Down
6 changes: 1 addition & 5 deletions net/disk_cache/simple/simple_synchronous_entry.h
Original file line number Diff line number Diff line change
Expand Up @@ -105,11 +105,7 @@ struct SimpleEntryCreationResults {
explicit SimpleEntryCreationResults(SimpleEntryStat entry_stat);
~SimpleEntryCreationResults();

// This dangling raw_ptr occurred in:
// content_unittests:
// GeneratedCodeCacheTest/GeneratedCodeCacheTest.StressVeryLargeEntries/1
// https://ci.chromium.org/ui/p/chromium/builders/try/linux-rel/1425125/test-results?q=ExactID%3Aninja%3A%2F%2Fcontent%2Ftest%3Acontent_unittests%2FGeneratedCodeCacheTest.StressVeryLargeEntries%2FGeneratedCodeCacheTest.1+VHash%3Ab3ba0803668e9981&sortby=&groupby=
raw_ptr<SimpleSynchronousEntry, FlakyDanglingUntriaged> sync_entry;
raw_ptr<SimpleSynchronousEntry> sync_entry;
// This is set when `sync_entry` is null.
std::unique_ptr<UnboundBackendFileOperations> unbound_file_operations;

Expand Down

0 comments on commit 4a68fc8

Please sign in to comment.