/*
 * Copyright (C) 2016 ScyllaDB
 */

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once

#include "range_tombstone.hh"
#include "query-request.hh"
#include "position_in_partition.hh"
#include "utils/preempt.hh"

#include <boost/variant.hpp>
#include <boost/range/iterator_range.hpp>

#include <iosfwd>
#include <vector>
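// A sorted list of range tombstones belonging to a single partition, ordered by
// the schema's clustering comparator (range_tombstone::compare). apply() merges
// a new tombstone into the list, adjusting existing entries where their ranges
// overlap. The undo/reverter machinery below records every individual
// insert/erase/update performed by a multi-step apply so that it can be rolled
// back if an exception is thrown part-way through.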
class range_tombstone_list final {
    using range_tombstones_type = range_tombstone::container_type;
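    // Undo records for the three primitive mutations of the list. Each op
    // captures enough state to restore the list to what it was before the
    // corresponding insert, erase or update (see reverter below).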
    class insert_undo_op {
        const range_tombstone& _new_rt;
    public:
        insert_undo_op(const range_tombstone& new_rt)
                : _new_rt(new_rt) { }
        void undo(const schema& s, range_tombstone_list& rt_list) noexcept;
    };
    class erase_undo_op {
        alloc_strategy_unique_ptr<range_tombstone> _rt;
    public:
        erase_undo_op(range_tombstone& rt)
                : _rt(&rt) { }
        void undo(const schema& s, range_tombstone_list& rt_list) noexcept;
    };
    class update_undo_op {
        range_tombstone _old_rt;
        const range_tombstone& _new_rt;
    public:
        update_undo_op(range_tombstone&& old_rt, const range_tombstone& new_rt)
                : _old_rt(std::move(old_rt)), _new_rt(new_rt) { }
        void undo(const schema& s, range_tombstone_list& rt_list) noexcept;
    };
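    // Collects an undo op for every mutation applied through it to _dst, so that
    // revert() can restore the list if a multi-step apply fails part-way.
    // cancel() commits the changes by discarding the undo log; the destructor
    // reverts anything that has not been cancelled.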
    class reverter {
    private:
        using op = boost::variant<erase_undo_op, insert_undo_op, update_undo_op>;
        std::vector<op> _ops;
        const schema& _s;
    protected:
        range_tombstone_list& _dst;
    public:
        reverter(const schema& s, range_tombstone_list& dst)
                : _s(s)
                , _dst(dst) { }
        virtual ~reverter() {
            revert();
        }
        reverter(reverter&&) = default;
        reverter& operator=(reverter&&) = default;
        reverter(const reverter&) = delete;
        reverter& operator=(reverter&) = delete;
        virtual range_tombstones_type::iterator insert(range_tombstones_type::iterator it, range_tombstone& new_rt);
        virtual range_tombstones_type::iterator erase(range_tombstones_type::iterator it);
        virtual void update(range_tombstones_type::iterator it, range_tombstone&& new_rt);
        void revert() noexcept;
        void cancel() noexcept {
            _ops.clear();
        }
    };
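    // Applies the same primitive operations as reverter but records no undo
    // information, for callers that do not need rollback.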
    class nop_reverter : public reverter {
    public:
        nop_reverter(const schema& s, range_tombstone_list& rt_list)
                : reverter(s, rt_list) { }
        virtual range_tombstones_type::iterator insert(range_tombstones_type::iterator it, range_tombstone& new_rt) override;
        virtual range_tombstones_type::iterator erase(range_tombstones_type::iterator it) override;
        virtual void update(range_tombstones_type::iterator it, range_tombstone&& new_rt) override;
    };
private:
    range_tombstones_type _tombstones;
public:
    // ForwardIterator<range_tombstone>
    using iterator = range_tombstones_type::iterator;
    using const_iterator = range_tombstones_type::const_iterator;
    struct copy_comparator_only { };
    range_tombstone_list(const schema& s)
        : _tombstones(range_tombstone::compare(s))
    { }
    range_tombstone_list(const range_tombstone_list& x, copy_comparator_only)
        : _tombstones(x._tombstones.key_comp())
    { }
    range_tombstone_list(const range_tombstone_list&);
    range_tombstone_list& operator=(range_tombstone_list&) = delete;
    range_tombstone_list(range_tombstone_list&&) = default;
    range_tombstone_list& operator=(range_tombstone_list&&) = default;
    ~range_tombstone_list();
    size_t size() const {
        return _tombstones.size();
    }
    bool empty() const {
        return _tombstones.empty();
    }
    range_tombstones_type& tombstones() {
        return _tombstones;
    }
    auto begin() {
        return _tombstones.begin();
    }
    auto begin() const {
        return _tombstones.begin();
    }
    auto end() {
        return _tombstones.end();
    }
    auto end() const {
        return _tombstones.end();
    }
    void apply(const schema& s, const bound_view& start_bound, const bound_view& end_bound, tombstone tomb) {
        apply(s, start_bound.prefix(), start_bound.kind(), end_bound.prefix(), end_bound.kind(), std::move(tomb));
    }
    void apply(const schema& s, const range_tombstone& rt) {
        apply(s, rt.start, rt.start_kind, rt.end, rt.end_kind, rt.tomb);
    }
    void apply(const schema& s, range_tombstone&& rt) {
        apply(s, std::move(rt.start), rt.start_kind, std::move(rt.end), rt.end_kind, std::move(rt.tomb));
    }
    void apply(const schema& s, clustering_key_prefix start, bound_kind start_kind,
            clustering_key_prefix end, bound_kind end_kind, tombstone tomb) {
        nop_reverter rev(s, *this);
        apply_reversibly(s, std::move(start), start_kind, std::move(end), end_kind, std::move(tomb), rev);
    }
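    // Illustrative usage (names such as `s`, `ck_a`, `ck_b` and `t` are
    // placeholders): delete the clustering range [ck_a, ck_b) with tombstone t:
    //
    //     range_tombstone_list l(s);
    //     l.apply(s, ck_a, bound_kind::incl_start, ck_b, bound_kind::excl_end, t);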
    // Monotonic exception guarantees. In case of failure the object will contain at least as much information as before the call.
    void apply_monotonically(const schema& s, const range_tombstone& rt);
    // Merges another list with this object.
    // Monotonic exception guarantees. In case of failure the object will contain at least as much information as before the call.
    void apply_monotonically(const schema& s, const range_tombstone_list& list);
    /// Merges another list with this object.
    /// The other list must be governed by the same allocator as this object.
    ///
    /// Monotonic exception guarantees. In case of failure the object will contain at least as much information as before the call.
    /// The other list will be left in a state such that it would still commute with this object to the same state as it
    /// would if the call didn't fail.
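    /// Returns stop_iteration::yes when merging is complete. With is_preemptible::yes the merge may be
    /// preempted and return stop_iteration::no, in which case the call should be repeated to finish it.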
    stop_iteration apply_monotonically(const schema& s, range_tombstone_list&& list, is_preemptible = is_preemptible::no);
public:
    tombstone search_tombstone_covering(const schema& s, const clustering_key_prefix& key) const;
    // Returns the range of tombstones which overlap with the given clustering range.
    boost::iterator_range<const_iterator> slice(const schema& s, const query::clustering_range&) const;
    // Returns the range tombstones which overlap with [start, end).
    boost::iterator_range<const_iterator> slice(const schema& s, position_in_partition_view start, position_in_partition_view end) const;
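    // Illustrative usage (`s`, `l` and `range` are placeholders): iterate over the
    // tombstones overlapping a queried clustering range:
    //
    //     for (const range_tombstone& rt : l.slice(s, range)) { /* ... */ }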
    iterator erase(const_iterator, const_iterator);
    // Ensures that every range tombstone is strictly contained within the given clustering ranges.
    // Preserves all information which may be relevant for rows from those ranges.
    void trim(const schema& s, const query::clustering_row_ranges&);
    range_tombstone_list difference(const schema& s, const range_tombstone_list& rt_list) const;
    // Erases the range tombstones for which filter returns true.
    template <typename Pred>
    void erase_where(Pred filter) {
        static_assert(std::is_same<bool, std::result_of_t<Pred(const range_tombstone&)>>::value,
                      "bad Pred signature");
        auto it = begin();
        while (it != end()) {
            if (filter(*it)) {
                it = _tombstones.erase_and_dispose(it, current_deleter<range_tombstone>());
            } else {
                ++it;
            }
        }
    }
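    // Illustrative usage (`cutoff` is a placeholder timestamp): drop all tombstones
    // written before a given point:
    //
    //     l.erase_where([&] (const range_tombstone& rt) { return rt.tomb.timestamp < cutoff; });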
    void clear() {
        _tombstones.clear_and_dispose(current_deleter<range_tombstone>());
    }
    // Removes elements of this list in batches.
    // Returns stop_iteration::yes iff there are no more elements to remove.
    stop_iteration clear_gently() noexcept;
    void apply(const schema& s, const range_tombstone_list& rt_list);
    // See reversibly_mergeable.hh
    reverter apply_reversibly(const schema& s, range_tombstone_list& rt_list);
    friend std::ostream& operator<<(std::ostream& out, const range_tombstone_list&);
    bool equal(const schema&, const range_tombstone_list&) const;
    size_t external_memory_usage(const schema& s) const {
        size_t result = 0;
        for (auto& rtb : _tombstones) {
            result += rtb.memory_usage(s);
        }
        return result;
    }
private:
    void apply_reversibly(const schema& s, clustering_key_prefix start, bound_kind start_kind,
            clustering_key_prefix end, bound_kind end_kind, tombstone tomb, reverter& rev);
    void insert_from(const schema& s, range_tombstones_type::iterator it, clustering_key_prefix start,
            bound_kind start_kind, clustering_key_prefix end, bound_kind end_kind, tombstone tomb, reverter& rev);
    range_tombstones_type::iterator find(const schema& s, const range_tombstone& rt);
};