-
Notifications
You must be signed in to change notification settings - Fork 1k
/
Copy pathmmap1.c
254 lines (213 loc) · 6.39 KB
/
mmap1.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2018 Jan Stancek. All rights reserved.
*/
/*
* Test: Spawn 2 threads. First thread maps, writes and unmaps
* an area. Second thread tries to read from it. Second thread
* races against first thread. There is no synchronization
* between threads, but each mmap/munmap increases a counter
* that is checked to determine when has read occurred. If a read
* hit SIGSEGV in between mmap/munmap it is a failure. If a read
* between mmap/munmap worked, then its value must match expected
* value.
*
* Can trigger panics/stalls since at least 4.14 on some arches:
* fc8efd2ddfed ("mm/memory.c: do_fault: avoid usage of stale vm_area_struct")
* Can trigger user-space stalls on aarch64:
* 7a30df49f63a ("mm: mmu_gather: remove __tlb_reset_range() for force flush")
* https://lore.kernel.org/linux-mm/[email protected]
* Can trigger "still mapped when deleted" BUG at mm/filemap.c:171, on aarch64 since 4.20
* e1b98fa31664 ("locking/rwsem: Add missing ACQUIRE to read_slowpath exit when queue is empty")
* 99143f82a255 ("lcoking/rwsem: Add missing ACQUIRE to read_slowpath sleep loop")
*/
#include <errno.h>
#include <float.h>
#include <pthread.h>
#include <sched.h>
#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>
#include "lapi/abisize.h"
#include "tst_test.h"
#include "tst_safe_pthread.h"
#define GIGABYTE (1L*1024*1024*1024)
/* test file name; created in tmpdir and unlinked immediately after open */
#define TEST_FILENAME "ashfile"
/* seconds remaining before reaching timeout
 * NOTE(review): not referenced in this chunk — TODO confirm it is still used */
#define STOP_THRESHOLD 10
/* emit a progress line roughly every PROGRESS_SEC seconds of runtime */
#define PROGRESS_SEC 3
/* bytes mapped/written/read per iteration (file is file_size + 1 bytes) */
static int file_size = 1024;
/* map/unmap (and read) iterations performed by each worker thread */
static int num_iter = 5000;
/* mmap() hint far away from libc's temporary mappings, set up in setup() */
static void *distant_area;
/* recovery point for an expected SIGSEGV taken in read_mem() */
static jmp_buf jmpbuf;
/* current mapping; written by map_write_unmap(), read racily by read_mem() */
static volatile unsigned char *map_address;
/* system page size; cached in setup() — not referenced elsewhere in this chunk */
static unsigned long page_sz;
/* counters reported periodically by run() */
static unsigned long mapped_sigsegv_count;
static unsigned long map_count;
static unsigned long threads_spawned;
static unsigned long data_matched;
static unsigned long repeated_reads;
/* sequence id for each map/unmap performed */
static int mapcnt, unmapcnt;
/* stored sequence id before making read attempt */
static int br_map, br_unmap;
/*
 * Compare "before read" counters with "after read" counters.
 * Returns non-zero only when no map/unmap happened across the read
 * (both counters unchanged) and a mapping was in place (more maps
 * than unmaps observed).
 */
static inline int was_area_mapped(int br_m, int br_u, int ar_m, int ar_u)
{
	if (br_m != ar_m || br_u != ar_u)
		return 0;

	return br_m > br_u;
}
/*
 * Signal handler. A SIGSEGV is tolerated (counted, then control jumps
 * back into read_mem()) unless the map/unmap sequence counters show the
 * area was reliably mapped across the faulting access — that is a test
 * failure. Any other signal aborts the test.
 */
static void sig_handler(int signal, siginfo_t *info,
	LTP_ATTRIBUTE_UNUSED void *ut)
{
	int after_map, after_unmap;

	if (signal != SIGSEGV) {
		tst_res(TFAIL, "Unexpected signal - %d, addr: %p, exiting",
			signal, info->si_addr);
		_exit(TBROK);
	}

	/* if we hit SIGSEGV between map/unmap, something is wrong */
	after_unmap = tst_atomic_load(&unmapcnt);
	after_map = tst_atomic_load(&mapcnt);
	if (was_area_mapped(br_map, br_unmap, after_map, after_unmap)) {
		tst_res(TFAIL, "got sigsegv while mapped");
		_exit(TFAIL);
	}

	mapped_sigsegv_count++;
	longjmp(jmpbuf, 1);
}
/*
 * Writer thread: repeatedly maps the test file, overwrites it with 'b'
 * and unmaps it again. mapcnt is bumped right after each mmap and
 * unmapcnt right before each munmap, so the reader thread can decide
 * whether its racy read fell inside a single map/unmap window.
 */
void *map_write_unmap(void *ptr)
{
	int *fd_ptr = ptr;
	int iteration;

	for (iteration = 0; iteration < num_iter; iteration++) {
		int offset;
		void *unmap_addr;

		map_address = SAFE_MMAP(distant_area, (size_t) file_size,
			PROT_WRITE | PROT_READ, MAP_SHARED, *fd_ptr, 0);
		tst_atomic_inc(&mapcnt);

		for (offset = 0; offset < file_size; offset++)
			map_address[offset] = 'b';

		unmap_addr = (void *)map_address;
		tst_atomic_inc(&unmapcnt);
		SAFE_MUNMAP(unmap_addr, file_size);
		map_count++;
	}

	return NULL;
}
/*
 * Reader thread: races against map_write_unmap() with no locking.
 * Each byte read is bracketed by snapshots of mapcnt/unmapcnt; only
 * when both counters are unchanged across the read (area provably
 * mapped the whole time) is the value checked. A SIGSEGV during the
 * read lands in sig_handler(), which longjmp()s back here.
 */
void *read_mem(LTP_ATTRIBUTE_UNUSED void *ptr)
{
	volatile int i; /* longjmp could clobber i */
	int j, ar_map, ar_unmap;
	unsigned char c;

	for (i = 0; i < num_iter; i++) {
		/* re-entered with 1 after a tolerated SIGSEGV; skip to next iter */
		if (setjmp(jmpbuf) == 1)
			continue;
		for (j = 0; j < file_size; j++) {
read_again:
			/* snapshot sequence counters before the racy read */
			br_map = tst_atomic_load(&mapcnt);
			br_unmap = tst_atomic_load(&unmapcnt);
			c = map_address[j];
			/* snapshot again after the read, in reverse order */
			ar_unmap = tst_atomic_load(&unmapcnt);
			ar_map = tst_atomic_load(&mapcnt);
			/*
			 * Read above is racing against munmap and mmap
			 * in other thread. While the address might be valid
			 * the mapping could be in various stages of being
			 * 'ready'. We only check the value, if we can be sure
			 * read happened in between single mmap and munmap as
			 * observed by first thread.
			 */
			if (was_area_mapped(br_map, br_unmap, ar_map,
					ar_unmap)) {
				switch (c) {
				case 'a':
					/* initial file content; writer hasn't
					 * reached this byte yet — retry */
					repeated_reads++;
					goto read_again;
				case 'b':
					/* value written by the writer thread */
					data_matched++;
					break;
				default:
					tst_res(TFAIL, "value[%d] is %c", j, c);
					break;
				}
			}
		}
	}

	return NULL;
}
/*
 * Create the (immediately unlinked) test file filled with 'size' bytes
 * of 'a' followed by a single '\0' (file length is size + 1), fsync it
 * and return the open fd, positioned at EOF.
 *
 * Improvement: the original issued one write() syscall per byte; fill a
 * local buffer once and write in chunks instead — same file content,
 * O(size / 4096) syscalls rather than O(size).
 */
int mkfile(int size)
{
	char buf[4096];
	int fd, i, chunk;

	fd = SAFE_OPEN(TEST_FILENAME, O_RDWR | O_CREAT, 0600);
	SAFE_UNLINK(TEST_FILENAME);

	for (i = 0; i < (int)sizeof(buf); i++)
		buf[i] = 'a';

	for (i = 0; i < size; i += chunk) {
		chunk = size - i;
		if (chunk > (int)sizeof(buf))
			chunk = (int)sizeof(buf);
		SAFE_WRITE(SAFE_WRITE_ALL, fd, buf, chunk);
	}
	SAFE_WRITE(SAFE_WRITE_ALL, fd, "\0", 1);

	if (fsync(fd) == -1)
		tst_brk(TBROK | TERRNO, "fsync()");

	return fd;
}
/*
 * Test setup: compute a "distant" mmap hint address so the test's
 * mappings don't collide with libc's temporary mappings, and install
 * the SIGSEGV handler used by read_mem().
 *
 * Fix: arithmetic on a 'void *' (distant_area += ...) relies on a GCC
 * extension; cast through 'char *' for conforming pointer arithmetic.
 */
static void setup(void)
{
	struct sigaction sigptr;
	size_t distant_mmap_size;
	size_t mem_total;

	page_sz = getpagesize();

	mem_total = SAFE_READ_MEMINFO("MemTotal:");
	mem_total *= 1024;

#ifdef TST_ABI32
	distant_mmap_size = 256*1024*1024;
#else
	distant_mmap_size = (mem_total > 4 * GIGABYTE) ? 2 * GIGABYTE : mem_total / 2;
#endif

	/*
	 * Used as hint for mmap thread, so it doesn't interfere
	 * with other potential (temporary) mappings from libc
	 */
	distant_area = SAFE_MMAP(0, distant_mmap_size, PROT_WRITE | PROT_READ,
		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	SAFE_MUNMAP(distant_area, distant_mmap_size);
	/* point into the middle of the (now unmapped) probed region */
	distant_area = (char *)distant_area + distant_mmap_size / 2;

	sigptr.sa_sigaction = sig_handler;
	sigemptyset(&sigptr.sa_mask);
	/* SA_NODEFER: read_mem() longjmp()s out of the handler, so SIGSEGV
	 * must not stay blocked after the first hit */
	sigptr.sa_flags = SA_SIGINFO | SA_NODEFER;
	SAFE_SIGACTION(SIGSEGV, &sigptr, NULL);
}
/*
 * Main test loop: while runtime remains, spawn one writer and one
 * reader thread over a fresh test file, join them, and print progress
 * counters roughly every PROGRESS_SEC seconds. Surviving the full
 * runtime without a failure (see sig_handler/read_mem) is a pass.
 */
static void run(void)
{
	pthread_t thid[2];
	int start, last_update;

	start = last_update = tst_remaining_runtime();

	while (tst_remaining_runtime()) {
		int fd = mkfile(file_size);

		/* reset map/unmap sequence counters for this thread pair */
		tst_atomic_store(0, &mapcnt);
		tst_atomic_store(0, &unmapcnt);

		SAFE_PTHREAD_CREATE(&thid[0], NULL, map_write_unmap, &fd);
		SAFE_PTHREAD_CREATE(&thid[1], NULL, read_mem, &fd);
		threads_spawned += 2;

		SAFE_PTHREAD_JOIN(thid[0], NULL);
		SAFE_PTHREAD_JOIN(thid[1], NULL);
		close(fd);

		/* progress line once at least PROGRESS_SEC seconds elapsed */
		if (last_update - tst_remaining_runtime() >= PROGRESS_SEC) {
			last_update = tst_remaining_runtime();
			tst_res(TINFO, "[%03d] mapped: %lu, sigsegv hit: %lu, "
				"threads spawned: %lu",
				start - last_update,
				map_count, mapped_sigsegv_count,
				threads_spawned);
			tst_res(TINFO, " repeated_reads: %ld, "
				"data_matched: %lu", repeated_reads,
				data_matched);
		}
	}

	tst_res(TPASS, "System survived.");
}
/* LTP test descriptor: setup() once, then run() under a runtime budget */
static struct tst_test test = {
	.test_all = run,
	.setup = setup,
	/* total wall-clock budget (seconds) consumed by run()'s loop */
	.runtime = 180,
	/* TEST_FILENAME is created inside a private temporary directory */
	.needs_tmpdir = 1,
};