]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | /* | |
4 | * Ceph - scalable distributed file system | |
5 | * | |
6 | * Copyright (C) 2011 New Dream Network | |
7 | * | |
8 | * This is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU Lesser General Public | |
10 | * License version 2.1, as published by the Free Software | |
11 | * Foundation. See file COPYING. | |
12 | * | |
13 | */ | |
14 | ||
15 | #include <pthread.h> | |
16 | #include "gtest/gtest.h" | |
17 | #ifndef GTEST_IS_THREADSAFE | |
18 | #error "!GTEST_IS_THREADSAFE" | |
19 | #endif | |
20 | ||
21 | #include "include/cephfs/libcephfs.h" | |
22 | #include <errno.h> | |
23 | #include <fcntl.h> | |
24 | #include <unistd.h> | |
25 | #include <sys/file.h> | |
26 | #include <sys/types.h> | |
27 | #include <sys/stat.h> | |
28 | #include <dirent.h> | |
7c673cae FG |
29 | #include <stdlib.h> |
30 | #include <semaphore.h> | |
31 | #include <time.h> | |
32 | #include <sys/mman.h> | |
33 | ||
34 | #ifdef __linux__ | |
35 | #include <limits.h> | |
eafe8130 TL |
36 | #include <sys/xattr.h> |
37 | #elif __FreeBSD__ | |
38 | #include <sys/types.h> | |
39 | #include <sys/wait.h> | |
7c673cae FG |
40 | #endif |
41 | ||
11fdf7f2 | 42 | #include "include/ceph_assert.h" |
eafe8130 | 43 | #include "ceph_pthread_self.h" |
11fdf7f2 | 44 | |
7c673cae FG |
// Startup common: create and mount ceph fs
// (expects a local 'struct ceph_mount_info *cmount' in the caller's scope)
#define STARTUP_CEPH() do { \
  ASSERT_EQ(0, ceph_create(&cmount, NULL)); \
  ASSERT_EQ(0, ceph_conf_read_file(cmount, NULL)); \
  ASSERT_EQ(0, ceph_conf_parse_env(cmount, NULL)); \
  ASSERT_EQ(0, ceph_mount(cmount, NULL)); \
} while(0)

// Cleanup common: unmount and release ceph fs
#define CLEANUP_CEPH() do { \
  ASSERT_EQ(0, ceph_unmount(cmount)); \
  ASSERT_EQ(0, ceph_release(cmount)); \
} while(0)

// Mode used when creating the test files (rwx for user/group/other).
static const mode_t fileMode = S_IRWXU | S_IRWXG | S_IRWXO;

// Default wait times:
//  - waitMs: short delay after which an operation is expected NOT to have
//    completed (used by NOT_WAIT_WORKER)
//  - waitSlowMs: upper bound for operations expected to complete
//    (5" should be enough in case of network congestion)
static const long waitMs = 10;
static const long waitSlowMs = 5000;
65 | ||
66 | // Get the absolute struct timespec reference from now + 'ms' milliseconds | |
67 | static const struct timespec* abstime(struct timespec &ts, long ms) { | |
68 | if (clock_gettime(CLOCK_REALTIME, &ts) == -1) { | |
11fdf7f2 | 69 | ceph_abort(); |
7c673cae FG |
70 | } |
71 | ts.tv_nsec += ms * 1000000; | |
72 | ts.tv_sec += ts.tv_nsec / 1000000000; | |
73 | ts.tv_nsec %= 1000000000; | |
74 | return &ts; | |
75 | } | |
76 | ||
77 | /* Basic locking */ | |
78 | TEST(LibCephFS, BasicLocking) { | |
79 | struct ceph_mount_info *cmount = NULL; | |
80 | STARTUP_CEPH(); | |
81 | ||
82 | char c_file[1024]; | |
83 | sprintf(c_file, "/flock_test_%d", getpid()); | |
84 | const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode); | |
85 | ASSERT_GE(fd, 0); | |
86 | ||
87 | // Lock exclusively twice | |
88 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42)); | |
89 | ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 43)); | |
90 | ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 44)); | |
91 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42)); | |
92 | ||
93 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 43)); | |
94 | ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 44)); | |
95 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 43)); | |
96 | ||
97 | // Lock shared three times | |
98 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42)); | |
99 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 43)); | |
100 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 44)); | |
101 | // And then attempt to lock exclusively | |
102 | ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45)); | |
103 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42)); | |
104 | ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45)); | |
105 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 44)); | |
106 | ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45)); | |
107 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 43)); | |
108 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, 45)); | |
109 | ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, 42)); | |
110 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 45)); | |
111 | ||
112 | // Lock shared with upgrade to exclusive (POSIX) | |
113 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42)); | |
114 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42)); | |
115 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42)); | |
116 | ||
117 | // Lock exclusive with downgrade to shared (POSIX) | |
118 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, 42)); | |
119 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, 42)); | |
120 | ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, 42)); | |
121 | ||
122 | ASSERT_EQ(0, ceph_close(cmount, fd)); | |
123 | ASSERT_EQ(0, ceph_unlink(cmount, c_file)); | |
124 | CLEANUP_CEPH(); | |
125 | } | |
126 | ||
127 | /* Locking in different threads */ | |
128 | ||
129 | // Used by ConcurrentLocking test | |
130 | struct str_ConcurrentLocking { | |
131 | const char *file; | |
132 | struct ceph_mount_info *cmount; // !NULL if shared | |
133 | sem_t sem[2]; | |
134 | sem_t semReply[2]; | |
135 | void sem_init(int pshared) { | |
136 | ASSERT_EQ(0, ::sem_init(&sem[0], pshared, 0)); | |
137 | ASSERT_EQ(0, ::sem_init(&sem[1], pshared, 0)); | |
138 | ASSERT_EQ(0, ::sem_init(&semReply[0], pshared, 0)); | |
139 | ASSERT_EQ(0, ::sem_init(&semReply[1], pshared, 0)); | |
140 | } | |
141 | void sem_destroy() { | |
142 | ASSERT_EQ(0, ::sem_destroy(&sem[0])); | |
143 | ASSERT_EQ(0, ::sem_destroy(&sem[1])); | |
144 | ASSERT_EQ(0, ::sem_destroy(&semReply[0])); | |
145 | ASSERT_EQ(0, ::sem_destroy(&semReply[1])); | |
146 | } | |
147 | }; | |
148 | ||
// Synchronization macros. All of them expect two locals in scope:
// 's' (a str_ConcurrentLocking) and 'ts' (a struct timespec scratch).
// Step numbers select a semaphore by parity (n%2).

// Wakeup main (for (N) steps)
#define PING_MAIN(n) ASSERT_EQ(0, sem_post(&s.sem[n%2]))
// Wait for main to wake us up (for (RN) steps)
#define WAIT_MAIN(n) \
  ASSERT_EQ(0, sem_timedwait(&s.semReply[n%2], abstime(ts, waitSlowMs)))

// Wakeup worker (for (RN) steps)
#define PING_WORKER(n) ASSERT_EQ(0, sem_post(&s.semReply[n%2]))
// Wait for worker to wake us up (for (N) steps)
#define WAIT_WORKER(n) \
  ASSERT_EQ(0, sem_timedwait(&s.sem[n%2], abstime(ts, waitSlowMs)))
// Worker shall not wake us up (for (N) steps):
// expects sem_timedwait() to time out (return -1) after the short waitMs.
#define NOT_WAIT_WORKER(n) \
  ASSERT_EQ(-1, sem_timedwait(&s.sem[n%2], abstime(ts, waitMs)))

// Do twice an operation
#define TWICE(EXPR) do { \
  EXPR; \
  EXPR; \
} while(0)
169 | ||
/* Locking in different threads */

// Used by ConcurrentLocking test.
// Worker body: opens the shared test file and walks a scripted sequence
// of flock operations, reporting each completed step to the main thread
// with PING_MAIN(n) and blocking on WAIT_MAIN(n) where the script
// requires the main thread to act first. The lock owner is the worker's
// own thread id (ceph_pthread_self()).
// NOTE(review): assumes the main thread already holds LOCK_EX when the
// worker starts, so the first non-blocking attempt must fail — this
// matches the callers visible in this file.
static void thread_ConcurrentLocking(str_ConcurrentLocking& s) {
  struct ceph_mount_info *const cmount = s.cmount;
  struct timespec ts; // scratch buffer for the WAIT_MAIN macro

  const int fd = ceph_open(cmount, s.file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Main holds the exclusive lock: a non-blocking attempt must fail...
  ASSERT_EQ(-EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  PING_MAIN(1); // (1)
  // ...then block until main unlocks and we acquire it.
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));
  PING_MAIN(2); // (2)

  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  PING_MAIN(3); // (3)

  // Take a shared lock; main will verify it cannot lock exclusively.
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, ceph_pthread_self()));
  PING_MAIN(4); // (4)

  WAIT_MAIN(1); // (R1)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  PING_MAIN(5); // (5)

  // Main now holds LOCK_EX again; block until it releases.
  WAIT_MAIN(2); // (R2)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));
  PING_MAIN(6); // (6)

  WAIT_MAIN(3); // (R3)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  PING_MAIN(7); // (7)
}
204 | ||
205 | // Used by ConcurrentLocking test | |
206 | static void* thread_ConcurrentLocking_(void *arg) { | |
207 | str_ConcurrentLocking *const s = | |
208 | reinterpret_cast<str_ConcurrentLocking*>(arg); | |
209 | thread_ConcurrentLocking(*s); | |
210 | return NULL; | |
211 | } | |
212 | ||
// Lock-step protocol against one worker thread sharing our mount:
// the main body drives thread_ConcurrentLocking() via the PING_/WAIT_
// semaphore macros and checks flock contention between two owners
// (our thread id vs. the worker's) on the same ceph mount.
TEST(LibCephFS, ConcurrentLocking) {
  const pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();

  // pid-unique test file path
  char c_file[1024];
  sprintf(c_file, "/flock_test_%d", mypid);
  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Lock (exclusive) before the worker starts: its first NB attempt fails
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));

  // Start locker thread
  pthread_t thread;
  struct timespec ts; // scratch for WAIT_/NOT_WAIT_ macros
  str_ConcurrentLocking s = { c_file, cmount }; // cmount shared with worker
  s.sem_init(0); // pshared=0: same-process semaphores
  ASSERT_EQ(0, pthread_create(&thread, NULL, thread_ConcurrentLocking_, &s));
  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(1); // (1)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)

  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));

  // Shall have lock
  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(2); // (2)

  // Synchronization point with thread (failure: thread is dead)
  WAIT_WORKER(3); // (3)

  // Wait for thread to share lock; we can then share but not own it
  WAIT_WORKER(4); // (4)
  ASSERT_EQ(-EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));

  // Wake up thread to unlock shared lock
  PING_WORKER(1); // (R1)
  WAIT_WORKER(5); // (5)

  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));

  // Wake up thread to lock shared lock
  PING_WORKER(2); // (R2)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(6); // (6)

  // Release lock ; thread will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  WAIT_WORKER(6); // (6)

  // We no longer have the lock
  ASSERT_EQ(-EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(-EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));

  // Wake up thread to unlock exclusive lock
  PING_WORKER(3); // (R3)
  WAIT_WORKER(7); // (7)

  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));

  // Cleanup
  void *retval = (void*) (uintptr_t) -1; // sentinel: must come back NULL
  ASSERT_EQ(0, pthread_join(thread, &retval));
  ASSERT_EQ(NULL, retval);
  s.sem_destroy();
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
295 | ||
// Same lock-step protocol as ConcurrentLocking, but against TWO worker
// threads running the same script over the shared state. TWICE(...)
// performs each synchronization step once per worker; since the
// semaphores are selected only by step parity, the two workers' steps
// are merged rather than strictly ordered.
TEST(LibCephFS, ThreesomeLocking) {
  const pid_t mypid = getpid();
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();

  // pid-unique test file path
  char c_file[1024];
  sprintf(c_file, "/flock_test_%d", mypid);
  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Lock before the workers start: their first NB attempts must fail
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));

  // Start locker thread
  pthread_t thread[2];
  struct timespec ts; // scratch for WAIT_/NOT_WAIT_ macros
  str_ConcurrentLocking s = { c_file, cmount };
  s.sem_init(0); // pshared=0: same-process semaphores
  ASSERT_EQ(0, pthread_create(&thread[0], NULL, thread_ConcurrentLocking_, &s));
  ASSERT_EQ(0, pthread_create(&thread[1], NULL, thread_ConcurrentLocking_, &s));
  // Synchronization point with thread (failure: thread is dead)
  TWICE(WAIT_WORKER(1)); // (1)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)

  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));

  // Shall have lock
  TWICE(// Synchronization point with thread (failure: thread is dead)
	WAIT_WORKER(2); // (2)

	// Synchronization point with thread (failure: thread is dead)
	WAIT_WORKER(3)); // (3)

  // Wait for thread to share lock
  TWICE(WAIT_WORKER(4)); // (4)
  ASSERT_EQ(-EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));

  // Wake up thread to unlock shared lock
  TWICE(PING_WORKER(1); // (R1)
	WAIT_WORKER(5)); // (5)

  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, ceph_pthread_self()));

  TWICE( // Wake up thread to lock shared lock
	PING_WORKER(2); // (R2)

	// Shall not have lock immediately
	NOT_WAIT_WORKER(6)); // (6)

  // Release lock ; thread will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));
  TWICE(WAIT_WORKER(6); // (6)

	// We no longer have the lock
	ASSERT_EQ(-EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
	ASSERT_EQ(-EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, ceph_pthread_self()));

	// Wake up thread to unlock exclusive lock
	PING_WORKER(3); // (R3)
	WAIT_WORKER(7); // (7)
	);

  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, ceph_pthread_self()));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, ceph_pthread_self()));

  // Cleanup
  void *retval = (void*) (uintptr_t) -1; // sentinel: must come back NULL
  ASSERT_EQ(0, pthread_join(thread[0], &retval));
  ASSERT_EQ(NULL, retval);
  ASSERT_EQ(0, pthread_join(thread[1], &retval));
  ASSERT_EQ(NULL, retval);
  s.sem_destroy();
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
382 | ||
/* Locking in different processes */

// Shadow the file-scope 'waitMs' with a larger per-function timeout:
// cross-process wakeups are slower than same-process ones, so the
// "shall not complete" window is widened from 10ms to 100ms.
// The (void) cast silences unused-variable warnings.
#define PROCESS_SLOW_MS() \
  static const long waitMs = 100; \
  (void) waitMs
388 | ||
// Used by the InterProcessLocking tests: child-process worker.
// Mirrors thread_ConcurrentLocking() but runs in a fork()ed child with
// its OWN ceph mount, using the child's pid as the lock owner. The
// shared str_ConcurrentLocking lives in a MAP_SHARED segment so the
// pshared semaphores are visible to the parent. Exits with EXIT_SUCCESS
// on a clean run (the caller exit(EXIT_FAILURE)s if we ever return).
static void process_ConcurrentLocking(str_ConcurrentLocking& s) {
  const pid_t mypid = getpid();
  PROCESS_SLOW_MS();

  struct ceph_mount_info *cmount = NULL;
  struct timespec ts; // scratch for the WAIT_MAIN macro

  STARTUP_CEPH();
  s.cmount = cmount; // note: only meaningful within this process

  const int fd = ceph_open(cmount, s.file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);
  WAIT_MAIN(1); // (R1)

  // Parent holds LOCK_EX: non-blocking attempt must fail...
  ASSERT_EQ(-EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  PING_MAIN(1); // (1)
  // ...then block until the parent unlocks.
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
  PING_MAIN(2); // (2)

  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  PING_MAIN(3); // (3)

  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH, mypid));
  PING_MAIN(4); // (4)

  WAIT_MAIN(2); // (R2)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  PING_MAIN(5); // (5)

  WAIT_MAIN(3); // (R3)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));
  PING_MAIN(6); // (6)

  WAIT_MAIN(4); // (R4)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  PING_MAIN(7); // (7)

  CLEANUP_CEPH();

  // NOTE(review): these semaphores live in shared memory and the parent
  // also calls sem_destroy() on them after waitpid() — destroying them
  // here as well looks like a double destroy; confirm intent.
  s.sem_destroy();
  exit(EXIT_SUCCESS);
}
433 | ||
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
// Same lock-step protocol as ConcurrentLocking, but the worker is a
// fork()ed child with its own mount (see process_ConcurrentLocking);
// lock owners are the two pids.
TEST(LibCephFS, DISABLED_InterProcessLocking) {
  PROCESS_SLOW_MS();
  // Process synchronization
  char c_file[1024];
  const pid_t mypid = getpid();
  sprintf(c_file, "/flock_test_%d", mypid);

  // Note: the semaphores MUST be on a shared memory segment
  // NOTE(review): the mmap() result is not checked against MAP_FAILED.
  str_ConcurrentLocking *const shs =
      reinterpret_cast<str_ConcurrentLocking*>
      (mmap(0, sizeof(*shs), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
	    -1, 0));
  str_ConcurrentLocking &s = *shs;
  s.file = c_file;
  s.sem_init(1); // pshared=1: cross-process semaphores

  // Start locker process
  const pid_t pid = fork();
  ASSERT_GE(pid, 0);
  if (pid == 0) {
    // Child: runs the scripted worker and exit()s; never returns here.
    process_ConcurrentLocking(s);
    exit(EXIT_FAILURE);
  }

  struct timespec ts; // scratch for WAIT_/NOT_WAIT_ macros
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();

  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Lock before releasing the child: its first NB attempt must fail
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));

  // Synchronization point with process (failure: process is dead)
  PING_WORKER(1); // (R1)
  WAIT_WORKER(1); // (1)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)

  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));

  // Shall have lock
  // Synchronization point with process (failure: process is dead)
  WAIT_WORKER(2); // (2)

  // Synchronization point with process (failure: process is dead)
  WAIT_WORKER(3); // (3)

  // Wait for process to share lock; we can then share but not own it
  WAIT_WORKER(4); // (4)
  ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));

  // Wake up process to unlock shared lock
  PING_WORKER(2); // (R2)
  WAIT_WORKER(5); // (5)

  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));

  // Wake up process to lock shared lock
  PING_WORKER(3); // (R3)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(6); // (6)

  // Release lock ; process will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  WAIT_WORKER(6); // (6)

  // We no longer have the lock
  ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(-EWOULDBLOCK, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));

  // Wake up process to unlock exclusive lock
  PING_WORKER(4); // (R4)
  WAIT_WORKER(7); // (7)

  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));

  // Wait pid
  int status;
  ASSERT_EQ(pid, waitpid(pid, &status, 0));
  // Raw wait status compared to 0: equals EXIT_SUCCESS only for a
  // normal exit(0) (no signal, no nonzero exit code).
  ASSERT_EQ(EXIT_SUCCESS, status);

  // Cleanup
  s.sem_destroy();
  ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}
533 | ||
// Disabled because of fork() issues (http://tracker.ceph.com/issues/16556)
// Same as DISABLED_InterProcessLocking but with TWO child processes
// running the same script; TWICE(...) performs each synchronization
// step once per child (merged by semaphore parity, not strictly ordered).
TEST(LibCephFS, DISABLED_ThreesomeInterProcessLocking) {
  PROCESS_SLOW_MS();
  // Process synchronization
  char c_file[1024];
  const pid_t mypid = getpid();
  sprintf(c_file, "/flock_test_%d", mypid);

  // Note: the semaphores MUST be on a shared memory segment
  // NOTE(review): the mmap() result is not checked against MAP_FAILED.
  str_ConcurrentLocking *const shs =
      reinterpret_cast<str_ConcurrentLocking*>
      (mmap(0, sizeof(*shs), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
	    -1, 0));
  str_ConcurrentLocking &s = *shs;
  s.file = c_file;
  s.sem_init(1); // pshared=1: cross-process semaphores

  // Start locker processes
  pid_t pid[2];
  pid[0] = fork();
  ASSERT_GE(pid[0], 0);
  if (pid[0] == 0) {
    // Child: runs the scripted worker and exit()s; never returns here.
    process_ConcurrentLocking(s);
    exit(EXIT_FAILURE);
  }
  pid[1] = fork();
  ASSERT_GE(pid[1], 0);
  if (pid[1] == 0) {
    process_ConcurrentLocking(s);
    exit(EXIT_FAILURE);
  }

  struct timespec ts; // scratch for WAIT_/NOT_WAIT_ macros
  struct ceph_mount_info *cmount;
  STARTUP_CEPH();

  const int fd = ceph_open(cmount, c_file, O_RDWR | O_CREAT, fileMode);
  ASSERT_GE(fd, 0);

  // Lock before releasing the children: their first NB attempts fail
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));

  // Synchronization point with process (failure: process is dead)
  TWICE(PING_WORKER(1)); // (R1)
  TWICE(WAIT_WORKER(1)); // (1)

  // Shall not have lock immediately
  NOT_WAIT_WORKER(2); // (2)

  // Unlock
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));

  // Shall have lock
  TWICE(// Synchronization point with process (failure: process is dead)
	WAIT_WORKER(2); // (2)

	// Synchronization point with process (failure: process is dead)
	WAIT_WORKER(3)); // (3)

  // Wait for process to share lock
  TWICE(WAIT_WORKER(4)); // (4)
  ASSERT_EQ(-EWOULDBLOCK,
	    ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));

  // Wake up process to unlock shared lock
  TWICE(PING_WORKER(2); // (R2)
	WAIT_WORKER(5)); // (5)

  // Now we can lock exclusively
  // Upgrade to exclusive lock (as per POSIX)
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX, mypid));

  TWICE( // Wake up process to lock shared lock
	PING_WORKER(3); // (R3)

	// Shall not have lock immediately
	NOT_WAIT_WORKER(6)); // (6)

  // Release lock ; process will get it
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));
  TWICE(WAIT_WORKER(6); // (6)

	// We no longer have the lock
	ASSERT_EQ(-EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
	ASSERT_EQ(-EWOULDBLOCK,
		  ceph_flock(cmount, fd, LOCK_SH | LOCK_NB, mypid));

	// Wake up process to unlock exclusive lock
	PING_WORKER(4); // (R4)
	WAIT_WORKER(7); // (7)
	);

  // We can lock it again
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_EX | LOCK_NB, mypid));
  ASSERT_EQ(0, ceph_flock(cmount, fd, LOCK_UN, mypid));

  // Wait pids
  int status;
  ASSERT_EQ(pid[0], waitpid(pid[0], &status, 0));
  // Raw wait status compared to 0: equals EXIT_SUCCESS only for a
  // normal exit(0) (no signal, no nonzero exit code).
  ASSERT_EQ(EXIT_SUCCESS, status);
  ASSERT_EQ(pid[1], waitpid(pid[1], &status, 0));
  ASSERT_EQ(EXIT_SUCCESS, status);

  // Cleanup
  s.sem_destroy();
  ASSERT_EQ(0, munmap(shs, sizeof(*shs)));
  ASSERT_EQ(0, ceph_close(cmount, fd));
  ASSERT_EQ(0, ceph_unlink(cmount, c_file));
  CLEANUP_CEPH();
}