// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2018, Intel Corporation */

/*
 * sync.c -- persistent memory resident synchronization primitives
 */

#include <inttypes.h>

#include "obj.h"
#include "out.h"
#include "util.h"
#include "sync.h"
#include "sys_util.h"
#include "valgrind_internal.h"

/*
 * RECORD_LOCK -- (internal) on FreeBSD, atomically push a freshly
 * initialized lock onto the pool's per-type list of live locks, so they
 * can be found again later (presumably to be destroyed on pool close);
 * a no-op on other platforms
 */
#ifdef __FreeBSD__
#define RECORD_LOCK(init, type, p) \
	if (init) {\
		PMEM##type##_internal *head = pop->type##_head;\
		while (!util_bool_compare_and_swap64(&pop->type##_head, head,\
				p)) {\
			head = pop->type##_head;\
		}\
		p->PMEM##type##_next = head;\
	}
#else
#define RECORD_LOCK(init, type, p)
#endif

/*
 * _get_value -- (internal) atomically initialize and return a value.
 *	Returns -1 on error, 0 if the caller is not the value
 *	initializer, 1 if the caller is the value initializer.
 */
static int
_get_value(uint64_t pop_runid, volatile uint64_t *runid, void *value, void *arg,
	int (*init_value)(void *value, void *arg))
{
	uint64_t tmp_runid;
	int initializer = 0;

	while ((tmp_runid = *runid) != pop_runid) {
		if (tmp_runid == pop_runid - 1)
			continue;

		if (!util_bool_compare_and_swap64(runid, tmp_runid,
				pop_runid - 1))
			continue;

		initializer = 1;

		if (init_value(value, arg)) {
			ERR("error initializing lock");
			util_fetch_and_and64(runid, 0);
			return -1;
		}

		if (util_bool_compare_and_swap64(runid, pop_runid - 1,
				pop_runid) == 0) {
			ERR("error setting lock runid");
			return -1;
		}
	}

	return initializer;
}
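
/*
 * Example (illustrative sketch, not part of the build): a minimal
 * init_value callback compatible with _get_value().  _get_value() claims
 * the slot by CAS-ing runid to pop_runid - 1, runs the callback exactly
 * once, then publishes pop_runid; racing threads spin until the sentinel
 * is replaced.  The names example_latch and example_latch_init are
 * hypothetical.
 *
 *	struct example_latch {
 *		uint64_t runid;		checked and advanced by _get_value()
 *		os_mutex_t lock;	the transient value being initialized
 *	};
 *
 *	static int
 *	example_latch_init(void *value, void *arg)
 *	{
 *		(void) arg;		would carry constructor state
 *		return os_mutex_init((os_mutex_t *)value);
 *	}
 *
 *	struct example_latch *l = ...;
 *	if (_get_value(pop->run_id, &l->runid, &l->lock, NULL,
 *			example_latch_init) == -1)
 *		return EINVAL;
 */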

/*
 * get_mutex -- (internal) atomically initialize, record and return a mutex
 */
static inline os_mutex_t *
get_mutex(PMEMobjpool *pop, PMEMmutex_internal *imp)
{
	if (likely(imp->pmemmutex.runid == pop->run_id))
		return &imp->PMEMmutex_lock;

	volatile uint64_t *runid = &imp->pmemmutex.runid;

	LOG(5, "PMEMmutex %p pop->run_id %" PRIu64 " pmemmutex.runid %" PRIu64,
		imp, pop->run_id, *runid);

	ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);

	COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal));
	COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(os_mutex_t));

	VALGRIND_REMOVE_PMEM_MAPPING(imp, _POBJ_CL_SIZE);

	int initializer = _get_value(pop->run_id, runid, &imp->PMEMmutex_lock,
		NULL, (void *)os_mutex_init);
	if (initializer == -1) {
		return NULL;
	}

	RECORD_LOCK(initializer, mutex, imp);

	return &imp->PMEMmutex_lock;
}

/*
 * get_rwlock -- (internal) atomically initialize, record and return a rwlock
 */
static inline os_rwlock_t *
get_rwlock(PMEMobjpool *pop, PMEMrwlock_internal *irp)
{
	if (likely(irp->pmemrwlock.runid == pop->run_id))
		return &irp->PMEMrwlock_lock;

	volatile uint64_t *runid = &irp->pmemrwlock.runid;

	LOG(5, "PMEMrwlock %p pop->run_id %" PRIu64
		" pmemrwlock.runid %" PRIu64, irp, pop->run_id, *runid);

	ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);

	COMPILE_ERROR_ON(sizeof(PMEMrwlock) != sizeof(PMEMrwlock_internal));
	COMPILE_ERROR_ON(util_alignof(PMEMrwlock)
		!= util_alignof(os_rwlock_t));

	VALGRIND_REMOVE_PMEM_MAPPING(irp, _POBJ_CL_SIZE);

	int initializer = _get_value(pop->run_id, runid, &irp->PMEMrwlock_lock,
		NULL, (void *)os_rwlock_init);
	if (initializer == -1) {
		return NULL;
	}

	RECORD_LOCK(initializer, rwlock, irp);

	return &irp->PMEMrwlock_lock;
}

/*
 * get_cond -- (internal) atomically initialize, record and return a
 * condition variable
 */
static inline os_cond_t *
get_cond(PMEMobjpool *pop, PMEMcond_internal *icp)
{
	if (likely(icp->pmemcond.runid == pop->run_id))
		return &icp->PMEMcond_cond;

	volatile uint64_t *runid = &icp->pmemcond.runid;

	LOG(5, "PMEMcond %p pop->run_id %" PRIu64 " pmemcond.runid %" PRIu64,
		icp, pop->run_id, *runid);

	ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0);

	COMPILE_ERROR_ON(sizeof(PMEMcond) != sizeof(PMEMcond_internal));
	COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(os_cond_t));

	VALGRIND_REMOVE_PMEM_MAPPING(icp, _POBJ_CL_SIZE);

	int initializer = _get_value(pop->run_id, runid, &icp->PMEMcond_cond,
		NULL, (void *)os_cond_init);
	if (initializer == -1) {
		return NULL;
	}

	RECORD_LOCK(initializer, cond, icp);

	return &icp->PMEMcond_cond;
}

/*
 * pmemobj_mutex_zero -- zero-initialize a pmem resident mutex
 *
 * This function is not MT safe.
 */
void
pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));

	PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
	mutexip->pmemmutex.runid = 0;
	pmemops_persist(&pop->p_ops, &mutexip->pmemmutex.runid,
		sizeof(mutexip->pmemmutex.runid));
}
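
/*
 * Example (illustrative sketch): zeroing is typically done from an object
 * constructor, before the object is visible to other threads.  struct foo
 * and foo_construct are hypothetical; foo_construct matches the
 * pmemobj_alloc() constructor signature.
 *
 *	struct foo {
 *		PMEMmutex lock;
 *		uint64_t counter;
 *	};
 *
 *	static int
 *	foo_construct(PMEMobjpool *pop, void *ptr, void *arg)
 *	{
 *		struct foo *f = (struct foo *)ptr;
 *		pmemobj_mutex_zero(pop, &f->lock);
 *		f->counter = 0;
 *		pmemobj_persist(pop, &f->counter, sizeof(f->counter));
 *		return 0;
 *	}
 */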

/*
 * pmemobj_mutex_lock -- lock a pmem resident mutex
 *
 * Atomically initializes and locks a PMEMmutex, otherwise behaves as its
 * POSIX counterpart.
 */
int
pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));

	PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
	os_mutex_t *mutex = get_mutex(pop, mutexip);

	if (mutex == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);

	return os_mutex_lock(mutex);
}

/*
 * pmemobj_mutex_assert_locked -- checks whether mutex is locked.
 *
 * Returns 0 when mutex is locked.
 */
int
pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));

	PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
	os_mutex_t *mutex = get_mutex(pop, mutexip);
	if (mutex == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);

	int ret = os_mutex_trylock(mutex);
	if (ret == EBUSY)
		return 0;
	if (ret == 0) {
		util_mutex_unlock(mutex);
		/*
		 * There's no good error code for this case. EINVAL is used for
		 * something else here.
		 */
		return ENODEV;
	}
	return ret;
}
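
/*
 * Example (illustrative sketch): checking a lock invariant.  Note that the
 * check is trylock-based, so it reports that *some* thread holds the
 * mutex, not necessarily the caller.  foo_remove_locked is hypothetical.
 *
 *	static void
 *	foo_remove_locked(PMEMobjpool *pop, struct foo *f)
 *	{
 *		ASSERTeq(pmemobj_mutex_assert_locked(pop, &f->lock), 0);
 *		... operate on f, which the caller must have locked ...
 *	}
 */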

/*
 * pmemobj_mutex_timedlock -- lock a pmem resident mutex
 *
 * Atomically initializes and locks a PMEMmutex, otherwise behaves as its
 * POSIX counterpart.
 */
int
pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp,
	const struct timespec *__restrict abs_timeout)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));

	PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
	os_mutex_t *mutex = get_mutex(pop, mutexip);
	if (mutex == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);

	return os_mutex_timedlock(mutex, abs_timeout);
}
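
/*
 * Example (illustrative sketch): like its POSIX counterpart, the timeout
 * is an absolute CLOCK_REALTIME deadline, not a relative interval.
 *
 *	struct timespec deadline;
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 1;	give up after roughly one second
 *
 *	int err = pmemobj_mutex_timedlock(pop, &f->lock, &deadline);
 *	if (err == ETIMEDOUT)
 *		... back off and retry ...
 */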

/*
 * pmemobj_mutex_trylock -- trylock a pmem resident mutex
 *
 * Atomically initializes and trylocks a PMEMmutex, otherwise behaves as its
 * POSIX counterpart.
 */
int
pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));

	PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
	os_mutex_t *mutex = get_mutex(pop, mutexip);
	if (mutex == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);

	return os_mutex_trylock(mutex);
}

/*
 * pmemobj_mutex_unlock -- unlock a pmem resident mutex
 */
int
pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp)
{
	LOG(3, "pop %p mutex %p", pop, mutexp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));

	/* XXX potential performance improvement - move GET to debug version */
	PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
	os_mutex_t *mutex = get_mutex(pop, mutexip);
	if (mutex == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);

	return os_mutex_unlock(mutex);
}
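
/*
 * Example (illustrative sketch): the usual pattern.  No explicit runtime
 * initialization call is needed; the first lock operation after a pool is
 * opened (re)initializes the transient mutex via get_mutex().
 *
 *	int err = pmemobj_mutex_lock(pop, &f->lock);
 *	if (err)
 *		return err;
 *	f->counter++;
 *	pmemobj_persist(pop, &f->counter, sizeof(f->counter));
 *	(void) pmemobj_mutex_unlock(pop, &f->lock);
 */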

/*
 * pmemobj_rwlock_zero -- zero-initialize a pmem resident rwlock
 *
 * This function is not MT safe.
 */
void
pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));

	PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
	rwlockip->pmemrwlock.runid = 0;
	pmemops_persist(&pop->p_ops, &rwlockip->pmemrwlock.runid,
		sizeof(rwlockip->pmemrwlock.runid));
}

/*
 * pmemobj_rwlock_rdlock -- rdlock a pmem resident rwlock
 *
 * Atomically initializes and rdlocks a PMEMrwlock, otherwise behaves as its
 * POSIX counterpart.
 */
int
pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));

	PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
	os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
	if (rwlock == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);

	return os_rwlock_rdlock(rwlock);
}

/*
 * pmemobj_rwlock_wrlock -- wrlock a pmem resident rwlock
 *
 * Atomically initializes and wrlocks a PMEMrwlock, otherwise behaves as its
 * POSIX counterpart.
 */
int
pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));

	PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
	os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
	if (rwlock == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);

	return os_rwlock_wrlock(rwlock);
}

/*
 * pmemobj_rwlock_timedrdlock -- timedrdlock a pmem resident rwlock
 *
 * Atomically initializes and timedrdlocks a PMEMrwlock, otherwise behaves as
 * its POSIX counterpart.
 */
int
pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp,
	const struct timespec *__restrict abs_timeout)
{
	LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp,
		abs_timeout->tv_sec, abs_timeout->tv_nsec);

	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));

	PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
	os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
	if (rwlock == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);

	return os_rwlock_timedrdlock(rwlock, abs_timeout);
}

/*
 * pmemobj_rwlock_timedwrlock -- timedwrlock a pmem resident rwlock
 *
 * Atomically initializes and timedwrlocks a PMEMrwlock, otherwise behaves as
 * its POSIX counterpart.
 */
int
pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp,
	const struct timespec *__restrict abs_timeout)
{
	LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp,
		abs_timeout->tv_sec, abs_timeout->tv_nsec);

	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));

	PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
	os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
	if (rwlock == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);

	return os_rwlock_timedwrlock(rwlock, abs_timeout);
}

/*
 * pmemobj_rwlock_tryrdlock -- tryrdlock a pmem resident rwlock
 *
 * Atomically initializes and tryrdlocks a PMEMrwlock, otherwise behaves as its
 * POSIX counterpart.
 */
int
pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));

	PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
	os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
	if (rwlock == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);

	return os_rwlock_tryrdlock(rwlock);
}

/*
 * pmemobj_rwlock_trywrlock -- trywrlock a pmem resident rwlock
 *
 * Atomically initializes and trywrlocks a PMEMrwlock, otherwise behaves as its
 * POSIX counterpart.
 */
int
pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));

	PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
	os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
	if (rwlock == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);

	return os_rwlock_trywrlock(rwlock);
}

/*
 * pmemobj_rwlock_unlock -- unlock a pmem resident rwlock
 */
int
pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp)
{
	LOG(3, "pop %p rwlock %p", pop, rwlockp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp));

	/* XXX potential performance improvement - move GET to debug version */
	PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp;
	os_rwlock_t *rwlock = get_rwlock(pop, rwlockip);
	if (rwlock == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0);

	return os_rwlock_unlock(rwlock);
}
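
/*
 * Example (illustrative sketch): readers and a writer sharing f->rwlock,
 * a hypothetical PMEMrwlock field of struct foo.
 *
 *	reader:
 *		if (pmemobj_rwlock_rdlock(pop, &f->rwlock) == 0) {
 *			uint64_t v = f->counter;
 *			(void) pmemobj_rwlock_unlock(pop, &f->rwlock);
 *		}
 *
 *	writer:
 *		if (pmemobj_rwlock_wrlock(pop, &f->rwlock) == 0) {
 *			f->counter++;
 *			pmemobj_persist(pop, &f->counter,
 *				sizeof(f->counter));
 *			(void) pmemobj_rwlock_unlock(pop, &f->rwlock);
 *		}
 */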

/*
 * pmemobj_cond_zero -- zero-initialize a pmem resident condition variable
 *
 * This function is not MT safe.
 */
void
pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp)
{
	LOG(3, "pop %p cond %p", pop, condp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(condp));

	PMEMcond_internal *condip = (PMEMcond_internal *)condp;
	condip->pmemcond.runid = 0;
	pmemops_persist(&pop->p_ops, &condip->pmemcond.runid,
		sizeof(condip->pmemcond.runid));
}

/*
 * pmemobj_cond_broadcast -- broadcast a pmem resident condition variable
 *
 * Atomically initializes and broadcasts a PMEMcond, otherwise behaves as its
 * POSIX counterpart.
 */
int
pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp)
{
	LOG(3, "pop %p cond %p", pop, condp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(condp));

	PMEMcond_internal *condip = (PMEMcond_internal *)condp;
	os_cond_t *cond = get_cond(pop, condip);
	if (cond == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);

	return os_cond_broadcast(cond);
}

/*
 * pmemobj_cond_signal -- signal a pmem resident condition variable
 *
 * Atomically initializes and signals a PMEMcond, otherwise behaves as its
 * POSIX counterpart.
 */
int
pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp)
{
	LOG(3, "pop %p cond %p", pop, condp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(condp));

	PMEMcond_internal *condip = (PMEMcond_internal *)condp;
	os_cond_t *cond = get_cond(pop, condip);
	if (cond == NULL)
		return EINVAL;

	ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);

	return os_cond_signal(cond);
}

/*
 * pmemobj_cond_timedwait -- timedwait on a pmem resident condition variable
 *
 * Atomically initializes and timedwaits on a PMEMcond, otherwise behaves as
 * its POSIX counterpart.
 */
int
pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp,
	PMEMmutex *__restrict mutexp,
	const struct timespec *__restrict abs_timeout)
{
	LOG(3, "pop %p cond %p mutex %p abstime sec %ld nsec %ld", pop, condp,
		mutexp, abs_timeout->tv_sec, abs_timeout->tv_nsec);

	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
	ASSERTeq(pop, pmemobj_pool_by_ptr(condp));

	PMEMcond_internal *condip = (PMEMcond_internal *)condp;
	PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
	os_cond_t *cond = get_cond(pop, condip);
	os_mutex_t *mutex = get_mutex(pop, mutexip);
	if ((cond == NULL) || (mutex == NULL))
		return EINVAL;

	ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
	ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);

	return os_cond_timedwait(cond, mutex, abs_timeout);
}

/*
 * pmemobj_cond_wait -- wait on a pmem resident condition variable
 *
 * Atomically initializes and waits on a PMEMcond, otherwise behaves as its
 * POSIX counterpart.
 */
int
pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp,
	PMEMmutex *__restrict mutexp)
{
	LOG(3, "pop %p cond %p mutex %p", pop, condp, mutexp);

	ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp));
	ASSERTeq(pop, pmemobj_pool_by_ptr(condp));

	PMEMcond_internal *condip = (PMEMcond_internal *)condp;
	PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp;
	os_cond_t *cond = get_cond(pop, condip);
	os_mutex_t *mutex = get_mutex(pop, mutexip);
	if ((cond == NULL) || (mutex == NULL))
		return EINVAL;

	ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0);
	ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0);

	return os_cond_wait(cond, mutex);
}
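
/*
 * Example (illustrative sketch): the standard predicate loop around
 * pmemobj_cond_wait(), guarding against spurious wakeups.  f->nonempty
 * and f->cond are hypothetical fields of struct foo.
 *
 *	consumer:
 *		pmemobj_mutex_lock(pop, &f->lock);
 *		while (!f->nonempty)
 *			pmemobj_cond_wait(pop, &f->cond, &f->lock);
 *		... consume ...
 *		pmemobj_mutex_unlock(pop, &f->lock);
 *
 *	producer:
 *		pmemobj_mutex_lock(pop, &f->lock);
 *		f->nonempty = 1;
 *		pmemobj_cond_signal(pop, &f->cond);
 *		pmemobj_mutex_unlock(pop, &f->lock);
 */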

/*
 * pmemobj_volatile -- atomically initialize, record and return a
 *	generic value
 */
void *
pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt,
	void *ptr, size_t size,
	int (*constr)(void *ptr, void *arg), void *arg)
{
	LOG(3, "pop %p vlt %p ptr %p constr %p arg %p", pop, vlt, ptr,
		constr, arg);

	if (likely(vlt->runid == pop->run_id))
		return ptr;

	VALGRIND_REMOVE_PMEM_MAPPING(ptr, size);

	VALGRIND_ADD_TO_TX(vlt, sizeof(*vlt));
	if (_get_value(pop->run_id, &vlt->runid, ptr, arg, constr) < 0) {
		VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt));
		return NULL;
	}

	VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt));
	VALGRIND_SET_CLEAN(vlt, sizeof(*vlt));

	return ptr;
}
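
/*
 * Example (illustrative sketch): keeping a transient pointer next to
 * persistent data and rebuilding it once per pool run.  struct node_cache,
 * node_cache_init, rebuild_index, nc and tree_root are all hypothetical.
 *
 *	struct node_cache {
 *		struct pmemvlt vlt;
 *		void *index;		transient, rebuilt on each run
 *	};
 *
 *	static int
 *	node_cache_init(void *ptr, void *arg)
 *	{
 *		void **index = ptr;
 *		*index = rebuild_index(arg);
 *		return *index == NULL ? -1 : 0;
 *	}
 *
 *	void **index = pmemobj_volatile(pop, &nc->vlt, &nc->index,
 *			sizeof(nc->index), node_cache_init, tree_root);
 */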