// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */

/*
 * blk.c -- block memory pool entry points for libpmemblk
 */

#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/param.h>
#include <unistd.h>
#include <errno.h>
#include <time.h>
#include <stdint.h>
#include <endian.h>
#include <stdbool.h>

#include "libpmem.h"
#include "libpmemblk.h"

#include "mmap.h"
#include "set.h"
#include "out.h"
#include "btt.h"
#include "blk.h"
#include "util.h"
#include "sys_util.h"
#include "util_pmem.h"
#include "valgrind_internal.h"

static const struct pool_attr Blk_create_attr = {
		BLK_HDR_SIG,
		BLK_FORMAT_MAJOR,
		BLK_FORMAT_FEAT_DEFAULT,
		{0}, {0}, {0}, {0}, {0}
};

static const struct pool_attr Blk_open_attr = {
		BLK_HDR_SIG,
		BLK_FORMAT_MAJOR,
		BLK_FORMAT_FEAT_CHECK,
		{0}, {0}, {0}, {0}, {0}
};

/*
 * lane_enter -- (internal) acquire a unique lane number
 */
static void
lane_enter(PMEMblkpool *pbp, unsigned *lane)
{
	unsigned mylane;

	mylane = util_fetch_and_add32(&pbp->next_lane, 1) % pbp->nlane;

	/* lane selected, grab the per-lane lock */
	util_mutex_lock(&pbp->locks[mylane]);

	*lane = mylane;
}

/*
 * lane_exit -- (internal) drop lane lock
 */
static void
lane_exit(PMEMblkpool *pbp, unsigned mylane)
{
	util_mutex_unlock(&pbp->locks[mylane]);
}

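/*
 * A minimal sketch of how these two helpers pair up -- the same pattern
 * the exported entry points below follow.  "do_btt_io" is a hypothetical
 * placeholder, not a function in this file:
 *
 *	unsigned lane;
 *	lane_enter(pbp, &lane);
 *	do_btt_io(pbp->bttp, lane);
 *	lane_exit(pbp, lane);
 *
 * The fetch-and-add in lane_enter() distributes callers round-robin
 * across the nlane per-lane locks.
 */
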
/*
 * nsread -- (internal) read data from the namespace encapsulating the BTT
 *
 * This routine is provided to btt_init() to allow the btt module to
 * do I/O on the memory pool containing the BTT layout.
 */
static int
nsread(void *ns, unsigned lane, void *buf, size_t count, uint64_t off)
{
	struct pmemblk *pbp = (struct pmemblk *)ns;

	LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);

	if (off + count > pbp->datasize) {
		ERR("offset + count (%zu) past end of data area (%zu)",
				(size_t)off + count, pbp->datasize);
		errno = EINVAL;
		return -1;
	}

	memcpy(buf, (char *)pbp->data + off, count);

	return 0;
}

/*
 * nswrite -- (internal) write data to the namespace encapsulating the BTT
 *
 * This routine is provided to btt_init() to allow the btt module to
 * do I/O on the memory pool containing the BTT layout.
 */
static int
nswrite(void *ns, unsigned lane, const void *buf, size_t count,
		uint64_t off)
{
	struct pmemblk *pbp = (struct pmemblk *)ns;

	LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);

	if (off + count > pbp->datasize) {
		ERR("offset + count (%zu) past end of data area (%zu)",
				(size_t)off + count, pbp->datasize);
		errno = EINVAL;
		return -1;
	}

	void *dest = (char *)pbp->data + off;

#ifdef DEBUG
	/* grab debug write lock */
	util_mutex_lock(&pbp->write_lock);
#endif

	/* unprotect the memory (debug version only) */
	RANGE_RW(dest, count, pbp->is_dev_dax);

	if (pbp->is_pmem)
		pmem_memcpy_nodrain(dest, buf, count);
	else
		memcpy(dest, buf, count);

	/* protect the memory again (debug version only) */
	RANGE_RO(dest, count, pbp->is_dev_dax);

#ifdef DEBUG
	/* release debug write lock */
	util_mutex_unlock(&pbp->write_lock);
#endif

	if (pbp->is_pmem)
		pmem_drain();
	else
		pmem_msync(dest, count);

	return 0;
}

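/*
 * Note on the flush idiom above (a sketch, using libpmem calls):
 * pmem_memcpy_nodrain() copies and flushes the stores but skips the
 * final drain step, so a caller may batch several copies behind a
 * single pmem_drain() barrier, e.g.:
 *
 *	pmem_memcpy_nodrain(dst1, src1, len1);
 *	pmem_memcpy_nodrain(dst2, src2, len2);
 *	pmem_drain();
 *
 * On non-pmem mappings the same durability point is reached with
 * pmem_msync() instead.
 */
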
/*
 * nsmap -- (internal) allow direct access to a range of a namespace
 *
 * The caller requests a range to be "mapped" but the return value
 * may indicate a smaller amount (in which case the caller is expected
 * to call back later for another mapping).
 *
 * This routine is provided to btt_init() to allow the btt module to
 * do I/O on the memory pool containing the BTT layout.
 */
static ssize_t
nsmap(void *ns, unsigned lane, void **addrp, size_t len, uint64_t off)
{
	struct pmemblk *pbp = (struct pmemblk *)ns;

	LOG(12, "pbp %p lane %u len %zu off %" PRIu64, pbp, lane, len, off);

	ASSERT(((ssize_t)len) >= 0);

	if (off + len >= pbp->datasize) {
		ERR("offset + len (%zu) past end of data area (%zu)",
				(size_t)off + len, pbp->datasize - 1);
		errno = EINVAL;
		return -1;
	}

	/*
	 * Since the entire file is memory-mapped, this callback
	 * can always provide the entire length requested.
	 */
	*addrp = (char *)pbp->data + off;

	LOG(12, "returning addr %p", *addrp);

	return (ssize_t)len;
}

/*
 * nssync -- (internal) flush changes made to a namespace range
 *
 * This is used in conjunction with the addresses handed out by
 * nsmap() above. There's no need to sync things written via
 * nswrite() since those changes are flushed each time nswrite()
 * is called.
 *
 * This routine is provided to btt_init() to allow the btt module to
 * do I/O on the memory pool containing the BTT layout.
 */
static void
nssync(void *ns, unsigned lane, void *addr, size_t len)
{
	struct pmemblk *pbp = (struct pmemblk *)ns;

	LOG(12, "pbp %p lane %u addr %p len %zu", pbp, lane, addr, len);

	if (pbp->is_pmem)
		pmem_persist(addr, len);
	else
		pmem_msync(addr, len);
}

/*
 * nszero -- (internal) zero data in the namespace encapsulating the BTT
 *
 * This routine is provided to btt_init() to allow the btt module to
 * zero the memory pool containing the BTT layout.
 */
static int
nszero(void *ns, unsigned lane, size_t count, uint64_t off)
{
	struct pmemblk *pbp = (struct pmemblk *)ns;

	LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off);

	if (off + count > pbp->datasize) {
		ERR("offset + count (%zu) past end of data area (%zu)",
				(size_t)off + count, pbp->datasize);
		errno = EINVAL;
		return -1;
	}

	void *dest = (char *)pbp->data + off;

	/* unprotect the memory (debug version only) */
	RANGE_RW(dest, count, pbp->is_dev_dax);

	pmem_memset_persist(dest, 0, count);

	/* protect the memory again (debug version only) */
	RANGE_RO(dest, count, pbp->is_dev_dax);

	return 0;
}

/* callbacks for btt_init() */
static struct ns_callback ns_cb = {
	.nsread = nsread,
	.nswrite = nswrite,
	.nszero = nszero,
	.nsmap = nsmap,
	.nssync = nssync,
	.ns_is_zeroed = 0
};

/*
 * blk_descr_create -- (internal) create block memory pool descriptor
 */
static void
blk_descr_create(PMEMblkpool *pbp, uint32_t bsize, int zeroed)
{
	LOG(3, "pbp %p bsize %u zeroed %d", pbp, bsize, zeroed);

	/* create the required metadata */
	pbp->bsize = htole32(bsize);
	util_persist(pbp->is_pmem, &pbp->bsize, sizeof(bsize));

	pbp->is_zeroed = zeroed;
	util_persist(pbp->is_pmem, &pbp->is_zeroed, sizeof(pbp->is_zeroed));
}

/*
 * blk_descr_check -- (internal) validate block memory pool descriptor
 */
static int
blk_descr_check(PMEMblkpool *pbp, size_t *bsize)
{
	LOG(3, "pbp %p bsize %zu", pbp, *bsize);

	size_t hdr_bsize = le32toh(pbp->bsize);
	if (*bsize && *bsize != hdr_bsize) {
		ERR("wrong bsize (%zu), pool created with bsize %zu",
				*bsize, hdr_bsize);
		errno = EINVAL;
		return -1;
	}
	*bsize = hdr_bsize;
	LOG(3, "using block size from header: %zu", *bsize);

	return 0;
}

/*
 * blk_runtime_init -- (internal) initialize block memory pool runtime data
 */
static int
blk_runtime_init(PMEMblkpool *pbp, size_t bsize, int rdonly)
{
	LOG(3, "pbp %p bsize %zu rdonly %d",
			pbp, bsize, rdonly);

	/* remove volatile part of header */
	VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
			sizeof(struct pmemblk) -
			sizeof(struct pool_hdr) -
			sizeof(pbp->bsize) -
			sizeof(pbp->is_zeroed));

	/*
	 * Use some of the memory pool area for run-time info. This
	 * run-time state is never loaded from the file, it is always
	 * created here, so no need to worry about byte-order.
	 */
	pbp->rdonly = rdonly;
	pbp->data = (char *)pbp->addr +
			roundup(sizeof(*pbp), BLK_FORMAT_DATA_ALIGN);
	ASSERT(((char *)pbp->addr + pbp->size) >= (char *)pbp->data);
	pbp->datasize = (size_t)
			(((char *)pbp->addr + pbp->size) - (char *)pbp->data);

	LOG(4, "data area %p data size %zu bsize %zu",
			pbp->data, pbp->datasize, bsize);

	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	if (ncpus < 1)
		ncpus = 1;

	ns_cb.ns_is_zeroed = pbp->is_zeroed;

	/* things freed by "goto err" if not NULL */
	struct btt *bttp = NULL;
	os_mutex_t *locks = NULL;

	bttp = btt_init(pbp->datasize, (uint32_t)bsize, pbp->hdr.poolset_uuid,
			(unsigned)ncpus * 2, pbp, &ns_cb);

	if (bttp == NULL)
		goto err;	/* btt_init set errno, called LOG */

	pbp->bttp = bttp;

	pbp->nlane = btt_nlane(pbp->bttp);
	pbp->next_lane = 0;
	if ((locks = Malloc(pbp->nlane * sizeof(*locks))) == NULL) {
		ERR("!Malloc for lane locks");
		goto err;
	}

	for (unsigned i = 0; i < pbp->nlane; i++)
		util_mutex_init(&locks[i]);

	pbp->locks = locks;

#ifdef DEBUG
	/* initialize debug lock */
	util_mutex_init(&pbp->write_lock);
#endif

	/*
	 * If possible, turn off all permissions on the pool header page.
	 *
	 * The prototype PMFS doesn't allow this when large pages are in
	 * use. It is not considered an error if this fails.
	 */
	RANGE_NONE(pbp->addr, sizeof(struct pool_hdr), pbp->is_dev_dax);

	/* the data area should be kept read-only for debug version */
	RANGE_RO(pbp->data, pbp->datasize, pbp->is_dev_dax);

	return 0;

err:
	LOG(4, "error clean up");
	int oerrno = errno;
	if (bttp)
		btt_fini(bttp);
	errno = oerrno;
	return -1;
}

/*
 * pmemblk_createU -- create a block memory pool
 */
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
	LOG(3, "path %s bsize %zu poolsize %zu mode %o",
			path, bsize, poolsize, mode);

	/* check if bsize is valid */
	if (bsize == 0) {
		ERR("Invalid block size %zu", bsize);
		errno = EINVAL;
		return NULL;
	}

	if (bsize > UINT32_MAX) {
		ERR("Invalid block size %zu", bsize);
		errno = EINVAL;
		return NULL;
	}

	struct pool_set *set;
	struct pool_attr adj_pool_attr = Blk_create_attr;

	/* force set SDS feature */
	if (SDS_at_create)
		adj_pool_attr.features.incompat |= POOL_FEAT_SDS;
	else
		adj_pool_attr.features.incompat &= ~POOL_FEAT_SDS;

	if (util_pool_create(&set, path, poolsize, PMEMBLK_MIN_POOL,
			PMEMBLK_MIN_PART, &adj_pool_attr, NULL,
			REPLICAS_DISABLED) != 0) {
		LOG(2, "cannot create pool or pool set");
		return NULL;
	}

	ASSERT(set->nreplicas > 0);

	struct pool_replica *rep = set->replica[0];
	PMEMblkpool *pbp = rep->part[0].addr;

	VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
			sizeof(struct pmemblk) -
			((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));

	pbp->addr = pbp;
	pbp->size = rep->repsize;
	pbp->set = set;
	pbp->is_pmem = rep->is_pmem;
	pbp->is_dev_dax = rep->part[0].is_dev_dax;

	/* is_dev_dax implies is_pmem */
	ASSERT(!pbp->is_dev_dax || pbp->is_pmem);

	/* create pool descriptor */
	blk_descr_create(pbp, (uint32_t)bsize, set->zeroed);

	/* initialize runtime parts */
	if (blk_runtime_init(pbp, bsize, 0) != 0) {
		ERR("pool initialization failed");
		goto err;
	}

	if (util_poolset_chmod(set, mode))
		goto err;

	util_poolset_fdclose(set);

	LOG(3, "pbp %p", pbp);
	return pbp;

err:
	LOG(4, "error clean up");
	int oerrno = errno;
	util_poolset_close(set, DELETE_CREATED_PARTS);
	errno = oerrno;
	return NULL;
}

#ifndef _WIN32
/*
 * pmemblk_create -- create a block memory pool
 */
PMEMblkpool *
pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode)
{
	return pmemblk_createU(path, bsize, poolsize, mode);
}
#else
/*
 * pmemblk_createW -- create a block memory pool
 */
PMEMblkpool *
pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize,
		mode_t mode)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL)
		return NULL;

	PMEMblkpool *ret = pmemblk_createU(upath, bsize, poolsize, mode);

	util_free_UTF8(upath);
	return ret;
}
#endif

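/*
 * Example (illustrative sketch, not part of the library): creating a
 * pool with 4 KiB blocks.  The path, block size and mode are assumed
 * values chosen for the example; errors are reported via errno as in
 * the code above.
 *
 *	PMEMblkpool *pbp = pmemblk_create("/pmem/my_pool", 4096,
 *			PMEMBLK_MIN_POOL, 0600);
 *	if (pbp == NULL)
 *		perror("pmemblk_create");
 */
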
/*
 * blk_open_common -- (internal) open a block memory pool
 *
 * This routine does all the work, but takes a cow flag so internal
 * calls can map a read-only pool if required.
 *
 * Passing in bsize == 0 means a valid pool header must exist (which
 * will supply the block size).
 */
static PMEMblkpool *
blk_open_common(const char *path, size_t bsize, unsigned flags)
{
	LOG(3, "path %s bsize %zu flags 0x%x", path, bsize, flags);

	struct pool_set *set;

	if (util_pool_open(&set, path, PMEMBLK_MIN_PART, &Blk_open_attr,
			NULL, NULL, flags) != 0) {
		LOG(2, "cannot open pool or pool set");
		return NULL;
	}

	ASSERT(set->nreplicas > 0);

	struct pool_replica *rep = set->replica[0];
	PMEMblkpool *pbp = rep->part[0].addr;

	VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr,
			sizeof(struct pmemblk) -
			((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr));

	pbp->addr = pbp;
	pbp->size = rep->repsize;
	pbp->set = set;
	pbp->is_pmem = rep->is_pmem;
	pbp->is_dev_dax = rep->part[0].is_dev_dax;

	/* is_dev_dax implies is_pmem */
	ASSERT(!pbp->is_dev_dax || pbp->is_pmem);

	if (set->nreplicas > 1) {
		errno = ENOTSUP;
		ERR("!replicas not supported");
		goto err;
	}

	/* validate pool descriptor */
	if (blk_descr_check(pbp, &bsize) != 0) {
		LOG(2, "descriptor check failed");
		goto err;
	}

	/* initialize runtime parts */
	if (blk_runtime_init(pbp, bsize, set->rdonly) != 0) {
		ERR("pool initialization failed");
		goto err;
	}

	util_poolset_fdclose(set);

	LOG(3, "pbp %p", pbp);
	return pbp;

err:
	LOG(4, "error clean up");
	int oerrno = errno;
	util_poolset_close(set, DO_NOT_DELETE_PARTS);
	errno = oerrno;
	return NULL;
}

/*
 * pmemblk_openU -- open a block memory pool
 */
#ifndef _WIN32
static inline
#endif
PMEMblkpool *
pmemblk_openU(const char *path, size_t bsize)
{
	LOG(3, "path %s bsize %zu", path, bsize);

	return blk_open_common(path, bsize, COW_at_open ? POOL_OPEN_COW : 0);
}

#ifndef _WIN32
/*
 * pmemblk_open -- open a block memory pool
 */
PMEMblkpool *
pmemblk_open(const char *path, size_t bsize)
{
	return pmemblk_openU(path, bsize);
}
#else
/*
 * pmemblk_openW -- open a block memory pool
 */
PMEMblkpool *
pmemblk_openW(const wchar_t *path, size_t bsize)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL)
		return NULL;

	PMEMblkpool *ret = pmemblk_openU(upath, bsize);

	util_free_UTF8(upath);
	return ret;
}
#endif

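/*
 * Example (illustrative sketch): reopening an existing pool.  Passing
 * bsize == 0 lets blk_open_common() take the block size from the pool
 * header; a nonzero bsize is validated against the header instead.
 * The path is an assumed value for the example.
 *
 *	PMEMblkpool *pbp = pmemblk_open("/pmem/my_pool", 0);
 *	if (pbp == NULL)
 *		perror("pmemblk_open");
 *	size_t bsize = pmemblk_bsize(pbp);
 */
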
/*
 * pmemblk_close -- close a block memory pool
 */
void
pmemblk_close(PMEMblkpool *pbp)
{
	LOG(3, "pbp %p", pbp);

	btt_fini(pbp->bttp);
	if (pbp->locks) {
		for (unsigned i = 0; i < pbp->nlane; i++)
			util_mutex_destroy(&pbp->locks[i]);
		Free((void *)pbp->locks);
	}

#ifdef DEBUG
	/* destroy debug lock */
	util_mutex_destroy(&pbp->write_lock);
#endif

	util_poolset_close(pbp->set, DO_NOT_DELETE_PARTS);
}

/*
 * pmemblk_bsize -- return size of block for specified pool
 */
size_t
pmemblk_bsize(PMEMblkpool *pbp)
{
	LOG(3, "pbp %p", pbp);

	return le32toh(pbp->bsize);
}

/*
 * pmemblk_nblock -- return number of usable blocks in a block memory pool
 */
size_t
pmemblk_nblock(PMEMblkpool *pbp)
{
	LOG(3, "pbp %p", pbp);

	return btt_nlba(pbp->bttp);
}

/*
 * pmemblk_read -- read a block in a block memory pool
 */
int
pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno)
{
	LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);

	if (blockno < 0) {
		ERR("negative block number");
		errno = EINVAL;
		return -1;
	}

	unsigned lane;

	lane_enter(pbp, &lane);

	int err = btt_read(pbp->bttp, lane, (uint64_t)blockno, buf);

	lane_exit(pbp, lane);

	return err;
}

/*
 * pmemblk_write -- write a block (atomically) in a block memory pool
 */
int
pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno)
{
	LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno);

	if (pbp->rdonly) {
		ERR("EROFS (pool is read-only)");
		errno = EROFS;
		return -1;
	}

	if (blockno < 0) {
		ERR("negative block number");
		errno = EINVAL;
		return -1;
	}

	unsigned lane;

	lane_enter(pbp, &lane);

	int err = btt_write(pbp->bttp, lane, (uint64_t)blockno, buf);

	lane_exit(pbp, lane);

	return err;
}

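/*
 * Example (illustrative sketch): round-tripping one block.  The buffer
 * must be at least pmemblk_bsize(pbp) bytes, and each write is made
 * atomic with respect to crashes by the BTT layer underneath.
 *
 *	size_t bsize = pmemblk_bsize(pbp);
 *	unsigned char *buf = malloc(bsize);
 *	memset(buf, 0xab, bsize);
 *	if (pmemblk_write(pbp, buf, 0) < 0)
 *		perror("pmemblk_write");
 *	if (pmemblk_read(pbp, buf, 0) < 0)
 *		perror("pmemblk_read");
 *	free(buf);
 */
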
/*
 * pmemblk_set_zero -- zero a block in a block memory pool
 */
int
pmemblk_set_zero(PMEMblkpool *pbp, long long blockno)
{
	LOG(3, "pbp %p blockno %lld", pbp, blockno);

	if (pbp->rdonly) {
		ERR("EROFS (pool is read-only)");
		errno = EROFS;
		return -1;
	}

	if (blockno < 0) {
		ERR("negative block number");
		errno = EINVAL;
		return -1;
	}

	unsigned lane;

	lane_enter(pbp, &lane);

	int err = btt_set_zero(pbp->bttp, lane, (uint64_t)blockno);

	lane_exit(pbp, lane);

	return err;
}

/*
 * pmemblk_set_error -- set the error state on a block in a block memory pool
 */
int
pmemblk_set_error(PMEMblkpool *pbp, long long blockno)
{
	LOG(3, "pbp %p blockno %lld", pbp, blockno);

	if (pbp->rdonly) {
		ERR("EROFS (pool is read-only)");
		errno = EROFS;
		return -1;
	}

	if (blockno < 0) {
		ERR("negative block number");
		errno = EINVAL;
		return -1;
	}

	unsigned lane;

	lane_enter(pbp, &lane);

	int err = btt_set_error(pbp->bttp, lane, (uint64_t)blockno);

	lane_exit(pbp, lane);

	return err;
}

/*
 * pmemblk_checkU -- block memory pool consistency check
 */
#ifndef _WIN32
static inline
#endif
int
pmemblk_checkU(const char *path, size_t bsize)
{
	LOG(3, "path \"%s\" bsize %zu", path, bsize);

	/* map the pool read-only */
	PMEMblkpool *pbp = blk_open_common(path, bsize, POOL_OPEN_COW);
	if (pbp == NULL)
		return -1;	/* errno set by blk_open_common() */

	int retval = btt_check(pbp->bttp);
	int oerrno = errno;
	pmemblk_close(pbp);
	errno = oerrno;

	return retval;
}

#ifndef _WIN32
/*
 * pmemblk_check -- block memory pool consistency check
 */
int
pmemblk_check(const char *path, size_t bsize)
{
	return pmemblk_checkU(path, bsize);
}
#else
/*
 * pmemblk_checkW -- block memory pool consistency check
 */
int
pmemblk_checkW(const wchar_t *path, size_t bsize)
{
	char *upath = util_toUTF8(path);
	if (upath == NULL)
		return -1;

	int ret = pmemblk_checkU(upath, bsize);

	util_free_UTF8(upath);
	return ret;
}
#endif

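/*
 * Example (illustrative sketch): checking a pool before use.  The check
 * maps the pool copy-on-write, so the file itself is never modified.
 * A return of 1 means consistent, 0 means inconsistent, and -1 means
 * the check could not be performed (errno is set).
 *
 *	int rc = pmemblk_check("/pmem/my_pool", 0);
 *	if (rc < 1)
 *		fprintf(stderr, "pool check failed (%d)\n", rc);
 */
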
/*
 * pmemblk_ctl_getU -- programmatically executes a read ctl query
 */
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg)
{
	LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
	return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
			CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg);
}

/*
 * pmemblk_ctl_setU -- programmatically executes a write ctl query
 */
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg)
{
	LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
	return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
			CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg);
}

/*
 * pmemblk_ctl_execU -- programmatically executes a runnable ctl query
 */
#ifndef _WIN32
static inline
#endif
int
pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg)
{
	LOG(3, "pbp %p name %s arg %p", pbp, name, arg);
	return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp,
			CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg);
}

#ifndef _WIN32
/*
 * pmemblk_ctl_get -- programmatically executes a read ctl query
 */
int
pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg)
{
	return pmemblk_ctl_getU(pbp, name, arg);
}

/*
 * pmemblk_ctl_set -- programmatically executes a write ctl query
 */
int
pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg)
{
	return pmemblk_ctl_setU(pbp, name, arg);
}

/*
 * pmemblk_ctl_exec -- programmatically executes a runnable ctl query
 */
int
pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg)
{
	return pmemblk_ctl_execU(pbp, name, arg);
}
#else
/*
 * pmemblk_ctl_getW -- programmatically executes a read ctl query
 */
int
pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
	char *uname = util_toUTF8(name);
	if (uname == NULL)
		return -1;

	int ret = pmemblk_ctl_getU(pbp, uname, arg);
	util_free_UTF8(uname);

	return ret;
}

/*
 * pmemblk_ctl_setW -- programmatically executes a write ctl query
 */
int
pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
	char *uname = util_toUTF8(name);
	if (uname == NULL)
		return -1;

	int ret = pmemblk_ctl_setU(pbp, uname, arg);
	util_free_UTF8(uname);

	return ret;
}

/*
 * pmemblk_ctl_execW -- programmatically executes a runnable ctl query
 */
int
pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg)
{
	char *uname = util_toUTF8(name);
	if (uname == NULL)
		return -1;

	int ret = pmemblk_ctl_execU(pbp, uname, arg);
	util_free_UTF8(uname);

	return ret;
}
#endif

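/*
 * Example (illustrative sketch): querying a ctl entry point.  Passing
 * pbp == NULL targets the global ctl namespace; "sds.at_create" is an
 * assumed entry-point name here, documented separately from this file.
 *
 *	int sds;
 *	if (pmemblk_ctl_get(NULL, "sds.at_create", &sds) == 0)
 *		printf("SDS at create: %d\n", sds);
 */
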
#if FAULT_INJECTION
void
pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth,
		const char *at)
{
	core_inject_fault_at(type, nth, at);
}

int
pmemblk_fault_injection_enabled(void)
{
	return core_fault_injection_enabled();
}
#endif