/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map-common.h"
#include "dm-transaction-manager.h"

#include <linux/bitops.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map common"

/*----------------------------------------------------------------*/

/*
 * Index validator.
 */
#define INDEX_CSUM_XOR 160478

static void index_prepare_for_write(struct dm_block_validator *v,
				    struct dm_block *b,
				    size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);

	mi_le->blocknr = cpu_to_le64(dm_block_location(b));
	mi_le->csum = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
						 block_size - sizeof(__le32),
						 INDEX_CSUM_XOR));
}

static int index_check(struct dm_block_validator *v,
		       struct dm_block *b,
		       size_t block_size)
{
	struct disk_metadata_index *mi_le = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
		DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(mi_le->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&mi_le->padding,
					       block_size - sizeof(__le32),
					       INDEX_CSUM_XOR));
	if (csum_disk != mi_le->csum) {
		DMERR_LIMIT("index_check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator index_validator = {
	.name = "index",
	.prepare_for_write = index_prepare_for_write,
	.check = index_check
};

/*----------------------------------------------------------------*/

/*
 * Bitmap validator
 */
#define BITMAP_CSUM_XOR 240779

static void bitmap_prepare_for_write(struct dm_block_validator *v,
				     struct dm_block *b,
				     size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);

	disk_header->blocknr = cpu_to_le64(dm_block_location(b));
	disk_header->csum = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
						       block_size - sizeof(__le32),
						       BITMAP_CSUM_XOR));
}

static int bitmap_check(struct dm_block_validator *v,
			struct dm_block *b,
			size_t block_size)
{
	struct disk_bitmap_header *disk_header = dm_block_data(b);
	__le32 csum_disk;

	if (dm_block_location(b) != le64_to_cpu(disk_header->blocknr)) {
		DMERR_LIMIT("bitmap check failed: blocknr %llu != wanted %llu",
			    le64_to_cpu(disk_header->blocknr), dm_block_location(b));
		return -ENOTBLK;
	}

	csum_disk = cpu_to_le32(dm_bm_checksum(&disk_header->not_used,
					       block_size - sizeof(__le32),
					       BITMAP_CSUM_XOR));
	if (csum_disk != disk_header->csum) {
		DMERR_LIMIT("bitmap check failed: csum %u != wanted %u",
			    le32_to_cpu(csum_disk), le32_to_cpu(disk_header->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator dm_sm_bitmap_validator = {
	.name = "sm_bitmap",
	.prepare_for_write = bitmap_prepare_for_write,
	.check = bitmap_check
};

/*----------------------------------------------------------------*/

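/*
 * Each space map entry is a 2-bit reference count, so one little-endian
 * 64-bit word holds 32 entries.  A value of 3 means the real count has
 * overflowed into the separate ref-count btree.
 */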
#define ENTRIES_PER_WORD 32
#define ENTRIES_SHIFT 5

static void *dm_bitmap_data(struct dm_block *b)
{
	return dm_block_data(b) + sizeof(struct disk_bitmap_header);
}

#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL

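/*
 * Returns non-zero iff every 2-bit entry in the word containing entry b
 * is in use (non-zero).  Uses the usual "detect a zero field" bit trick,
 * which lets sm_find_free() skip whole words at a time.
 */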
static unsigned bitmap_word_used(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	uint64_t bits = le64_to_cpu(*w_le);
	uint64_t mask = (bits + WORD_MASK_HIGH + 1) & WORD_MASK_HIGH;

	return !(~bits & mask);
}

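/*
 * Within its 64-bit word, an entry's high bit lives at little-endian bit
 * 2b and its low bit at bit 2b + 1.
 */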
static unsigned sm_lookup_bitmap(void *addr, unsigned b)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
	unsigned hi, lo;

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;
	hi = !!test_bit_le(b, (void *) w_le);
	lo = !!test_bit_le(b + 1, (void *) w_le);
	return (hi << 1) | lo;
}

static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
{
	__le64 *words_le = addr;
	__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);

	b = (b & (ENTRIES_PER_WORD - 1)) << 1;

	if (val & 2)
		__set_bit_le(b, (void *) w_le);
	else
		__clear_bit_le(b, (void *) w_le);

	if (val & 1)
		__set_bit_le(b + 1, (void *) w_le);
	else
		__clear_bit_le(b + 1, (void *) w_le);
}

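/*
 * Finds the first free (zero) entry in [begin, end), skipping any 64-bit
 * word that bitmap_word_used() reports as completely allocated.  Returns
 * -ENOSPC if nothing in the range is free.
 */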
static int sm_find_free(void *addr, unsigned begin, unsigned end,
			unsigned *result)
{
	while (begin < end) {
		if (!(begin & (ENTRIES_PER_WORD - 1)) &&
		    bitmap_word_used(addr, begin)) {
			begin += ENTRIES_PER_WORD;
			continue;
		}

		if (!sm_lookup_bitmap(addr, begin)) {
			*result = begin;
			return 0;
		}

		begin++;
	}

	return -ENOSPC;
}

/*----------------------------------------------------------------*/

static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	ll->tm = tm;

	ll->bitmap_info.tm = tm;
	ll->bitmap_info.levels = 1;

	/*
	 * Because the new bitmap blocks are created via a shadow
	 * operation, the old entry has already had its reference count
	 * decremented and we don't need the btree to do any bookkeeping.
	 */
	ll->bitmap_info.value_type.size = sizeof(struct disk_index_entry);
	ll->bitmap_info.value_type.inc = NULL;
	ll->bitmap_info.value_type.dec = NULL;
	ll->bitmap_info.value_type.equal = NULL;

	ll->ref_count_info.tm = tm;
	ll->ref_count_info.levels = 1;
	ll->ref_count_info.value_type.size = sizeof(uint32_t);
	ll->ref_count_info.value_type.inc = NULL;
	ll->ref_count_info.value_type.dec = NULL;
	ll->ref_count_info.value_type.equal = NULL;

	ll->block_size = dm_bm_block_size(dm_tm_get_bm(tm));

	if (ll->block_size > (1 << 30)) {
		DMERR("block size too big to hold bitmaps");
		return -EINVAL;
	}

	ll->entries_per_block = (ll->block_size - sizeof(struct disk_bitmap_header)) *
		ENTRIES_PER_BYTE;
	ll->nr_blocks = 0;
	ll->bitmap_root = 0;
	ll->ref_count_root = 0;
	ll->bitmap_index_changed = false;

	return 0;
}

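/*
 * Grow the space map by extra_blocks: allocate any new bitmap blocks that
 * are needed and record an index entry (all entries free) for each one.
 */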
int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
	int r;
	dm_block_t i, nr_blocks, nr_indexes;
	unsigned old_blocks, blocks;

	nr_blocks = ll->nr_blocks + extra_blocks;
	old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
	blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block);

	nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block);
	if (nr_indexes > ll->max_entries(ll)) {
		DMERR("space map too large");
		return -EINVAL;
	}

	/*
	 * We need to set this before the dm_tm_new_block() call below.
	 */
	ll->nr_blocks = nr_blocks;
	for (i = old_blocks; i < blocks; i++) {
		struct dm_block *b;
		struct disk_index_entry idx;

		r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
		if (r < 0)
			return r;

		idx.blocknr = cpu_to_le64(dm_block_location(b));

		dm_tm_unlock(ll->tm, b);

		idx.nr_free = cpu_to_le32(ll->entries_per_block);
		idx.none_free_before = 0;

		r = ll->save_ie(ll, i, &idx);
		if (r < 0)
			return r;
	}

	return 0;
}

int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	struct dm_block *blk;

	b = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
			    &dm_sm_bitmap_validator, &blk);
	if (r < 0)
		return r;

	*result = sm_lookup_bitmap(dm_bitmap_data(blk), b);

	dm_tm_unlock(ll->tm, blk);

	return 0;
}

static int sm_ll_lookup_big_ref_count(struct ll_disk *ll, dm_block_t b,
				      uint32_t *result)
{
	__le32 le_rc;
	int r;

	r = dm_btree_lookup(&ll->ref_count_info, ll->ref_count_root, &b, &le_rc);
	if (r < 0)
		return r;

	*result = le32_to_cpu(le_rc);

	return r;
}

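/*
 * A bitmap value of 3 means the true count lives in the ref-count btree.
 */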
int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result)
{
	int r = sm_ll_lookup_bitmap(ll, b, result);

	if (r)
		return r;

	if (*result != 3)
		return r;

	return sm_ll_lookup_big_ref_count(ll, b, result);
}

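/*
 * Walks the index entries covering [begin, end), skipping any bitmap whose
 * nr_free count is zero, and searches each remaining bitmap for a free
 * entry starting no earlier than its none_free_before hint.
 */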
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
			  dm_block_t end, dm_block_t *result)
{
	int r;
	struct disk_index_entry ie_disk;
	dm_block_t i, index_begin = begin;
	dm_block_t index_end = dm_sector_div_up(end, ll->entries_per_block);

	/*
	 * FIXME: Use shifts
	 */
	begin = do_div(index_begin, ll->entries_per_block);
	end = do_div(end, ll->entries_per_block);

	for (i = index_begin; i < index_end; i++, begin = 0) {
		struct dm_block *blk;
		unsigned position;
		uint32_t bit_end;

		r = ll->load_ie(ll, i, &ie_disk);
		if (r < 0)
			return r;

		if (le32_to_cpu(ie_disk.nr_free) == 0)
			continue;

		r = dm_tm_read_lock(ll->tm, le64_to_cpu(ie_disk.blocknr),
				    &dm_sm_bitmap_validator, &blk);
		if (r < 0)
			return r;

		bit_end = (i == index_end - 1) ? end : ll->entries_per_block;

		r = sm_find_free(dm_bitmap_data(blk),
				 max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
				 bit_end, &position);
		if (r == -ENOSPC) {
			/*
			 * This might happen because we started searching
			 * part way through the bitmap.
			 */
			dm_tm_unlock(ll->tm, blk);
			continue;

		} else if (r < 0) {
			dm_tm_unlock(ll->tm, blk);
			return r;
		}

		dm_tm_unlock(ll->tm, blk);

		*result = i * ll->entries_per_block + (dm_block_t) position;
		return 0;
	}

	return -ENOSPC;
}

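/*
 * Generic read-modify-write of one reference count: shadow the bitmap
 * block, feed the old count to the mutator, then store the result either
 * in the bitmap (counts 0-2) or in the overflow btree (marking the bitmap
 * entry as 3).  The index entry's nr_free and none_free_before hints are
 * updated when a block is allocated or freed.
 */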
static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
			int (*mutator)(void *context, uint32_t old, uint32_t *new),
			void *context, enum allocation_event *ev)
{
	int r;
	uint32_t bit, old, ref_count;
	struct dm_block *nb;
	dm_block_t index = b;
	struct disk_index_entry ie_disk;
	void *bm_le;
	int inc;

	bit = do_div(index, ll->entries_per_block);
	r = ll->load_ie(ll, index, &ie_disk);
	if (r < 0)
		return r;

	r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ie_disk.blocknr),
			       &dm_sm_bitmap_validator, &nb, &inc);
	if (r < 0) {
		DMERR("dm_tm_shadow_block() failed");
		return r;
	}
	ie_disk.blocknr = cpu_to_le64(dm_block_location(nb));

	bm_le = dm_bitmap_data(nb);
	old = sm_lookup_bitmap(bm_le, bit);

	if (old > 2) {
		r = sm_ll_lookup_big_ref_count(ll, b, &old);
		if (r < 0) {
			dm_tm_unlock(ll->tm, nb);
			return r;
		}
	}

	r = mutator(context, old, &ref_count);
	if (r) {
		dm_tm_unlock(ll->tm, nb);
		return r;
	}

	if (ref_count <= 2) {
		sm_set_bitmap(bm_le, bit, ref_count);

		dm_tm_unlock(ll->tm, nb);

		if (old > 2) {
			r = dm_btree_remove(&ll->ref_count_info,
					    ll->ref_count_root,
					    &b, &ll->ref_count_root);
			if (r)
				return r;
		}

	} else {
		__le32 le_rc = cpu_to_le32(ref_count);

		sm_set_bitmap(bm_le, bit, 3);
		dm_tm_unlock(ll->tm, nb);

		__dm_bless_for_disk(&le_rc);
		r = dm_btree_insert(&ll->ref_count_info, ll->ref_count_root,
				    &b, &le_rc, &ll->ref_count_root);
		if (r < 0) {
			DMERR("ref count insert failed");
			return r;
		}
	}

	if (ref_count && !old) {
		*ev = SM_ALLOC;
		ll->nr_allocated++;
		le32_add_cpu(&ie_disk.nr_free, -1);
		if (le32_to_cpu(ie_disk.none_free_before) == bit)
			ie_disk.none_free_before = cpu_to_le32(bit + 1);

	} else if (old && !ref_count) {
		*ev = SM_FREE;
		ll->nr_allocated--;
		le32_add_cpu(&ie_disk.nr_free, 1);
		ie_disk.none_free_before = cpu_to_le32(min(le32_to_cpu(ie_disk.none_free_before), bit));
	} else
		*ev = SM_NONE;

	return ll->save_ie(ll, index, &ie_disk);
}

static int set_ref_count(void *context, uint32_t old, uint32_t *new)
{
	*new = *((uint32_t *) context);
	return 0;
}

int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
		 uint32_t ref_count, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}

static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
{
	*new = old + 1;
	return 0;
}

int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
}

static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
{
	if (!old) {
		DMERR_LIMIT("unable to decrement a reference count below 0");
		return -EINVAL;
	}

	*new = old - 1;
	return 0;
}

int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
{
	return sm_ll_mutate(ll, b, dec_ref_count, NULL, ev);
}

int sm_ll_commit(struct ll_disk *ll)
{
	int r = 0;

	if (ll->bitmap_index_changed) {
		r = ll->commit(ll);
		if (!r)
			ll->bitmap_index_changed = false;
	}

	return r;
}

/*----------------------------------------------------------------*/

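/*
 * The metadata space map keeps its index of bitmap blocks in a single
 * in-core copy (ll->mi_le) that is written back to the index block on
 * commit, so it is limited to MAX_METADATA_BITMAPS entries.
 */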
static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	memcpy(ie, ll->mi_le.index + index, sizeof(*ie));
	return 0;
}

static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			       struct disk_index_entry *ie)
{
	ll->bitmap_index_changed = true;
	memcpy(ll->mi_le.index + index, ie, sizeof(*ie));
	return 0;
}

static int metadata_ll_init_index(struct ll_disk *ll)
{
	int r;
	struct dm_block *b;

	r = dm_tm_new_block(ll->tm, &index_validator, &b);
	if (r < 0)
		return r;

	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

static int metadata_ll_open(struct ll_disk *ll)
{
	int r;
	struct dm_block *block;

	r = dm_tm_read_lock(ll->tm, ll->bitmap_root,
			    &index_validator, &block);
	if (r)
		return r;

	memcpy(&ll->mi_le, dm_block_data(block), sizeof(ll->mi_le));
	dm_tm_unlock(ll->tm, block);

	return 0;
}

static dm_block_t metadata_ll_max_entries(struct ll_disk *ll)
{
	return MAX_METADATA_BITMAPS;
}

static int metadata_ll_commit(struct ll_disk *ll)
{
	int r, inc;
	struct dm_block *b;

	r = dm_tm_shadow_block(ll->tm, ll->bitmap_root, &index_validator, &b, &inc);
	if (r)
		return r;

	memcpy(dm_block_data(b), &ll->mi_le, sizeof(ll->mi_le));
	ll->bitmap_root = dm_block_location(b);

	dm_tm_unlock(ll->tm, b);

	return 0;
}

int sm_ll_new_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_metadata(struct ll_disk *ll, struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_metadata root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = metadata_ll_load_ie;
	ll->save_ie = metadata_ll_save_ie;
	ll->init_index = metadata_ll_init_index;
	ll->open_index = metadata_ll_open;
	ll->max_entries = metadata_ll_max_entries;
	ll->commit = metadata_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/

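/*
 * The disk space map stores its index entries in a btree rather than a
 * single index block, so it has no fixed limit on the number of bitmaps
 * (disk_ll_max_entries() returns -1ULL) and nothing extra to do on commit.
 */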
static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	return dm_btree_lookup(&ll->bitmap_info, ll->bitmap_root, &index, ie);
}

static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
			   struct disk_index_entry *ie)
{
	__dm_bless_for_disk(ie);
	return dm_btree_insert(&ll->bitmap_info, ll->bitmap_root,
			       &index, ie, &ll->bitmap_root);
}

static int disk_ll_init_index(struct ll_disk *ll)
{
	return dm_btree_empty(&ll->bitmap_info, &ll->bitmap_root);
}

static int disk_ll_open(struct ll_disk *ll)
{
	/* nothing to do */
	return 0;
}

static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
{
	return -1ULL;
}

static int disk_ll_commit(struct ll_disk *ll)
{
	return 0;
}

int sm_ll_new_disk(struct ll_disk *ll, struct dm_transaction_manager *tm)
{
	int r;

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = 0;
	ll->nr_allocated = 0;

	r = ll->init_index(ll);
	if (r < 0)
		return r;

	r = dm_btree_empty(&ll->ref_count_info, &ll->ref_count_root);
	if (r < 0)
		return r;

	return 0;
}

int sm_ll_open_disk(struct ll_disk *ll, struct dm_transaction_manager *tm,
		    void *root_le, size_t len)
{
	int r;
	struct disk_sm_root *smr = root_le;

	if (len < sizeof(struct disk_sm_root)) {
		DMERR("sm_disk root too small");
		return -ENOMEM;
	}

	r = sm_ll_init(ll, tm);
	if (r < 0)
		return r;

	ll->load_ie = disk_ll_load_ie;
	ll->save_ie = disk_ll_save_ie;
	ll->init_index = disk_ll_init_index;
	ll->open_index = disk_ll_open;
	ll->max_entries = disk_ll_max_entries;
	ll->commit = disk_ll_commit;

	ll->nr_blocks = le64_to_cpu(smr->nr_blocks);
	ll->nr_allocated = le64_to_cpu(smr->nr_allocated);
	ll->bitmap_root = le64_to_cpu(smr->bitmap_root);
	ll->ref_count_root = le64_to_cpu(smr->ref_count_root);

	return ll->open_index(ll);
}

/*----------------------------------------------------------------*/