// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy(buf, kmap(*pagep) + off, l);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy(buf, kmap(*++pagep), l);
		kunmap(*pagep);
	}
}

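/* Read a big-endian 16-bit value from a node and return it in CPU order */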
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

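/* Read a single byte from a node */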
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

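/*
 * Copy a key out of a node; leaf nodes, trees with variable-size index
 * keys and the attributes tree store the key length in the key's first
 * two bytes, otherwise the fixed tree->max_key_len applies.
 */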
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    node->tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	hfs_bnode_read(node, key, off, key_len);
}

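/* Copy a specified range of bytes into the raw data of a node */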
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy(kmap(*pagep) + off, buf, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy(kmap(*++pagep), buf, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

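/* Write a 16-bit value into a node in big-endian order */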
void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}

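/* Zero out a specified range of bytes in a node */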
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memset(kmap(*pagep) + off, 0, l);
	set_page_dirty(*pagep);
	kunmap(*pagep);

	while ((len -= l) != 0) {
		l = min_t(int, len, PAGE_SIZE);
		memset(kmap(*++pagep), 0, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
}

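/* Copy a range of bytes between two nodes, one page at a time */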
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		l = min_t(int, len, PAGE_SIZE - src);
		memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
		kunmap(*src_page);
		set_page_dirty(*dst_page);
		kunmap(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(int, len, PAGE_SIZE);
			memcpy(kmap(*++dst_page), kmap(*++src_page), l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			src_ptr = kmap(*src_page) + src;
			dst_ptr = kmap(*dst_page) + dst;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

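/*
 * Move a range of bytes within a node; source and destination may
 * overlap, so moves to a higher offset walk the pages backwards, in the
 * manner of memmove().
 */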
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				memmove(kmap(*dst_page), kmap(*src_page), src);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			memmove(kmap(*dst_page) + src,
				kmap(*src_page) + src, len);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			l = min_t(int, len, PAGE_SIZE - src);
			memmove(kmap(*dst_page) + src,
				kmap(*src_page) + src, l);
			kunmap(*src_page);
			set_page_dirty(*dst_page);
			kunmap(*dst_page);

			while ((len -= l) != 0) {
				l = min_t(int, len, PAGE_SIZE);
				memmove(kmap(*++dst_page),
					kmap(*++src_page), l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
			}
		} else {
			void *src_ptr, *dst_ptr;

			do {
				src_ptr = kmap(*src_page) + src;
				dst_ptr = kmap(*dst_page) + dst;
				if (PAGE_SIZE - src <
						PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap(*src_page);
				set_page_dirty(*dst_page);
				kunmap(*dst_page);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

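/* Dump a node's descriptor and record offset table for debugging */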
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
			    node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

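/* Detach a node from its sibling chain and mark it deleted */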
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
			offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

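/* Hash a node number into an index into the tree's node hash table */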
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

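/* Look up a cached node in the tree's hash table; returns NULL on a miss */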
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %u in B*Tree\n",
		       cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}

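/*
 * Allocate an in-memory node and read its pages from the tree's inode.
 * If another caller is already setting up the same node, wait for it to
 * finish and return the existing copy instead.
 */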
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %u in B*Tree\n",
		       cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			   !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			put_page(page);
			goto fail;
		}
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

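/* Remove a node from the tree's node hash table */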
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			   !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
			node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

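/* Release a node's pages and free its in-memory representation */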
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

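/* Create a new node in the tree and zero out its contents */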
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

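/* Take an additional reference on a node */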
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
	struct super_block *sb = tree->inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

	return tree->cnid == HFSPLUS_CAT_CNID &&
		volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}