// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/hfs/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/swap.h>

#include "btree.h"

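/* Copy 'len' bytes at offset 'off' in the node into 'buf' */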
void hfs_bnode_read(struct hfs_bnode *node, void *buf,
		int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(buf, kmap(page) + off, len);
	kunmap(page);
}

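/* Read a big-endian u16 from the node, converting to CPU byte order */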
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	// optimize later...
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

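/*
 * Copy a record key out of the node: leaf keys and variable-length
 * index keys carry a leading length byte; fixed-length index keys use
 * the tree's max_key_len.
 */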
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS)
		key_len = hfs_bnode_read_u8(node, off) + 1;
	else
		key_len = tree->max_key_len + 1;

	hfs_bnode_read(node, key, off, key_len);
}

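/* Copy 'len' bytes from 'buf' into the node and mark the page dirty */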
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memcpy(kmap(page) + off, buf, len);
	kunmap(page);
	set_page_dirty(page);
}

void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	// optimize later...
	hfs_bnode_write(node, &v, off, 2);
}

void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
{
	// optimize later...
	hfs_bnode_write(node, &data, off, 1);
}

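/* Zero 'len' bytes at offset 'off' in the node and mark the page dirty */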
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page *page;

	off += node->page_offset;
	page = node->page[0];

	memset(kmap(page) + off, 0, len);
	kunmap(page);
	set_page_dirty(page);
}

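/* Copy 'len' bytes from one btree node to another */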
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct hfs_btree *tree;
	struct page *src_page, *dst_page;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	tree = src_node->tree;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page[0];
	dst_page = dst_node->page[0];

	memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
	kunmap(src_page);
	kunmap(dst_page);
	set_page_dirty(dst_page);
}

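/* Move 'len' bytes within a node; uses memmove() so ranges may overlap */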
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page *page;
	void *ptr;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	page = node->page[0];
	ptr = kmap(page);
	memmove(ptr + dst, ptr + src, len);
	kunmap(page);
	set_page_dirty(page);
}

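/*
 * Dump a node's descriptor and record offset table to the debug log;
 * for index records also print the key length and child node CNID.
 */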
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg_cont(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
				tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
			else
				tmp = node->tree->max_key_len + 1;
			hfs_dbg_cont(BNODE_MOD, " (%d,%d",
				     tmp, hfs_bnode_read_u8(node, key_off));
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u8(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

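/*
 * Unlink a node from the doubly-linked list at its tree level, fixing
 * up the on-disk prev/next pointers of its neighbours (and leaf_head/
 * leaf_tail for leaf nodes). The node is only marked HFS_BNODE_DELETED
 * here; it is actually freed by the final hfs_bnode_put().
 */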
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid, offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	// move down?
	if (!node->prev && !node->next) {
		printk(KERN_DEBUG "hfs_btree_del_level\n");
	}
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

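/* Fold a 32-bit node number into an index into the node hash table */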
static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

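/* Look up a node by CNID in the tree's node hash table */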
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash) {
		if (node->this == cnid) {
			return node;
		}
	}
	return NULL;
}

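/*
 * Allocate an in-memory node for 'cnid' and add it to the hash table,
 * or wait for and return the existing one if another caller got there
 * first. Backing pages are read through the tree inode's page cache;
 * on failure the node is returned with HFS_BNODE_ERROR set.
 */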
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct super_block *sb;
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n", cnid);
		return NULL;
	}

	sb = tree->inode->i_sb;
	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq, !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid * tree->node_size;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		page = read_mapping_page(mapping, block++, NULL);
		if (IS_ERR(page))
			goto fail;
		if (PageError(page)) {
			put_page(page);
			goto fail;
		}
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

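/* Remove a node from the tree's node hash table */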
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap(node->page[0]);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u8(node, off) + 1;
		if (key_size >= entry_size /*|| key_size & 1*/)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

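/* Drop the node's page references and free the in-memory structure */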
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

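/*
 * Create a new node for CNID 'num': it must not already be hashed, and
 * its pages are zeroed and marked dirty so the fresh node reaches disk.
 */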
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memset(kmap(*pagep) + node->page_offset, 0,
	       min((int)PAGE_SIZE, (int)tree->node_size));
	set_page_dirty(*pagep);
	kunmap(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memset(kmap(*++pagep), 0, PAGE_SIZE);
		set_page_dirty(*pagep);
		kunmap(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

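/* Take an extra reference on a node */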
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}