/*
 *  linux/fs/hpfs/anode.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  handling HPFS anode tree that contains file allocation info
 */

#include "hpfs_fn.h"

/* Find a sector in allocation tree */

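/*
 * Map file sector @sec to its disk sector by walking the b+ tree rooted at
 * @btree: internal nodes are followed through the first down pointer whose
 * file_secno is greater than @sec, then the external (leaf) entries are
 * scanned for the run that covers @sec.  When an @inode is supplied, the
 * matching run is remembered in its hpfs_inode_info (i_file_sec, i_disk_sec,
 * i_n_secs), presumably so nearby lookups can be answered without rewalking
 * the tree.  Returns the disk sector number, or -1 on error.
 */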
secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
		   struct bplus_header *btree, unsigned sec,
		   struct buffer_head *bh)
{
	anode_secno a = -1;
	struct anode *anode;
	int i;
	int c1, c2 = 0;
	go_down:
	if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
	if (bp_internal(btree)) {
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
				a = le32_to_cpu(btree->u.internal[i].down);
				brelse(bh);
				if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
				btree = &anode->btree;
				goto go_down;
			}
		hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
		brelse(bh);
		return -1;
	}
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
		    le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
			a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
			if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
				brelse(bh);
				return -1;
			}
			if (inode) {
				struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
				hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
				hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
				hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
			}
			brelse(bh);
			return a;
		}
	hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
	brelse(bh);
	return -1;
}

/* Add a sector to tree */

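/*
 * Append one data sector to the allocation tree rooted at @node (an fnode
 * when @fnod is set, an anode otherwise).  @fsecno must be the file sector
 * being added, i.e. the current size of the file in sectors.  If the disk
 * sector right after the last run is free it is simply claimed and the run
 * is lengthened; otherwise a new sector is allocated and a new run is
 * inserted, splitting b+ tree nodes on the way up if necessary.  Returns
 * the disk sector that now backs @fsecno, or -1 on error.
 */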
secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
{
	struct bplus_header *btree;
	struct anode *anode = NULL, *ranode = NULL;
	struct fnode *fnode;
	anode_secno a, na = -1, ra, up = -1;
	secno se;
	struct buffer_head *bh, *bh1, *bh2;
	int n;
	unsigned fs;
	int c1, c2 = 0;
	if (fnod) {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
		btree = &anode->btree;
	}
	a = node;
	go_down:
	if ((n = btree->n_used_nodes - 1) < -!!fnod) {
		hpfs_error(s, "anode %08x has no entries", a);
		brelse(bh);
		return -1;
	}
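	/*
	 * Internal node: follow the last (rightmost) down pointer toward the
	 * rightmost leaf, marking that entry as unbounded (-1) on the way down.
	 */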
	if (bp_internal(btree)) {
		a = le32_to_cpu(btree->u.internal[n].down);
		btree->u.internal[n].file_secno = cpu_to_le32(-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
		if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
		btree = &anode->btree;
		goto go_down;
	}
	if (n >= 0) {
		if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
			hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
				le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
				fnod?'f':'a', node);
			brelse(bh);
			return -1;
		}
		if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
			le32_add_cpu(&btree->u.external[n].length, 1);
			mark_buffer_dirty(bh);
			brelse(bh);
			return se;
		}
	} else {
		if (fsecno) {
			hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
			brelse(bh);
			return -1;
		}
		se = !fnod ? node : (node + 16384) & ~16383;
	}
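	/*
	 * Could not simply extend the last run: allocate a fresh data sector
	 * near @se, passing a forward-allocation hint proportional to the
	 * file size, clamped to the ALLOC_FWD_MIN..ALLOC_FWD_MAX range.
	 */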
	if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
		brelse(bh);
		return -1;
	}
	fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
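	/*
	 * The leaf we ended up in is full.  Allocate a new anode @na: if the
	 * leaf is still the fnode itself, move its entries into @na and turn
	 * the fnode's btree into an internal node with a single down pointer;
	 * otherwise also reserve @ranode now, in case the split has to be
	 * propagated all the way up to a new root later on.
	 */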
	if (!btree->n_free_nodes) {
		up = a != node ? le32_to_cpu(anode->up) : -1;
		if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
			brelse(bh);
			hpfs_free_sectors(s, se, 1);
			return -1;
		}
		if (a == node && fnod) {
			anode->up = cpu_to_le32(node);
			anode->btree.flags |= BP_fnode_parent;
			anode->btree.n_used_nodes = btree->n_used_nodes;
			anode->btree.first_free = btree->first_free;
			anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
			memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
			btree->flags |= BP_internal;
			btree->n_free_nodes = 11;
			btree->n_used_nodes = 1;
			btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
			btree->u.internal[0].file_secno = cpu_to_le32(-1);
			btree->u.internal[0].down = cpu_to_le32(na);
			mark_buffer_dirty(bh);
		} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
			brelse(bh);
			brelse(bh1);
			hpfs_free_sectors(s, se, 1);
			hpfs_free_sectors(s, na, 1);
			return -1;
		}
		brelse(bh);
		bh = bh1;
		btree = &anode->btree;
	}
	btree->n_free_nodes--; n = btree->n_used_nodes++;
	le16_add_cpu(&btree->first_free, 12);
	btree->u.external[n].disk_secno = cpu_to_le32(se);
	btree->u.external[n].file_secno = cpu_to_le32(fs);
	btree->u.external[n].length = cpu_to_le32(1);
	mark_buffer_dirty(bh);
	brelse(bh);
	if ((a == node && fnod) || na == -1) return se;
	c2 = 0;
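	/*
	 * A leaf anode was split off: climb toward the root and hook the new
	 * anode @na in.  If an ancestor has a free slot the down pointer is
	 * inserted there, the reserved @ranode is released and we are done;
	 * otherwise that ancestor is split as well and the walk continues.
	 */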
	while (up != (anode_secno)-1) {
		struct anode *new_anode;
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
		if (up != node || !fnod) {
			if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
			btree = &anode->btree;
		} else {
			if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
			btree = &fnode->btree;
		}
		if (btree->n_free_nodes) {
			btree->n_free_nodes--; n = btree->n_used_nodes++;
			le16_add_cpu(&btree->first_free, 8);
			btree->u.internal[n].file_secno = cpu_to_le32(-1);
			btree->u.internal[n].down = cpu_to_le32(na);
			btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
			mark_buffer_dirty(bh);
			brelse(bh);
			brelse(bh2);
			hpfs_free_sectors(s, ra, 1);
			if ((anode = hpfs_map_anode(s, na, &bh))) {
				anode->up = cpu_to_le32(up);
				if (up == node && fnod)
					anode->btree.flags |= BP_fnode_parent;
				else
					anode->btree.flags &= ~BP_fnode_parent;
				mark_buffer_dirty(bh);
				brelse(bh);
			}
			return se;
		}
		up = up != node ? le32_to_cpu(anode->up) : -1;
		btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		a = na;
		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
			anode = new_anode;
			/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
			anode->btree.flags |= BP_internal;
			anode->btree.n_used_nodes = 1;
			anode->btree.n_free_nodes = 59;
			anode->btree.first_free = cpu_to_le16(16);
			anode->btree.u.internal[0].down = cpu_to_le32(a);
			anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
			mark_buffer_dirty(bh);
			brelse(bh);
			if ((anode = hpfs_map_anode(s, a, &bh))) {
				anode->up = cpu_to_le32(na);
				mark_buffer_dirty(bh);
				brelse(bh);
			}
		} else na = a;
	}
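	/*
	 * No ancestor had room, so the tree grows one level: the reserved
	 * anode @ranode takes over the old contents of the root at @node,
	 * and the root itself is rewritten as an internal node with exactly
	 * two children, @ra (the old contents) and @na (the split-off chain).
	 */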
	if ((anode = hpfs_map_anode(s, na, &bh))) {
		anode->up = cpu_to_le32(node);
		if (fnod)
			anode->btree.flags |= BP_fnode_parent;
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	if (!fnod) {
		if (!(anode = hpfs_map_anode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &anode->btree;
	} else {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &fnode->btree;
	}
	ranode->up = cpu_to_le32(node);
	memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
	if (fnod)
		ranode->btree.flags |= BP_fnode_parent;
	ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
	if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
		struct anode *unode;
		if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
			unode->up = cpu_to_le32(ra);
			unode->btree.flags &= ~BP_fnode_parent;
			mark_buffer_dirty(bh1);
			brelse(bh1);
		}
	}
	btree->flags |= BP_internal;
	btree->n_free_nodes = fnod ? 10 : 58;
	btree->n_used_nodes = 2;
	btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
	btree->u.internal[0].file_secno = cpu_to_le32(fs);
	btree->u.internal[0].down = cpu_to_le32(ra);
	btree->u.internal[1].file_secno = cpu_to_le32(-1);
	btree->u.internal[1].down = cpu_to_le32(na);
	mark_buffer_dirty(bh);
	brelse(bh);
	mark_buffer_dirty(bh2);
	brelse(bh2);
	return se;
}

/*
 * Remove allocation tree. Recursion would look much nicer but
 * I want to avoid it because it can cause stack overflow.
 */

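/*
 * Iterative post-order walk: descend through the leftmost unvisited child
 * of each internal node down to a leaf, free the data runs listed in the
 * leaf's external entries, then climb back up through the anode "up"
 * pointers, freeing each anode once all of its children have been handled.
 * The root header @btree itself is left in place for the caller to reuse
 * or free.
 */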
void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
{
	struct bplus_header *btree1 = btree;
	struct anode *anode = NULL;
	anode_secno ano = 0, oano;
	struct buffer_head *bh;
	int level = 0;
	int pos = 0;
	int i;
	int c1, c2 = 0;
	int d1, d2;
	go_down:
	d2 = 0;
	while (bp_internal(btree1)) {
		ano = le32_to_cpu(btree1->u.internal[pos].down);
		if (level) brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
				return;
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
		level++;
		pos = 0;
	}
	for (i = 0; i < btree1->n_used_nodes; i++)
		hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
	go_up:
	if (!level) return;
	brelse(bh);
	if (hpfs_sb(s)->sb_chk)
		if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
	hpfs_free_sectors(s, ano, 1);
	oano = ano;
	ano = le32_to_cpu(anode->up);
	if (--level) {
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
	} else btree1 = btree;
	for (i = 0; i < btree1->n_used_nodes; i++) {
		if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
			if ((pos = i + 1) < btree1->n_used_nodes)
				goto go_down;
			else
				goto go_up;
		}
	}
	hpfs_error(s,
		   "reference to anode %08x not found in anode %08x "
		   "(probably bad up pointer)",
		   oano, level ? ano : -1);
	if (level)
		brelse(bh);
}

/* Just a wrapper around hpfs_bplus_lookup .. used for reading eas */

static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
	return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
}

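/*
 * Read @len bytes of an extended attribute into @buf, starting at byte
 * offset @pos.  If @ano is set the EA is stored behind an anode tree
 * rooted at sector @a and each 512-byte sector is resolved through
 * anode_lookup(); otherwise the EA occupies consecutive sectors starting
 * at @a.  Returns 0 on success, -1 on error.
 */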
int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
	    unsigned len, char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;
	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(buf, data + (pos & 0x1ff), l);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

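/*
 * Counterpart of hpfs_ea_read(): copies @len bytes from @buf into the EA
 * area one sector at a time, marking each modified buffer dirty so it is
 * written back.  The @a/@ano addressing convention is the same as above.
 */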
int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
	     unsigned len, const char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;
	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(data + (pos & 0x1ff), buf, l);
		mark_buffer_dirty(bh);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

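/*
 * Free the on-disk storage of an extended attribute: either the anode tree
 * rooted at @a plus the root anode itself (@ano set), or @len bytes worth
 * of consecutive sectors starting at @a.
 */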
void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (ano) {
		if (!(anode = hpfs_map_anode(s, a, &bh))) return;
		hpfs_remove_btree(s, &anode->btree);
		brelse(bh);
		hpfs_free_sectors(s, a, 1);
	} else hpfs_free_sectors(s, a, (len + 511) >> 9);
}

/* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */

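/*
 * Truncate the allocation tree rooted at @f (an fnode when @fno is set,
 * an anode otherwise) so that it describes exactly @secs file sectors.
 * With @secs == 0 the whole tree is removed; otherwise internal entries
 * past the cut point have their subtrees freed while walking down to the
 * leaf level, the extent that straddles @secs is shortened, and every
 * extent after it is freed.
 */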
void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
{
	struct fnode *fnode;
	struct anode *anode;
	struct buffer_head *bh;
	struct bplus_header *btree;
	anode_secno node = f;
	int i, j, nodes;
	int c1, c2 = 0;
	if (fno) {
		if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, f, &bh))) return;
		btree = &anode->btree;
	}
	if (!secs) {
		hpfs_remove_btree(s, btree);
		if (fno) {
			btree->n_free_nodes = 8;
			btree->n_used_nodes = 0;
			btree->first_free = cpu_to_le16(8);
			btree->flags &= ~BP_internal;
			mark_buffer_dirty(bh);
		} else hpfs_free_sectors(s, f, 1);
		brelse(bh);
		return;
	}
	while (bp_internal(btree)) {
		nodes = btree->n_used_nodes + btree->n_free_nodes;
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
		brelse(bh);
		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
		return;
		f:
		for (j = i + 1; j < btree->n_used_nodes; j++)
			hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
		btree->n_used_nodes = i + 1;
		btree->n_free_nodes = nodes - btree->n_used_nodes;
		btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
		mark_buffer_dirty(bh);
		if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
			brelse(bh);
			return;
		}
		node = le32_to_cpu(btree->u.internal[i].down);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
				return;
		if (!(anode = hpfs_map_anode(s, node, &bh))) return;
		btree = &anode->btree;
	}
	nodes = btree->n_used_nodes + btree->n_free_nodes;
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
	brelse(bh);
	return;
	ff:
	if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
		if (i) i--;
	}
	else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
			le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
			- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
		btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
	}
	for (j = i + 1; j < btree->n_used_nodes; j++)
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
	btree->n_used_nodes = i + 1;
	btree->n_free_nodes = nodes - btree->n_used_nodes;
	btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
	mark_buffer_dirty(bh);
	brelse(bh);
}

/* Remove file or directory and its eas - note that directory must
   be empty when this is called. */

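/*
 * Frees everything the fnode points at: its allocation tree (or dtree for
 * a directory), every EA stored indirectly in separate sectors, the
 * external EA area, and finally the fnode sector itself.
 */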
void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	struct extended_attribute *ea;
	struct extended_attribute *ea_end;
	if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
	if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
	else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
	ea_end = fnode_end_ea(fnode);
	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
		if (ea_indirect(ea))
			hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
	brelse(bh);
	hpfs_free_sectors(s, fno, 1);
}