/*
 * linux/fs/ufs/balloc.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 */

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <asm/byteorder.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"

#define INVBLOCK ((u64)-1L)

static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);

/*
 * Free 'count' fragments from fragment number 'fragment'
 */
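/*
 * The fragments being freed must all belong to one cylinder group; the
 * corresponding bits in that group's free-fragment map are set and the
 * per-group and filesystem-wide summary counters are updated.  When the
 * release completes a whole block, the block is re-accounted as a free
 * block.  As used further down in this file, ufs_new_fragments() returns
 * the unused tail of an oversized allocation with a call of the form
 *
 *      ufs_free_fragments(inode, result + newcount, request - newcount);
 */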
void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, bit, end_bit, bbase, blkmap, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if (ufs_fragnum(fragment) + count > uspi->s_fpg)
                ufs_error (sb, "ufs_free_fragments", "internal error");

        mutex_lock(&UFS_SB(sb)->s_lock);

        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
                goto failed;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
                goto failed;
        }

        end_bit = bit + count;
        bbase = ufs_blknum (bit);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
        for (i = bit; i < end_bit; i++) {
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
                else
                        ufs_error (sb, "ufs_free_fragments",
                                   "bit already cleared for fragment %u", i);
        }

        fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree += count;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
        ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);

        /*
         * Trying to reassemble free fragments into block
         */
        blkno = ufs_fragstoblks (bbase);
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
                uspi->cs_total.cs_nffree -= uspi->s_fpb;
                fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno (bbase);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(bbase)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);

        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT\n");
        return;

failed:
        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT (FAILED)\n");
        return;
}

/*
 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
 */
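/*
 * Unlike ufs_free_fragments(), both 'fragment' and 'count' must be whole,
 * block-aligned quantities; the function complains otherwise.  A range that
 * crosses a cylinder-group boundary is handled piecewise: the part that
 * overflows the current group is carried over and freed in the next pass of
 * the do_more loop below.
 */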
void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned overflow, cgno, bit, end_bit, i;
        u64 blkno;

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;

        UFSD("ENTER, fragment %llu, count %u\n",
             (unsigned long long)fragment, count);

        if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
                ufs_error (sb, "ufs_free_blocks", "internal error, "
                           "fragment %llu, count %u\n",
                           (unsigned long long)fragment, count);
                goto failed;
        }

        mutex_lock(&UFS_SB(sb)->s_lock);

do_more:
        overflow = 0;
        cgno = ufs_dtog(uspi, fragment);
        bit = ufs_dtogd(uspi, fragment);
        if (cgno >= uspi->s_ncg) {
                ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
                goto failed_unlock;
        }
        end_bit = bit + count;
        if (end_bit > uspi->s_fpg) {
                overflow = bit + count - uspi->s_fpg;
                count -= overflow;
                end_bit -= overflow;
        }

        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                goto failed_unlock;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
                goto failed_unlock;
        }

        for (i = bit; i < end_bit; i += uspi->s_fpb) {
                blkno = ufs_fragstoblks(i);
                if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
                        ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
                }
                ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);

                fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
                uspi->cs_total.cs_nbfree++;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);

                if (uspi->fs_magic != UFS2_MAGIC) {
                        unsigned cylno = ufs_cbtocylno(i);

                        fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
                                                  ufs_cbtorpos(i)), 1);
                        fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
                }
        }

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));

        if (overflow) {
                fragment += count;
                count = overflow;
                goto do_more;
        }

        ufs_mark_sb_dirty(sb);
        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT\n");
        return;

failed_unlock:
        mutex_unlock(&UFS_SB(sb)->s_lock);
failed:
        UFSD("EXIT (FAILED)\n");
        return;
}

/*
 * Modify the inode page cache so that buffers which had b_blocknr equal to
 * oldb...oldb+count-1 end up with b_blocknr equal to newb...newb+count-1.
 * We also assume that the blocks oldb...oldb+count-1 are situated at the
 * end of the file.
 *
 * We can get here from ufs_writepage or ufs_prepare_write; locked_page is
 * an argument of those functions, so it is already locked.
 */
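/*
 * The loop below walks the affected range one page at a time.  With
 * blks_per_page = PAGE_SIZE / block size, the page holding logical block 'i'
 * and the buffer's position inside that page are, as computed below,
 *
 *      index = i >> (PAGE_SHIFT - inode->i_blkbits);
 *      pos   = i & (blks_per_page - 1);
 *
 * so advancing i to (i | mask) + 1 jumps to the first block of the next page.
 */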
static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                               unsigned int count, sector_t oldb,
                               sector_t newb, struct page *locked_page)
{
        const unsigned blks_per_page =
                1 << (PAGE_SHIFT - inode->i_blkbits);
        const unsigned mask = blks_per_page - 1;
        struct address_space * const mapping = inode->i_mapping;
        pgoff_t index, cur_index, last_index;
        unsigned pos, j, lblock;
        sector_t end, i;
        struct page *page;
        struct buffer_head *head, *bh;

        UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
             inode->i_ino, count,
             (unsigned long long)oldb, (unsigned long long)newb);

        BUG_ON(!locked_page);
        BUG_ON(!PageLocked(locked_page));

        cur_index = locked_page->index;
        end = count + beg;
        last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
        for (i = beg; i < end; i = (i | mask) + 1) {
                index = i >> (PAGE_SHIFT - inode->i_blkbits);

                if (likely(cur_index != index)) {
                        page = ufs_get_locked_page(mapping, index);
                        if (!page) /* it was truncated */
                                continue;
                        if (IS_ERR(page)) { /* or EIO */
                                ufs_error(inode->i_sb, __func__,
                                          "read of page %llu failed\n",
                                          (unsigned long long)index);
                                continue;
                        }
                } else
                        page = locked_page;

                head = page_buffers(page);
                bh = head;
                pos = i & mask;
                for (j = 0; j < pos; ++j)
                        bh = bh->b_this_page;

                if (unlikely(index == last_index))
                        lblock = end & mask;
                else
                        lblock = blks_per_page;

                do {
                        if (j >= lblock)
                                break;
                        pos = (i - beg) + j;

                        if (!buffer_mapped(bh))
                                map_bh(bh, inode->i_sb, oldb + pos);
                        if (!buffer_uptodate(bh)) {
                                ll_rw_block(REQ_OP_READ, 0, 1, &bh);
                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh)) {
                                        ufs_error(inode->i_sb, __func__,
                                                  "read of block failed\n");
                                        break;
                                }
                        }

                        UFSD(" change from %llu to %llu, pos %u\n",
                             (unsigned long long)(pos + oldb),
                             (unsigned long long)(pos + newb), pos);

                        bh->b_blocknr = newb + pos;
                        clean_bdev_bh_alias(bh);
                        mark_buffer_dirty(bh);
                        ++j;
                        bh = bh->b_this_page;
                } while (bh != head);

                if (likely(cur_index != index))
                        ufs_put_locked_page(page);
        }
        UFSD("EXIT\n");
}

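/*
 * Zero out the (newly allocated) fragments beg..beg+n-1: each buffer is
 * grabbed with sb_getblk(), cleared, marked uptodate and dirty, and written
 * out synchronously when the inode requires synchronous I/O or the caller
 * asks for it.
 */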
static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
                            int sync)
{
        struct buffer_head *bh;
        sector_t end = beg + n;

        for (; beg < end; ++beg) {
                bh = sb_getblk(inode->i_sb, beg);
                lock_buffer(bh);
                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                set_buffer_uptodate(bh);
                mark_buffer_dirty(bh);
                unlock_buffer(bh);
                if (IS_SYNC(inode) || sync)
                        sync_dirty_buffer(bh);
                brelse(bh);
        }
}

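/*
 * ufs_new_fragments() is the central allocation routine.  'p' points to the
 * on-disk block pointer being filled in, 'fragment' is the fragment being
 * allocated and 'goal' a preferred physical position.  Roughly, three cases
 * are handled below:
 *
 *   1. nothing was allocated at this position yet: allocate 'count' fresh
 *      fragments with ufs_alloc_fragments();
 *   2. the existing tail of a block can simply be extended in place via
 *      ufs_add_fragments();
 *   3. otherwise a larger run is allocated elsewhere, the cached buffers are
 *      remapped with ufs_change_blocknr() and the old fragments are freed.
 *
 * It returns the new fragment number, 0 if nothing could (or needed to) be
 * allocated, or INVBLOCK on an internal error.
 */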
u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                      u64 goal, unsigned count, int *err,
                      struct page *locked_page)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_super_block_first * usb1;
        unsigned cgno, oldcount, newcount;
        u64 tmp, request, result;

        UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
             inode->i_ino, (unsigned long long)fragment,
             (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        usb1 = ubh_get_usb_first(uspi);
        *err = -ENOSPC;

        mutex_lock(&UFS_SB(sb)->s_lock);
        tmp = ufs_data_ptr_to_cpu(sb, p);

        if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
                ufs_warning(sb, "ufs_new_fragments", "internal warning"
                            " fragment %llu, count %u",
                            (unsigned long long)fragment, count);
                count = uspi->s_fpb - ufs_fragnum(fragment);
        }
        oldcount = ufs_fragnum (fragment);
        newcount = oldcount + count;

        /*
         * Somebody else has just allocated our fragments
         */
        if (oldcount) {
                if (!tmp) {
                        ufs_error(sb, "ufs_new_fragments", "internal error, "
                                  "fragment %llu, tmp %llu\n",
                                  (unsigned long long)fragment,
                                  (unsigned long long)tmp);
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return INVBLOCK;
                }
                if (fragment < UFS_I(inode)->i_lastfrag) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return 0;
                }
        }
        else {
                if (tmp) {
                        UFSD("EXIT (ALREADY ALLOCATED)\n");
                        mutex_unlock(&UFS_SB(sb)->s_lock);
                        return 0;
                }
        }

        /*
         * There is not enough space for user on the device
         */
        if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT (FAILED)\n");
                return 0;
        }

        if (goal >= uspi->s_size)
                goal = 0;
        if (goal == 0)
                cgno = ufs_inotocg (inode->i_ino);
        else
                cgno = ufs_dtog(uspi, goal);

        /*
         * allocate new fragment
         */
        if (oldcount == 0) {
                result = ufs_alloc_fragments (inode, cgno, goal, count, err);
                if (result) {
                        ufs_clear_frags(inode, result + oldcount,
                                        newcount - oldcount, locked_page != NULL);
                        write_seqlock(&UFS_I(inode)->meta_lock);
                        ufs_cpu_to_data_ptr(sb, p, result);
                        write_sequnlock(&UFS_I(inode)->meta_lock);
                        *err = 0;
                        UFS_I(inode)->i_lastfrag =
                                max(UFS_I(inode)->i_lastfrag, fragment + count);
                }
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * resize block
         */
        result = ufs_add_fragments(inode, tmp, oldcount, newcount);
        if (result) {
                *err = 0;
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                               fragment + count);
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        /*
         * allocate new block and move data
         */
        switch (fs32_to_cpu(sb, usb1->fs_optim)) {
        case UFS_OPTSPACE:
                request = newcount;
                if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
                    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
                        break;
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
                break;
        default:
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);

        case UFS_OPTTIME:
                request = uspi->s_fpb;
                if (uspi->cs_total.cs_nffree < uspi->s_dsize *
                    (uspi->s_minfree - 2) / 100)
                        break;
                usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
                break;
        }
        result = ufs_alloc_fragments (inode, cgno, goal, request, err);
        if (result) {
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                ufs_change_blocknr(inode, fragment - oldcount, oldcount,
                                   uspi->s_sbbase + tmp,
                                   uspi->s_sbbase + result, locked_page);
                write_seqlock(&UFS_I(inode)->meta_lock);
                ufs_cpu_to_data_ptr(sb, p, result);
                write_sequnlock(&UFS_I(inode)->meta_lock);
                *err = 0;
                UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
                                               fragment + count);
                mutex_unlock(&UFS_SB(sb)->s_lock);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
                ufs_free_fragments (inode, tmp, oldcount);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
                return result;
        }

        mutex_unlock(&UFS_SB(sb)->s_lock);
        UFSD("EXIT (FAILED)\n");
        return 0;
}

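/*
 * Try to extend an existing run of fragments in place: the fragments
 * oldcount..newcount-1 following 'fragment' are claimed from the free map
 * of the same block if (and only if) they are all still free.  Returns the
 * original fragment number on success and 0 if the block cannot be
 * extended, in which case ufs_new_fragments() falls back to relocating the
 * data.
 */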
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
                             unsigned oldcount, unsigned newcount)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned cgno, fragno, fragoff, count, fragsize, i;

        UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
             (unsigned long long)fragment, oldcount, newcount);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        count = newcount - oldcount;

        cgno = ufs_dtog(uspi, fragment);
        if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
                return 0;
        if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
                return 0;
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg)) {
                ufs_panic (sb, "ufs_add_fragments",
                           "internal error, bad magic number on cg %u", cgno);
                return 0;
        }

        fragno = ufs_dtogd(uspi, fragment);
        fragoff = ufs_fragnum (fragno);
        for (i = oldcount; i < newcount; i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        return 0;
        /*
         * Block can be extended
         */
        ucg->cg_time = cpu_to_fs32(sb, get_seconds());
        for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        break;
        fragsize = i - oldcount;
        if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
                ufs_panic (sb, "ufs_add_fragments",
                           "internal error or corrupted bitmap on cg %u", cgno);
        fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
        if (fragsize != count)
                fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
        for (i = oldcount; i < newcount; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;

        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);

        UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);

        return fragment;
}

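/*
 * Helper used by ufs_alloc_fragments() below: jump to cg_found as soon as
 * the cylinder group 'cgno' currently under consideration has either a
 * whole free block or, according to its cg_frsum[] histogram, a free
 * fragment run of at least 'count' fragments.
 */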
#define UFS_TEST_FREE_SPACE_CG \
        ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
        if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
                goto cg_found; \
        for (k = count; k < uspi->s_fpb; k++) \
                if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
                        goto cg_found;

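/*
 * Allocate 'count' fragments near 'goal'.  The cylinder group is chosen in
 * three passes, mirroring the classic BSD allocator: first the preferred
 * group, then a quadratic rehash over the other groups, and finally a
 * brute force linear scan.  If only a whole free block is available it is
 * split and the unused tail is returned to the fragment map.
 */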
static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
                               u64 goal, unsigned count, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_cg_private_info * ucpi;
        struct ufs_cylinder_group * ucg;
        unsigned oldcg, i, j, k, allocsize;
        u64 result;

        UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
             inode->i_ino, cgno, (unsigned long long)goal, count);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        oldcg = cgno;

        /*
         * 1. searching on preferred cylinder group
         */
        UFS_TEST_FREE_SPACE_CG

        /*
         * 2. quadratic rehash
         */
        for (j = 1; j < uspi->s_ncg; j *= 2) {
                cgno += j;
                if (cgno >= uspi->s_ncg)
                        cgno -= uspi->s_ncg;
                UFS_TEST_FREE_SPACE_CG
        }

        /*
         * 3. brute force search
         * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
         */
        cgno = (oldcg + 1) % uspi->s_ncg;
        for (j = 2; j < uspi->s_ncg; j++) {
                cgno++;
                if (cgno >= uspi->s_ncg)
                        cgno = 0;
                UFS_TEST_FREE_SPACE_CG
        }

        UFSD("EXIT (FAILED)\n");
        return 0;

cg_found:
        ucpi = ufs_load_cylinder (sb, cgno);
        if (!ucpi)
                return 0;
        ucg = ubh_get_ucg (UCPI_UBH(ucpi));
        if (!ufs_cg_chkmagic(sb, ucg))
                ufs_panic (sb, "ufs_alloc_fragments",
                           "internal error, bad magic number on cg %u", cgno);
        ucg->cg_time = cpu_to_fs32(sb, get_seconds());

        if (count == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goto succed;
        }

        for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
                if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
                        break;

        if (allocsize == uspi->s_fpb) {
                result = ufs_alloccg_block (inode, ucpi, goal, err);
                if (result == INVBLOCK)
                        return 0;
                goal = ufs_dtogd(uspi, result);
                for (i = count; i < uspi->s_fpb; i++)
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
                i = uspi->s_fpb - count;

                fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
                uspi->cs_total.cs_nffree += i;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
                fs32_add(sb, &ucg->cg_frsum[i], 1);
                goto succed;
        }

        result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
        if (result == INVBLOCK)
                return 0;
        for (i = 0; i < count; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);

        fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree -= count;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
        fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);

        if (count != allocsize)
                fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);

succed:
        ubh_mark_buffer_dirty (USPI_UBH(uspi));
        ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
        if (sb->s_flags & MS_SYNCHRONOUS)
                ubh_sync_block(UCPI_UBH(ucpi));
        ufs_mark_sb_dirty(sb);

        result += cgno * uspi->s_fpg;
        UFSD("EXIT3, result %llu\n", (unsigned long long)result);
        return result;
}

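/*
 * Allocate one whole block in the given cylinder group, preferably at
 * 'goal' (a fragment number within the group); if the goal block is not
 * free, the rotor-based bitmap search is used instead.  Returns the first
 * fragment of the block, relative to the cylinder group, or INVBLOCK on
 * failure.
 */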
static u64 ufs_alloccg_block(struct inode *inode,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, int *err)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_cylinder_group * ucg;
        u64 result, blkno;

        UFSD("ENTER, goal %llu\n", (unsigned long long)goal);

        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
        ucg = ubh_get_ucg(UCPI_UBH(ucpi));

        if (goal == 0) {
                goal = ucpi->c_rotor;
                goto norot;
        }
        goal = ufs_blknum (goal);
        goal = ufs_dtogd(uspi, goal);

        /*
         * If the requested block is available, use it.
         */
        if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
                result = goal;
                goto gotit;
        }

norot:
        result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
        if (result == INVBLOCK)
                return INVBLOCK;
        ucpi->c_rotor = result;
gotit:
        blkno = ufs_fragstoblks(result);
        ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                ufs_clusteracct (sb, ucpi, blkno, -1);

        fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
        uspi->cs_total.cs_nbfree--;
        fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);

        if (uspi->fs_magic != UFS2_MAGIC) {
                unsigned cylno = ufs_cbtocylno((unsigned)result);

                fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
                                          ufs_cbtorpos((unsigned)result)), 1);
                fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
        }

        UFSD("EXIT, result %llu\n", (unsigned long long)result);

        return result;
}

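/*
 * Scan 'size' bytes of the cylinder group's free-fragment map, starting at
 * byte offset 'begin' inside the ufs buffer head, for a byte whose pattern
 * in 'table' matches 'mask' (i.e. a byte containing a suitable run of free
 * fragments).  The return value is the number of bytes left unscanned when
 * the match was found, or 0 if no byte matched; the caller turns that back
 * into a bit position.
 */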
static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
                          struct ufs_buffer_head *ubh,
                          unsigned begin, unsigned size,
                          unsigned char *table, unsigned char mask)
{
        unsigned rest, offset;
        unsigned char *cp;

        offset = begin & ~uspi->s_fmask;
        begin >>= uspi->s_fshift;
        for (;;) {
                if ((offset + size) < uspi->s_fsize)
                        rest = size;
                else
                        rest = uspi->s_fsize - offset;
                size -= rest;
                cp = ubh->bh[begin]->b_data + offset;
                while ((table[*cp++] & mask) == 0 && --rest)
                        ;
                if (rest || !size)
                        break;
                begin++;
                offset = 0;
        }
        return (size + rest);
}

/*
 * Find a free run of fragments of the specified size in the specified
 * cylinder group.
 * @sb: pointer to the super block
 * @ucpi: pointer to the cylinder group info
 * @goal: fragment near which the new one should be found
 * @count: size (in fragments) of the run we are looking for
 */
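/*
 * The search works in two steps: ubh_scanc() locates a candidate byte of
 * the free map via the fragment tables, then the (blockmap & mask) == want
 * test below pins down the exact bit position of a run of 'count' free
 * fragments within that byte.  The value returned is the fragment offset
 * relative to the start of the cylinder group, or INVBLOCK if the map
 * turned out to be corrupted.
 */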
static u64 ufs_bitmap_search(struct super_block *sb,
                             struct ufs_cg_private_info *ucpi,
                             u64 goal, unsigned count)
{
        /*
         * Bit patterns for identifying fragments in the block map
         * used as ((map & mask_arr) == want_arr)
         */
        static const int mask_arr[9] = {
                0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
        };
        static const int want_arr[9] = {
                0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
        };
        struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
        unsigned start, length, loc;
        unsigned pos, want, blockmap, mask, end;
        u64 result;

        UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
             (unsigned long long)goal, count);

        if (goal)
                start = ufs_dtogd(uspi, goal) >> 3;
        else
                start = ucpi->c_frotor >> 3;

        length = ((uspi->s_fpg + 7) >> 3) - start;
        loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
                        (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
                        1 << (count - 1 + (uspi->s_fpb & 7)));
        if (loc == 0) {
                length = start + 1;
                loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
                                (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
                                ufs_fragtable_other,
                                1 << (count - 1 + (uspi->s_fpb & 7)));
                if (loc == 0) {
                        ufs_error(sb, "ufs_bitmap_search",
                                  "bitmap corrupted on cg %u, start %u,"
                                  " length %u, count %u, freeoff %u\n",
                                  ucpi->c_cgx, start, length, count,
                                  ucpi->c_freeoff);
                        return INVBLOCK;
                }
                start = 0;
        }
        result = (start + length - loc) << 3;
        ucpi->c_frotor = result;

        /*
         * found the byte in the map
         */

        for (end = result + 8; result < end; result += uspi->s_fpb) {
                blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
                blockmap <<= 1;
                mask = mask_arr[count];
                want = want_arr[count];
                for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
                        if ((blockmap & mask) == want) {
                                UFSD("EXIT, result %llu\n",
                                     (unsigned long long)result);
                                return result + pos;
                        }
                        mask <<= 1;
                        want <<= 1;
                }
        }

        ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
                  ucpi->c_cgx);
        UFSD("EXIT (FAILED)\n");
        return INVBLOCK;
}

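/*
 * Maintain the cluster bitmap and the cluster summary table of a cylinder
 * group.  A positive 'cnt' marks the block as part of a free cluster (the
 * block has just been freed), a negative one removes it (the block has
 * just been allocated); the summary counts for the old, forward and
 * backward clusters are adjusted accordingly.  Only used when
 * s_contigsumsize > 0.
 */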
static void ufs_clusteracct(struct super_block * sb,
        struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
{
        struct ufs_sb_private_info * uspi;
        int i, start, end, forw, back;

        uspi = UFS_SB(sb)->s_uspi;
        if (uspi->s_contigsumsize <= 0)
                return;

        if (cnt > 0)
                ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
        else
                ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);

        /*
         * Find the size of the cluster going forward.
         */
        start = blkno + 1;
        end = start + uspi->s_contigsumsize;
        if (end >= ucpi->c_nclusterblks)
                end = ucpi->c_nclusterblks;
        i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
        if (i > end)
                i = end;
        forw = i - start;

        /*
         * Find the size of the cluster going backward.
         */
        start = blkno - 1;
        end = start - uspi->s_contigsumsize;
        if (end < 0)
                end = -1;
        i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
        if (i < end)
                i = end;
        back = start - i;

        /*
         * Account for old cluster and the possibly new forward and
         * back clusters.
         */
        i = back + forw + 1;
        if (i > uspi->s_contigsumsize)
                i = uspi->s_contigsumsize;
        fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
        if (back > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
        if (forw > 0)
                fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
}

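/*
 * Lookup tables used by ufs_bitmap_search()/ubh_scanc() to classify one
 * byte of the free-fragment map.  For filesystems with 8 fragments per
 * block, ufs_fragtable_8fpb[b] appears to have bit (n - 1) set for every
 * length n of a maximal run of free fragments inside the byte b (e.g.
 * b == 0x3c, a run of four free fragments, yields 0x08).
 * ufs_fragtable_other[] plays the same role for other fragments-per-block
 * values, where runs continuing across byte boundaries have to be tracked
 * as well, hence the shift by (s_fpb & 7) in the mask passed to
 * ubh_scanc().
 */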
static unsigned char ufs_fragtable_8fpb[] = {
        0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
        0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
        0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
        0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
        0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};

static unsigned char ufs_fragtable_other[] = {
        0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
        0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
        0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
        0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};