Merge branch 'bind_unbind' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / fs / ufs / balloc.c
CommitLineData
1da177e4
LT
1/*
2 * linux/fs/ufs/balloc.c
3 *
4 * Copyright (C) 1998
5 * Daniel Pirkl <daniel.pirkl@email.cz>
6 * Charles University, Faculty of Mathematics and Physics
54fb996a
ED
7 *
8 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
1da177e4
LT
9 */
10
11#include <linux/fs.h>
1da177e4
LT
12#include <linux/stat.h>
13#include <linux/time.h>
14#include <linux/string.h>
1da177e4 15#include <linux/buffer_head.h>
16f7e0fe 16#include <linux/capability.h>
1da177e4 17#include <linux/bitops.h>
2f8b5444 18#include <linux/bio.h>
1da177e4
LT
19#include <asm/byteorder.h>
20
e5420598 21#include "ufs_fs.h"
bcd6d4ec 22#include "ufs.h"
1da177e4
LT
23#include "swab.h"
24#include "util.h"
25
54fb996a
ED
26#define INVBLOCK ((u64)-1L)
27
45641c82 28static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
54fb996a
ED
29static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
30static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
31static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
1da177e4
LT
32static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
33static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
34
35/*
36 * Free 'count' fragments from fragment number 'fragment'
37 */
54fb996a 38void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
6ef4d6bf 39{
1da177e4
LT
40 struct super_block * sb;
41 struct ufs_sb_private_info * uspi;
1da177e4
LT
42 struct ufs_cg_private_info * ucpi;
43 struct ufs_cylinder_group * ucg;
54fb996a
ED
44 unsigned cgno, bit, end_bit, bbase, blkmap, i;
45 u64 blkno;
1da177e4
LT
46
47 sb = inode->i_sb;
48 uspi = UFS_SB(sb)->s_uspi;
1da177e4 49
54fb996a
ED
50 UFSD("ENTER, fragment %llu, count %u\n",
51 (unsigned long long)fragment, count);
1da177e4
LT
52
53 if (ufs_fragnum(fragment) + count > uspi->s_fpg)
54 ufs_error (sb, "ufs_free_fragments", "internal error");
cdd9eefd
FF
55
56 mutex_lock(&UFS_SB(sb)->s_lock);
1da177e4 57
54fb996a
ED
58 cgno = ufs_dtog(uspi, fragment);
59 bit = ufs_dtogd(uspi, fragment);
1da177e4
LT
60 if (cgno >= uspi->s_ncg) {
61 ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
62 goto failed;
63 }
64
65 ucpi = ufs_load_cylinder (sb, cgno);
66 if (!ucpi)
67 goto failed;
9695ef16 68 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
1da177e4
LT
69 if (!ufs_cg_chkmagic(sb, ucg)) {
70 ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
71 goto failed;
72 }
73
74 end_bit = bit + count;
75 bbase = ufs_blknum (bit);
9695ef16 76 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
1da177e4
LT
77 ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
78 for (i = bit; i < end_bit; i++) {
9695ef16
ED
79 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
80 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
7b4ee73e
E
81 else
82 ufs_error (sb, "ufs_free_fragments",
83 "bit already cleared for fragment %u", i);
1da177e4 84 }
eb315d2a
AV
85
86 inode_sub_bytes(inode, count << uspi->s_fshift);
1da177e4 87 fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
ee3ffd6c 88 uspi->cs_total.cs_nffree += count;
1da177e4 89 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
9695ef16 90 blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
1da177e4
LT
91 ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
92
93 /*
94 * Trying to reassemble free fragments into block
95 */
96 blkno = ufs_fragstoblks (bbase);
9695ef16 97 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
1da177e4 98 fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
ee3ffd6c 99 uspi->cs_total.cs_nffree -= uspi->s_fpb;
1da177e4
LT
100 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
101 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
102 ufs_clusteracct (sb, ucpi, blkno, 1);
103 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
ee3ffd6c 104 uspi->cs_total.cs_nbfree++;
1da177e4 105 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
54fb996a
ED
106 if (uspi->fs_magic != UFS2_MAGIC) {
107 unsigned cylno = ufs_cbtocylno (bbase);
108
109 fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
110 ufs_cbtorpos(bbase)), 1);
111 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
112 }
1da177e4
LT
113 }
114
9695ef16
ED
115 ubh_mark_buffer_dirty (USPI_UBH(uspi));
116 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
9cb569d6
CH
117 if (sb->s_flags & MS_SYNCHRONOUS)
118 ubh_sync_block(UCPI_UBH(ucpi));
9e9ad5f4 119 ufs_mark_sb_dirty(sb);
cdd9eefd
FF
120
121 mutex_unlock(&UFS_SB(sb)->s_lock);
abf5d15f 122 UFSD("EXIT\n");
1da177e4
LT
123 return;
124
125failed:
cdd9eefd 126 mutex_unlock(&UFS_SB(sb)->s_lock);
abf5d15f 127 UFSD("EXIT (FAILED)\n");
1da177e4
LT
128 return;
129}
130
131/*
132 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
133 */
54fb996a 134void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
6ef4d6bf 135{
1da177e4
LT
136 struct super_block * sb;
137 struct ufs_sb_private_info * uspi;
1da177e4
LT
138 struct ufs_cg_private_info * ucpi;
139 struct ufs_cylinder_group * ucg;
54fb996a
ED
140 unsigned overflow, cgno, bit, end_bit, i;
141 u64 blkno;
1da177e4
LT
142
143 sb = inode->i_sb;
144 uspi = UFS_SB(sb)->s_uspi;
1da177e4 145
54fb996a
ED
146 UFSD("ENTER, fragment %llu, count %u\n",
147 (unsigned long long)fragment, count);
1da177e4
LT
148
149 if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
150 ufs_error (sb, "ufs_free_blocks", "internal error, "
54fb996a
ED
151 "fragment %llu, count %u\n",
152 (unsigned long long)fragment, count);
1da177e4
LT
153 goto failed;
154 }
155
cdd9eefd 156 mutex_lock(&UFS_SB(sb)->s_lock);
1da177e4
LT
157
158do_more:
159 overflow = 0;
54fb996a
ED
160 cgno = ufs_dtog(uspi, fragment);
161 bit = ufs_dtogd(uspi, fragment);
1da177e4
LT
162 if (cgno >= uspi->s_ncg) {
163 ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
2e006393 164 goto failed_unlock;
1da177e4
LT
165 }
166 end_bit = bit + count;
167 if (end_bit > uspi->s_fpg) {
168 overflow = bit + count - uspi->s_fpg;
169 count -= overflow;
170 end_bit -= overflow;
171 }
172
173 ucpi = ufs_load_cylinder (sb, cgno);
174 if (!ucpi)
2e006393 175 goto failed_unlock;
9695ef16 176 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
1da177e4
LT
177 if (!ufs_cg_chkmagic(sb, ucg)) {
178 ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
2e006393 179 goto failed_unlock;
1da177e4
LT
180 }
181
182 for (i = bit; i < end_bit; i += uspi->s_fpb) {
183 blkno = ufs_fragstoblks(i);
9695ef16 184 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
1da177e4
LT
185 ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
186 }
9695ef16 187 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
eb315d2a 188 inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
1da177e4
LT
189 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
190 ufs_clusteracct (sb, ucpi, blkno, 1);
1da177e4
LT
191
192 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
ee3ffd6c 193 uspi->cs_total.cs_nbfree++;
1da177e4 194 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
54fb996a
ED
195
196 if (uspi->fs_magic != UFS2_MAGIC) {
197 unsigned cylno = ufs_cbtocylno(i);
198
199 fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
200 ufs_cbtorpos(i)), 1);
201 fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
202 }
1da177e4
LT
203 }
204
9695ef16
ED
205 ubh_mark_buffer_dirty (USPI_UBH(uspi));
206 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
9cb569d6
CH
207 if (sb->s_flags & MS_SYNCHRONOUS)
208 ubh_sync_block(UCPI_UBH(ucpi));
1da177e4
LT
209
210 if (overflow) {
211 fragment += count;
212 count = overflow;
213 goto do_more;
214 }
215
9e9ad5f4 216 ufs_mark_sb_dirty(sb);
cdd9eefd 217 mutex_unlock(&UFS_SB(sb)->s_lock);
abf5d15f 218 UFSD("EXIT\n");
1da177e4
LT
219 return;
220
2e006393 221failed_unlock:
cdd9eefd 222 mutex_unlock(&UFS_SB(sb)->s_lock);
2e006393 223failed:
abf5d15f 224 UFSD("EXIT (FAILED)\n");
1da177e4
LT
225 return;
226}
227
6ef4d6bf
ED
228/*
229 * Modify inode page cache in such way:
230 * have - blocks with b_blocknr equal to oldb...oldb+count-1
231 * get - blocks with b_blocknr equal to newb...newb+count-1
232 * also we suppose that oldb...oldb+count-1 blocks
233 * situated at the end of file.
234 *
235 * We can come here from ufs_writepage or ufs_prepare_write,
236 * locked_page is argument of these functions, so we already lock it.
237 */
5431bf97
ED
238static void ufs_change_blocknr(struct inode *inode, sector_t beg,
239 unsigned int count, sector_t oldb,
240 sector_t newb, struct page *locked_page)
6ef4d6bf 241{
5431bf97 242 const unsigned blks_per_page =
09cbfeaf 243 1 << (PAGE_SHIFT - inode->i_blkbits);
5431bf97 244 const unsigned mask = blks_per_page - 1;
efee2b81 245 struct address_space * const mapping = inode->i_mapping;
5431bf97
ED
246 pgoff_t index, cur_index, last_index;
247 unsigned pos, j, lblock;
248 sector_t end, i;
6ef4d6bf
ED
249 struct page *page;
250 struct buffer_head *head, *bh;
251
5431bf97
ED
252 UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
253 inode->i_ino, count,
254 (unsigned long long)oldb, (unsigned long long)newb);
6ef4d6bf 255
a685e26f 256 BUG_ON(!locked_page);
6ef4d6bf
ED
257 BUG_ON(!PageLocked(locked_page));
258
a685e26f 259 cur_index = locked_page->index;
5431bf97 260 end = count + beg;
09cbfeaf 261 last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
5431bf97 262 for (i = beg; i < end; i = (i | mask) + 1) {
09cbfeaf 263 index = i >> (PAGE_SHIFT - inode->i_blkbits);
6ef4d6bf
ED
264
265 if (likely(cur_index != index)) {
266 page = ufs_get_locked_page(mapping, index);
5431bf97
ED
267 if (!page)/* it was truncated */
268 continue;
269 if (IS_ERR(page)) {/* or EIO */
9746077a 270 ufs_error(inode->i_sb, __func__,
5431bf97
ED
271 "read of page %llu failed\n",
272 (unsigned long long)index);
6ef4d6bf 273 continue;
5431bf97 274 }
6ef4d6bf
ED
275 } else
276 page = locked_page;
277
6ef4d6bf
ED
278 head = page_buffers(page);
279 bh = head;
5431bf97 280 pos = i & mask;
efee2b81
ED
281 for (j = 0; j < pos; ++j)
282 bh = bh->b_this_page;
5431bf97
ED
283
284
285 if (unlikely(index == last_index))
286 lblock = end & mask;
287 else
288 lblock = blks_per_page;
289
6ef4d6bf 290 do {
5431bf97
ED
291 if (j >= lblock)
292 break;
293 pos = (i - beg) + j;
294
295 if (!buffer_mapped(bh))
296 map_bh(bh, inode->i_sb, oldb + pos);
297 if (!buffer_uptodate(bh)) {
dfec8a14 298 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
5431bf97
ED
299 wait_on_buffer(bh);
300 if (!buffer_uptodate(bh)) {
9746077a 301 ufs_error(inode->i_sb, __func__,
5431bf97
ED
302 "read of block failed\n");
303 break;
efee2b81 304 }
6ef4d6bf
ED
305 }
306
5431bf97 307 UFSD(" change from %llu to %llu, pos %u\n",
9df13039
AM
308 (unsigned long long)(pos + oldb),
309 (unsigned long long)(pos + newb), pos);
5431bf97
ED
310
311 bh->b_blocknr = newb + pos;
e64855c6 312 clean_bdev_bh_alias(bh);
5431bf97
ED
313 mark_buffer_dirty(bh);
314 ++j;
6ef4d6bf
ED
315 bh = bh->b_this_page;
316 } while (bh != head);
317
10e5dce0
ED
318 if (likely(cur_index != index))
319 ufs_put_locked_page(page);
6ef4d6bf 320 }
abf5d15f 321 UFSD("EXIT\n");
6ef4d6bf
ED
322}
323
d63b7090
ED
324static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
325 int sync)
326{
327 struct buffer_head *bh;
328 sector_t end = beg + n;
329
330 for (; beg < end; ++beg) {
331 bh = sb_getblk(inode->i_sb, beg);
332 lock_buffer(bh);
333 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
334 set_buffer_uptodate(bh);
335 mark_buffer_dirty(bh);
336 unlock_buffer(bh);
337 if (IS_SYNC(inode) || sync)
338 sync_dirty_buffer(bh);
339 brelse(bh);
340 }
341}
342
54fb996a
ED
343u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
344 u64 goal, unsigned count, int *err,
345 struct page *locked_page)
1da177e4
LT
346{
347 struct super_block * sb;
348 struct ufs_sb_private_info * uspi;
349 struct ufs_super_block_first * usb1;
54fb996a
ED
350 unsigned cgno, oldcount, newcount;
351 u64 tmp, request, result;
1da177e4 352
54fb996a
ED
353 UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
354 inode->i_ino, (unsigned long long)fragment,
355 (unsigned long long)goal, count);
1da177e4
LT
356
357 sb = inode->i_sb;
358 uspi = UFS_SB(sb)->s_uspi;
7b4ee73e 359 usb1 = ubh_get_usb_first(uspi);
1da177e4
LT
360 *err = -ENOSPC;
361
cdd9eefd 362 mutex_lock(&UFS_SB(sb)->s_lock);
54fb996a
ED
363 tmp = ufs_data_ptr_to_cpu(sb, p);
364
1da177e4 365 if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
54fb996a
ED
366 ufs_warning(sb, "ufs_new_fragments", "internal warning"
367 " fragment %llu, count %u",
368 (unsigned long long)fragment, count);
1da177e4
LT
369 count = uspi->s_fpb - ufs_fragnum(fragment);
370 }
371 oldcount = ufs_fragnum (fragment);
372 newcount = oldcount + count;
373
374 /*
375 * Somebody else has just allocated our fragments
376 */
377 if (oldcount) {
378 if (!tmp) {
54fb996a
ED
379 ufs_error(sb, "ufs_new_fragments", "internal error, "
380 "fragment %llu, tmp %llu\n",
381 (unsigned long long)fragment,
382 (unsigned long long)tmp);
cdd9eefd 383 mutex_unlock(&UFS_SB(sb)->s_lock);
54fb996a 384 return INVBLOCK;
1da177e4
LT
385 }
386 if (fragment < UFS_I(inode)->i_lastfrag) {
abf5d15f 387 UFSD("EXIT (ALREADY ALLOCATED)\n");
cdd9eefd 388 mutex_unlock(&UFS_SB(sb)->s_lock);
1da177e4
LT
389 return 0;
390 }
391 }
392 else {
393 if (tmp) {
abf5d15f 394 UFSD("EXIT (ALREADY ALLOCATED)\n");
cdd9eefd 395 mutex_unlock(&UFS_SB(sb)->s_lock);
1da177e4
LT
396 return 0;
397 }
398 }
399
400 /*
401 * There is not enough space for user on the device
402 */
c596961d 403 if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
b451cec4
AV
404 if (!capable(CAP_SYS_RESOURCE)) {
405 mutex_unlock(&UFS_SB(sb)->s_lock);
406 UFSD("EXIT (FAILED)\n");
407 return 0;
408 }
1da177e4
LT
409 }
410
411 if (goal >= uspi->s_size)
412 goal = 0;
413 if (goal == 0)
414 cgno = ufs_inotocg (inode->i_ino);
415 else
54fb996a 416 cgno = ufs_dtog(uspi, goal);
1da177e4
LT
417
418 /*
419 * allocate new fragment
420 */
421 if (oldcount == 0) {
422 result = ufs_alloc_fragments (inode, cgno, goal, count, err);
423 if (result) {
bd2843fe
AV
424 ufs_clear_frags(inode, result + oldcount,
425 newcount - oldcount, locked_page != NULL);
09bf4f5b 426 *err = 0;
724bb09f 427 write_seqlock(&UFS_I(inode)->meta_lock);
54fb996a 428 ufs_cpu_to_data_ptr(sb, p, result);
54fb996a 429 UFS_I(inode)->i_lastfrag =
1d582723 430 max(UFS_I(inode)->i_lastfrag, fragment + count);
09bf4f5b 431 write_sequnlock(&UFS_I(inode)->meta_lock);
1da177e4 432 }
cdd9eefd 433 mutex_unlock(&UFS_SB(sb)->s_lock);
54fb996a 434 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
435 return result;
436 }
437
438 /*
439 * resize block
440 */
45641c82 441 result = ufs_add_fragments(inode, tmp, oldcount, newcount);
1da177e4
LT
442 if (result) {
443 *err = 0;
09bf4f5b 444 read_seqlock_excl(&UFS_I(inode)->meta_lock);
1d582723
DC
445 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
446 fragment + count);
09bf4f5b 447 read_sequnlock_excl(&UFS_I(inode)->meta_lock);
d63b7090
ED
448 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
449 locked_page != NULL);
cdd9eefd 450 mutex_unlock(&UFS_SB(sb)->s_lock);
54fb996a 451 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
452 return result;
453 }
454
455 /*
456 * allocate new block and move data
457 */
77e9ce32 458 if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
1da177e4 459 request = newcount;
77e9ce32
AV
460 if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
461 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
462 } else {
1da177e4 463 request = uspi->s_fpb;
77e9ce32
AV
464 if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
465 usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
1da177e4
LT
466 }
467 result = ufs_alloc_fragments (inode, cgno, goal, request, err);
468 if (result) {
efee2b81
ED
469 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
470 locked_page != NULL);
289dec5b 471 mutex_unlock(&UFS_SB(sb)->s_lock);
4b25a37e
ED
472 ufs_change_blocknr(inode, fragment - oldcount, oldcount,
473 uspi->s_sbbase + tmp,
474 uspi->s_sbbase + result, locked_page);
09bf4f5b 475 *err = 0;
724bb09f 476 write_seqlock(&UFS_I(inode)->meta_lock);
54fb996a 477 ufs_cpu_to_data_ptr(sb, p, result);
1d582723
DC
478 UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
479 fragment + count);
09bf4f5b 480 write_sequnlock(&UFS_I(inode)->meta_lock);
1da177e4
LT
481 if (newcount < request)
482 ufs_free_fragments (inode, result + newcount, request - newcount);
483 ufs_free_fragments (inode, tmp, oldcount);
54fb996a 484 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
485 return result;
486 }
487
cdd9eefd 488 mutex_unlock(&UFS_SB(sb)->s_lock);
abf5d15f 489 UFSD("EXIT (FAILED)\n");
1da177e4
LT
490 return 0;
491}
492
eb315d2a
AV
493static bool try_add_frags(struct inode *inode, unsigned frags)
494{
495 unsigned size = frags * i_blocksize(inode);
496 spin_lock(&inode->i_lock);
497 __inode_add_bytes(inode, size);
498 if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
499 __inode_sub_bytes(inode, size);
500 spin_unlock(&inode->i_lock);
501 return false;
502 }
503 spin_unlock(&inode->i_lock);
504 return true;
505}
506
54fb996a 507static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
45641c82 508 unsigned oldcount, unsigned newcount)
1da177e4
LT
509{
510 struct super_block * sb;
511 struct ufs_sb_private_info * uspi;
1da177e4
LT
512 struct ufs_cg_private_info * ucpi;
513 struct ufs_cylinder_group * ucg;
514 unsigned cgno, fragno, fragoff, count, fragsize, i;
515
54fb996a
ED
516 UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
517 (unsigned long long)fragment, oldcount, newcount);
1da177e4
LT
518
519 sb = inode->i_sb;
520 uspi = UFS_SB(sb)->s_uspi;
1da177e4
LT
521 count = newcount - oldcount;
522
54fb996a 523 cgno = ufs_dtog(uspi, fragment);
1da177e4
LT
524 if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
525 return 0;
526 if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
527 return 0;
528 ucpi = ufs_load_cylinder (sb, cgno);
529 if (!ucpi)
530 return 0;
9695ef16 531 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
1da177e4
LT
532 if (!ufs_cg_chkmagic(sb, ucg)) {
533 ufs_panic (sb, "ufs_add_fragments",
534 "internal error, bad magic number on cg %u", cgno);
535 return 0;
536 }
537
54fb996a 538 fragno = ufs_dtogd(uspi, fragment);
1da177e4
LT
539 fragoff = ufs_fragnum (fragno);
540 for (i = oldcount; i < newcount; i++)
9695ef16 541 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
1da177e4 542 return 0;
eb315d2a
AV
543
544 if (!try_add_frags(inode, count))
545 return 0;
1da177e4
LT
546 /*
547 * Block can be extended
548 */
549 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
550 for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
9695ef16 551 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
1da177e4
LT
552 break;
553 fragsize = i - oldcount;
554 if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
555 ufs_panic (sb, "ufs_add_fragments",
556 "internal error or corrupted bitmap on cg %u", cgno);
557 fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
558 if (fragsize != count)
559 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
560 for (i = oldcount; i < newcount; i++)
9695ef16 561 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
1da177e4
LT
562
563 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
564 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
ee3ffd6c 565 uspi->cs_total.cs_nffree -= count;
1da177e4 566
9695ef16
ED
567 ubh_mark_buffer_dirty (USPI_UBH(uspi));
568 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
9cb569d6
CH
569 if (sb->s_flags & MS_SYNCHRONOUS)
570 ubh_sync_block(UCPI_UBH(ucpi));
9e9ad5f4 571 ufs_mark_sb_dirty(sb);
1da177e4 572
54fb996a 573 UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
1da177e4
LT
574
575 return fragment;
576}
/*
 * Test whether cylinder group 'cgno' can satisfy a 'count'-fragment
 * request: either a whole free block (cs_nbfree) or a free fragment run
 * of length >= count (cg_frsum[k] != 0 for some k >= count).
 * Jumps to the cg_found label on success. Expands in a context where
 * sb, uspi, ucg, cgno, count and k are in scope (ufs_alloc_fragments).
 */
#define UFS_TEST_FREE_SPACE_CG \
	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
		goto cg_found; \
	for (k = count; k < uspi->s_fpb; k++) \
		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
			goto cg_found;
585
54fb996a
ED
586static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
587 u64 goal, unsigned count, int *err)
1da177e4
LT
588{
589 struct super_block * sb;
590 struct ufs_sb_private_info * uspi;
1da177e4
LT
591 struct ufs_cg_private_info * ucpi;
592 struct ufs_cylinder_group * ucg;
54fb996a
ED
593 unsigned oldcg, i, j, k, allocsize;
594 u64 result;
1da177e4 595
54fb996a
ED
596 UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
597 inode->i_ino, cgno, (unsigned long long)goal, count);
1da177e4
LT
598
599 sb = inode->i_sb;
600 uspi = UFS_SB(sb)->s_uspi;
1da177e4
LT
601 oldcg = cgno;
602
603 /*
604 * 1. searching on preferred cylinder group
605 */
606 UFS_TEST_FREE_SPACE_CG
607
608 /*
609 * 2. quadratic rehash
610 */
611 for (j = 1; j < uspi->s_ncg; j *= 2) {
612 cgno += j;
613 if (cgno >= uspi->s_ncg)
614 cgno -= uspi->s_ncg;
615 UFS_TEST_FREE_SPACE_CG
616 }
617
618 /*
619 * 3. brute force search
620 * We start at i = 2 ( 0 is checked at 1.step, 1 at 2.step )
621 */
622 cgno = (oldcg + 1) % uspi->s_ncg;
623 for (j = 2; j < uspi->s_ncg; j++) {
624 cgno++;
625 if (cgno >= uspi->s_ncg)
626 cgno = 0;
627 UFS_TEST_FREE_SPACE_CG
628 }
629
abf5d15f 630 UFSD("EXIT (FAILED)\n");
1da177e4
LT
631 return 0;
632
633cg_found:
634 ucpi = ufs_load_cylinder (sb, cgno);
635 if (!ucpi)
636 return 0;
9695ef16 637 ucg = ubh_get_ucg (UCPI_UBH(ucpi));
1da177e4
LT
638 if (!ufs_cg_chkmagic(sb, ucg))
639 ufs_panic (sb, "ufs_alloc_fragments",
640 "internal error, bad magic number on cg %u", cgno);
641 ucg->cg_time = cpu_to_fs32(sb, get_seconds());
642
643 if (count == uspi->s_fpb) {
644 result = ufs_alloccg_block (inode, ucpi, goal, err);
54fb996a 645 if (result == INVBLOCK)
1da177e4
LT
646 return 0;
647 goto succed;
648 }
649
650 for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
651 if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
652 break;
653
654 if (allocsize == uspi->s_fpb) {
655 result = ufs_alloccg_block (inode, ucpi, goal, err);
54fb996a 656 if (result == INVBLOCK)
1da177e4 657 return 0;
54fb996a 658 goal = ufs_dtogd(uspi, result);
1da177e4 659 for (i = count; i < uspi->s_fpb; i++)
9695ef16 660 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
1da177e4 661 i = uspi->s_fpb - count;
1da177e4 662
eb315d2a 663 inode_sub_bytes(inode, i << uspi->s_fshift);
1da177e4 664 fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
ee3ffd6c 665 uspi->cs_total.cs_nffree += i;
1da177e4
LT
666 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
667 fs32_add(sb, &ucg->cg_frsum[i], 1);
668 goto succed;
669 }
670
671 result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
54fb996a 672 if (result == INVBLOCK)
1da177e4 673 return 0;
eb315d2a
AV
674 if (!try_add_frags(inode, count))
675 return 0;
1da177e4 676 for (i = 0; i < count; i++)
9695ef16 677 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
1da177e4
LT
678
679 fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
ee3ffd6c 680 uspi->cs_total.cs_nffree -= count;
1da177e4
LT
681 fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
682 fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
683
684 if (count != allocsize)
685 fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
686
687succed:
9695ef16
ED
688 ubh_mark_buffer_dirty (USPI_UBH(uspi));
689 ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
9cb569d6
CH
690 if (sb->s_flags & MS_SYNCHRONOUS)
691 ubh_sync_block(UCPI_UBH(ucpi));
9e9ad5f4 692 ufs_mark_sb_dirty(sb);
1da177e4
LT
693
694 result += cgno * uspi->s_fpg;
54fb996a 695 UFSD("EXIT3, result %llu\n", (unsigned long long)result);
1da177e4
LT
696 return result;
697}
698
54fb996a
ED
699static u64 ufs_alloccg_block(struct inode *inode,
700 struct ufs_cg_private_info *ucpi,
701 u64 goal, int *err)
1da177e4
LT
702{
703 struct super_block * sb;
704 struct ufs_sb_private_info * uspi;
1da177e4 705 struct ufs_cylinder_group * ucg;
54fb996a 706 u64 result, blkno;
1da177e4 707
54fb996a 708 UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
1da177e4
LT
709
710 sb = inode->i_sb;
711 uspi = UFS_SB(sb)->s_uspi;
9695ef16 712 ucg = ubh_get_ucg(UCPI_UBH(ucpi));
1da177e4
LT
713
714 if (goal == 0) {
715 goal = ucpi->c_rotor;
716 goto norot;
717 }
718 goal = ufs_blknum (goal);
54fb996a 719 goal = ufs_dtogd(uspi, goal);
1da177e4
LT
720
721 /*
722 * If the requested block is available, use it.
723 */
9695ef16 724 if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
1da177e4
LT
725 result = goal;
726 goto gotit;
727 }
728
729norot:
730 result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
54fb996a
ED
731 if (result == INVBLOCK)
732 return INVBLOCK;
1da177e4
LT
733 ucpi->c_rotor = result;
734gotit:
eb315d2a
AV
735 if (!try_add_frags(inode, uspi->s_fpb))
736 return 0;
1da177e4 737 blkno = ufs_fragstoblks(result);
9695ef16 738 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
1da177e4
LT
739 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
740 ufs_clusteracct (sb, ucpi, blkno, -1);
1da177e4
LT
741
742 fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
ee3ffd6c 743 uspi->cs_total.cs_nbfree--;
1da177e4 744 fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
54fb996a
ED
745
746 if (uspi->fs_magic != UFS2_MAGIC) {
747 unsigned cylno = ufs_cbtocylno((unsigned)result);
748
749 fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
750 ufs_cbtorpos((unsigned)result)), 1);
751 fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
752 }
1da177e4 753
54fb996a 754 UFSD("EXIT, result %llu\n", (unsigned long long)result);
1da177e4
LT
755
756 return result;
757}
758
3e41f597
ED
759static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
760 struct ufs_buffer_head *ubh,
761 unsigned begin, unsigned size,
762 unsigned char *table, unsigned char mask)
1da177e4 763{
3e41f597
ED
764 unsigned rest, offset;
765 unsigned char *cp;
1da177e4 766
1da177e4 767
3e41f597
ED
768 offset = begin & ~uspi->s_fmask;
769 begin >>= uspi->s_fshift;
770 for (;;) {
771 if ((offset + size) < uspi->s_fsize)
772 rest = size;
773 else
774 rest = uspi->s_fsize - offset;
775 size -= rest;
776 cp = ubh->bh[begin]->b_data + offset;
777 while ((table[*cp++] & mask) == 0 && --rest)
778 ;
779 if (rest || !size)
780 break;
781 begin++;
782 offset = 0;
783 }
784 return (size + rest);
785}
786
787/*
788 * Find a block of the specified size in the specified cylinder group.
789 * @sp: pointer to super block
790 * @ucpi: pointer to cylinder group info
791 * @goal: near which block we want find new one
792 * @count: specified size
793 */
54fb996a
ED
794static u64 ufs_bitmap_search(struct super_block *sb,
795 struct ufs_cg_private_info *ucpi,
796 u64 goal, unsigned count)
3e41f597
ED
797{
798 /*
799 * Bit patterns for identifying fragments in the block map
800 * used as ((map & mask_arr) == want_arr)
801 */
802 static const int mask_arr[9] = {
803 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
804 };
805 static const int want_arr[9] = {
806 0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
807 };
808 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
54fb996a 809 unsigned start, length, loc;
3e41f597 810 unsigned pos, want, blockmap, mask, end;
54fb996a 811 u64 result;
3e41f597 812
54fb996a
ED
813 UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
814 (unsigned long long)goal, count);
3e41f597 815
1da177e4 816 if (goal)
54fb996a 817 start = ufs_dtogd(uspi, goal) >> 3;
1da177e4
LT
818 else
819 start = ucpi->c_frotor >> 3;
820
821 length = ((uspi->s_fpg + 7) >> 3) - start;
3e41f597 822 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
1da177e4
LT
823 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
824 1 << (count - 1 + (uspi->s_fpb & 7)));
3e41f597 825 if (loc == 0) {
1da177e4 826 length = start + 1;
3e41f597
ED
827 loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
828 (uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
829 ufs_fragtable_other,
830 1 << (count - 1 + (uspi->s_fpb & 7)));
831 if (loc == 0) {
832 ufs_error(sb, "ufs_bitmap_search",
833 "bitmap corrupted on cg %u, start %u,"
834 " length %u, count %u, freeoff %u\n",
835 ucpi->c_cgx, start, length, count,
836 ucpi->c_freeoff);
54fb996a 837 return INVBLOCK;
1da177e4
LT
838 }
839 start = 0;
840 }
3e41f597 841 result = (start + length - loc) << 3;
1da177e4
LT
842 ucpi->c_frotor = result;
843
844 /*
845 * found the byte in the map
846 */
3e41f597
ED
847
848 for (end = result + 8; result < end; result += uspi->s_fpb) {
849 blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
850 blockmap <<= 1;
851 mask = mask_arr[count];
852 want = want_arr[count];
853 for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
854 if ((blockmap & mask) == want) {
54fb996a
ED
855 UFSD("EXIT, result %llu\n",
856 (unsigned long long)result);
3e41f597
ED
857 return result + pos;
858 }
859 mask <<= 1;
860 want <<= 1;
861 }
862 }
863
864 ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
865 ucpi->c_cgx);
abf5d15f 866 UFSD("EXIT (FAILED)\n");
54fb996a 867 return INVBLOCK;
1da177e4
LT
868}
869
870static void ufs_clusteracct(struct super_block * sb,
871 struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
872{
873 struct ufs_sb_private_info * uspi;
874 int i, start, end, forw, back;
875
876 uspi = UFS_SB(sb)->s_uspi;
877 if (uspi->s_contigsumsize <= 0)
878 return;
879
880 if (cnt > 0)
9695ef16 881 ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
1da177e4 882 else
9695ef16 883 ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
1da177e4
LT
884
885 /*
886 * Find the size of the cluster going forward.
887 */
888 start = blkno + 1;
889 end = start + uspi->s_contigsumsize;
890 if ( end >= ucpi->c_nclusterblks)
891 end = ucpi->c_nclusterblks;
9695ef16 892 i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
1da177e4
LT
893 if (i > end)
894 i = end;
895 forw = i - start;
896
897 /*
898 * Find the size of the cluster going backward.
899 */
900 start = blkno - 1;
901 end = start - uspi->s_contigsumsize;
902 if (end < 0 )
903 end = -1;
9695ef16 904 i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
1da177e4
LT
905 if ( i < end)
906 i = end;
907 back = start - i;
908
909 /*
910 * Account for old cluster and the possibly new forward and
911 * back clusters.
912 */
913 i = back + forw + 1;
914 if (i > uspi->s_contigsumsize)
915 i = uspi->s_contigsumsize;
9695ef16 916 fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
1da177e4 917 if (back > 0)
9695ef16 918 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
1da177e4 919 if (forw > 0)
9695ef16 920 fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
1da177e4
LT
921}
/*
 * Fragment-run lookup table for filesystems with 8 fragments per block:
 * indexed by a bitmap byte, each entry has bit (n-1) set iff the byte
 * contains a free run of exactly n consecutive fragments (bit 7 = the
 * fully free byte). Consumed by ubh_scanc via ufs_bitmap_search.
 */
static unsigned char ufs_fragtable_8fpb[] = {
	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
};
/*
 * Fragment-run lookup table for filesystems with other than 8 fragments
 * per block; used with shifted masks in ufs_bitmap_search/ubh_scanc.
 */
static unsigned char ufs_fragtable_other[] = {
	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
};