fs: push rcu_barrier() from deactivate_locked_super() to filesystems
1 /*
2 * super.c
3 *
4 * PURPOSE
5 * Super block routines for the OSTA-UDF(tm) filesystem.
6 *
7 * DESCRIPTION
8 * OSTA-UDF(tm) = Optical Storage Technology Association
9 * Universal Disk Format.
10 *
11 * This code is based on version 2.00 of the UDF specification,
12 * and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
13 * http://www.osta.org/
14 * http://www.ecma.ch/
15 * http://www.iso.org/
16 *
17 * COPYRIGHT
18 * This file is distributed under the terms of the GNU General Public
19 * License (GPL). Copies of the GPL can be obtained from:
20 * ftp://prep.ai.mit.edu/pub/gnu/GPL
21 * Each contributing author retains all rights to their own work.
22 *
23 * (C) 1998 Dave Boynton
24 * (C) 1998-2004 Ben Fennema
25 * (C) 2000 Stelias Computing Inc
26 *
27 * HISTORY
28 *
29 * 09/24/98 dgb changed to allow compiling outside of kernel, and
30 * added some debugging.
31 * 10/01/98 dgb updated to allow (some) possibility of compiling w/2.0.34
32 * 10/16/98 attempting some multi-session support
33 * 10/17/98 added freespace count for "df"
34 * 11/11/98 gr added novrs option
35 * 11/26/98 dgb added fileset,anchor mount options
36 * 12/06/98 blf really hosed things royally. vat/sparing support. sequenced
37 * vol descs. rewrote option handling based on isofs
38 * 12/20/98 find the free space bitmap (if it exists)
39 */
40
41 #include "udfdecl.h"
42
43 #include <linux/blkdev.h>
44 #include <linux/slab.h>
45 #include <linux/kernel.h>
46 #include <linux/module.h>
47 #include <linux/parser.h>
48 #include <linux/stat.h>
49 #include <linux/cdrom.h>
50 #include <linux/nls.h>
51 #include <linux/buffer_head.h>
52 #include <linux/vfs.h>
53 #include <linux/vmalloc.h>
54 #include <linux/errno.h>
55 #include <linux/mount.h>
56 #include <linux/seq_file.h>
57 #include <linux/bitmap.h>
58 #include <linux/crc-itu-t.h>
59 #include <linux/log2.h>
60 #include <asm/byteorder.h>
61
62 #include "udf_sb.h"
63 #include "udf_i.h"
64
65 #include <linux/init.h>
66 #include <asm/uaccess.h>
67
68 #define VDS_POS_PRIMARY_VOL_DESC 0
69 #define VDS_POS_UNALLOC_SPACE_DESC 1
70 #define VDS_POS_LOGICAL_VOL_DESC 2
71 #define VDS_POS_PARTITION_DESC 3
72 #define VDS_POS_IMP_USE_VOL_DESC 4
73 #define VDS_POS_VOL_DESC_PTR 5
74 #define VDS_POS_TERMINATING_DESC 6
75 #define VDS_POS_LENGTH 7
76
77 #define UDF_DEFAULT_BLOCKSIZE 2048
78
79 enum { UDF_MAX_LINKS = 0xffff };
80
81 /* These are the "meat" - everything else is stuffing */
82 static int udf_fill_super(struct super_block *, void *, int);
83 static void udf_put_super(struct super_block *);
84 static int udf_sync_fs(struct super_block *, int);
85 static int udf_remount_fs(struct super_block *, int *, char *);
86 static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
87 static int udf_find_fileset(struct super_block *, struct kernel_lb_addr *,
88 struct kernel_lb_addr *);
89 static void udf_load_fileset(struct super_block *, struct buffer_head *,
90 struct kernel_lb_addr *);
91 static void udf_open_lvid(struct super_block *);
92 static void udf_close_lvid(struct super_block *);
93 static unsigned int udf_count_free(struct super_block *);
94 static int udf_statfs(struct dentry *, struct kstatfs *);
95 static int udf_show_options(struct seq_file *, struct dentry *);
96
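/*
 * Note: in the LVID layout the implementation-use area starts right after
 * two per-partition tables (the free space table and the size table), each
 * holding numOfPartitions 32-bit entries, which is the offset computed in
 * udf_sb_lvidiu() below.
 */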
97 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
98 {
99 struct logicalVolIntegrityDesc *lvid =
100 (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
101 __u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions);
102 __u32 offset = number_of_partitions * 2 *
103 sizeof(uint32_t)/sizeof(uint8_t);
104 return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
105 }
106
107 /* UDF filesystem type */
108 static struct dentry *udf_mount(struct file_system_type *fs_type,
109 int flags, const char *dev_name, void *data)
110 {
111 return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
112 }
113
114 static struct file_system_type udf_fstype = {
115 .owner = THIS_MODULE,
116 .name = "udf",
117 .mount = udf_mount,
118 .kill_sb = kill_block_super,
119 .fs_flags = FS_REQUIRES_DEV,
120 };
121
122 static struct kmem_cache *udf_inode_cachep;
123
124 static struct inode *udf_alloc_inode(struct super_block *sb)
125 {
126 struct udf_inode_info *ei;
127 ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
128 if (!ei)
129 return NULL;
130
131 ei->i_unique = 0;
132 ei->i_lenExtents = 0;
133 ei->i_next_alloc_block = 0;
134 ei->i_next_alloc_goal = 0;
135 ei->i_strat4096 = 0;
136 init_rwsem(&ei->i_data_sem);
137
138 return &ei->vfs_inode;
139 }
140
141 static void udf_i_callback(struct rcu_head *head)
142 {
143 struct inode *inode = container_of(head, struct inode, i_rcu);
144 kmem_cache_free(udf_inode_cachep, UDF_I(inode));
145 }
146
147 static void udf_destroy_inode(struct inode *inode)
148 {
149 call_rcu(&inode->i_rcu, udf_i_callback);
150 }
151
152 static void init_once(void *foo)
153 {
154 struct udf_inode_info *ei = (struct udf_inode_info *)foo;
155
156 ei->i_ext.i_data = NULL;
157 inode_init_once(&ei->vfs_inode);
158 }
159
160 static int init_inodecache(void)
161 {
162 udf_inode_cachep = kmem_cache_create("udf_inode_cache",
163 sizeof(struct udf_inode_info),
164 0, (SLAB_RECLAIM_ACCOUNT |
165 SLAB_MEM_SPREAD),
166 init_once);
167 if (!udf_inode_cachep)
168 return -ENOMEM;
169 return 0;
170 }
171
172 static void destroy_inodecache(void)
173 {
174 /*
175 * Make sure all delayed rcu free inodes are flushed before we
176 * destroy cache.
177 */
178 rcu_barrier();
179 kmem_cache_destroy(udf_inode_cachep);
180 }
181
182 /* Superblock operations */
183 static const struct super_operations udf_sb_ops = {
184 .alloc_inode = udf_alloc_inode,
185 .destroy_inode = udf_destroy_inode,
186 .write_inode = udf_write_inode,
187 .evict_inode = udf_evict_inode,
188 .put_super = udf_put_super,
189 .sync_fs = udf_sync_fs,
190 .statfs = udf_statfs,
191 .remount_fs = udf_remount_fs,
192 .show_options = udf_show_options,
193 };
194
195 struct udf_options {
196 unsigned char novrs;
197 unsigned int blocksize;
198 unsigned int session;
199 unsigned int lastblock;
200 unsigned int anchor;
201 unsigned int volume;
202 unsigned short partition;
203 unsigned int fileset;
204 unsigned int rootdir;
205 unsigned int flags;
206 umode_t umask;
207 gid_t gid;
208 uid_t uid;
209 umode_t fmode;
210 umode_t dmode;
211 struct nls_table *nls_map;
212 };
213
214 static int __init init_udf_fs(void)
215 {
216 int err;
217
218 err = init_inodecache();
219 if (err)
220 goto out1;
221 err = register_filesystem(&udf_fstype);
222 if (err)
223 goto out;
224
225 return 0;
226
227 out:
228 destroy_inodecache();
229
230 out1:
231 return err;
232 }
233
234 static void __exit exit_udf_fs(void)
235 {
236 unregister_filesystem(&udf_fstype);
237 destroy_inodecache();
238 }
239
240 module_init(init_udf_fs)
241 module_exit(exit_udf_fs)
242
243 static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
244 {
245 struct udf_sb_info *sbi = UDF_SB(sb);
246
247 sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map),
248 GFP_KERNEL);
249 if (!sbi->s_partmaps) {
250 udf_err(sb, "Unable to allocate space for %d partition maps\n",
251 count);
252 sbi->s_partitions = 0;
253 return -ENOMEM;
254 }
255
256 sbi->s_partitions = count;
257 return 0;
258 }
259
260 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
261 {
262 int i;
263 int nr_groups = bitmap->s_nr_groups;
264 int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) *
265 nr_groups);
266
267 for (i = 0; i < nr_groups; i++)
268 if (bitmap->s_block_bitmap[i])
269 brelse(bitmap->s_block_bitmap[i]);
270
271 if (size <= PAGE_SIZE)
272 kfree(bitmap);
273 else
274 vfree(bitmap);
275 }
276
277 static void udf_free_partition(struct udf_part_map *map)
278 {
279 int i;
280 struct udf_meta_data *mdata;
281
282 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
283 iput(map->s_uspace.s_table);
284 if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
285 iput(map->s_fspace.s_table);
286 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
287 udf_sb_free_bitmap(map->s_uspace.s_bitmap);
288 if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
289 udf_sb_free_bitmap(map->s_fspace.s_bitmap);
290 if (map->s_partition_type == UDF_SPARABLE_MAP15)
291 for (i = 0; i < 4; i++)
292 brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
293 else if (map->s_partition_type == UDF_METADATA_MAP25) {
294 mdata = &map->s_type_specific.s_metadata;
295 iput(mdata->s_metadata_fe);
296 mdata->s_metadata_fe = NULL;
297
298 iput(mdata->s_mirror_fe);
299 mdata->s_mirror_fe = NULL;
300
301 iput(mdata->s_bitmap_fe);
302 mdata->s_bitmap_fe = NULL;
303 }
304 }
305
306 static void udf_sb_free_partitions(struct super_block *sb)
307 {
308 struct udf_sb_info *sbi = UDF_SB(sb);
309 int i;
310
311 for (i = 0; i < sbi->s_partitions; i++)
312 udf_free_partition(&sbi->s_partmaps[i]);
313 kfree(sbi->s_partmaps);
314 sbi->s_partmaps = NULL;
315 }
316
317 static int udf_show_options(struct seq_file *seq, struct dentry *root)
318 {
319 struct super_block *sb = root->d_sb;
320 struct udf_sb_info *sbi = UDF_SB(sb);
321
322 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
323 seq_puts(seq, ",nostrict");
324 if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
325 seq_printf(seq, ",bs=%lu", sb->s_blocksize);
326 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
327 seq_puts(seq, ",unhide");
328 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
329 seq_puts(seq, ",undelete");
330 if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
331 seq_puts(seq, ",noadinicb");
332 if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
333 seq_puts(seq, ",shortad");
334 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
335 seq_puts(seq, ",uid=forget");
336 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE))
337 seq_puts(seq, ",uid=ignore");
338 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
339 seq_puts(seq, ",gid=forget");
340 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE))
341 seq_puts(seq, ",gid=ignore");
342 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
343 seq_printf(seq, ",uid=%u", sbi->s_uid);
344 if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
345 seq_printf(seq, ",gid=%u", sbi->s_gid);
346 if (sbi->s_umask != 0)
347 seq_printf(seq, ",umask=%ho", sbi->s_umask);
348 if (sbi->s_fmode != UDF_INVALID_MODE)
349 seq_printf(seq, ",mode=%ho", sbi->s_fmode);
350 if (sbi->s_dmode != UDF_INVALID_MODE)
351 seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
352 if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
353 seq_printf(seq, ",session=%u", sbi->s_session);
354 if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
355 seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
356 if (sbi->s_anchor != 0)
357 seq_printf(seq, ",anchor=%u", sbi->s_anchor);
358 /*
359 * volume, partition, fileset and rootdir seem to be ignored
360 * currently
361 */
362 if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
363 seq_puts(seq, ",utf8");
364 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
365 seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
366
367 return 0;
368 }
369
370 /*
371 * udf_parse_options
372 *
373 * PURPOSE
374 * Parse mount options.
375 *
376 * DESCRIPTION
377 * The following mount options are supported:
378 *
379 * gid= Set the default group.
380 * umask= Set the default umask.
381 * mode= Set the default file permissions.
382 * dmode= Set the default directory permissions.
383 * uid= Set the default user.
384 * bs= Set the block size.
385 * unhide Show otherwise hidden files.
386 * undelete Show deleted files in lists.
387 * adinicb Embed data in the inode (default)
388 * noadinicb Don't embed data in the inode
389 * shortad Use short ADs
390 * longad Use long ADs (default)
391 * nostrict Unset strict conformance
392 * iocharset= Set the NLS character set
393 *
394 * The remaining options are for debugging and disaster recovery:
395 *
396 * novrs Skip volume sequence recognition
397 *
398 * The following expect an offset from 0.
399 *
400 * session= Set the CDROM session (default= last session)
401 * anchor= Override standard anchor location. (default= 256)
402 * volume= Override the VolumeDesc location. (unused)
403 * partition= Override the PartitionDesc location. (unused)
404 * lastblock= Set the last block of the filesystem.
405 *
406 * The following expect an offset from the partition root.
407 *
408 * fileset= Override the fileset block location. (unused)
409 * rootdir= Override the root directory location. (unused)
410 * WARNING: overriding the rootdir to a non-directory may
411 * yield highly unpredictable results.
412 *
413 * PRE-CONDITIONS
414 * options Pointer to mount options string.
415 * uopts Pointer to mount options variable.
416 *
417 * POST-CONDITIONS
418 * <return> 1 Mount options parsed okay.
419 * <return> 0 Error parsing mount options.
420 *
421 * HISTORY
422 * July 1, 1997 - Andrew E. Mileski
423 * Written, tested, and released.
424 */
425
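/*
 * Illustrative invocation (the device path and option values below are
 * examples only, not defaults):
 *
 *	mount -t udf -o iocharset=utf8,umask=0022,undelete /dev/sr0 /mnt/udf
 *
 * An unknown option or a malformed value makes udf_parse_options() return 0
 * and the mount attempt is rejected.
 */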
426 enum {
427 Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
428 Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
429 Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
430 Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
431 Opt_rootdir, Opt_utf8, Opt_iocharset,
432 Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
433 Opt_fmode, Opt_dmode
434 };
435
436 static const match_table_t tokens = {
437 {Opt_novrs, "novrs"},
438 {Opt_nostrict, "nostrict"},
439 {Opt_bs, "bs=%u"},
440 {Opt_unhide, "unhide"},
441 {Opt_undelete, "undelete"},
442 {Opt_noadinicb, "noadinicb"},
443 {Opt_adinicb, "adinicb"},
444 {Opt_shortad, "shortad"},
445 {Opt_longad, "longad"},
446 {Opt_uforget, "uid=forget"},
447 {Opt_uignore, "uid=ignore"},
448 {Opt_gforget, "gid=forget"},
449 {Opt_gignore, "gid=ignore"},
450 {Opt_gid, "gid=%u"},
451 {Opt_uid, "uid=%u"},
452 {Opt_umask, "umask=%o"},
453 {Opt_session, "session=%u"},
454 {Opt_lastblock, "lastblock=%u"},
455 {Opt_anchor, "anchor=%u"},
456 {Opt_volume, "volume=%u"},
457 {Opt_partition, "partition=%u"},
458 {Opt_fileset, "fileset=%u"},
459 {Opt_rootdir, "rootdir=%u"},
460 {Opt_utf8, "utf8"},
461 {Opt_iocharset, "iocharset=%s"},
462 {Opt_fmode, "mode=%o"},
463 {Opt_dmode, "dmode=%o"},
464 {Opt_err, NULL}
465 };
466
467 static int udf_parse_options(char *options, struct udf_options *uopt,
468 bool remount)
469 {
470 char *p;
471 int option;
472
473 uopt->novrs = 0;
474 uopt->partition = 0xFFFF;
475 uopt->session = 0xFFFFFFFF;
476 uopt->lastblock = 0;
477 uopt->anchor = 0;
478 uopt->volume = 0xFFFFFFFF;
479 uopt->rootdir = 0xFFFFFFFF;
480 uopt->fileset = 0xFFFFFFFF;
481 uopt->nls_map = NULL;
482
483 if (!options)
484 return 1;
485
486 while ((p = strsep(&options, ",")) != NULL) {
487 substring_t args[MAX_OPT_ARGS];
488 int token;
489 if (!*p)
490 continue;
491
492 token = match_token(p, tokens, args);
493 switch (token) {
494 case Opt_novrs:
495 uopt->novrs = 1;
496 break;
497 case Opt_bs:
498 if (match_int(&args[0], &option))
499 return 0;
500 uopt->blocksize = option;
501 uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
502 break;
503 case Opt_unhide:
504 uopt->flags |= (1 << UDF_FLAG_UNHIDE);
505 break;
506 case Opt_undelete:
507 uopt->flags |= (1 << UDF_FLAG_UNDELETE);
508 break;
509 case Opt_noadinicb:
510 uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
511 break;
512 case Opt_adinicb:
513 uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
514 break;
515 case Opt_shortad:
516 uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
517 break;
518 case Opt_longad:
519 uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
520 break;
521 case Opt_gid:
522 if (match_int(args, &option))
523 return 0;
524 uopt->gid = option;
525 uopt->flags |= (1 << UDF_FLAG_GID_SET);
526 break;
527 case Opt_uid:
528 if (match_int(args, &option))
529 return 0;
530 uopt->uid = option;
531 uopt->flags |= (1 << UDF_FLAG_UID_SET);
532 break;
533 case Opt_umask:
534 if (match_octal(args, &option))
535 return 0;
536 uopt->umask = option;
537 break;
538 case Opt_nostrict:
539 uopt->flags &= ~(1 << UDF_FLAG_STRICT);
540 break;
541 case Opt_session:
542 if (match_int(args, &option))
543 return 0;
544 uopt->session = option;
545 if (!remount)
546 uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
547 break;
548 case Opt_lastblock:
549 if (match_int(args, &option))
550 return 0;
551 uopt->lastblock = option;
552 if (!remount)
553 uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
554 break;
555 case Opt_anchor:
556 if (match_int(args, &option))
557 return 0;
558 uopt->anchor = option;
559 break;
560 case Opt_volume:
561 if (match_int(args, &option))
562 return 0;
563 uopt->volume = option;
564 break;
565 case Opt_partition:
566 if (match_int(args, &option))
567 return 0;
568 uopt->partition = option;
569 break;
570 case Opt_fileset:
571 if (match_int(args, &option))
572 return 0;
573 uopt->fileset = option;
574 break;
575 case Opt_rootdir:
576 if (match_int(args, &option))
577 return 0;
578 uopt->rootdir = option;
579 break;
580 case Opt_utf8:
581 uopt->flags |= (1 << UDF_FLAG_UTF8);
582 break;
583 #ifdef CONFIG_UDF_NLS
584 case Opt_iocharset:
585 uopt->nls_map = load_nls(args[0].from);
586 uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
587 break;
588 #endif
589 case Opt_uignore:
590 uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
591 break;
592 case Opt_uforget:
593 uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
594 break;
595 case Opt_gignore:
596 uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
597 break;
598 case Opt_gforget:
599 uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
600 break;
601 case Opt_fmode:
602 if (match_octal(args, &option))
603 return 0;
604 uopt->fmode = option & 0777;
605 break;
606 case Opt_dmode:
607 if (match_octal(args, &option))
608 return 0;
609 uopt->dmode = option & 0777;
610 break;
611 default:
612 pr_err("bad mount option \"%s\" or missing value\n", p);
613 return 0;
614 }
615 }
616 return 1;
617 }
618
619 static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
620 {
621 struct udf_options uopt;
622 struct udf_sb_info *sbi = UDF_SB(sb);
623 int error = 0;
624
625 uopt.flags = sbi->s_flags;
626 uopt.uid = sbi->s_uid;
627 uopt.gid = sbi->s_gid;
628 uopt.umask = sbi->s_umask;
629 uopt.fmode = sbi->s_fmode;
630 uopt.dmode = sbi->s_dmode;
631
632 if (!udf_parse_options(options, &uopt, true))
633 return -EINVAL;
634
635 write_lock(&sbi->s_cred_lock);
636 sbi->s_flags = uopt.flags;
637 sbi->s_uid = uopt.uid;
638 sbi->s_gid = uopt.gid;
639 sbi->s_umask = uopt.umask;
640 sbi->s_fmode = uopt.fmode;
641 sbi->s_dmode = uopt.dmode;
642 write_unlock(&sbi->s_cred_lock);
643
644 if (sbi->s_lvid_bh) {
645 int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
646 if (write_rev > UDF_MAX_WRITE_VERSION)
647 *flags |= MS_RDONLY;
648 }
649
650 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
651 goto out_unlock;
652
653 if (*flags & MS_RDONLY)
654 udf_close_lvid(sb);
655 else
656 udf_open_lvid(sb);
657
658 out_unlock:
659 return error;
660 }
661
662 /* Check Volume Structure Descriptors (ECMA 167 2/9.1) */
663 /* We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
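/* The volume recognition area begins 32768 bytes into the session, which is */
/* why the scan below starts at that fixed byte offset. */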
664 static loff_t udf_check_vsd(struct super_block *sb)
665 {
666 struct volStructDesc *vsd = NULL;
667 loff_t sector = 32768;
668 int sectorsize;
669 struct buffer_head *bh = NULL;
670 int nsr02 = 0;
671 int nsr03 = 0;
672 struct udf_sb_info *sbi;
673
674 sbi = UDF_SB(sb);
675 if (sb->s_blocksize < sizeof(struct volStructDesc))
676 sectorsize = sizeof(struct volStructDesc);
677 else
678 sectorsize = sb->s_blocksize;
679
680 sector += (sbi->s_session << sb->s_blocksize_bits);
681
682 udf_debug("Starting at sector %u (%ld byte sectors)\n",
683 (unsigned int)(sector >> sb->s_blocksize_bits),
684 sb->s_blocksize);
685 /* Process the sequence (if applicable) */
686 for (; !nsr02 && !nsr03; sector += sectorsize) {
687 /* Read a block */
688 bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
689 if (!bh)
690 break;
691
692 /* Look for ISO descriptors */
693 vsd = (struct volStructDesc *)(bh->b_data +
694 (sector & (sb->s_blocksize - 1)));
695
696 if (vsd->stdIdent[0] == 0) {
697 brelse(bh);
698 break;
699 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
700 VSD_STD_ID_LEN)) {
701 switch (vsd->structType) {
702 case 0:
703 udf_debug("ISO9660 Boot Record found\n");
704 break;
705 case 1:
706 udf_debug("ISO9660 Primary Volume Descriptor found\n");
707 break;
708 case 2:
709 udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
710 break;
711 case 3:
712 udf_debug("ISO9660 Volume Partition Descriptor found\n");
713 break;
714 case 255:
715 udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
716 break;
717 default:
718 udf_debug("ISO9660 VRS (%u) found\n",
719 vsd->structType);
720 break;
721 }
722 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01,
723 VSD_STD_ID_LEN))
724 ; /* nothing */
725 else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01,
726 VSD_STD_ID_LEN)) {
727 brelse(bh);
728 break;
729 } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02,
730 VSD_STD_ID_LEN))
731 nsr02 = sector;
732 else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
733 VSD_STD_ID_LEN))
734 nsr03 = sector;
735 brelse(bh);
736 }
737
738 if (nsr03)
739 return nsr03;
740 else if (nsr02)
741 return nsr02;
742 else if (sector - (sbi->s_session << sb->s_blocksize_bits) == 32768)
743 return -1;
744 else
745 return 0;
746 }
747
748 static int udf_find_fileset(struct super_block *sb,
749 struct kernel_lb_addr *fileset,
750 struct kernel_lb_addr *root)
751 {
752 struct buffer_head *bh = NULL;
753 long lastblock;
754 uint16_t ident;
755 struct udf_sb_info *sbi;
756
757 if (fileset->logicalBlockNum != 0xFFFFFFFF ||
758 fileset->partitionReferenceNum != 0xFFFF) {
759 bh = udf_read_ptagged(sb, fileset, 0, &ident);
760
761 if (!bh) {
762 return 1;
763 } else if (ident != TAG_IDENT_FSD) {
764 brelse(bh);
765 return 1;
766 }
767
768 }
769
770 sbi = UDF_SB(sb);
771 if (!bh) {
772 /* Search backwards through the partitions */
773 struct kernel_lb_addr newfileset;
774
775 /* --> cvg: FIXME - is it reasonable? */
776 return 1;
777
778 for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
779 (newfileset.partitionReferenceNum != 0xFFFF &&
780 fileset->logicalBlockNum == 0xFFFFFFFF &&
781 fileset->partitionReferenceNum == 0xFFFF);
782 newfileset.partitionReferenceNum--) {
783 lastblock = sbi->s_partmaps
784 [newfileset.partitionReferenceNum]
785 .s_partition_len;
786 newfileset.logicalBlockNum = 0;
787
788 do {
789 bh = udf_read_ptagged(sb, &newfileset, 0,
790 &ident);
791 if (!bh) {
792 newfileset.logicalBlockNum++;
793 continue;
794 }
795
796 switch (ident) {
797 case TAG_IDENT_SBD:
798 {
799 struct spaceBitmapDesc *sp;
800 sp = (struct spaceBitmapDesc *)
801 bh->b_data;
802 newfileset.logicalBlockNum += 1 +
803 ((le32_to_cpu(sp->numOfBytes) +
804 sizeof(struct spaceBitmapDesc)
805 - 1) >> sb->s_blocksize_bits);
806 brelse(bh);
807 break;
808 }
809 case TAG_IDENT_FSD:
810 *fileset = newfileset;
811 break;
812 default:
813 newfileset.logicalBlockNum++;
814 brelse(bh);
815 bh = NULL;
816 break;
817 }
818 } while (newfileset.logicalBlockNum < lastblock &&
819 fileset->logicalBlockNum == 0xFFFFFFFF &&
820 fileset->partitionReferenceNum == 0xFFFF);
821 }
822 }
823
824 if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
825 fileset->partitionReferenceNum != 0xFFFF) && bh) {
826 udf_debug("Fileset at block=%d, partition=%d\n",
827 fileset->logicalBlockNum,
828 fileset->partitionReferenceNum);
829
830 sbi->s_partition = fileset->partitionReferenceNum;
831 udf_load_fileset(sb, bh, root);
832 brelse(bh);
833 return 0;
834 }
835 return 1;
836 }
837
838 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
839 {
840 struct primaryVolDesc *pvoldesc;
841 struct ustr *instr, *outstr;
842 struct buffer_head *bh;
843 uint16_t ident;
844 int ret = 1;
845
846 instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
847 if (!instr)
848 return 1;
849
850 outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
851 if (!outstr)
852 goto out1;
853
854 bh = udf_read_tagged(sb, block, block, &ident);
855 if (!bh)
856 goto out2;
857
858 BUG_ON(ident != TAG_IDENT_PVD);
859
860 pvoldesc = (struct primaryVolDesc *)bh->b_data;
861
862 if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
863 pvoldesc->recordingDateAndTime)) {
864 #ifdef UDFFS_DEBUG
865 struct timestamp *ts = &pvoldesc->recordingDateAndTime;
866 udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
867 le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
868 ts->minute, le16_to_cpu(ts->typeAndTimezone));
869 #endif
870 }
871
872 if (!udf_build_ustr(instr, pvoldesc->volIdent, 32))
873 if (udf_CS0toUTF8(outstr, instr)) {
874 strncpy(UDF_SB(sb)->s_volume_ident, outstr->u_name,
875 outstr->u_len > 31 ? 31 : outstr->u_len);
876 udf_debug("volIdent[] = '%s'\n",
877 UDF_SB(sb)->s_volume_ident);
878 }
879
880 if (!udf_build_ustr(instr, pvoldesc->volSetIdent, 128))
881 if (udf_CS0toUTF8(outstr, instr))
882 udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
883
884 brelse(bh);
885 ret = 0;
886 out2:
887 kfree(outstr);
888 out1:
889 kfree(instr);
890 return ret;
891 }
892
893 struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
894 u32 meta_file_loc, u32 partition_num)
895 {
896 struct kernel_lb_addr addr;
897 struct inode *metadata_fe;
898
899 addr.logicalBlockNum = meta_file_loc;
900 addr.partitionReferenceNum = partition_num;
901
902 metadata_fe = udf_iget(sb, &addr);
903
904 if (metadata_fe == NULL)
905 udf_warn(sb, "metadata inode efe not found\n");
906 else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
907 udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
908 iput(metadata_fe);
909 metadata_fe = NULL;
910 }
911
912 return metadata_fe;
913 }
914
915 static int udf_load_metadata_files(struct super_block *sb, int partition)
916 {
917 struct udf_sb_info *sbi = UDF_SB(sb);
918 struct udf_part_map *map;
919 struct udf_meta_data *mdata;
920 struct kernel_lb_addr addr;
921
922 map = &sbi->s_partmaps[partition];
923 mdata = &map->s_type_specific.s_metadata;
924
925 /* metadata address */
926 udf_debug("Metadata file location: block = %d part = %d\n",
927 mdata->s_meta_file_loc, map->s_partition_num);
928
929 mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb,
930 mdata->s_meta_file_loc, map->s_partition_num);
931
932 if (mdata->s_metadata_fe == NULL) {
933 /* mirror file entry */
934 udf_debug("Mirror metadata file location: block = %d part = %d\n",
935 mdata->s_mirror_file_loc, map->s_partition_num);
936
937 mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
938 mdata->s_mirror_file_loc, map->s_partition_num);
939
940 if (mdata->s_mirror_fe == NULL) {
941 udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
942 goto error_exit;
943 }
944 }
945
946 /*
947 * bitmap file entry
948 * Note:
949 * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
950 */
951 if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
952 addr.logicalBlockNum = mdata->s_bitmap_file_loc;
953 addr.partitionReferenceNum = map->s_partition_num;
954
955 udf_debug("Bitmap file location: block = %d part = %d\n",
956 addr.logicalBlockNum, addr.partitionReferenceNum);
957
958 mdata->s_bitmap_fe = udf_iget(sb, &addr);
959
960 if (mdata->s_bitmap_fe == NULL) {
961 if (sb->s_flags & MS_RDONLY)
962 udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
963 else {
964 udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
965 goto error_exit;
966 }
967 }
968 }
969
970 udf_debug("udf_load_metadata_files Ok\n");
971
972 return 0;
973
974 error_exit:
975 return 1;
976 }
977
978 static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
979 struct kernel_lb_addr *root)
980 {
981 struct fileSetDesc *fset;
982
983 fset = (struct fileSetDesc *)bh->b_data;
984
985 *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
986
987 UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
988
989 udf_debug("Rootdir at block=%d, partition=%d\n",
990 root->logicalBlockNum, root->partitionReferenceNum);
991 }
992
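/*
 * The space bitmap needs one bit per partition block plus room for the
 * spaceBitmapDesc header, and each group covers one block worth of bits.
 * Illustrative example, assuming a 24-byte (192-bit) header: with 2048-byte
 * blocks (16384 bits each) and a 1000000-block partition this gives
 * DIV_ROUND_UP(1000000 + 192, 16384) = 62 groups.
 */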
993 int udf_compute_nr_groups(struct super_block *sb, u32 partition)
994 {
995 struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
996 return DIV_ROUND_UP(map->s_partition_len +
997 (sizeof(struct spaceBitmapDesc) << 3),
998 sb->s_blocksize * 8);
999 }
1000
1001 static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
1002 {
1003 struct udf_bitmap *bitmap;
1004 int nr_groups;
1005 int size;
1006
1007 nr_groups = udf_compute_nr_groups(sb, index);
1008 size = sizeof(struct udf_bitmap) +
1009 (sizeof(struct buffer_head *) * nr_groups);
1010
1011 if (size <= PAGE_SIZE)
1012 bitmap = kzalloc(size, GFP_KERNEL);
1013 else
1014 bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
1015
1016 if (bitmap == NULL)
1017 return NULL;
1018
1019 bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
1020 bitmap->s_nr_groups = nr_groups;
1021 return bitmap;
1022 }
1023
1024 static int udf_fill_partdesc_info(struct super_block *sb,
1025 struct partitionDesc *p, int p_index)
1026 {
1027 struct udf_part_map *map;
1028 struct udf_sb_info *sbi = UDF_SB(sb);
1029 struct partitionHeaderDesc *phd;
1030
1031 map = &sbi->s_partmaps[p_index];
1032
1033 map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
1034 map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
1035
1036 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
1037 map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
1038 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
1039 map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
1040 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1041 map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
1042 if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1043 map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
1044
1045 udf_debug("Partition (%d type %x) starts at physical %d, block length %d\n",
1046 p_index, map->s_partition_type,
1047 map->s_partition_root, map->s_partition_len);
1048
1049 if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
1050 strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
1051 return 0;
1052
1053 phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1054 if (phd->unallocSpaceTable.extLength) {
1055 struct kernel_lb_addr loc = {
1056 .logicalBlockNum = le32_to_cpu(
1057 phd->unallocSpaceTable.extPosition),
1058 .partitionReferenceNum = p_index,
1059 };
1060
1061 map->s_uspace.s_table = udf_iget(sb, &loc);
1062 if (!map->s_uspace.s_table) {
1063 udf_debug("cannot load unallocSpaceTable (part %d)\n",
1064 p_index);
1065 return 1;
1066 }
1067 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
1068 udf_debug("unallocSpaceTable (part %d) @ %ld\n",
1069 p_index, map->s_uspace.s_table->i_ino);
1070 }
1071
1072 if (phd->unallocSpaceBitmap.extLength) {
1073 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1074 if (!bitmap)
1075 return 1;
1076 map->s_uspace.s_bitmap = bitmap;
1077 bitmap->s_extLength = le32_to_cpu(
1078 phd->unallocSpaceBitmap.extLength);
1079 bitmap->s_extPosition = le32_to_cpu(
1080 phd->unallocSpaceBitmap.extPosition);
1081 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
1082 udf_debug("unallocSpaceBitmap (part %d) @ %d\n",
1083 p_index, bitmap->s_extPosition);
1084 }
1085
1086 if (phd->partitionIntegrityTable.extLength)
1087 udf_debug("partitionIntegrityTable (part %d)\n", p_index);
1088
1089 if (phd->freedSpaceTable.extLength) {
1090 struct kernel_lb_addr loc = {
1091 .logicalBlockNum = le32_to_cpu(
1092 phd->freedSpaceTable.extPosition),
1093 .partitionReferenceNum = p_index,
1094 };
1095
1096 map->s_fspace.s_table = udf_iget(sb, &loc);
1097 if (!map->s_fspace.s_table) {
1098 udf_debug("cannot load freedSpaceTable (part %d)\n",
1099 p_index);
1100 return 1;
1101 }
1102
1103 map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
1104 udf_debug("freedSpaceTable (part %d) @ %ld\n",
1105 p_index, map->s_fspace.s_table->i_ino);
1106 }
1107
1108 if (phd->freedSpaceBitmap.extLength) {
1109 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1110 if (!bitmap)
1111 return 1;
1112 map->s_fspace.s_bitmap = bitmap;
1113 bitmap->s_extLength = le32_to_cpu(
1114 phd->freedSpaceBitmap.extLength);
1115 bitmap->s_extPosition = le32_to_cpu(
1116 phd->freedSpaceBitmap.extPosition);
1117 map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
1118 udf_debug("freedSpaceBitmap (part %d) @ %d\n",
1119 p_index, bitmap->s_extPosition);
1120 }
1121 return 0;
1122 }
1123
1124 static void udf_find_vat_block(struct super_block *sb, int p_index,
1125 int type1_index, sector_t start_block)
1126 {
1127 struct udf_sb_info *sbi = UDF_SB(sb);
1128 struct udf_part_map *map = &sbi->s_partmaps[p_index];
1129 sector_t vat_block;
1130 struct kernel_lb_addr ino;
1131
1132 /*
1133 * VAT file entry is in the last recorded block. Some broken disks have
1134 * it a few blocks before, so try a bit harder...
1135 */
1136 ino.partitionReferenceNum = type1_index;
1137 for (vat_block = start_block;
1138 vat_block >= map->s_partition_root &&
1139 vat_block >= start_block - 3 &&
1140 !sbi->s_vat_inode; vat_block--) {
1141 ino.logicalBlockNum = vat_block - map->s_partition_root;
1142 sbi->s_vat_inode = udf_iget(sb, &ino);
1143 }
1144 }
1145
1146 static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1147 {
1148 struct udf_sb_info *sbi = UDF_SB(sb);
1149 struct udf_part_map *map = &sbi->s_partmaps[p_index];
1150 struct buffer_head *bh = NULL;
1151 struct udf_inode_info *vati;
1152 uint32_t pos;
1153 struct virtualAllocationTable20 *vat20;
1154 sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
1155
1156 udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
1157 if (!sbi->s_vat_inode &&
1158 sbi->s_last_block != blocks - 1) {
1159 pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
1160 (unsigned long)sbi->s_last_block,
1161 (unsigned long)blocks - 1);
1162 udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
1163 }
1164 if (!sbi->s_vat_inode)
1165 return 1;
1166
1167 if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1168 map->s_type_specific.s_virtual.s_start_offset = 0;
1169 map->s_type_specific.s_virtual.s_num_entries =
1170 (sbi->s_vat_inode->i_size - 36) >> 2;
1171 } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1172 vati = UDF_I(sbi->s_vat_inode);
1173 if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1174 pos = udf_block_map(sbi->s_vat_inode, 0);
1175 bh = sb_bread(sb, pos);
1176 if (!bh)
1177 return 1;
1178 vat20 = (struct virtualAllocationTable20 *)bh->b_data;
1179 } else {
1180 vat20 = (struct virtualAllocationTable20 *)
1181 vati->i_ext.i_data;
1182 }
1183
1184 map->s_type_specific.s_virtual.s_start_offset =
1185 le16_to_cpu(vat20->lengthHeader);
1186 map->s_type_specific.s_virtual.s_num_entries =
1187 (sbi->s_vat_inode->i_size -
1188 map->s_type_specific.s_virtual.
1189 s_start_offset) >> 2;
1190 brelse(bh);
1191 }
1192 return 0;
1193 }
1194
1195 static int udf_load_partdesc(struct super_block *sb, sector_t block)
1196 {
1197 struct buffer_head *bh;
1198 struct partitionDesc *p;
1199 struct udf_part_map *map;
1200 struct udf_sb_info *sbi = UDF_SB(sb);
1201 int i, type1_idx;
1202 uint16_t partitionNumber;
1203 uint16_t ident;
1204 int ret = 0;
1205
1206 bh = udf_read_tagged(sb, block, block, &ident);
1207 if (!bh)
1208 return 1;
1209 if (ident != TAG_IDENT_PD)
1210 goto out_bh;
1211
1212 p = (struct partitionDesc *)bh->b_data;
1213 partitionNumber = le16_to_cpu(p->partitionNumber);
1214
1215 /* First scan for TYPE1, SPARABLE and METADATA partitions */
1216 for (i = 0; i < sbi->s_partitions; i++) {
1217 map = &sbi->s_partmaps[i];
1218 udf_debug("Searching map: (%d == %d)\n",
1219 map->s_partition_num, partitionNumber);
1220 if (map->s_partition_num == partitionNumber &&
1221 (map->s_partition_type == UDF_TYPE1_MAP15 ||
1222 map->s_partition_type == UDF_SPARABLE_MAP15))
1223 break;
1224 }
1225
1226 if (i >= sbi->s_partitions) {
1227 udf_debug("Partition (%d) not found in partition map\n",
1228 partitionNumber);
1229 goto out_bh;
1230 }
1231
1232 ret = udf_fill_partdesc_info(sb, p, i);
1233
1234 /*
1235 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
1236 * PHYSICAL partitions are already set up
1237 */
1238 type1_idx = i;
1239 for (i = 0; i < sbi->s_partitions; i++) {
1240 map = &sbi->s_partmaps[i];
1241
1242 if (map->s_partition_num == partitionNumber &&
1243 (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1244 map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1245 map->s_partition_type == UDF_METADATA_MAP25))
1246 break;
1247 }
1248
1249 if (i >= sbi->s_partitions)
1250 goto out_bh;
1251
1252 ret = udf_fill_partdesc_info(sb, p, i);
1253 if (ret)
1254 goto out_bh;
1255
1256 if (map->s_partition_type == UDF_METADATA_MAP25) {
1257 ret = udf_load_metadata_files(sb, i);
1258 if (ret) {
1259 udf_err(sb, "error loading MetaData partition map %d\n",
1260 i);
1261 goto out_bh;
1262 }
1263 } else {
1264 ret = udf_load_vat(sb, i, type1_idx);
1265 if (ret)
1266 goto out_bh;
1267 /*
1268 * Mark filesystem read-only if we have a partition with
1269 * virtual map since we don't handle writing to it (we
1270 * overwrite blocks instead of relocating them).
1271 */
1272 sb->s_flags |= MS_RDONLY;
1273 pr_notice("Filesystem marked read-only because writing to pseudooverwrite partition is not implemented\n");
1274 }
1275 out_bh:
1276 /* In case loading failed, we handle cleanup in udf_fill_super */
1277 brelse(bh);
1278 return ret;
1279 }
1280
1281 static int udf_load_sparable_map(struct super_block *sb,
1282 struct udf_part_map *map,
1283 struct sparablePartitionMap *spm)
1284 {
1285 uint32_t loc;
1286 uint16_t ident;
1287 struct sparingTable *st;
1288 struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
1289 int i;
1290 struct buffer_head *bh;
1291
1292 map->s_partition_type = UDF_SPARABLE_MAP15;
1293 sdata->s_packet_len = le16_to_cpu(spm->packetLength);
1294 if (!is_power_of_2(sdata->s_packet_len)) {
1295 udf_err(sb, "error loading logical volume descriptor: "
1296 "Invalid packet length %u\n",
1297 (unsigned)sdata->s_packet_len);
1298 return -EIO;
1299 }
1300 if (spm->numSparingTables > 4) {
1301 udf_err(sb, "error loading logical volume descriptor: "
1302 "Too many sparing tables (%d)\n",
1303 (int)spm->numSparingTables);
1304 return -EIO;
1305 }
1306
1307 for (i = 0; i < spm->numSparingTables; i++) {
1308 loc = le32_to_cpu(spm->locSparingTable[i]);
1309 bh = udf_read_tagged(sb, loc, loc, &ident);
1310 if (!bh)
1311 continue;
1312
1313 st = (struct sparingTable *)bh->b_data;
1314 if (ident != 0 ||
1315 strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
1316 strlen(UDF_ID_SPARING)) ||
1317 sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
1318 sb->s_blocksize) {
1319 brelse(bh);
1320 continue;
1321 }
1322
1323 sdata->s_spar_map[i] = bh;
1324 }
1325 map->s_partition_func = udf_get_pblock_spar15;
1326 return 0;
1327 }
1328
1329 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1330 struct kernel_lb_addr *fileset)
1331 {
1332 struct logicalVolDesc *lvd;
1333 int i, offset;
1334 uint8_t type;
1335 struct udf_sb_info *sbi = UDF_SB(sb);
1336 struct genericPartitionMap *gpm;
1337 uint16_t ident;
1338 struct buffer_head *bh;
1339 unsigned int table_len;
1340 int ret = 0;
1341
1342 bh = udf_read_tagged(sb, block, block, &ident);
1343 if (!bh)
1344 return 1;
1345 BUG_ON(ident != TAG_IDENT_LVD);
1346 lvd = (struct logicalVolDesc *)bh->b_data;
1347 table_len = le32_to_cpu(lvd->mapTableLength);
1348 if (table_len > sb->s_blocksize - sizeof(*lvd)) {
1349 udf_err(sb, "error loading logical volume descriptor: "
1350 "Partition table too long (%u > %lu)\n", table_len,
1351 sb->s_blocksize - sizeof(*lvd));
1352 ret = 1;
1353 goto out_bh;
1354 }
1355
1356 ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1357 if (ret)
1358 goto out_bh;
1359
1360 for (i = 0, offset = 0;
1361 i < sbi->s_partitions && offset < table_len;
1362 i++, offset += gpm->partitionMapLength) {
1363 struct udf_part_map *map = &sbi->s_partmaps[i];
1364 gpm = (struct genericPartitionMap *)
1365 &(lvd->partitionMaps[offset]);
1366 type = gpm->partitionMapType;
1367 if (type == 1) {
1368 struct genericPartitionMap1 *gpm1 =
1369 (struct genericPartitionMap1 *)gpm;
1370 map->s_partition_type = UDF_TYPE1_MAP15;
1371 map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
1372 map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
1373 map->s_partition_func = NULL;
1374 } else if (type == 2) {
1375 struct udfPartitionMap2 *upm2 =
1376 (struct udfPartitionMap2 *)gpm;
1377 if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
1378 strlen(UDF_ID_VIRTUAL))) {
1379 u16 suf =
1380 le16_to_cpu(((__le16 *)upm2->partIdent.
1381 identSuffix)[0]);
1382 if (suf < 0x0200) {
1383 map->s_partition_type =
1384 UDF_VIRTUAL_MAP15;
1385 map->s_partition_func =
1386 udf_get_pblock_virt15;
1387 } else {
1388 map->s_partition_type =
1389 UDF_VIRTUAL_MAP20;
1390 map->s_partition_func =
1391 udf_get_pblock_virt20;
1392 }
1393 } else if (!strncmp(upm2->partIdent.ident,
1394 UDF_ID_SPARABLE,
1395 strlen(UDF_ID_SPARABLE))) {
1396 if (udf_load_sparable_map(sb, map,
1397 (struct sparablePartitionMap *)gpm) < 0) {
1398 ret = 1;
1399 goto out_bh;
1400 }
1401 } else if (!strncmp(upm2->partIdent.ident,
1402 UDF_ID_METADATA,
1403 strlen(UDF_ID_METADATA))) {
1404 struct udf_meta_data *mdata =
1405 &map->s_type_specific.s_metadata;
1406 struct metadataPartitionMap *mdm =
1407 (struct metadataPartitionMap *)
1408 &(lvd->partitionMaps[offset]);
1409 udf_debug("Parsing Logical vol part %d type %d id=%s\n",
1410 i, type, UDF_ID_METADATA);
1411
1412 map->s_partition_type = UDF_METADATA_MAP25;
1413 map->s_partition_func = udf_get_pblock_meta25;
1414
1415 mdata->s_meta_file_loc =
1416 le32_to_cpu(mdm->metadataFileLoc);
1417 mdata->s_mirror_file_loc =
1418 le32_to_cpu(mdm->metadataMirrorFileLoc);
1419 mdata->s_bitmap_file_loc =
1420 le32_to_cpu(mdm->metadataBitmapFileLoc);
1421 mdata->s_alloc_unit_size =
1422 le32_to_cpu(mdm->allocUnitSize);
1423 mdata->s_align_unit_size =
1424 le16_to_cpu(mdm->alignUnitSize);
1425 if (mdm->flags & 0x01)
1426 mdata->s_flags |= MF_DUPLICATE_MD;
1427
1428 udf_debug("Metadata Ident suffix=0x%x\n",
1429 le16_to_cpu(*(__le16 *)
1430 mdm->partIdent.identSuffix));
1431 udf_debug("Metadata part num=%d\n",
1432 le16_to_cpu(mdm->partitionNum));
1433 udf_debug("Metadata part alloc unit size=%d\n",
1434 le32_to_cpu(mdm->allocUnitSize));
1435 udf_debug("Metadata file loc=%d\n",
1436 le32_to_cpu(mdm->metadataFileLoc));
1437 udf_debug("Mirror file loc=%d\n",
1438 le32_to_cpu(mdm->metadataMirrorFileLoc));
1439 udf_debug("Bitmap file loc=%d\n",
1440 le32_to_cpu(mdm->metadataBitmapFileLoc));
1441 udf_debug("Flags: %d %d\n",
1442 mdata->s_flags, mdm->flags);
1443 } else {
1444 udf_debug("Unknown ident: %s\n",
1445 upm2->partIdent.ident);
1446 continue;
1447 }
1448 map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
1449 map->s_partition_num = le16_to_cpu(upm2->partitionNum);
1450 }
1451 udf_debug("Partition (%d:%d) type %d on volume %d\n",
1452 i, map->s_partition_num, type, map->s_volumeseqnum);
1453 }
1454
1455 if (fileset) {
1456 struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
1457
1458 *fileset = lelb_to_cpu(la->extLocation);
1459 udf_debug("FileSet found in LogicalVolDesc at block=%d, partition=%d\n",
1460 fileset->logicalBlockNum,
1461 fileset->partitionReferenceNum);
1462 }
1463 if (lvd->integritySeqExt.extLength)
1464 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1465
1466 out_bh:
1467 brelse(bh);
1468 return ret;
1469 }
1470
1471 /*
1472 * udf_load_logicalvolint
1473 *
1474 */
1475 static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
1476 {
1477 struct buffer_head *bh = NULL;
1478 uint16_t ident;
1479 struct udf_sb_info *sbi = UDF_SB(sb);
1480 struct logicalVolIntegrityDesc *lvid;
1481
1482 while (loc.extLength > 0 &&
1483 (bh = udf_read_tagged(sb, loc.extLocation,
1484 loc.extLocation, &ident)) &&
1485 ident == TAG_IDENT_LVID) {
1486 sbi->s_lvid_bh = bh;
1487 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1488
1489 if (lvid->nextIntegrityExt.extLength)
1490 udf_load_logicalvolint(sb,
1491 leea_to_cpu(lvid->nextIntegrityExt));
1492
1493 if (sbi->s_lvid_bh != bh)
1494 brelse(bh);
1495 loc.extLength -= sb->s_blocksize;
1496 loc.extLocation++;
1497 }
1498 if (sbi->s_lvid_bh != bh)
1499 brelse(bh);
1500 }
1501
1502 /*
1503 * udf_process_sequence
1504 *
1505 * PURPOSE
1506 * Process a main/reserve volume descriptor sequence.
1507 *
1508 * PRE-CONDITIONS
1509 * sb Pointer to _locked_ superblock.
1510 * block First block of first extent of the sequence.
1511 * lastblock Lastblock of first extent of the sequence.
1512 *
1513 * HISTORY
1514 * July 1, 1997 - Andrew E. Mileski
1515 * Written, tested, and released.
1516 */
1517 static noinline int udf_process_sequence(struct super_block *sb, long block,
1518 long lastblock, struct kernel_lb_addr *fileset)
1519 {
1520 struct buffer_head *bh = NULL;
1521 struct udf_vds_record vds[VDS_POS_LENGTH];
1522 struct udf_vds_record *curr;
1523 struct generic_desc *gd;
1524 struct volDescPtr *vdp;
1525 int done = 0;
1526 uint32_t vdsn;
1527 uint16_t ident;
1528 long next_s = 0, next_e = 0;
1529
1530 memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1531
1532 /*
1533 * Read the main descriptor sequence and find which descriptors
1534 * are in it.
1535 */
1536 for (; (!done && block <= lastblock); block++) {
1537
1538 bh = udf_read_tagged(sb, block, block, &ident);
1539 if (!bh) {
1540 udf_err(sb,
1541 "Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
1542 (unsigned long long)block);
1543 return 1;
1544 }
1545
1546 /* Process each descriptor (ISO 13346 3/8.3-8.4) */
1547 gd = (struct generic_desc *)bh->b_data;
1548 vdsn = le32_to_cpu(gd->volDescSeqNum);
1549 switch (ident) {
1550 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1551 curr = &vds[VDS_POS_PRIMARY_VOL_DESC];
1552 if (vdsn >= curr->volDescSeqNum) {
1553 curr->volDescSeqNum = vdsn;
1554 curr->block = block;
1555 }
1556 break;
1557 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1558 curr = &vds[VDS_POS_VOL_DESC_PTR];
1559 if (vdsn >= curr->volDescSeqNum) {
1560 curr->volDescSeqNum = vdsn;
1561 curr->block = block;
1562
1563 vdp = (struct volDescPtr *)bh->b_data;
1564 next_s = le32_to_cpu(
1565 vdp->nextVolDescSeqExt.extLocation);
1566 next_e = le32_to_cpu(
1567 vdp->nextVolDescSeqExt.extLength);
1568 next_e = next_e >> sb->s_blocksize_bits;
1569 next_e += next_s;
1570 }
1571 break;
1572 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1573 curr = &vds[VDS_POS_IMP_USE_VOL_DESC];
1574 if (vdsn >= curr->volDescSeqNum) {
1575 curr->volDescSeqNum = vdsn;
1576 curr->block = block;
1577 }
1578 break;
1579 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1580 curr = &vds[VDS_POS_PARTITION_DESC];
1581 if (!curr->block)
1582 curr->block = block;
1583 break;
1584 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1585 curr = &vds[VDS_POS_LOGICAL_VOL_DESC];
1586 if (vdsn >= curr->volDescSeqNum) {
1587 curr->volDescSeqNum = vdsn;
1588 curr->block = block;
1589 }
1590 break;
1591 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1592 curr = &vds[VDS_POS_UNALLOC_SPACE_DESC];
1593 if (vdsn >= curr->volDescSeqNum) {
1594 curr->volDescSeqNum = vdsn;
1595 curr->block = block;
1596 }
1597 break;
1598 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1599 vds[VDS_POS_TERMINATING_DESC].block = block;
1600 if (next_e) {
1601 block = next_s;
1602 lastblock = next_e;
1603 next_s = next_e = 0;
1604 } else
1605 done = 1;
1606 break;
1607 }
1608 brelse(bh);
1609 }
1610 /*
1611 * Now read interesting descriptors again and process them
1612 * in a suitable order
1613 */
1614 if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1615 udf_err(sb, "Primary Volume Descriptor not found!\n");
1616 return 1;
1617 }
1618 if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
1619 return 1;
1620
1621 if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
1622 vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
1623 return 1;
1624
1625 if (vds[VDS_POS_PARTITION_DESC].block) {
1626 /*
1627 * We rescan the whole descriptor sequence to find
1628 * partition descriptor blocks and process them.
1629 */
1630 for (block = vds[VDS_POS_PARTITION_DESC].block;
1631 block < vds[VDS_POS_TERMINATING_DESC].block;
1632 block++)
1633 if (udf_load_partdesc(sb, block))
1634 return 1;
1635 }
1636
1637 return 0;
1638 }
1639
1640 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1641 struct kernel_lb_addr *fileset)
1642 {
1643 struct anchorVolDescPtr *anchor;
1644 long main_s, main_e, reserve_s, reserve_e;
1645
1646 anchor = (struct anchorVolDescPtr *)bh->b_data;
1647
1648 /* Locate the main sequence */
1649 main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1650 main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1651 main_e = main_e >> sb->s_blocksize_bits;
1652 main_e += main_s;
1653
1654 /* Locate the reserve sequence */
1655 reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1656 reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1657 reserve_e = reserve_e >> sb->s_blocksize_bits;
1658 reserve_e += reserve_s;
1659
1660 /* Process the main & reserve sequences */
1661 /* responsible for finding the PartitionDesc(s) */
1662 if (!udf_process_sequence(sb, main_s, main_e, fileset))
1663 return 1;
1664 udf_sb_free_partitions(sb);
1665 if (!udf_process_sequence(sb, reserve_s, reserve_e, fileset))
1666 return 1;
1667 udf_sb_free_partitions(sb);
1668 return 0;
1669 }
1670
1671 /*
1672 * Check whether there is an anchor block in the given block and
1673 * load Volume Descriptor Sequence if so.
1674 */
1675 static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1676 struct kernel_lb_addr *fileset)
1677 {
1678 struct buffer_head *bh;
1679 uint16_t ident;
1680 int ret;
1681
1682 if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
1683 udf_fixed_to_variable(block) >=
1684 sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
1685 return 0;
1686
1687 bh = udf_read_tagged(sb, block, block, &ident);
1688 if (!bh)
1689 return 0;
1690 if (ident != TAG_IDENT_AVDP) {
1691 brelse(bh);
1692 return 0;
1693 }
1694 ret = udf_load_sequence(sb, bh, fileset);
1695 brelse(bh);
1696 return ret;
1697 }
1698
1699 /* Search for an anchor volume descriptor pointer */
1700 static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
1701 struct kernel_lb_addr *fileset)
1702 {
1703 sector_t last[6];
1704 int i;
1705 struct udf_sb_info *sbi = UDF_SB(sb);
1706 int last_count = 0;
1707
1708 /* First try user provided anchor */
1709 if (sbi->s_anchor) {
1710 if (udf_check_anchor_block(sb, sbi->s_anchor, fileset))
1711 return lastblock;
1712 }
1713 /*
1714 * according to spec, anchor is in either:
1715 * block 256
1716 * lastblock-256
1717 * lastblock
1718 * however, if the disc isn't closed, it could be 512.
1719 */
1720 if (udf_check_anchor_block(sb, sbi->s_session + 256, fileset))
1721 return lastblock;
1722 /*
1723 * The trouble is which block is the last one. Drives often misreport
1724 * this so we try various possibilities.
1725 */
1726 last[last_count++] = lastblock;
1727 if (lastblock >= 1)
1728 last[last_count++] = lastblock - 1;
1729 last[last_count++] = lastblock + 1;
1730 if (lastblock >= 2)
1731 last[last_count++] = lastblock - 2;
1732 if (lastblock >= 150)
1733 last[last_count++] = lastblock - 150;
1734 if (lastblock >= 152)
1735 last[last_count++] = lastblock - 152;
1736
1737 for (i = 0; i < last_count; i++) {
1738 if (last[i] >= sb->s_bdev->bd_inode->i_size >>
1739 sb->s_blocksize_bits)
1740 continue;
1741 if (udf_check_anchor_block(sb, last[i], fileset))
1742 return last[i];
1743 if (last[i] < 256)
1744 continue;
1745 if (udf_check_anchor_block(sb, last[i] - 256, fileset))
1746 return last[i];
1747 }
1748
1749 /* Finally try block 512 in case media is open */
1750 if (udf_check_anchor_block(sb, sbi->s_session + 512, fileset))
1751 return last[0];
1752 return 0;
1753 }
1754
1755 /*
1756 * Find an anchor volume descriptor and load Volume Descriptor Sequence from
1757 * area specified by it. The function expects sbi->s_last_block to be the last
1758 * block on the media.
1759 *
1760 * Return 1 if ok, 0 if not found.
1761 *
1762 */
1763 static int udf_find_anchor(struct super_block *sb,
1764 struct kernel_lb_addr *fileset)
1765 {
1766 sector_t lastblock;
1767 struct udf_sb_info *sbi = UDF_SB(sb);
1768
1769 lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
1770 if (lastblock)
1771 goto out;
1772
1773 /* No anchor found? Try VARCONV conversion of block numbers */
1774 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
1775 /* Firstly, we try to not convert number of the last block */
1776 lastblock = udf_scan_anchors(sb,
1777 udf_variable_to_fixed(sbi->s_last_block),
1778 fileset);
1779 if (lastblock)
1780 goto out;
1781
1782 /* Secondly, we try with converted number of the last block */
1783 lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
1784 if (!lastblock) {
1785 /* VARCONV didn't help. Clear it. */
1786 UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
1787 return 0;
1788 }
1789 out:
1790 sbi->s_last_block = lastblock;
1791 return 1;
1792 }
1793
1794 /*
1795 * Check Volume Structure Descriptor, find Anchor block and load Volume
1796 * Descriptor Sequence
1797 */
1798 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1799 int silent, struct kernel_lb_addr *fileset)
1800 {
1801 struct udf_sb_info *sbi = UDF_SB(sb);
1802 loff_t nsr_off;
1803
1804 if (!sb_set_blocksize(sb, uopt->blocksize)) {
1805 if (!silent)
1806 udf_warn(sb, "Bad block size\n");
1807 return 0;
1808 }
1809 sbi->s_last_block = uopt->lastblock;
1810 if (!uopt->novrs) {
1811 /* Check that it is NSR02 compliant */
1812 nsr_off = udf_check_vsd(sb);
1813 if (!nsr_off) {
1814 if (!silent)
1815 udf_warn(sb, "No VRS found\n");
1816 return 0;
1817 }
1818 if (nsr_off == -1)
1819 udf_debug("Failed to read byte 32768. Assuming open disc. Skipping validity check\n");
1820 if (!sbi->s_last_block)
1821 sbi->s_last_block = udf_get_last_block(sb);
1822 } else {
1823 udf_debug("Validity check skipped because of novrs option\n");
1824 }
1825
1826 /* Look for anchor block and load Volume Descriptor Sequence */
1827 sbi->s_anchor = uopt->anchor;
1828 if (!udf_find_anchor(sb, fileset)) {
1829 if (!silent)
1830 udf_warn(sb, "No anchor found\n");
1831 return 0;
1832 }
1833 return 1;
1834 }
1835
1836 static void udf_open_lvid(struct super_block *sb)
1837 {
1838 struct udf_sb_info *sbi = UDF_SB(sb);
1839 struct buffer_head *bh = sbi->s_lvid_bh;
1840 struct logicalVolIntegrityDesc *lvid;
1841 struct logicalVolIntegrityDescImpUse *lvidiu;
1842
1843 if (!bh)
1844 return;
1845
1846 mutex_lock(&sbi->s_alloc_mutex);
1847 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1848 lvidiu = udf_sb_lvidiu(sbi);
1849
1850 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1851 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1852 udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
1853 CURRENT_TIME);
1854 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
1855
1856 lvid->descTag.descCRC = cpu_to_le16(
1857 crc_itu_t(0, (char *)lvid + sizeof(struct tag),
1858 le16_to_cpu(lvid->descTag.descCRCLength)));
1859
1860 lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1861 mark_buffer_dirty(bh);
1862 sbi->s_lvid_dirty = 0;
1863 mutex_unlock(&sbi->s_alloc_mutex);
1864 }
1865
1866 static void udf_close_lvid(struct super_block *sb)
1867 {
1868 struct udf_sb_info *sbi = UDF_SB(sb);
1869 struct buffer_head *bh = sbi->s_lvid_bh;
1870 struct logicalVolIntegrityDesc *lvid;
1871 struct logicalVolIntegrityDescImpUse *lvidiu;
1872
1873 if (!bh)
1874 return;
1875
1876 mutex_lock(&sbi->s_alloc_mutex);
1877 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1878 lvidiu = udf_sb_lvidiu(sbi);
1879 lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1880 lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1881 udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
1882 if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
1883 lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
1884 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
1885 lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
1886 if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
1887 lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
1888 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
1889
1890 lvid->descTag.descCRC = cpu_to_le16(
1891 crc_itu_t(0, (char *)lvid + sizeof(struct tag),
1892 le16_to_cpu(lvid->descTag.descCRCLength)));
1893
1894 lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
1895 /*
1896 * Set the buffer uptodate unconditionally here to avoid spurious
1897 * warnings from mark_buffer_dirty() when a previous EIO has left
1898 * the buffer marked !uptodate.
1899 */
1900 set_buffer_uptodate(bh);
1901 mark_buffer_dirty(bh);
1902 sbi->s_lvid_dirty = 0;
1903 mutex_unlock(&sbi->s_alloc_mutex);
1904 }
1905
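/*
 * Hand out the next uniqueID from the Logical Volume Header Descriptor
 * embedded in the LVID.  The current on-disc value is returned and the
 * stored counter is advanced; when its low 32 bits wrap to zero the next
 * 16 values are skipped.
 */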
1906 u64 lvid_get_unique_id(struct super_block *sb)
1907 {
1908 struct buffer_head *bh;
1909 struct udf_sb_info *sbi = UDF_SB(sb);
1910 struct logicalVolIntegrityDesc *lvid;
1911 struct logicalVolHeaderDesc *lvhd;
1912 u64 uniqueID;
1913 u64 ret;
1914
1915 bh = sbi->s_lvid_bh;
1916 if (!bh)
1917 return 0;
1918
1919 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
1920 lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
1921
1922 mutex_lock(&sbi->s_alloc_mutex);
1923 ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
1924 if (!(++uniqueID & 0xFFFFFFFF))
1925 uniqueID += 16;
1926 lvhd->uniqueID = cpu_to_le64(uniqueID);
1927 mutex_unlock(&sbi->s_alloc_mutex);
1928 mark_buffer_dirty(bh);
1929
1930 return ret;
1931 }
1932
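/*
 * Fill in the superblock at mount time: parse mount options, locate the
 * Volume Recognition Sequence and anchor block, load the Volume
 * Descriptor Sequence and partition maps, check the UDF revision, find
 * the fileset and finally read the root directory inode.
 */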
1933 static int udf_fill_super(struct super_block *sb, void *options, int silent)
1934 {
1935 int ret;
1936 struct inode *inode = NULL;
1937 struct udf_options uopt;
1938 struct kernel_lb_addr rootdir, fileset;
1939 struct udf_sb_info *sbi;
1940
1941 uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
1942 uopt.uid = -1;
1943 uopt.gid = -1;
1944 uopt.umask = 0;
1945 uopt.fmode = UDF_INVALID_MODE;
1946 uopt.dmode = UDF_INVALID_MODE;
1947
1948 sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
1949 if (!sbi)
1950 return -ENOMEM;
1951
1952 sb->s_fs_info = sbi;
1953
1954 mutex_init(&sbi->s_alloc_mutex);
1955
1956 if (!udf_parse_options((char *)options, &uopt, false))
1957 goto error_out;
1958
1959 if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
1960 uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
1961 udf_err(sb, "utf8 cannot be combined with iocharset\n");
1962 goto error_out;
1963 }
1964 #ifdef CONFIG_UDF_NLS
1965 if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
1966 uopt.nls_map = load_nls_default();
1967 if (!uopt.nls_map)
1968 uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
1969 else
1970 udf_debug("Using default NLS map\n");
1971 }
1972 #endif
1973 if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
1974 uopt.flags |= (1 << UDF_FLAG_UTF8);
1975
1976 fileset.logicalBlockNum = 0xFFFFFFFF;
1977 fileset.partitionReferenceNum = 0xFFFF;
1978
1979 sbi->s_flags = uopt.flags;
1980 sbi->s_uid = uopt.uid;
1981 sbi->s_gid = uopt.gid;
1982 sbi->s_umask = uopt.umask;
1983 sbi->s_fmode = uopt.fmode;
1984 sbi->s_dmode = uopt.dmode;
1985 sbi->s_nls_map = uopt.nls_map;
1986 rwlock_init(&sbi->s_cred_lock);
1987
1988 if (uopt.session == 0xFFFFFFFF)
1989 sbi->s_session = udf_get_last_session(sb);
1990 else
1991 sbi->s_session = uopt.session;
1992
1993 udf_debug("Multi-session=%d\n", sbi->s_session);
1994
1995 /* Fill in the rest of the superblock */
1996 sb->s_op = &udf_sb_ops;
1997 sb->s_export_op = &udf_export_ops;
1998
1999 sb->s_magic = UDF_SUPER_MAGIC;
2000 sb->s_time_gran = 1000;
2001
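/*
 * Probe for a valid Volume Recognition Sequence.  If the user forced a
 * block size, try only that one; otherwise start with the device's
 * logical block size and retry with the UDF default of 2048 bytes if
 * nothing is found there.
 */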
2002 if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
2003 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2004 } else {
2005 uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
2006 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2007 if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
2008 if (!silent)
2009 pr_notice("Rescanning with blocksize %d\n",
2010 UDF_DEFAULT_BLOCKSIZE);
2011 brelse(sbi->s_lvid_bh);
2012 sbi->s_lvid_bh = NULL;
2013 uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
2014 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2015 }
2016 }
2017 if (!ret) {
2018 udf_warn(sb, "No partition found (1)\n");
2019 goto error_out;
2020 }
2021
2022 udf_debug("Lastblock=%d\n", sbi->s_last_block);
2023
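/*
 * If an LVID was found, enforce the UDF revision limits: refuse to mount
 * when the media requires a newer read revision than we support, and fall
 * back to a read-only mount when only the required write revision is too
 * new.
 */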
2024 if (sbi->s_lvid_bh) {
2025 struct logicalVolIntegrityDescImpUse *lvidiu =
2026 udf_sb_lvidiu(sbi);
2027 uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2028 uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2029 /* uint16_t maxUDFWriteRev =
2030 le16_to_cpu(lvidiu->maxUDFWriteRev); */
2031
2032 if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2033 udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2034 le16_to_cpu(lvidiu->minUDFReadRev),
2035 UDF_MAX_READ_VERSION);
2036 goto error_out;
2037 } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
2038 sb->s_flags |= MS_RDONLY;
2039
2040 sbi->s_udfrev = minUDFWriteRev;
2041
2042 if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
2043 UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
2044 if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
2045 UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
2046 }
2047
2048 if (!sbi->s_partitions) {
2049 udf_warn(sb, "No partition found (2)\n");
2050 goto error_out;
2051 }
2052
2053 if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
2054 UDF_PART_FLAG_READ_ONLY) {
2055 pr_notice("Partition marked readonly; forcing readonly mount\n");
2056 sb->s_flags |= MS_RDONLY;
2057 }
2058
2059 if (udf_find_fileset(sb, &fileset, &rootdir)) {
2060 udf_warn(sb, "No fileset found\n");
2061 goto error_out;
2062 }
2063
2064 if (!silent) {
2065 struct timestamp ts;
2066 udf_time_to_disk_stamp(&ts, sbi->s_record_time);
2067 udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
2068 sbi->s_volume_ident,
2069 le16_to_cpu(ts.year), ts.month, ts.day,
2070 ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
2071 }
2072 if (!(sb->s_flags & MS_RDONLY))
2073 udf_open_lvid(sb);
2074
2075 /* Assign the root inode. */
2076 /* Inodes are addressed by physical block number; this may not be */
2077 /* the most extensible scheme, but it is sufficient for now. */
2078 inode = udf_iget(sb, &rootdir);
2079 if (!inode) {
2080 udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
2081 rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
2082 goto error_out;
2083 }
2084
2085 /* Allocate a dentry for the root inode */
2086 sb->s_root = d_make_root(inode);
2087 if (!sb->s_root) {
2088 udf_err(sb, "Couldn't allocate root dentry\n");
2089 goto error_out;
2090 }
2091 sb->s_maxbytes = MAX_LFS_FILESIZE;
2092 sb->s_max_links = UDF_MAX_LINKS;
2093 return 0;
2094
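/*
 * Error path: release whatever was set up so far, largely mirroring
 * udf_put_super().
 */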
2095 error_out:
2096 if (sbi->s_vat_inode)
2097 iput(sbi->s_vat_inode);
2098 #ifdef CONFIG_UDF_NLS
2099 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
2100 unload_nls(sbi->s_nls_map);
2101 #endif
2102 if (!(sb->s_flags & MS_RDONLY))
2103 udf_close_lvid(sb);
2104 brelse(sbi->s_lvid_bh);
2105 udf_sb_free_partitions(sb);
2106 kfree(sbi);
2107 sb->s_fs_info = NULL;
2108
2109 return -EINVAL;
2110 }
2111
2112 void _udf_err(struct super_block *sb, const char *function,
2113 const char *fmt, ...)
2114 {
2115 struct va_format vaf;
2116 va_list args;
2117
2118 va_start(args, fmt);
2119
2120 vaf.fmt = fmt;
2121 vaf.va = &args;
2122
2123 pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
2124
2125 va_end(args);
2126 }
2127
2128 void _udf_warn(struct super_block *sb, const char *function,
2129 const char *fmt, ...)
2130 {
2131 struct va_format vaf;
2132 va_list args;
2133
2134 va_start(args, fmt);
2135
2136 vaf.fmt = fmt;
2137 vaf.va = &args;
2138
2139 pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
2140
2141 va_end(args);
2142 }
2143
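/*
 * Release everything acquired in udf_fill_super(): the VAT inode, the NLS
 * mapping, the LVID buffer and the partition maps.  On a read-write mount
 * the LVID is closed first so that the volume is recorded as cleanly
 * unmounted.
 */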
2144 static void udf_put_super(struct super_block *sb)
2145 {
2146 struct udf_sb_info *sbi;
2147
2148 sbi = UDF_SB(sb);
2149
2150 if (sbi->s_vat_inode)
2151 iput(sbi->s_vat_inode);
2152 #ifdef CONFIG_UDF_NLS
2153 if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
2154 unload_nls(sbi->s_nls_map);
2155 #endif
2156 if (!(sb->s_flags & MS_RDONLY))
2157 udf_close_lvid(sb);
2158 brelse(sbi->s_lvid_bh);
2159 udf_sb_free_partitions(sb);
2160 kfree(sb->s_fs_info);
2161 sb->s_fs_info = NULL;
2162 }
2163
2164 static int udf_sync_fs(struct super_block *sb, int wait)
2165 {
2166 struct udf_sb_info *sbi = UDF_SB(sb);
2167
2168 mutex_lock(&sbi->s_alloc_mutex);
2169 if (sbi->s_lvid_dirty) {
2170 /*
2171 * The block device will be synced later, so we do not have to
2172 * submit the buffer for I/O here.
2173 */
2174 mark_buffer_dirty(sbi->s_lvid_bh);
2175 sbi->s_lvid_dirty = 0;
2176 }
2177 mutex_unlock(&sbi->s_alloc_mutex);
2178
2179 return 0;
2180 }
2181
2182 static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2183 {
2184 struct super_block *sb = dentry->d_sb;
2185 struct udf_sb_info *sbi = UDF_SB(sb);
2186 struct logicalVolIntegrityDescImpUse *lvidiu;
2187 u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2188
2189 if (sbi->s_lvid_bh != NULL)
2190 lvidiu = udf_sb_lvidiu(sbi);
2191 else
2192 lvidiu = NULL;
2193
2194 buf->f_type = UDF_SUPER_MAGIC;
2195 buf->f_bsize = sb->s_blocksize;
2196 buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
2197 buf->f_bfree = udf_count_free(sb);
2198 buf->f_bavail = buf->f_bfree;
2199 buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
2200 le32_to_cpu(lvidiu->numDirs)) : 0)
2201 + buf->f_bfree;
2202 buf->f_ffree = buf->f_bfree;
2203 buf->f_namelen = UDF_NAME_LEN - 2;
2204 buf->f_fsid.val[0] = (u32)id;
2205 buf->f_fsid.val[1] = (u32)(id >> 32);
2206
2207 return 0;
2208 }
2209
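/*
 * Count free blocks recorded in a space bitmap.  The Space Bitmap
 * Descriptor may span several blocks; each block is read in turn and its
 * set bits (free blocks) are summed with bitmap_weight().
 */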
2210 static unsigned int udf_count_free_bitmap(struct super_block *sb,
2211 struct udf_bitmap *bitmap)
2212 {
2213 struct buffer_head *bh = NULL;
2214 unsigned int accum = 0;
2215 int index;
2216 int block = 0, newblock;
2217 struct kernel_lb_addr loc;
2218 uint32_t bytes;
2219 uint8_t *ptr;
2220 uint16_t ident;
2221 struct spaceBitmapDesc *bm;
2222
2223 loc.logicalBlockNum = bitmap->s_extPosition;
2224 loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
2225 bh = udf_read_ptagged(sb, &loc, 0, &ident);
2226
2227 if (!bh) {
2228 udf_err(sb, "udf_count_free failed\n");
2229 goto out;
2230 } else if (ident != TAG_IDENT_SBD) {
2231 brelse(bh);
2232 udf_err(sb, "udf_count_free failed\n");
2233 goto out;
2234 }
2235
2236 bm = (struct spaceBitmapDesc *)bh->b_data;
2237 bytes = le32_to_cpu(bm->numOfBytes);
2238 index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
2239 ptr = (uint8_t *)bh->b_data;
2240
2241 while (bytes > 0) {
2242 u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
2243 accum += bitmap_weight((const unsigned long *)(ptr + index),
2244 cur_bytes * 8);
2245 bytes -= cur_bytes;
2246 if (bytes) {
2247 brelse(bh);
2248 newblock = udf_get_lb_pblock(sb, &loc, ++block);
2249 bh = udf_tread(sb, newblock);
2250 if (!bh) {
2251 udf_debug("read failed\n");
2252 goto out;
2253 }
2254 index = 0;
2255 ptr = (uint8_t *)bh->b_data;
2256 }
2257 }
2258 brelse(bh);
2259 out:
2260 return accum;
2261 }
2262
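/*
 * Count free blocks recorded in an unallocated/freed space table by
 * walking its allocation extents and summing their lengths in blocks.
 */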
2263 static unsigned int udf_count_free_table(struct super_block *sb,
2264 struct inode *table)
2265 {
2266 unsigned int accum = 0;
2267 uint32_t elen;
2268 struct kernel_lb_addr eloc;
2269 int8_t etype;
2270 struct extent_position epos;
2271
2272 mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
2273 epos.block = UDF_I(table)->i_location;
2274 epos.offset = sizeof(struct unallocSpaceEntry);
2275 epos.bh = NULL;
2276
2277 while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
2278 accum += (elen >> table->i_sb->s_blocksize_bits);
2279
2280 brelse(epos.bh);
2281 mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
2282
2283 return accum;
2284 }
2285
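/*
 * Return the number of free blocks on the current partition.  The cached
 * value in the LVID free-space table is preferred; if it is missing or
 * recorded as unknown (0xFFFFFFFF), the count is recomputed from the
 * space bitmaps or, failing that, from the space tables.
 */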
2286 static unsigned int udf_count_free(struct super_block *sb)
2287 {
2288 unsigned int accum = 0;
2289 struct udf_sb_info *sbi;
2290 struct udf_part_map *map;
2291
2292 sbi = UDF_SB(sb);
2293 if (sbi->s_lvid_bh) {
2294 struct logicalVolIntegrityDesc *lvid =
2295 (struct logicalVolIntegrityDesc *)
2296 sbi->s_lvid_bh->b_data;
2297 if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
2298 accum = le32_to_cpu(
2299 lvid->freeSpaceTable[sbi->s_partition]);
2300 if (accum == 0xFFFFFFFF)
2301 accum = 0;
2302 }
2303 }
2304
2305 if (accum)
2306 return accum;
2307
2308 map = &sbi->s_partmaps[sbi->s_partition];
2309 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
2310 accum += udf_count_free_bitmap(sb,
2311 map->s_uspace.s_bitmap);
2312 }
2313 if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
2314 accum += udf_count_free_bitmap(sb,
2315 map->s_fspace.s_bitmap);
2316 }
2317 if (accum)
2318 return accum;
2319
2320 if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
2321 accum += udf_count_free_table(sb,
2322 map->s_uspace.s_table);
2323 }
2324 if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
2325 accum += udf_count_free_table(sb,
2326 map->s_fspace.s_table);
2327 }
2328
2329 return accum;
2330 }