fs/udf/super.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * super.c
4  *
5  * PURPOSE
6  *  Super block routines for the OSTA-UDF(tm) filesystem.
7  *
8  * DESCRIPTION
9  *  OSTA-UDF(tm) = Optical Storage Technology Association
10  *  Universal Disk Format.
11  *
12  *  This code is based on version 2.00 of the UDF specification,
13  *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
14  *    http://www.osta.org/
15  *    https://www.ecma.ch/
16  *    https://www.iso.org/
17  *
18  * COPYRIGHT
19  *  (C) 1998 Dave Boynton
20  *  (C) 1998-2004 Ben Fennema
21  *  (C) 2000 Stelias Computing Inc
22  *
23  * HISTORY
24  *
25  *  09/24/98 dgb  changed to allow compiling outside of kernel, and
26  *                added some debugging.
27  *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
28  *  10/16/98      attempting some multi-session support
29  *  10/17/98      added freespace count for "df"
30  *  11/11/98 gr   added novrs option
31  *  11/26/98 dgb  added fileset,anchor mount options
32  *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced
33  *                vol descs. rewrote option handling based on isofs
34  *  12/20/98      find the free space bitmap (if it exists)
35  */
36
37 #include "udfdecl.h"
38
39 #include <linux/blkdev.h>
40 #include <linux/slab.h>
41 #include <linux/kernel.h>
42 #include <linux/module.h>
43 #include <linux/stat.h>
44 #include <linux/cdrom.h>
45 #include <linux/nls.h>
46 #include <linux/vfs.h>
47 #include <linux/vmalloc.h>
48 #include <linux/errno.h>
49 #include <linux/seq_file.h>
50 #include <linux/bitmap.h>
51 #include <linux/crc-itu-t.h>
52 #include <linux/log2.h>
53 #include <asm/byteorder.h>
54 #include <linux/iversion.h>
55 #include <linux/fs_context.h>
56 #include <linux/fs_parser.h>
57
58 #include "udf_sb.h"
59 #include "udf_i.h"
60
61 #include <linux/init.h>
62 #include <linux/uaccess.h>
63
64 enum {
65         VDS_POS_PRIMARY_VOL_DESC,
66         VDS_POS_UNALLOC_SPACE_DESC,
67         VDS_POS_LOGICAL_VOL_DESC,
68         VDS_POS_IMP_USE_VOL_DESC,
69         VDS_POS_LENGTH
70 };
71
72 #define VSD_FIRST_SECTOR_OFFSET         32768
73 #define VSD_MAX_SECTOR_OFFSET           0x800000
74
75 /*
76  * Maximum number of Terminating Descriptor / Logical Volume Integrity
77  * Descriptor redirections. The chosen numbers are arbitrary - just that we
78  * hopefully don't limit any real use of rewritten inodes on write-once media
79  * but avoid looping for too long on corrupted media.
80  */
81 #define UDF_MAX_TD_NESTING 64
82 #define UDF_MAX_LVID_NESTING 1000
83
84 enum { UDF_MAX_LINKS = 0xffff };
85 /*
86  * We limit filesize to 4TB. This is arbitrary as the on-disk format supports
87  * more, but because the file space is described by a linked list of extents,
88  * each of which can cover at most 1GB, the creation and handling of extents
89  * gets unusably slow beyond a certain point...
90  */
91 #define UDF_MAX_FILESIZE (1ULL << 42)
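/*
 * For scale: 1ULL << 42 bytes is 4 TiB, and with extents of at most ~1 GiB
 * each (see above) such a file already needs on the order of 4096 chained
 * extents to describe.
 */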
92
93 /* These are the "meat" - everything else is stuffing */
94 static int udf_fill_super(struct super_block *sb, struct fs_context *fc);
95 static void udf_put_super(struct super_block *);
96 static int udf_sync_fs(struct super_block *, int);
97 static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
98 static void udf_open_lvid(struct super_block *);
99 static void udf_close_lvid(struct super_block *);
100 static unsigned int udf_count_free(struct super_block *);
101 static int udf_statfs(struct dentry *, struct kstatfs *);
102 static int udf_show_options(struct seq_file *, struct dentry *);
103 static int udf_init_fs_context(struct fs_context *fc);
104 static int udf_parse_param(struct fs_context *fc, struct fs_parameter *param);
105 static int udf_reconfigure(struct fs_context *fc);
106 static void udf_free_fc(struct fs_context *fc);
107 static const struct fs_parameter_spec udf_param_spec[];
108
109 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
110 {
111         struct logicalVolIntegrityDesc *lvid;
112         unsigned int partnum;
113         unsigned int offset;
114
115         if (!UDF_SB(sb)->s_lvid_bh)
116                 return NULL;
117         lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
118         partnum = le32_to_cpu(lvid->numOfPartitions);
119         /* The offset is to skip freeSpaceTable and sizeTable arrays */
120         offset = partnum * 2 * sizeof(uint32_t);
121         return (struct logicalVolIntegrityDescImpUse *)
122                                         (((uint8_t *)(lvid + 1)) + offset);
123 }
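/*
 * The layout walked above is, roughly (all fields little endian):
 *
 *	struct logicalVolIntegrityDesc		<- lvid
 *	__le32 freeSpaceTable[numOfPartitions]
 *	__le32 sizeTable[numOfPartitions]
 *	implementation use area			<- returned pointer
 *
 * which is why the implementation use area sits partnum * 2 * sizeof(u32)
 * bytes past (lvid + 1).
 */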
124
125 /* UDF filesystem type */
126 static int udf_get_tree(struct fs_context *fc)
127 {
128         return get_tree_bdev(fc, udf_fill_super);
129 }
130
131 static const struct fs_context_operations udf_context_ops = {
132         .parse_param    = udf_parse_param,
133         .get_tree       = udf_get_tree,
134         .reconfigure    = udf_reconfigure,
135         .free           = udf_free_fc,
136 };
137
138 static struct file_system_type udf_fstype = {
139         .owner          = THIS_MODULE,
140         .name           = "udf",
141         .kill_sb        = kill_block_super,
142         .fs_flags       = FS_REQUIRES_DEV,
143         .init_fs_context = udf_init_fs_context,
144         .parameters     = udf_param_spec,
145 };
146 MODULE_ALIAS_FS("udf");
147
148 static struct kmem_cache *udf_inode_cachep;
149
150 static struct inode *udf_alloc_inode(struct super_block *sb)
151 {
152         struct udf_inode_info *ei;
153         ei = alloc_inode_sb(sb, udf_inode_cachep, GFP_KERNEL);
154         if (!ei)
155                 return NULL;
156
157         ei->i_unique = 0;
158         ei->i_lenExtents = 0;
159         ei->i_lenStreams = 0;
160         ei->i_next_alloc_block = 0;
161         ei->i_next_alloc_goal = 0;
162         ei->i_strat4096 = 0;
163         ei->i_streamdir = 0;
164         ei->i_hidden = 0;
165         init_rwsem(&ei->i_data_sem);
166         ei->cached_extent.lstart = -1;
167         spin_lock_init(&ei->i_extent_cache_lock);
168         inode_set_iversion(&ei->vfs_inode, 1);
169
170         return &ei->vfs_inode;
171 }
172
173 static void udf_free_in_core_inode(struct inode *inode)
174 {
175         kmem_cache_free(udf_inode_cachep, UDF_I(inode));
176 }
177
178 static void init_once(void *foo)
179 {
180         struct udf_inode_info *ei = foo;
181
182         ei->i_data = NULL;
183         inode_init_once(&ei->vfs_inode);
184 }
185
186 static int __init init_inodecache(void)
187 {
188         udf_inode_cachep = kmem_cache_create("udf_inode_cache",
189                                              sizeof(struct udf_inode_info),
190                                              0, (SLAB_RECLAIM_ACCOUNT |
191                                                  SLAB_ACCOUNT),
192                                              init_once);
193         if (!udf_inode_cachep)
194                 return -ENOMEM;
195         return 0;
196 }
197
198 static void destroy_inodecache(void)
199 {
200         /*
201          * Make sure all delayed rcu free inodes are flushed before we
202          * destroy cache.
203          */
204         rcu_barrier();
205         kmem_cache_destroy(udf_inode_cachep);
206 }
207
208 /* Superblock operations */
209 static const struct super_operations udf_sb_ops = {
210         .alloc_inode    = udf_alloc_inode,
211         .free_inode     = udf_free_in_core_inode,
212         .write_inode    = udf_write_inode,
213         .evict_inode    = udf_evict_inode,
214         .put_super      = udf_put_super,
215         .sync_fs        = udf_sync_fs,
216         .statfs         = udf_statfs,
217         .show_options   = udf_show_options,
218 };
219
220 struct udf_options {
221         unsigned int blocksize;
222         unsigned int session;
223         unsigned int lastblock;
224         unsigned int anchor;
225         unsigned int flags;
226         umode_t umask;
227         kgid_t gid;
228         kuid_t uid;
229         umode_t fmode;
230         umode_t dmode;
231         struct nls_table *nls_map;
232 };
233
234 /*
235  * UDF has historically preserved prior mount options across
236  * a remount, so copy those here if remounting, otherwise set
237  * initial mount defaults.
238  */
239 static void udf_init_options(struct fs_context *fc, struct udf_options *uopt)
240 {
241         if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
242                 struct super_block *sb = fc->root->d_sb;
243                 struct udf_sb_info *sbi = UDF_SB(sb);
244
245                 uopt->flags = sbi->s_flags;
246                 uopt->uid   = sbi->s_uid;
247                 uopt->gid   = sbi->s_gid;
248                 uopt->umask = sbi->s_umask;
249                 uopt->fmode = sbi->s_fmode;
250                 uopt->dmode = sbi->s_dmode;
251                 uopt->nls_map = NULL;
252         } else {
253                 uopt->flags = (1 << UDF_FLAG_USE_AD_IN_ICB) |
254                               (1 << UDF_FLAG_STRICT);
255                 /*
256                  * By default we'll use overflow[ug]id when UDF
257                  * inode [ug]id == -1
258                  */
259                 uopt->uid = make_kuid(current_user_ns(), overflowuid);
260                 uopt->gid = make_kgid(current_user_ns(), overflowgid);
261                 uopt->umask = 0;
262                 uopt->fmode = UDF_INVALID_MODE;
263                 uopt->dmode = UDF_INVALID_MODE;
264                 uopt->nls_map = NULL;
265                 uopt->session = 0xFFFFFFFF;
266         }
267 }
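/*
 * In practice this means that e.g. a filesystem mounted with "umask=022"
 * and later remounted with just "-o remount,ro" keeps umask=022: the
 * reconfigure branch above seeds the fresh udf_options from the live
 * superblock info rather than from the built-in defaults.
 */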
268
269 static int udf_init_fs_context(struct fs_context *fc)
270 {
271         struct udf_options *uopt;
272
273         uopt = kzalloc(sizeof(*uopt), GFP_KERNEL);
274         if (!uopt)
275                 return -ENOMEM;
276
277         udf_init_options(fc, uopt);
278
279         fc->fs_private = uopt;
280         fc->ops = &udf_context_ops;
281
282         return 0;
283 }
284
285 static void udf_free_fc(struct fs_context *fc)
286 {
287         struct udf_options *uopt = fc->fs_private;
288
289         unload_nls(uopt->nls_map);
290         kfree(fc->fs_private);
291 }
292
293 static int __init init_udf_fs(void)
294 {
295         int err;
296
297         err = init_inodecache();
298         if (err)
299                 goto out1;
300         err = register_filesystem(&udf_fstype);
301         if (err)
302                 goto out;
303
304         return 0;
305
306 out:
307         destroy_inodecache();
308
309 out1:
310         return err;
311 }
312
313 static void __exit exit_udf_fs(void)
314 {
315         unregister_filesystem(&udf_fstype);
316         destroy_inodecache();
317 }
318
319 static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
320 {
321         struct udf_sb_info *sbi = UDF_SB(sb);
322
323         sbi->s_partmaps = kcalloc(count, sizeof(*sbi->s_partmaps), GFP_KERNEL);
324         if (!sbi->s_partmaps) {
325                 sbi->s_partitions = 0;
326                 return -ENOMEM;
327         }
328
329         sbi->s_partitions = count;
330         return 0;
331 }
332
333 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
334 {
335         int i;
336         int nr_groups = bitmap->s_nr_groups;
337
338         for (i = 0; i < nr_groups; i++)
339                 brelse(bitmap->s_block_bitmap[i]);
340
341         kvfree(bitmap);
342 }
343
344 static void udf_free_partition(struct udf_part_map *map)
345 {
346         int i;
347         struct udf_meta_data *mdata;
348
349         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
350                 iput(map->s_uspace.s_table);
351         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
352                 udf_sb_free_bitmap(map->s_uspace.s_bitmap);
353         if (map->s_partition_type == UDF_SPARABLE_MAP15)
354                 for (i = 0; i < 4; i++)
355                         brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
356         else if (map->s_partition_type == UDF_METADATA_MAP25) {
357                 mdata = &map->s_type_specific.s_metadata;
358                 iput(mdata->s_metadata_fe);
359                 mdata->s_metadata_fe = NULL;
360
361                 iput(mdata->s_mirror_fe);
362                 mdata->s_mirror_fe = NULL;
363
364                 iput(mdata->s_bitmap_fe);
365                 mdata->s_bitmap_fe = NULL;
366         }
367 }
368
369 static void udf_sb_free_partitions(struct super_block *sb)
370 {
371         struct udf_sb_info *sbi = UDF_SB(sb);
372         int i;
373
374         if (!sbi->s_partmaps)
375                 return;
376         for (i = 0; i < sbi->s_partitions; i++)
377                 udf_free_partition(&sbi->s_partmaps[i]);
378         kfree(sbi->s_partmaps);
379         sbi->s_partmaps = NULL;
380 }
381
382 static int udf_show_options(struct seq_file *seq, struct dentry *root)
383 {
384         struct super_block *sb = root->d_sb;
385         struct udf_sb_info *sbi = UDF_SB(sb);
386
387         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
388                 seq_puts(seq, ",nostrict");
389         if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
390                 seq_printf(seq, ",bs=%lu", sb->s_blocksize);
391         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
392                 seq_puts(seq, ",unhide");
393         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
394                 seq_puts(seq, ",undelete");
395         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
396                 seq_puts(seq, ",noadinicb");
397         if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
398                 seq_puts(seq, ",shortad");
399         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
400                 seq_puts(seq, ",uid=forget");
401         if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
402                 seq_puts(seq, ",gid=forget");
403         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
404                 seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
405         if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
406                 seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
407         if (sbi->s_umask != 0)
408                 seq_printf(seq, ",umask=%ho", sbi->s_umask);
409         if (sbi->s_fmode != UDF_INVALID_MODE)
410                 seq_printf(seq, ",mode=%ho", sbi->s_fmode);
411         if (sbi->s_dmode != UDF_INVALID_MODE)
412                 seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
413         if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
414                 seq_printf(seq, ",session=%d", sbi->s_session);
415         if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
416                 seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
417         if (sbi->s_anchor != 0)
418                 seq_printf(seq, ",anchor=%u", sbi->s_anchor);
419         if (sbi->s_nls_map)
420                 seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
421         else
422                 seq_puts(seq, ",iocharset=utf8");
423
424         return 0;
425 }
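/*
 * As an illustration, a filesystem mounted with
 * "uid=1000,umask=022,iocharset=iso8859-1" shows up in /proc/mounts with
 * roughly ",uid=1000,umask=22,iocharset=iso8859-1" appended (umask is
 * printed in octal without a leading zero, and only explicitly set or
 * non-default options are emitted).
 */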
426
427 /*
428  * udf_parse_param
429  *
430  * PURPOSE
431  *      Parse mount options.
432  *
433  * DESCRIPTION
434  *      The following mount options are supported:
435  *
436  *      gid=            Set the default group.
437  *      umask=          Set the default umask.
438  *      mode=           Set the default file permissions.
439  *      dmode=          Set the default directory permissions.
440  *      uid=            Set the default user.
441  *      bs=             Set the block size.
442  *      unhide          Show otherwise hidden files.
443  *      undelete        Show deleted files in lists.
444  *      adinicb         Embed data in the inode (default)
445  *      noadinicb       Don't embed data in the inode
446  *      shortad         Use short ad's
447  *      longad          Use long ad's (default)
448  *      nostrict        Unset strict conformance
449  *      iocharset=      Set the NLS character set
450  *
451  *      The remaining are for debugging and disaster recovery:
452  *
453  *      novrs           Skip the volume recognition sequence
454  *
455  *      The following expect an offset from 0.
456  *
457  *      session=        Set the CDROM session (default= last session)
458  *      anchor=         Override standard anchor location. (default= 256)
459  *      volume=         Override the VolumeDesc location. (unused)
460  *      partition=      Override the PartitionDesc location. (unused)
461  *      lastblock=      Set the last block of the filesystem.
462  *
463  *      The following expect an offset from the partition root.
464  *
465  *      fileset=        Override the fileset block location. (unused)
466  *      rootdir=        Override the root directory location. (unused)
467  *              WARNING: overriding the rootdir to a non-directory may
468  *              yield highly unpredictable results.
469  *
470  * PRE-CONDITIONS
471  *      fc              fs_context with pointer to mount options variable.
472  *      param           Pointer to fs_parameter being parsed.
473  *
474  * POST-CONDITIONS
475  *      <return>        0       Mount options parsed okay.
476  *      <return>        errno   Error parsing mount options.
477  *
478  * HISTORY
479  *      July 1, 1997 - Andrew E. Mileski
480  *      Written, tested, and released.
481  */
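/*
 * Typical invocations exercising the options above (illustrative):
 *
 *	mount -t udf -o uid=1000,gid=1000,umask=022 /dev/sr0 /mnt
 *	mount -t udf -o session=2,lastblock=1048575 /dev/sr0 /mnt
 *
 * Malformed or unknown values make the parser below return an error and
 * the mount fails.
 */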
482
483 enum {
484         Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
485         Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
486         Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
487         Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
488         Opt_rootdir, Opt_utf8, Opt_iocharset, Opt_err, Opt_fmode, Opt_dmode
489 };
490
491 static const struct fs_parameter_spec udf_param_spec[] = {
492         fsparam_flag    ("novrs",               Opt_novrs),
493         fsparam_flag    ("nostrict",            Opt_nostrict),
494         fsparam_u32     ("bs",                  Opt_bs),
495         fsparam_flag    ("unhide",              Opt_unhide),
496         fsparam_flag    ("undelete",            Opt_undelete),
497         fsparam_flag_no ("adinicb",             Opt_adinicb),
498         fsparam_flag    ("shortad",             Opt_shortad),
499         fsparam_flag    ("longad",              Opt_longad),
500         fsparam_string  ("gid",                 Opt_gid),
501         fsparam_string  ("uid",                 Opt_uid),
502         fsparam_u32     ("umask",               Opt_umask),
503         fsparam_u32     ("session",             Opt_session),
504         fsparam_u32     ("lastblock",           Opt_lastblock),
505         fsparam_u32     ("anchor",              Opt_anchor),
506         fsparam_u32     ("volume",              Opt_volume),
507         fsparam_u32     ("partition",           Opt_partition),
508         fsparam_u32     ("fileset",             Opt_fileset),
509         fsparam_u32     ("rootdir",             Opt_rootdir),
510         fsparam_flag    ("utf8",                Opt_utf8),
511         fsparam_string  ("iocharset",           Opt_iocharset),
512         fsparam_u32     ("mode",                Opt_fmode),
513         fsparam_u32     ("dmode",               Opt_dmode),
514         {}
515  };
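/*
 * Note that fsparam_flag_no("adinicb", ...) accepts both "adinicb" and
 * "noadinicb"; the negated spelling is reported through result.negated and
 * handled in the Opt_adinicb case below.
 */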
516
517 static int udf_parse_param(struct fs_context *fc, struct fs_parameter *param)
518 {
519         unsigned int uv;
520         unsigned int n;
521         struct udf_options *uopt = fc->fs_private;
522         struct fs_parse_result result;
523         int token;
524         bool remount = (fc->purpose & FS_CONTEXT_FOR_RECONFIGURE);
525
526         token = fs_parse(fc, udf_param_spec, param, &result);
527         if (token < 0)
528                 return token;
529
530         switch (token) {
531         case Opt_novrs:
532                 uopt->flags |= (1 << UDF_FLAG_NOVRS);
533                 break;
534         case Opt_bs:
535                 n = result.uint_32;
536                 if (n != 512 && n != 1024 && n != 2048 && n != 4096)
537                         return -EINVAL;
538                 uopt->blocksize = n;
539                 uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
540                 break;
541         case Opt_unhide:
542                 uopt->flags |= (1 << UDF_FLAG_UNHIDE);
543                 break;
544         case Opt_undelete:
545                 uopt->flags |= (1 << UDF_FLAG_UNDELETE);
546                 break;
547         case Opt_adinicb:
548                 if (result.negated)
549                         uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
550                 else
551                         uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
552                 break;
553         case Opt_shortad:
554                 uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
555                 break;
556         case Opt_longad:
557                 uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
558                 break;
559         case Opt_gid:
560                 if (kstrtoint(param->string, 10, &uv) == 0) {
561                         kgid_t gid = make_kgid(current_user_ns(), uv);
562                         if (!gid_valid(gid))
563                                 return -EINVAL;
564                         uopt->gid = gid;
565                         uopt->flags |= (1 << UDF_FLAG_GID_SET);
566                 } else if (!strcmp(param->string, "forget")) {
567                         uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
568                 } else if (!strcmp(param->string, "ignore")) {
569                         /* this option is superseded by gid=<number> */
570                         ;
571                 } else {
572                         return -EINVAL;
573                 }
574                 break;
575         case Opt_uid:
576                 if (kstrtoint(param->string, 10, &uv) == 0) {
577                         kuid_t uid = make_kuid(current_user_ns(), uv);
578                         if (!uid_valid(uid))
579                                 return -EINVAL;
580                         uopt->uid = uid;
581                         uopt->flags |= (1 << UDF_FLAG_UID_SET);
582                 } else if (!strcmp(param->string, "forget")) {
583                         uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
584                 } else if (!strcmp(param->string, "ignore")) {
585                         /* this option is superseded by uid=<number> */
586                         ;
587                 } else {
588                         return -EINVAL;
589                 }
590                 break;
591         case Opt_umask:
592                 uopt->umask = result.uint_32;
593                 break;
594         case Opt_nostrict:
595                 uopt->flags &= ~(1 << UDF_FLAG_STRICT);
596                 break;
597         case Opt_session:
598                 uopt->session = result.uint_32;
599                 if (!remount)
600                         uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
601                 break;
602         case Opt_lastblock:
603                 uopt->lastblock = result.uint_32;
604                 if (!remount)
605                         uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
606                 break;
607         case Opt_anchor:
608                 uopt->anchor = result.uint_32;
609                 break;
610         case Opt_volume:
611         case Opt_partition:
612         case Opt_fileset:
613         case Opt_rootdir:
614                 /* Ignored (never implemented properly) */
615                 break;
616         case Opt_utf8:
617                 if (!remount) {
618                         unload_nls(uopt->nls_map);
619                         uopt->nls_map = NULL;
620                 }
621                 break;
622         case Opt_iocharset:
623                 if (!remount) {
624                         unload_nls(uopt->nls_map);
625                         uopt->nls_map = NULL;
626                 }
627                 /* When nls_map is not loaded then UTF-8 is used */
628                 if (!remount && strcmp(param->string, "utf8") != 0) {
629                         uopt->nls_map = load_nls(param->string);
630                         if (!uopt->nls_map) {
631                                 errorf(fc, "iocharset %s not found",
632                                         param->string);
633                                 return -EINVAL;
634                         }
635                 }
636                 break;
637         case Opt_fmode:
638                 uopt->fmode = result.uint_32 & 0777;
639                 break;
640         case Opt_dmode:
641                 uopt->dmode = result.uint_32 & 0777;
642                 break;
643         default:
644                 return -EINVAL;
645         }
646         return 0;
647 }
648
649 static int udf_reconfigure(struct fs_context *fc)
650 {
651         struct udf_options *uopt = fc->fs_private;
652         struct super_block *sb = fc->root->d_sb;
653         struct udf_sb_info *sbi = UDF_SB(sb);
654         int readonly = fc->sb_flags & SB_RDONLY;
655         int error = 0;
656
657         if (!readonly && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
658                 return -EACCES;
659
660         sync_filesystem(sb);
661
662         write_lock(&sbi->s_cred_lock);
663         sbi->s_flags = uopt->flags;
664         sbi->s_uid   = uopt->uid;
665         sbi->s_gid   = uopt->gid;
666         sbi->s_umask = uopt->umask;
667         sbi->s_fmode = uopt->fmode;
668         sbi->s_dmode = uopt->dmode;
669         write_unlock(&sbi->s_cred_lock);
670
671         if (readonly == sb_rdonly(sb))
672                 goto out_unlock;
673
674         if (readonly)
675                 udf_close_lvid(sb);
676         else
677                 udf_open_lvid(sb);
678
679 out_unlock:
680         return error;
681 }
682
683 /*
684  * Check VSD descriptor. Returns -1 in case we are at the end of the volume
685  * recognition area, 0 if the descriptor is valid but non-interesting, 1 if
686  * we found one of the NSR descriptors we are looking for.
687  */
688 static int identify_vsd(const struct volStructDesc *vsd)
689 {
690         int ret = 0;
691
692         if (!memcmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
693                 switch (vsd->structType) {
694                 case 0:
695                         udf_debug("ISO9660 Boot Record found\n");
696                         break;
697                 case 1:
698                         udf_debug("ISO9660 Primary Volume Descriptor found\n");
699                         break;
700                 case 2:
701                         udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
702                         break;
703                 case 3:
704                         udf_debug("ISO9660 Volume Partition Descriptor found\n");
705                         break;
706                 case 255:
707                         udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
708                         break;
709                 default:
710                         udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
711                         break;
712                 }
713         } else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
714                 ; /* ret = 0 */
715         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
716                 ret = 1;
717         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
718                 ret = 1;
719         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BOOT2, VSD_STD_ID_LEN))
720                 ; /* ret = 0 */
721         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_CDW02, VSD_STD_ID_LEN))
722                 ; /* ret = 0 */
723         else {
724                 /* TEA01 or invalid id : end of volume recognition area */
725                 ret = -1;
726         }
727
728         return ret;
729 }
730
731 /*
732  * Check Volume Structure Descriptors (ECMA 167 2/9.1)
733  * We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1)
734  * @return   1 if NSR02 or NSR03 found,
735  *          -1 if first sector read error, 0 otherwise
736  */
737 static int udf_check_vsd(struct super_block *sb)
738 {
739         struct volStructDesc *vsd = NULL;
740         loff_t sector = VSD_FIRST_SECTOR_OFFSET;
741         int sectorsize;
742         struct buffer_head *bh = NULL;
743         int nsr = 0;
744         struct udf_sb_info *sbi;
745         loff_t session_offset;
746
747         sbi = UDF_SB(sb);
748         if (sb->s_blocksize < sizeof(struct volStructDesc))
749                 sectorsize = sizeof(struct volStructDesc);
750         else
751                 sectorsize = sb->s_blocksize;
752
753         session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
754         sector += session_offset;
755
756         udf_debug("Starting at sector %u (%lu byte sectors)\n",
757                   (unsigned int)(sector >> sb->s_blocksize_bits),
758                   sb->s_blocksize);
759         /* Process the sequence (if applicable). The hard limit on the sector
760          * offset is arbitrary, hopefully large enough so that all valid UDF
761          * filesystems will be recognised. There is no mention of an upper
762          * bound to the size of the volume recognition area in the standard.
763          * The limit will prevent the code from reading all the sectors of a
764          * specially crafted image (like a bluray disc full of CD001 sectors),
765          * potentially causing minutes or even hours of uninterruptible I/O
766          * activity. This actually happened with uninitialised SSD partitions
767          * (all 0xFF) before the check for the limit and all valid IDs were
768          * added */
769         for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) {
770                 /* Read a block */
771                 bh = sb_bread(sb, sector >> sb->s_blocksize_bits);
772                 if (!bh)
773                         break;
774
775                 vsd = (struct volStructDesc *)(bh->b_data +
776                                               (sector & (sb->s_blocksize - 1)));
777                 nsr = identify_vsd(vsd);
778                 /* Found NSR or end? */
779                 if (nsr) {
780                         brelse(bh);
781                         break;
782                 }
783                 /*
784                  * Special handling for improperly formatted VRS (e.g., Win10)
785                  * where components are separated by 2048 bytes even though
786                  * sectors are 4K
787                  */
788                 if (sb->s_blocksize == 4096) {
789                         nsr = identify_vsd(vsd + 1);
790                         /* Ignore unknown IDs... */
791                         if (nsr < 0)
792                                 nsr = 0;
793                 }
794                 brelse(bh);
795         }
796
797         if (nsr > 0)
798                 return 1;
799         else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
800                 return -1;
801         else
802                 return 0;
803 }
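/*
 * Worked example of the scan above, for a 2048-byte block device with no
 * multisession offset: the first candidate VSD sits at byte 32768
 * (VSD_FIRST_SECTOR_OFFSET), i.e. block 16, and the loop then advances in
 * 2048-byte steps until an NSR02/NSR03 descriptor, a terminating
 * descriptor, a read error, or the 0x800000 byte ceiling stops it.
 */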
804
805 static int udf_verify_domain_identifier(struct super_block *sb,
806                                         struct regid *ident, char *dname)
807 {
808         struct domainIdentSuffix *suffix;
809
810         if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) {
811                 udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname);
812                 goto force_ro;
813         }
814         if (ident->flags & ENTITYID_FLAGS_DIRTY) {
815                 udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n",
816                          dname);
817                 goto force_ro;
818         }
819         suffix = (struct domainIdentSuffix *)ident->identSuffix;
820         if ((suffix->domainFlags & DOMAIN_FLAGS_HARD_WRITE_PROTECT) ||
821             (suffix->domainFlags & DOMAIN_FLAGS_SOFT_WRITE_PROTECT)) {
822                 if (!sb_rdonly(sb)) {
823                         udf_warn(sb, "Descriptor for %s marked write protected."
824                                  " Forcing read only mount.\n", dname);
825                 }
826                 goto force_ro;
827         }
828         return 0;
829
830 force_ro:
831         if (!sb_rdonly(sb))
832                 return -EACCES;
833         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
834         return 0;
835 }
836
837 static int udf_load_fileset(struct super_block *sb, struct fileSetDesc *fset,
838                             struct kernel_lb_addr *root)
839 {
840         int ret;
841
842         ret = udf_verify_domain_identifier(sb, &fset->domainIdent, "file set");
843         if (ret < 0)
844                 return ret;
845
846         *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
847         UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
848
849         udf_debug("Rootdir at block=%u, partition=%u\n",
850                   root->logicalBlockNum, root->partitionReferenceNum);
851         return 0;
852 }
853
854 static int udf_find_fileset(struct super_block *sb,
855                             struct kernel_lb_addr *fileset,
856                             struct kernel_lb_addr *root)
857 {
858         struct buffer_head *bh;
859         uint16_t ident;
860         int ret;
861
862         if (fileset->logicalBlockNum == 0xFFFFFFFF &&
863             fileset->partitionReferenceNum == 0xFFFF)
864                 return -EINVAL;
865
866         bh = udf_read_ptagged(sb, fileset, 0, &ident);
867         if (!bh)
868                 return -EIO;
869         if (ident != TAG_IDENT_FSD) {
870                 brelse(bh);
871                 return -EINVAL;
872         }
873
874         udf_debug("Fileset at block=%u, partition=%u\n",
875                   fileset->logicalBlockNum, fileset->partitionReferenceNum);
876
877         UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
878         ret = udf_load_fileset(sb, (struct fileSetDesc *)bh->b_data, root);
879         brelse(bh);
880         return ret;
881 }
882
883 /*
884  * Load primary Volume Descriptor Sequence
885  *
886  * Return <0 on error, 0 on success. -EAGAIN is special meaning next sequence
887  * should be tried.
888  */
889 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
890 {
891         struct primaryVolDesc *pvoldesc;
892         uint8_t *outstr;
893         struct buffer_head *bh;
894         uint16_t ident;
895         int ret;
896         struct timestamp *ts;
897
898         outstr = kmalloc(128, GFP_KERNEL);
899         if (!outstr)
900                 return -ENOMEM;
901
902         bh = udf_read_tagged(sb, block, block, &ident);
903         if (!bh) {
904                 ret = -EAGAIN;
905                 goto out2;
906         }
907
908         if (ident != TAG_IDENT_PVD) {
909                 ret = -EIO;
910                 goto out_bh;
911         }
912
913         pvoldesc = (struct primaryVolDesc *)bh->b_data;
914
915         udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
916                               pvoldesc->recordingDateAndTime);
917         ts = &pvoldesc->recordingDateAndTime;
918         udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
919                   le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
920                   ts->minute, le16_to_cpu(ts->typeAndTimezone));
921
922         ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
923         if (ret < 0) {
924                 strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
925                 pr_warn("incorrect volume identification, setting to "
926                         "'InvalidName'\n");
927         } else {
928                 strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
929         }
930         udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
931
932         ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
933         if (ret < 0) {
934                 ret = 0;
935                 goto out_bh;
936         }
937         outstr[ret] = 0;
938         udf_debug("volSetIdent[] = '%s'\n", outstr);
939
940         ret = 0;
941 out_bh:
942         brelse(bh);
943 out2:
944         kfree(outstr);
945         return ret;
946 }
947
948 struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
949                                         u32 meta_file_loc, u32 partition_ref)
950 {
951         struct kernel_lb_addr addr;
952         struct inode *metadata_fe;
953
954         addr.logicalBlockNum = meta_file_loc;
955         addr.partitionReferenceNum = partition_ref;
956
957         metadata_fe = udf_iget_special(sb, &addr);
958
959         if (IS_ERR(metadata_fe)) {
960                 udf_warn(sb, "metadata inode efe not found\n");
961                 return metadata_fe;
962         }
963         if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
964                 udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
965                 iput(metadata_fe);
966                 return ERR_PTR(-EIO);
967         }
968
969         return metadata_fe;
970 }
971
972 static int udf_load_metadata_files(struct super_block *sb, int partition,
973                                    int type1_index)
974 {
975         struct udf_sb_info *sbi = UDF_SB(sb);
976         struct udf_part_map *map;
977         struct udf_meta_data *mdata;
978         struct kernel_lb_addr addr;
979         struct inode *fe;
980
981         map = &sbi->s_partmaps[partition];
982         mdata = &map->s_type_specific.s_metadata;
983         mdata->s_phys_partition_ref = type1_index;
984
985         /* metadata address */
986         udf_debug("Metadata file location: block = %u part = %u\n",
987                   mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
988
989         fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
990                                          mdata->s_phys_partition_ref);
991         if (IS_ERR(fe)) {
992                 /* mirror file entry */
993                 udf_debug("Mirror metadata file location: block = %u part = %u\n",
994                           mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
995
996                 fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
997                                                  mdata->s_phys_partition_ref);
998
999                 if (IS_ERR(fe)) {
1000                         udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
1001                         return PTR_ERR(fe);
1002                 }
1003                 mdata->s_mirror_fe = fe;
1004         } else
1005                 mdata->s_metadata_fe = fe;
1006
1007
1008         /*
1009          * bitmap file entry
1010          * Note:
1011          * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
1012         */
1013         if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
1014                 addr.logicalBlockNum = mdata->s_bitmap_file_loc;
1015                 addr.partitionReferenceNum = mdata->s_phys_partition_ref;
1016
1017                 udf_debug("Bitmap file location: block = %u part = %u\n",
1018                           addr.logicalBlockNum, addr.partitionReferenceNum);
1019
1020                 fe = udf_iget_special(sb, &addr);
1021                 if (IS_ERR(fe)) {
1022                         if (sb_rdonly(sb))
1023                                 udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
1024                         else {
1025                                 udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
1026                                 return PTR_ERR(fe);
1027                         }
1028                 } else
1029                         mdata->s_bitmap_fe = fe;
1030         }
1031
1032         udf_debug("udf_load_metadata_files Ok\n");
1033         return 0;
1034 }
1035
1036 int udf_compute_nr_groups(struct super_block *sb, u32 partition)
1037 {
1038         struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
1039         return DIV_ROUND_UP(map->s_partition_len +
1040                             (sizeof(struct spaceBitmapDesc) << 3),
1041                             sb->s_blocksize * 8);
1042 }
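/*
 * Example: with 2048-byte blocks each bitmap block carries 16384 bits, so a
 * partition of 1,000,000 blocks plus the ~192 bits taken by the
 * spaceBitmapDesc header rounds up to DIV_ROUND_UP(1000192, 16384) = 62
 * groups.
 */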
1043
1044 static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
1045 {
1046         struct udf_bitmap *bitmap;
1047         int nr_groups = udf_compute_nr_groups(sb, index);
1048
1049         bitmap = kvzalloc(struct_size(bitmap, s_block_bitmap, nr_groups),
1050                           GFP_KERNEL);
1051         if (!bitmap)
1052                 return NULL;
1053
1054         bitmap->s_nr_groups = nr_groups;
1055         return bitmap;
1056 }
1057
1058 static int check_partition_desc(struct super_block *sb,
1059                                 struct partitionDesc *p,
1060                                 struct udf_part_map *map)
1061 {
1062         bool umap, utable, fmap, ftable;
1063         struct partitionHeaderDesc *phd;
1064
1065         switch (le32_to_cpu(p->accessType)) {
1066         case PD_ACCESS_TYPE_READ_ONLY:
1067         case PD_ACCESS_TYPE_WRITE_ONCE:
1068         case PD_ACCESS_TYPE_NONE:
1069                 goto force_ro;
1070         }
1071
1072         /* No Partition Header Descriptor? */
1073         if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
1074             strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
1075                 goto force_ro;
1076
1077         phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1078         utable = phd->unallocSpaceTable.extLength;
1079         umap = phd->unallocSpaceBitmap.extLength;
1080         ftable = phd->freedSpaceTable.extLength;
1081         fmap = phd->freedSpaceBitmap.extLength;
1082
1083         /* No allocation info? */
1084         if (!utable && !umap && !ftable && !fmap)
1085                 goto force_ro;
1086
1087         /* We don't support blocks that require erasing before overwrite */
1088         if (ftable || fmap)
1089                 goto force_ro;
1090         /* UDF 2.60: 2.3.3 - no mixing of tables & bitmaps, no VAT. */
1091         if (utable && umap)
1092                 goto force_ro;
1093
1094         if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1095             map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1096             map->s_partition_type == UDF_METADATA_MAP25)
1097                 goto force_ro;
1098
1099         return 0;
1100 force_ro:
1101         if (!sb_rdonly(sb))
1102                 return -EACCES;
1103         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1104         return 0;
1105 }
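/*
 * The force_ro convention used above (and in udf_verify_domain_identifier):
 * refuse a read-write mount outright with -EACCES, but let a read-only
 * mount proceed while flagging UDF_FLAG_RW_INCOMPAT so that a later remount
 * to read-write is rejected in udf_reconfigure().
 */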
1106
1107 static int udf_fill_partdesc_info(struct super_block *sb,
1108                 struct partitionDesc *p, int p_index)
1109 {
1110         struct udf_part_map *map;
1111         struct udf_sb_info *sbi = UDF_SB(sb);
1112         struct partitionHeaderDesc *phd;
1113         int err;
1114
1115         map = &sbi->s_partmaps[p_index];
1116
1117         map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
1118         map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
1119
1120         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
1121                 map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
1122         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
1123                 map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
1124         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1125                 map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
1126         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1127                 map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
1128
1129         udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n",
1130                   p_index, map->s_partition_type,
1131                   map->s_partition_root, map->s_partition_len);
1132
1133         err = check_partition_desc(sb, p, map);
1134         if (err)
1135                 return err;
1136
1137         /*
1138          * Skip loading allocation info if we cannot ever write to the fs.
1139          * This is a correctness thing as we may have decided to force ro mount
1140          * to avoid allocation info we don't support.
1141          */
1142         if (UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
1143                 return 0;
1144
1145         phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1146         if (phd->unallocSpaceTable.extLength) {
1147                 struct kernel_lb_addr loc = {
1148                         .logicalBlockNum = le32_to_cpu(
1149                                 phd->unallocSpaceTable.extPosition),
1150                         .partitionReferenceNum = p_index,
1151                 };
1152                 struct inode *inode;
1153
1154                 inode = udf_iget_special(sb, &loc);
1155                 if (IS_ERR(inode)) {
1156                         udf_debug("cannot load unallocSpaceTable (part %d)\n",
1157                                   p_index);
1158                         return PTR_ERR(inode);
1159                 }
1160                 map->s_uspace.s_table = inode;
1161                 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
1162                 udf_debug("unallocSpaceTable (part %d) @ %lu\n",
1163                           p_index, map->s_uspace.s_table->i_ino);
1164         }
1165
1166         if (phd->unallocSpaceBitmap.extLength) {
1167                 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1168                 if (!bitmap)
1169                         return -ENOMEM;
1170                 map->s_uspace.s_bitmap = bitmap;
1171                 bitmap->s_extPosition = le32_to_cpu(
1172                                 phd->unallocSpaceBitmap.extPosition);
1173                 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
1174                 udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
1175                           p_index, bitmap->s_extPosition);
1176         }
1177
1178         return 0;
1179 }
1180
1181 static void udf_find_vat_block(struct super_block *sb, int p_index,
1182                                int type1_index, sector_t start_block)
1183 {
1184         struct udf_sb_info *sbi = UDF_SB(sb);
1185         struct udf_part_map *map = &sbi->s_partmaps[p_index];
1186         sector_t vat_block;
1187         struct kernel_lb_addr ino;
1188         struct inode *inode;
1189
1190         /*
1191          * VAT file entry is in the last recorded block. Some broken disks have
1192          * it a few blocks before so try a bit harder...
1193          */
1194         ino.partitionReferenceNum = type1_index;
1195         for (vat_block = start_block;
1196              vat_block >= map->s_partition_root &&
1197              vat_block >= start_block - 3; vat_block--) {
1198                 ino.logicalBlockNum = vat_block - map->s_partition_root;
1199                 inode = udf_iget_special(sb, &ino);
1200                 if (!IS_ERR(inode)) {
1201                         sbi->s_vat_inode = inode;
1202                         break;
1203                 }
1204         }
1205 }
1206
1207 static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1208 {
1209         struct udf_sb_info *sbi = UDF_SB(sb);
1210         struct udf_part_map *map = &sbi->s_partmaps[p_index];
1211         struct buffer_head *bh = NULL;
1212         struct udf_inode_info *vati;
1213         struct virtualAllocationTable20 *vat20;
1214         sector_t blocks = sb_bdev_nr_blocks(sb);
1215
1216         udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
1217         if (!sbi->s_vat_inode &&
1218             sbi->s_last_block != blocks - 1) {
1219                 pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
1220                           (unsigned long)sbi->s_last_block,
1221                           (unsigned long)blocks - 1);
1222                 udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
1223         }
1224         if (!sbi->s_vat_inode)
1225                 return -EIO;
1226
1227         if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1228                 map->s_type_specific.s_virtual.s_start_offset = 0;
1229                 map->s_type_specific.s_virtual.s_num_entries =
1230                         (sbi->s_vat_inode->i_size - 36) >> 2;
1231         } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1232                 vati = UDF_I(sbi->s_vat_inode);
1233                 if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1234                         int err = 0;
1235
1236                         bh = udf_bread(sbi->s_vat_inode, 0, 0, &err);
1237                         if (!bh) {
1238                                 if (!err)
1239                                         err = -EFSCORRUPTED;
1240                                 return err;
1241                         }
1242                         vat20 = (struct virtualAllocationTable20 *)bh->b_data;
1243                 } else {
1244                         vat20 = (struct virtualAllocationTable20 *)
1245                                                         vati->i_data;
1246                 }
1247
1248                 map->s_type_specific.s_virtual.s_start_offset =
1249                         le16_to_cpu(vat20->lengthHeader);
1250                 map->s_type_specific.s_virtual.s_num_entries =
1251                         (sbi->s_vat_inode->i_size -
1252                                 map->s_type_specific.s_virtual.
1253                                         s_start_offset) >> 2;
1254                 brelse(bh);
1255         }
1256         return 0;
1257 }
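/*
 * For reference: a UDF 1.50 VAT is a bare array of 32-bit entries followed
 * by a 36-byte trailer, hence the (i_size - 36) >> 2 above, while a UDF
 * 2.00 VAT starts with a header whose size is given by lengthHeader and the
 * entries follow it.
 */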
1258
1259 /*
1260  * Load partition descriptor block
1261  *
1262  * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
1263  * sequence.
1264  */
1265 static int udf_load_partdesc(struct super_block *sb, sector_t block)
1266 {
1267         struct buffer_head *bh;
1268         struct partitionDesc *p;
1269         struct udf_part_map *map;
1270         struct udf_sb_info *sbi = UDF_SB(sb);
1271         int i, type1_idx;
1272         uint16_t partitionNumber;
1273         uint16_t ident;
1274         int ret;
1275
1276         bh = udf_read_tagged(sb, block, block, &ident);
1277         if (!bh)
1278                 return -EAGAIN;
1279         if (ident != TAG_IDENT_PD) {
1280                 ret = 0;
1281                 goto out_bh;
1282         }
1283
1284         p = (struct partitionDesc *)bh->b_data;
1285         partitionNumber = le16_to_cpu(p->partitionNumber);
1286
1287         /* First scan for TYPE1 and SPARABLE partitions */
1288         for (i = 0; i < sbi->s_partitions; i++) {
1289                 map = &sbi->s_partmaps[i];
1290                 udf_debug("Searching map: (%u == %u)\n",
1291                           map->s_partition_num, partitionNumber);
1292                 if (map->s_partition_num == partitionNumber &&
1293                     (map->s_partition_type == UDF_TYPE1_MAP15 ||
1294                      map->s_partition_type == UDF_SPARABLE_MAP15))
1295                         break;
1296         }
1297
1298         if (i >= sbi->s_partitions) {
1299                 udf_debug("Partition (%u) not found in partition map\n",
1300                           partitionNumber);
1301                 ret = 0;
1302                 goto out_bh;
1303         }
1304
1305         ret = udf_fill_partdesc_info(sb, p, i);
1306         if (ret < 0)
1307                 goto out_bh;
1308
1309         /*
1310          * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
1311          * PHYSICAL partitions are already set up
1312          */
1313         type1_idx = i;
1314         map = NULL; /* suppress 'maybe used uninitialized' warning */
1315         for (i = 0; i < sbi->s_partitions; i++) {
1316                 map = &sbi->s_partmaps[i];
1317
1318                 if (map->s_partition_num == partitionNumber &&
1319                     (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1320                      map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1321                      map->s_partition_type == UDF_METADATA_MAP25))
1322                         break;
1323         }
1324
1325         if (i >= sbi->s_partitions) {
1326                 ret = 0;
1327                 goto out_bh;
1328         }
1329
1330         ret = udf_fill_partdesc_info(sb, p, i);
1331         if (ret < 0)
1332                 goto out_bh;
1333
1334         if (map->s_partition_type == UDF_METADATA_MAP25) {
1335                 ret = udf_load_metadata_files(sb, i, type1_idx);
1336                 if (ret < 0) {
1337                         udf_err(sb, "error loading MetaData partition map %d\n",
1338                                 i);
1339                         goto out_bh;
1340                 }
1341         } else {
1342                 /*
1343                  * If we have a partition with virtual map, we don't handle
1344                  * writing to it (we overwrite blocks instead of relocating
1345                  * them).
1346                  */
1347                 if (!sb_rdonly(sb)) {
1348                         ret = -EACCES;
1349                         goto out_bh;
1350                 }
1351                 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1352                 ret = udf_load_vat(sb, i, type1_idx);
1353                 if (ret < 0)
1354                         goto out_bh;
1355         }
1356         ret = 0;
1357 out_bh:
1358         /* In case loading failed, we handle cleanup in udf_fill_super */
1359         brelse(bh);
1360         return ret;
1361 }
1362
1363 static int udf_load_sparable_map(struct super_block *sb,
1364                                  struct udf_part_map *map,
1365                                  struct sparablePartitionMap *spm)
1366 {
1367         uint32_t loc;
1368         uint16_t ident;
1369         struct sparingTable *st;
1370         struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
1371         int i;
1372         struct buffer_head *bh;
1373
1374         map->s_partition_type = UDF_SPARABLE_MAP15;
1375         sdata->s_packet_len = le16_to_cpu(spm->packetLength);
1376         if (!is_power_of_2(sdata->s_packet_len)) {
1377                 udf_err(sb, "error loading logical volume descriptor: "
1378                         "Invalid packet length %u\n",
1379                         (unsigned)sdata->s_packet_len);
1380                 return -EIO;
1381         }
1382         if (spm->numSparingTables > 4) {
1383                 udf_err(sb, "error loading logical volume descriptor: "
1384                         "Too many sparing tables (%d)\n",
1385                         (int)spm->numSparingTables);
1386                 return -EIO;
1387         }
1388         if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
1389                 udf_err(sb, "error loading logical volume descriptor: "
1390                         "Too big sparing table size (%u)\n",
1391                         le32_to_cpu(spm->sizeSparingTable));
1392                 return -EIO;
1393         }
1394
1395         for (i = 0; i < spm->numSparingTables; i++) {
1396                 loc = le32_to_cpu(spm->locSparingTable[i]);
1397                 bh = udf_read_tagged(sb, loc, loc, &ident);
1398                 if (!bh)
1399                         continue;
1400
1401                 st = (struct sparingTable *)bh->b_data;
1402                 if (ident != 0 ||
1403                     strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
1404                             strlen(UDF_ID_SPARING)) ||
1405                     sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
1406                                                         sb->s_blocksize) {
1407                         brelse(bh);
1408                         continue;
1409                 }
1410
1411                 sdata->s_spar_map[i] = bh;
1412         }
1413         map->s_partition_func = udf_get_pblock_spar15;
1414         return 0;
1415 }
1416
1417 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1418                                struct kernel_lb_addr *fileset)
1419 {
1420         struct logicalVolDesc *lvd;
1421         int i, offset;
1422         uint8_t type;
1423         struct udf_sb_info *sbi = UDF_SB(sb);
1424         struct genericPartitionMap *gpm;
1425         uint16_t ident;
1426         struct buffer_head *bh;
1427         unsigned int table_len;
1428         int ret;
1429
1430         bh = udf_read_tagged(sb, block, block, &ident);
1431         if (!bh)
1432                 return -EAGAIN;
1433         BUG_ON(ident != TAG_IDENT_LVD);
1434         lvd = (struct logicalVolDesc *)bh->b_data;
1435         table_len = le32_to_cpu(lvd->mapTableLength);
1436         if (table_len > sb->s_blocksize - sizeof(*lvd)) {
1437                 udf_err(sb, "error loading logical volume descriptor: "
1438                         "Partition table too long (%u > %lu)\n", table_len,
1439                         sb->s_blocksize - sizeof(*lvd));
1440                 ret = -EIO;
1441                 goto out_bh;
1442         }
1443
1444         ret = udf_verify_domain_identifier(sb, &lvd->domainIdent,
1445                                            "logical volume");
1446         if (ret)
1447                 goto out_bh;
1448         ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1449         if (ret)
1450                 goto out_bh;
1451
1452         for (i = 0, offset = 0;
1453              i < sbi->s_partitions && offset < table_len;
1454              i++, offset += gpm->partitionMapLength) {
1455                 struct udf_part_map *map = &sbi->s_partmaps[i];
1456                 gpm = (struct genericPartitionMap *)
1457                                 &(lvd->partitionMaps[offset]);
1458                 type = gpm->partitionMapType;
1459                 if (type == 1) {
1460                         struct genericPartitionMap1 *gpm1 =
1461                                 (struct genericPartitionMap1 *)gpm;
1462                         map->s_partition_type = UDF_TYPE1_MAP15;
1463                         map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
1464                         map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
1465                         map->s_partition_func = NULL;
1466                 } else if (type == 2) {
1467                         struct udfPartitionMap2 *upm2 =
1468                                                 (struct udfPartitionMap2 *)gpm;
1469                         if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
1470                                                 strlen(UDF_ID_VIRTUAL))) {
1471                                 u16 suf =
1472                                         le16_to_cpu(((__le16 *)upm2->partIdent.
1473                                                         identSuffix)[0]);
1474                                 if (suf < 0x0200) {
1475                                         map->s_partition_type =
1476                                                         UDF_VIRTUAL_MAP15;
1477                                         map->s_partition_func =
1478                                                         udf_get_pblock_virt15;
1479                                 } else {
1480                                         map->s_partition_type =
1481                                                         UDF_VIRTUAL_MAP20;
1482                                         map->s_partition_func =
1483                                                         udf_get_pblock_virt20;
1484                                 }
1485                         } else if (!strncmp(upm2->partIdent.ident,
1486                                                 UDF_ID_SPARABLE,
1487                                                 strlen(UDF_ID_SPARABLE))) {
1488                                 ret = udf_load_sparable_map(sb, map,
1489                                         (struct sparablePartitionMap *)gpm);
1490                                 if (ret < 0)
1491                                         goto out_bh;
1492                         } else if (!strncmp(upm2->partIdent.ident,
1493                                                 UDF_ID_METADATA,
1494                                                 strlen(UDF_ID_METADATA))) {
1495                                 struct udf_meta_data *mdata =
1496                                         &map->s_type_specific.s_metadata;
1497                                 struct metadataPartitionMap *mdm =
1498                                                 (struct metadataPartitionMap *)
1499                                                 &(lvd->partitionMaps[offset]);
1500                                 udf_debug("Parsing Logical vol part %d type %u  id=%s\n",
1501                                           i, type, UDF_ID_METADATA);
1502
1503                                 map->s_partition_type = UDF_METADATA_MAP25;
1504                                 map->s_partition_func = udf_get_pblock_meta25;
1505
1506                                 mdata->s_meta_file_loc   =
1507                                         le32_to_cpu(mdm->metadataFileLoc);
1508                                 mdata->s_mirror_file_loc =
1509                                         le32_to_cpu(mdm->metadataMirrorFileLoc);
1510                                 mdata->s_bitmap_file_loc =
1511                                         le32_to_cpu(mdm->metadataBitmapFileLoc);
1512                                 mdata->s_alloc_unit_size =
1513                                         le32_to_cpu(mdm->allocUnitSize);
1514                                 mdata->s_align_unit_size =
1515                                         le16_to_cpu(mdm->alignUnitSize);
1516                                 if (mdm->flags & 0x01)
1517                                         mdata->s_flags |= MF_DUPLICATE_MD;
1518
1519                                 udf_debug("Metadata Ident suffix=0x%x\n",
1520                                           le16_to_cpu(*(__le16 *)
1521                                                       mdm->partIdent.identSuffix));
1522                                 udf_debug("Metadata part num=%u\n",
1523                                           le16_to_cpu(mdm->partitionNum));
1524                                 udf_debug("Metadata part alloc unit size=%u\n",
1525                                           le32_to_cpu(mdm->allocUnitSize));
1526                                 udf_debug("Metadata file loc=%u\n",
1527                                           le32_to_cpu(mdm->metadataFileLoc));
1528                                 udf_debug("Mirror file loc=%u\n",
1529                                           le32_to_cpu(mdm->metadataMirrorFileLoc));
1530                                 udf_debug("Bitmap file loc=%u\n",
1531                                           le32_to_cpu(mdm->metadataBitmapFileLoc));
1532                                 udf_debug("Flags: %d %u\n",
1533                                           mdata->s_flags, mdm->flags);
1534                         } else {
1535                                 udf_debug("Unknown ident: %s\n",
1536                                           upm2->partIdent.ident);
1537                                 continue;
1538                         }
1539                         map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
1540                         map->s_partition_num = le16_to_cpu(upm2->partitionNum);
1541                 }
1542                 udf_debug("Partition (%d:%u) type %u on volume %u\n",
1543                           i, map->s_partition_num, type, map->s_volumeseqnum);
1544         }
1545
1546         if (fileset) {
1547                 struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
1548
1549                 *fileset = lelb_to_cpu(la->extLocation);
1550                 udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n",
1551                           fileset->logicalBlockNum,
1552                           fileset->partitionReferenceNum);
1553         }
1554         if (lvd->integritySeqExt.extLength)
1555                 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1556         ret = 0;
1557
1558         if (!sbi->s_lvid_bh) {
1559                 /* We can't generate unique IDs without a valid LVID */
1560                 if (sb_rdonly(sb)) {
1561                         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1562                 } else {
1563                         udf_warn(sb, "Damaged or missing LVID, forcing "
1564                                      "readonly mount\n");
1565                         ret = -EACCES;
1566                 }
1567         }
1568 out_bh:
1569         brelse(bh);
1570         return ret;
1571 }
1572
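/*
 * Sanity check an LVID: the descriptor carries two u32 tables (free space
 * and size, one entry per partition) followed by an implementation use
 * area, and all of it has to fit into a single block.
 */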
1573 static bool udf_lvid_valid(struct super_block *sb,
1574                            struct logicalVolIntegrityDesc *lvid)
1575 {
1576         u32 parts, impuselen;
1577
1578         parts = le32_to_cpu(lvid->numOfPartitions);
1579         impuselen = le32_to_cpu(lvid->lengthOfImpUse);
1580         if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
1581             sizeof(struct logicalVolIntegrityDesc) + impuselen +
1582             2 * parts * sizeof(u32) > sb->s_blocksize)
1583                 return false;
1584         return true;
1585 }
1586
1587 /*
1588  * Find the prevailing Logical Volume Integrity Descriptor.
1589  */
1590 static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
1591 {
1592         struct buffer_head *bh, *final_bh;
1593         uint16_t ident;
1594         struct udf_sb_info *sbi = UDF_SB(sb);
1595         struct logicalVolIntegrityDesc *lvid;
1596         int indirections = 0;
1597
1598         while (++indirections <= UDF_MAX_LVID_NESTING) {
1599                 final_bh = NULL;
1600                 while (loc.extLength > 0 &&
1601                         (bh = udf_read_tagged(sb, loc.extLocation,
1602                                         loc.extLocation, &ident))) {
1603                         if (ident != TAG_IDENT_LVID) {
1604                                 brelse(bh);
1605                                 break;
1606                         }
1607
1608                         brelse(final_bh);
1609                         final_bh = bh;
1610
1611                         loc.extLength -= sb->s_blocksize;
1612                         loc.extLocation++;
1613                 }
1614
1615                 if (!final_bh)
1616                         return;
1617
1618                 lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
1619                 if (udf_lvid_valid(sb, lvid)) {
1620                         brelse(sbi->s_lvid_bh);
1621                         sbi->s_lvid_bh = final_bh;
1622                 } else {
1623                         udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
1624                                  "ignoring.\n",
1625                                  le32_to_cpu(lvid->numOfPartitions),
1626                                  le32_to_cpu(lvid->lengthOfImpUse));
1627                 }
1628
1629                 if (lvid->nextIntegrityExt.extLength == 0)
1630                         return;
1631
1632                 loc = leea_to_cpu(lvid->nextIntegrityExt);
1633         }
1634
1635         udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
1636                 UDF_MAX_LVID_NESTING);
1637         brelse(sbi->s_lvid_bh);
1638         sbi->s_lvid_bh = NULL;
1639 }
1640
1641 /*
1642  * Step for reallocating the table of partition descriptor sequence numbers.
1643  * Must be a power of 2.
1644  */
1645 #define PART_DESC_ALLOC_STEP 32
1646
1647 struct part_desc_seq_scan_data {
1648         struct udf_vds_record rec;
1649         u32 partnum;
1650 };
1651
1652 struct desc_seq_scan_data {
1653         struct udf_vds_record vds[VDS_POS_LENGTH];
1654         unsigned int size_part_descs;
1655         unsigned int num_part_descs;
1656         struct part_desc_seq_scan_data *part_descs_loc;
1657 };
1658
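/*
 * Return the scan record tracking the prevailing Partition Descriptor for
 * the partition number found in @bh. Descriptors for a partition number we
 * have already seen share one record; otherwise a new record is handed out,
 * reallocating part_descs_loc (sized up to the next PART_DESC_ALLOC_STEP
 * multiple of the partition number) when it is full.
 */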
1659 static struct udf_vds_record *handle_partition_descriptor(
1660                                 struct buffer_head *bh,
1661                                 struct desc_seq_scan_data *data)
1662 {
1663         struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
1664         int partnum;
1665         int i;
1666
1667         partnum = le16_to_cpu(desc->partitionNumber);
1668         for (i = 0; i < data->num_part_descs; i++)
1669                 if (partnum == data->part_descs_loc[i].partnum)
1670                         return &(data->part_descs_loc[i].rec);
1671         if (data->num_part_descs >= data->size_part_descs) {
1672                 struct part_desc_seq_scan_data *new_loc;
1673                 unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
1674
1675                 new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
1676                 if (!new_loc)
1677                         return ERR_PTR(-ENOMEM);
1678                 memcpy(new_loc, data->part_descs_loc,
1679                        data->size_part_descs * sizeof(*new_loc));
1680                 kfree(data->part_descs_loc);
1681                 data->part_descs_loc = new_loc;
1682                 data->size_part_descs = new_size;
1683         }
1684         return &(data->part_descs_loc[data->num_part_descs++].rec);
1685 }
1686
1687
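/*
 * Map a descriptor tag to the scan record where the latest (prevailing)
 * instance of that descriptor is remembered. PVD, IUVD, LVD and USD each
 * have a fixed slot in data->vds; Partition Descriptors get one record per
 * partition number. Descriptors we are not interested in yield NULL.
 */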
1688 static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident,
1689                 struct buffer_head *bh, struct desc_seq_scan_data *data)
1690 {
1691         switch (ident) {
1692         case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1693                 return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]);
1694         case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1695                 return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]);
1696         case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1697                 return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]);
1698         case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1699                 return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]);
1700         case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1701                 return handle_partition_descriptor(bh, data);
1702         }
1703         return NULL;
1704 }
1705
1706 /*
1707  * Process a main/reserve volume descriptor sequence.
1708  *   @block             First block of the first extent of the sequence.
1709  *   @lastblock         Last block of the first extent of the sequence.
1710  *   @fileset           Where we store the extent containing the root fileset.
1711  *
1712  * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
1713  * sequence
1714  */
1715 static noinline int udf_process_sequence(
1716                 struct super_block *sb,
1717                 sector_t block, sector_t lastblock,
1718                 struct kernel_lb_addr *fileset)
1719 {
1720         struct buffer_head *bh = NULL;
1721         struct udf_vds_record *curr;
1722         struct generic_desc *gd;
1723         struct volDescPtr *vdp;
1724         bool done = false;
1725         uint32_t vdsn;
1726         uint16_t ident;
1727         int ret;
1728         unsigned int indirections = 0;
1729         struct desc_seq_scan_data data;
1730         unsigned int i;
1731
1732         memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1733         data.size_part_descs = PART_DESC_ALLOC_STEP;
1734         data.num_part_descs = 0;
1735         data.part_descs_loc = kcalloc(data.size_part_descs,
1736                                       sizeof(*data.part_descs_loc),
1737                                       GFP_KERNEL);
1738         if (!data.part_descs_loc)
1739                 return -ENOMEM;
1740
1741         /*
1742          * Read the main descriptor sequence and find which descriptors
1743          * are in it.
1744          */
1745         for (; (!done && block <= lastblock); block++) {
1746                 bh = udf_read_tagged(sb, block, block, &ident);
1747                 if (!bh)
1748                         break;
1749
1750                 /* Process each descriptor (ISO 13346 3/8.3-8.4) */
1751                 gd = (struct generic_desc *)bh->b_data;
1752                 vdsn = le32_to_cpu(gd->volDescSeqNum);
1753                 switch (ident) {
1754                 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1755                         if (++indirections > UDF_MAX_TD_NESTING) {
1756                                 udf_err(sb, "too many Volume Descriptor "
1757                                         "Pointers (max %u supported)\n",
1758                                         UDF_MAX_TD_NESTING);
1759                                 brelse(bh);
1760                                 ret = -EIO;
1761                                 goto out;
1762                         }
1763
1764                         vdp = (struct volDescPtr *)bh->b_data;
1765                         block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
1766                         lastblock = le32_to_cpu(
1767                                 vdp->nextVolDescSeqExt.extLength) >>
1768                                 sb->s_blocksize_bits;
1769                         lastblock += block - 1;
1770                         /* For loop is going to increment 'block' again */
1771                         block--;
1772                         break;
1773                 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1774                 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1775                 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1776                 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1777                 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1778                         curr = get_volume_descriptor_record(ident, bh, &data);
1779                         if (IS_ERR(curr)) {
1780                                 brelse(bh);
1781                                 ret = PTR_ERR(curr);
1782                                 goto out;
1783                         }
1784                         /* Descriptor we don't care about? */
1785                         if (!curr)
1786                                 break;
1787                         if (vdsn >= curr->volDescSeqNum) {
1788                                 curr->volDescSeqNum = vdsn;
1789                                 curr->block = block;
1790                         }
1791                         break;
1792                 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1793                         done = true;
1794                         break;
1795                 }
1796                 brelse(bh);
1797         }
1798         /*
1799          * Now read interesting descriptors again and process them
1800          * in a suitable order
1801          */
1802         if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1803                 udf_err(sb, "Primary Volume Descriptor not found!\n");
1804                 ret = -EAGAIN;
1805                 goto out;
1806         }
1807         ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
1808         if (ret < 0)
1809                 goto out;
1810
1811         if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
1812                 ret = udf_load_logicalvol(sb,
1813                                 data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
1814                                 fileset);
1815                 if (ret < 0)
1816                         goto out;
1817         }
1818
1819         /* Now handle prevailing Partition Descriptors */
1820         for (i = 0; i < data.num_part_descs; i++) {
1821                 ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
1822                 if (ret < 0)
1823                         goto out;
1824         }
1825         ret = 0;
1826 out:
1827         kfree(data.part_descs_loc);
1828         return ret;
1829 }
1830
1831 /*
1832  * Load Volume Descriptor Sequence described by anchor in bh
1833  *
1834  * Returns <0 on error, 0 on success
1835  */
1836 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1837                              struct kernel_lb_addr *fileset)
1838 {
1839         struct anchorVolDescPtr *anchor;
1840         sector_t main_s, main_e, reserve_s, reserve_e;
1841         int ret;
1842
1843         anchor = (struct anchorVolDescPtr *)bh->b_data;
1844
1845         /* Locate the main sequence */
1846         main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1847         main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1848         main_e = main_e >> sb->s_blocksize_bits;
1849         main_e += main_s - 1;
1850
1851         /* Locate the reserve sequence */
1852         reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1853         reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1854         reserve_e = reserve_e >> sb->s_blocksize_bits;
1855         reserve_e += reserve_s - 1;
1856
1857         /* Process the main & reserve sequences */
1858         /* responsible for finding the PartitionDesc(s) */
1859         ret = udf_process_sequence(sb, main_s, main_e, fileset);
1860         if (ret != -EAGAIN)
1861                 return ret;
1862         udf_sb_free_partitions(sb);
1863         ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
1864         if (ret < 0) {
1865                 udf_sb_free_partitions(sb);
1866                 /* No sequence was OK, return -EIO */
1867                 if (ret == -EAGAIN)
1868                         ret = -EIO;
1869         }
1870         return ret;
1871 }
1872
1873 /*
1874  * Check whether there is an anchor block in the given block and
1875  * load Volume Descriptor Sequence if so.
1876  *
1877  * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
1878  * block
1879  */
1880 static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1881                                   struct kernel_lb_addr *fileset)
1882 {
1883         struct buffer_head *bh;
1884         uint16_t ident;
1885         int ret;
1886
1887         bh = udf_read_tagged(sb, block, block, &ident);
1888         if (!bh)
1889                 return -EAGAIN;
1890         if (ident != TAG_IDENT_AVDP) {
1891                 brelse(bh);
1892                 return -EAGAIN;
1893         }
1894         ret = udf_load_sequence(sb, bh, fileset);
1895         brelse(bh);
1896         return ret;
1897 }
1898
1899 /*
1900  * Search for an anchor volume descriptor pointer.
1901  *
1902  * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
1903  * of anchors.
1904  */
1905 static int udf_scan_anchors(struct super_block *sb, udf_pblk_t *lastblock,
1906                             struct kernel_lb_addr *fileset)
1907 {
1908         udf_pblk_t last[6];
1909         int i;
1910         struct udf_sb_info *sbi = UDF_SB(sb);
1911         int last_count = 0;
1912         int ret;
1913
1914         /* First try user provided anchor */
1915         if (sbi->s_anchor) {
1916                 ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
1917                 if (ret != -EAGAIN)
1918                         return ret;
1919         }
1920         /*
1921          * According to the spec, the anchor is in one of:
1922          *     block 256
1923          *     lastblock-256
1924          *     lastblock
1925          * However, if the disc isn't closed, it could also be at block 512.
1926          */
1927         ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
1928         if (ret != -EAGAIN)
1929                 return ret;
1930         /*
1931          * The trouble is which block is the last one. Drives often misreport
1932          * this so we try various possibilities.
1933          */
1934         last[last_count++] = *lastblock;
1935         if (*lastblock >= 1)
1936                 last[last_count++] = *lastblock - 1;
1937         last[last_count++] = *lastblock + 1;
1938         if (*lastblock >= 2)
1939                 last[last_count++] = *lastblock - 2;
1940         if (*lastblock >= 150)
1941                 last[last_count++] = *lastblock - 150;
1942         if (*lastblock >= 152)
1943                 last[last_count++] = *lastblock - 152;
1944
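        /*
         * Try each candidate as the last block: both the block itself and,
         * per the standard locations, 256 blocks before it. The -1, -2,
         * -150 and -152 candidates above are heuristics for drives that
         * misreport the last written block, e.g. by including run-out or
         * post-gap sectors of optical media in the reported size.
         */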
1945         for (i = 0; i < last_count; i++) {
1946                 if (last[i] >= sb_bdev_nr_blocks(sb))
1947                         continue;
1948                 ret = udf_check_anchor_block(sb, last[i], fileset);
1949                 if (ret != -EAGAIN) {
1950                         if (!ret)
1951                                 *lastblock = last[i];
1952                         return ret;
1953                 }
1954                 if (last[i] < 256)
1955                         continue;
1956                 ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
1957                 if (ret != -EAGAIN) {
1958                         if (!ret)
1959                                 *lastblock = last[i];
1960                         return ret;
1961                 }
1962         }
1963
1964         /* Finally try block 512 in case media is open */
1965         return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
1966 }
1967
1968 /*
1969  * Check Volume Structure Descriptor, find Anchor block and load Volume
1970  * Descriptor Sequence.
1971  *
1972  * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
1973  * block was not found.
1974  */
1975 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1976                         int silent, struct kernel_lb_addr *fileset)
1977 {
1978         struct udf_sb_info *sbi = UDF_SB(sb);
1979         int nsr = 0;
1980         int ret;
1981
1982         if (!sb_set_blocksize(sb, uopt->blocksize)) {
1983                 if (!silent)
1984                         udf_warn(sb, "Bad block size\n");
1985                 return -EINVAL;
1986         }
1987         sbi->s_last_block = uopt->lastblock;
1988         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_NOVRS)) {
1989                 /* Check that it is NSR02 compliant */
1990                 nsr = udf_check_vsd(sb);
1991                 if (!nsr) {
1992                         if (!silent)
1993                                 udf_warn(sb, "No VRS found\n");
1994                         return -EINVAL;
1995                 }
1996                 if (nsr == -1)
1997                         udf_debug("Failed to read sector at offset %d. "
1998                                   "Assuming open disc. Skipping validity "
1999                                   "check\n", VSD_FIRST_SECTOR_OFFSET);
2000                 if (!sbi->s_last_block)
2001                         sbi->s_last_block = udf_get_last_block(sb);
2002         } else {
2003                 udf_debug("Validity check skipped because of novrs option\n");
2004         }
2005
2006         /* Look for anchor block and load Volume Descriptor Sequence */
2007         sbi->s_anchor = uopt->anchor;
2008         ret = udf_scan_anchors(sb, &sbi->s_last_block, fileset);
2009         if (ret < 0) {
2010                 if (!silent && ret == -EAGAIN)
2011                         udf_warn(sb, "No anchor found\n");
2012                 return ret;
2013         }
2014         return 0;
2015 }
2016
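/*
 * Stamp the LVID with the current time and recompute the descriptor CRC
 * and tag checksum so that the buffer can be written out as a valid
 * descriptor.
 */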
2017 static void udf_finalize_lvid(struct logicalVolIntegrityDesc *lvid)
2018 {
2019         struct timespec64 ts;
2020
2021         ktime_get_real_ts64(&ts);
2022         udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
2023         lvid->descTag.descCRC = cpu_to_le16(
2024                 crc_itu_t(0, (char *)lvid + sizeof(struct tag),
2025                         le16_to_cpu(lvid->descTag.descCRCLength)));
2026         lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
2027 }
2028
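/*
 * Mark the logical volume as open (in use) on a read-write mount: record
 * the Linux implementation identifier, flip the integrity type from CLOSE
 * to OPEN and write the LVID out immediately. If the volume was already
 * open, it was not cleanly unmounted, so remember that it may be
 * inconsistent and never mark it clean again on this mount.
 */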
2029 static void udf_open_lvid(struct super_block *sb)
2030 {
2031         struct udf_sb_info *sbi = UDF_SB(sb);
2032         struct buffer_head *bh = sbi->s_lvid_bh;
2033         struct logicalVolIntegrityDesc *lvid;
2034         struct logicalVolIntegrityDescImpUse *lvidiu;
2035
2036         if (!bh)
2037                 return;
2038         lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2039         lvidiu = udf_sb_lvidiu(sb);
2040         if (!lvidiu)
2041                 return;
2042
2043         mutex_lock(&sbi->s_alloc_mutex);
2044         lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
2045         lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
2046         if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
2047                 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
2048         else
2049                 UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT);
2050
2051         udf_finalize_lvid(lvid);
2052         mark_buffer_dirty(bh);
2053         sbi->s_lvid_dirty = 0;
2054         mutex_unlock(&sbi->s_alloc_mutex);
2055         /* Make opening of filesystem visible on the media immediately */
2056         sync_dirty_buffer(bh);
2057 }
2058
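/*
 * Mark the logical volume as cleanly closed when a read-write mount is
 * torn down: bump the recorded minimum/maximum UDF revisions if needed,
 * set the integrity type back to CLOSE (unless the volume was found
 * inconsistent at mount time) and write the LVID out immediately.
 */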
2059 static void udf_close_lvid(struct super_block *sb)
2060 {
2061         struct udf_sb_info *sbi = UDF_SB(sb);
2062         struct buffer_head *bh = sbi->s_lvid_bh;
2063         struct logicalVolIntegrityDesc *lvid;
2064         struct logicalVolIntegrityDescImpUse *lvidiu;
2065
2066         if (!bh)
2067                 return;
2068         lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2069         lvidiu = udf_sb_lvidiu(sb);
2070         if (!lvidiu)
2071                 return;
2072
2073         mutex_lock(&sbi->s_alloc_mutex);
2074         lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
2075         lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
2076         if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
2077                 lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
2078         if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
2079                 lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
2080         if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
2081                 lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
2082         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT))
2083                 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
2084
2085         /*
2086          * We set buffer uptodate unconditionally here to avoid spurious
2087          * warnings from mark_buffer_dirty() when a previous EIO has marked
2088          * the buffer as !uptodate
2089          */
2090         set_buffer_uptodate(bh);
2091         udf_finalize_lvid(lvid);
2092         mark_buffer_dirty(bh);
2093         sbi->s_lvid_dirty = 0;
2094         mutex_unlock(&sbi->s_alloc_mutex);
2095         /* Make closing of filesystem visible on the media immediately */
2096         sync_dirty_buffer(bh);
2097 }
2098
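/*
 * Allocate the next unique ID from the Logical Volume Header Descriptor
 * stored in the LVID. The current value is returned and the stored counter
 * is advanced; when its low 32 bits wrap around to 0 we skip ahead by 16,
 * keeping the low word out of the 0-15 range that UDF reserves.
 */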
2099 u64 lvid_get_unique_id(struct super_block *sb)
2100 {
2101         struct buffer_head *bh;
2102         struct udf_sb_info *sbi = UDF_SB(sb);
2103         struct logicalVolIntegrityDesc *lvid;
2104         struct logicalVolHeaderDesc *lvhd;
2105         u64 uniqueID;
2106         u64 ret;
2107
2108         bh = sbi->s_lvid_bh;
2109         if (!bh)
2110                 return 0;
2111
2112         lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2113         lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
2114
2115         mutex_lock(&sbi->s_alloc_mutex);
2116         ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
2117         if (!(++uniqueID & 0xFFFFFFFF))
2118                 uniqueID += 16;
2119         lvhd->uniqueID = cpu_to_le64(uniqueID);
2120         udf_updated_lvid(sb);
2121         mutex_unlock(&sbi->s_alloc_mutex);
2122
2123         return ret;
2124 }
2125
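/*
 * Fill in the superblock at mount time: set up the in-core sb info, pick
 * the session, probe for the Volume Recognition Sequence (trying block
 * sizes from the device logical block size up to 4096 unless one was set
 * explicitly), load the Volume Descriptor Sequence, check the medium's
 * minimum UDF read/write revisions against what we support, locate the
 * fileset, open the LVID on read-write mounts and finally read the root
 * directory inode.
 */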
2126 static int udf_fill_super(struct super_block *sb, struct fs_context *fc)
2127 {
2128         int ret = -EINVAL;
2129         struct inode *inode = NULL;
2130         struct udf_options *uopt = fc->fs_private;
2131         struct kernel_lb_addr rootdir, fileset;
2132         struct udf_sb_info *sbi;
2133         bool lvid_open = false;
2134         int silent = fc->sb_flags & SB_SILENT;
2135
2136         sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2137         if (!sbi)
2138                 return -ENOMEM;
2139
2140         sb->s_fs_info = sbi;
2141
2142         mutex_init(&sbi->s_alloc_mutex);
2143
2144         fileset.logicalBlockNum = 0xFFFFFFFF;
2145         fileset.partitionReferenceNum = 0xFFFF;
2146
2147         sbi->s_flags = uopt->flags;
2148         sbi->s_uid = uopt->uid;
2149         sbi->s_gid = uopt->gid;
2150         sbi->s_umask = uopt->umask;
2151         sbi->s_fmode = uopt->fmode;
2152         sbi->s_dmode = uopt->dmode;
2153         sbi->s_nls_map = uopt->nls_map;
2154         uopt->nls_map = NULL;
2155         rwlock_init(&sbi->s_cred_lock);
2156
2157         if (uopt->session == 0xFFFFFFFF)
2158                 sbi->s_session = udf_get_last_session(sb);
2159         else
2160                 sbi->s_session = uopt->session;
2161
2162         udf_debug("Multi-session=%d\n", sbi->s_session);
2163
2164         /* Fill in the rest of the superblock */
2165         sb->s_op = &udf_sb_ops;
2166         sb->s_export_op = &udf_export_ops;
2167
2168         sb->s_magic = UDF_SUPER_MAGIC;
2169         sb->s_time_gran = 1000;
2170
2171         if (uopt->flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
2172                 ret = udf_load_vrs(sb, uopt, silent, &fileset);
2173         } else {
2174                 uopt->blocksize = bdev_logical_block_size(sb->s_bdev);
2175                 while (uopt->blocksize <= 4096) {
2176                         ret = udf_load_vrs(sb, uopt, silent, &fileset);
2177                         if (ret < 0) {
2178                                 if (!silent && ret != -EACCES) {
2179                                         pr_notice("Scanning with blocksize %u failed\n",
2180                                                   uopt->blocksize);
2181                                 }
2182                                 brelse(sbi->s_lvid_bh);
2183                                 sbi->s_lvid_bh = NULL;
2184                                 /*
2185                                  * EACCES is special - we want to propagate to
2186                                  * upper layers that we cannot handle RW mount.
2187                                  */
2188                                 if (ret == -EACCES)
2189                                         break;
2190                         } else
2191                                 break;
2192
2193                         uopt->blocksize <<= 1;
2194                 }
2195         }
2196         if (ret < 0) {
2197                 if (ret == -EAGAIN) {
2198                         udf_warn(sb, "No partition found (1)\n");
2199                         ret = -EINVAL;
2200                 }
2201                 goto error_out;
2202         }
2203
2204         udf_debug("Lastblock=%u\n", sbi->s_last_block);
2205
2206         if (sbi->s_lvid_bh) {
2207                 struct logicalVolIntegrityDescImpUse *lvidiu =
2208                                                         udf_sb_lvidiu(sb);
2209                 uint16_t minUDFReadRev;
2210                 uint16_t minUDFWriteRev;
2211
2212                 if (!lvidiu) {
2213                         ret = -EINVAL;
2214                         goto error_out;
2215                 }
2216                 minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2217                 minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2218                 if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2219                         udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2220                                 minUDFReadRev,
2221                                 UDF_MAX_READ_VERSION);
2222                         ret = -EINVAL;
2223                         goto error_out;
2224                 } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
2225                         if (!sb_rdonly(sb)) {
2226                                 ret = -EACCES;
2227                                 goto error_out;
2228                         }
2229                         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
2230                 }
2231
2232                 sbi->s_udfrev = minUDFWriteRev;
2233
2234                 if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
2235                         UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
2236                 if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
2237                         UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
2238         }
2239
2240         if (!sbi->s_partitions) {
2241                 udf_warn(sb, "No partition found (2)\n");
2242                 ret = -EINVAL;
2243                 goto error_out;
2244         }
2245
2246         if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
2247                         UDF_PART_FLAG_READ_ONLY) {
2248                 if (!sb_rdonly(sb)) {
2249                         ret = -EACCES;
2250                         goto error_out;
2251                 }
2252                 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
2253         }
2254
2255         ret = udf_find_fileset(sb, &fileset, &rootdir);
2256         if (ret < 0) {
2257                 udf_warn(sb, "No fileset found\n");
2258                 goto error_out;
2259         }
2260
2261         if (!silent) {
2262                 struct timestamp ts;
2263                 udf_time_to_disk_stamp(&ts, sbi->s_record_time);
2264                 udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
2265                          sbi->s_volume_ident,
2266                          le16_to_cpu(ts.year), ts.month, ts.day,
2267                          ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
2268         }
2269         if (!sb_rdonly(sb)) {
2270                 udf_open_lvid(sb);
2271                 lvid_open = true;
2272         }
2273
2274         /* Assign the root inode */
2275         /* assign inodes by physical block number */
2276         /* perhaps it's not extensible enough, but for now ... */
2277         inode = udf_iget(sb, &rootdir);
2278         if (IS_ERR(inode)) {
2279                 udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n",
2280                        rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
2281                 ret = PTR_ERR(inode);
2282                 goto error_out;
2283         }
2284
2285         /* Allocate a dentry for the root inode */
2286         sb->s_root = d_make_root(inode);
2287         if (!sb->s_root) {
2288                 udf_err(sb, "Couldn't allocate root dentry\n");
2289                 ret = -ENOMEM;
2290                 goto error_out;
2291         }
2292         sb->s_maxbytes = UDF_MAX_FILESIZE;
2293         sb->s_max_links = UDF_MAX_LINKS;
2294         return 0;
2295
2296 error_out:
2297         iput(sbi->s_vat_inode);
2298         unload_nls(uopt->nls_map);
2299         if (lvid_open)
2300                 udf_close_lvid(sb);
2301         brelse(sbi->s_lvid_bh);
2302         udf_sb_free_partitions(sb);
2303         kfree(sbi);
2304         sb->s_fs_info = NULL;
2305
2306         return ret;
2307 }
2308
2309 void _udf_err(struct super_block *sb, const char *function,
2310               const char *fmt, ...)
2311 {
2312         struct va_format vaf;
2313         va_list args;
2314
2315         va_start(args, fmt);
2316
2317         vaf.fmt = fmt;
2318         vaf.va = &args;
2319
2320         pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
2321
2322         va_end(args);
2323 }
2324
2325 void _udf_warn(struct super_block *sb, const char *function,
2326                const char *fmt, ...)
2327 {
2328         struct va_format vaf;
2329         va_list args;
2330
2331         va_start(args, fmt);
2332
2333         vaf.fmt = fmt;
2334         vaf.va = &args;
2335
2336         pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
2337
2338         va_end(args);
2339 }
2340
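/*
 * Tear down the in-core state at unmount: drop the VAT inode and NLS
 * table, mark the volume cleanly closed if it was mounted read-write, and
 * free the LVID buffer and partition maps.
 */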
2341 static void udf_put_super(struct super_block *sb)
2342 {
2343         struct udf_sb_info *sbi;
2344
2345         sbi = UDF_SB(sb);
2346
2347         iput(sbi->s_vat_inode);
2348         unload_nls(sbi->s_nls_map);
2349         if (!sb_rdonly(sb))
2350                 udf_close_lvid(sb);
2351         brelse(sbi->s_lvid_bh);
2352         udf_sb_free_partitions(sb);
2353         mutex_destroy(&sbi->s_alloc_mutex);
2354         kfree(sb->s_fs_info);
2355         sb->s_fs_info = NULL;
2356 }
2357
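/*
 * If the LVID has been updated since the last sync, refresh its timestamp
 * and checksums and mark the buffer dirty; writing it out is left to the
 * generic sync of the block device.
 */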
2358 static int udf_sync_fs(struct super_block *sb, int wait)
2359 {
2360         struct udf_sb_info *sbi = UDF_SB(sb);
2361
2362         mutex_lock(&sbi->s_alloc_mutex);
2363         if (sbi->s_lvid_dirty) {
2364                 struct buffer_head *bh = sbi->s_lvid_bh;
2365                 struct logicalVolIntegrityDesc *lvid;
2366
2367                 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2368                 udf_finalize_lvid(lvid);
2369
2370                 /*
2371                  * The block device will be synced later so we don't have to
2372                  * submit the buffer for IO here.
2373                  */
2374                 mark_buffer_dirty(bh);
2375                 sbi->s_lvid_dirty = 0;
2376         }
2377         mutex_unlock(&sbi->s_alloc_mutex);
2378
2379         return 0;
2380 }
2381
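/*
 * Report filesystem statistics. Block counts come from the partition
 * length and udf_count_free(); since UDF has no preallocated inode table,
 * free "inodes" are simply approximated by free blocks, plus the file and
 * directory counts kept in the LVID for the used ones.
 */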
2382 static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2383 {
2384         struct super_block *sb = dentry->d_sb;
2385         struct udf_sb_info *sbi = UDF_SB(sb);
2386         struct logicalVolIntegrityDescImpUse *lvidiu;
2387         u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2388
2389         lvidiu = udf_sb_lvidiu(sb);
2390         buf->f_type = UDF_SUPER_MAGIC;
2391         buf->f_bsize = sb->s_blocksize;
2392         buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
2393         buf->f_bfree = udf_count_free(sb);
2394         buf->f_bavail = buf->f_bfree;
2395         /*
2396          * Let's pretend each free block is also a free 'inode' since UDF does
2397          * not have a separate preallocated table of inodes.
2398          */
2399         buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
2400                                           le32_to_cpu(lvidiu->numDirs)) : 0)
2401                         + buf->f_bfree;
2402         buf->f_ffree = buf->f_bfree;
2403         buf->f_namelen = UDF_NAME_LEN;
2404         buf->f_fsid = u64_to_fsid(id);
2405
2406         return 0;
2407 }
2408
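/*
 * Count free blocks recorded in an unallocated space bitmap: read the
 * Space Bitmap Descriptor and then walk the bitmap, which may span several
 * blocks, counting set bits (a set bit marks a free block).
 */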
2409 static unsigned int udf_count_free_bitmap(struct super_block *sb,
2410                                           struct udf_bitmap *bitmap)
2411 {
2412         struct buffer_head *bh = NULL;
2413         unsigned int accum = 0;
2414         int index;
2415         udf_pblk_t block = 0, newblock;
2416         struct kernel_lb_addr loc;
2417         uint32_t bytes;
2418         uint8_t *ptr;
2419         uint16_t ident;
2420         struct spaceBitmapDesc *bm;
2421
2422         loc.logicalBlockNum = bitmap->s_extPosition;
2423         loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
2424         bh = udf_read_ptagged(sb, &loc, 0, &ident);
2425
2426         if (!bh) {
2427                 udf_err(sb, "udf_count_free failed\n");
2428                 goto out;
2429         } else if (ident != TAG_IDENT_SBD) {
2430                 brelse(bh);
2431                 udf_err(sb, "udf_count_free failed\n");
2432                 goto out;
2433         }
2434
2435         bm = (struct spaceBitmapDesc *)bh->b_data;
2436         bytes = le32_to_cpu(bm->numOfBytes);
2437         index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
2438         ptr = (uint8_t *)bh->b_data;
2439
2440         while (bytes > 0) {
2441                 u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
2442                 accum += bitmap_weight((const unsigned long *)(ptr + index),
2443                                         cur_bytes * 8);
2444                 bytes -= cur_bytes;
2445                 if (bytes) {
2446                         brelse(bh);
2447                         newblock = udf_get_lb_pblock(sb, &loc, ++block);
2448                         bh = sb_bread(sb, newblock);
2449                         if (!bh) {
2450                                 udf_debug("read failed\n");
2451                                 goto out;
2452                         }
2453                         index = 0;
2454                         ptr = (uint8_t *)bh->b_data;
2455                 }
2456         }
2457         brelse(bh);
2458 out:
2459         return accum;
2460 }
2461
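/*
 * Count free blocks recorded in an unallocated space table: walk the
 * extents of the table inode and sum up their lengths in blocks.
 */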
2462 static unsigned int udf_count_free_table(struct super_block *sb,
2463                                          struct inode *table)
2464 {
2465         unsigned int accum = 0;
2466         uint32_t elen;
2467         struct kernel_lb_addr eloc;
2468         struct extent_position epos;
2469
2470         mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
2471         epos.block = UDF_I(table)->i_location;
2472         epos.offset = sizeof(struct unallocSpaceEntry);
2473         epos.bh = NULL;
2474
2475         while (udf_next_aext(table, &epos, &eloc, &elen, 1) != -1)
2476                 accum += (elen >> table->i_sb->s_blocksize_bits);
2477
2478         brelse(epos.bh);
2479         mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
2480
2481         return accum;
2482 }
2483
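/*
 * Return the number of free blocks in the partition we allocate from.
 * Prefer the figure cached in the LVID free space table (0xFFFFFFFF there
 * means "not recorded"), then fall back to counting the unallocated space
 * bitmap and finally the unallocated space table. Metadata partitions are
 * accounted on their underlying physical partition; VAT-based media are
 * effectively append-only, so 0 is reported for them.
 */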
2484 static unsigned int udf_count_free(struct super_block *sb)
2485 {
2486         unsigned int accum = 0;
2487         struct udf_sb_info *sbi = UDF_SB(sb);
2488         struct udf_part_map *map;
2489         unsigned int part = sbi->s_partition;
2490         int ptype = sbi->s_partmaps[part].s_partition_type;
2491
2492         if (ptype == UDF_METADATA_MAP25) {
2493                 part = sbi->s_partmaps[part].s_type_specific.s_metadata.
2494                                                         s_phys_partition_ref;
2495         } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
2496                 /*
2497                  * Filesystems with a VAT are append-only and we cannot write to
2498                  * them. Let's just report 0 here.
2499                  */
2500                 return 0;
2501         }
2502
2503         if (sbi->s_lvid_bh) {
2504                 struct logicalVolIntegrityDesc *lvid =
2505                         (struct logicalVolIntegrityDesc *)
2506                         sbi->s_lvid_bh->b_data;
2507                 if (le32_to_cpu(lvid->numOfPartitions) > part) {
2508                         accum = le32_to_cpu(
2509                                         lvid->freeSpaceTable[part]);
2510                         if (accum == 0xFFFFFFFF)
2511                                 accum = 0;
2512                 }
2513         }
2514
2515         if (accum)
2516                 return accum;
2517
2518         map = &sbi->s_partmaps[part];
2519         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
2520                 accum += udf_count_free_bitmap(sb,
2521                                                map->s_uspace.s_bitmap);
2522         }
2523         if (accum)
2524                 return accum;
2525
2526         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
2527                 accum += udf_count_free_table(sb,
2528                                               map->s_uspace.s_table);
2529         }
2530         return accum;
2531 }
2532
2533 MODULE_AUTHOR("Ben Fennema");
2534 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
2535 MODULE_LICENSE("GPL");
2536 module_init(init_udf_fs)
2537 module_exit(exit_udf_fs)