Merge tag 'exfat-for-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/linki...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 25 May 2022 01:30:27 +0000 (18:30 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 25 May 2022 01:30:27 +0000 (18:30 -0700)
Pull exfat updates from Namjae Jeon:

 - fix referencing wrong parent directory information during rename

 - introduce a sys_tz mount option to use system timezone

 - improve performance while zeroing a cluster with dirsync mount option

 - fix slab-out-of-bounds in exfat_clear_bitmap() reported from syzbot

* tag 'exfat-for-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/linkinjeon/exfat:
  exfat: check if cluster num is valid
  exfat: reduce block requests when zeroing a cluster
  block: add sync_blockdev_range()
  exfat: introduce mount option 'sys_tz'
  exfat: fix referencing wrong parent directory information after renaming

1  2 
block/bdev.c
fs/exfat/super.c
include/linux/blkdev.h

diff --combined block/bdev.c
index 7bf88e591aaf3ba40d60fa9d68443785f2e95730,8b549b071bd64637da6594c46e30532a8f5439ca..5fe06c1f2def4196f736e183846b0a093ccaa73f
@@@ -200,6 -200,13 +200,13 @@@ int sync_blockdev(struct block_device *
  }
  EXPORT_SYMBOL(sync_blockdev);
  
+ int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
+ {
+       return filemap_write_and_wait_range(bdev->bd_inode->i_mapping,
+                       lstart, lend);
+ }
+ EXPORT_SYMBOL(sync_blockdev_range);
  /*
   * Write out and wait upon all dirty data associated with this
   * device.   Filesystem data as well as the underlying block
@@@ -673,17 -680,17 +680,17 @@@ static int blkdev_get_whole(struct bloc
                }
        }
  
 -      if (!bdev->bd_openers)
 +      if (!atomic_read(&bdev->bd_openers))
                set_init_blocksize(bdev);
        if (test_bit(GD_NEED_PART_SCAN, &disk->state))
                bdev_disk_changed(disk, false);
 -      bdev->bd_openers++;
 +      atomic_inc(&bdev->bd_openers);
        return 0;
  }
  
  static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
  {
 -      if (!--bdev->bd_openers)
 +      if (atomic_dec_and_test(&bdev->bd_openers))
                blkdev_flush_mapping(bdev);
        if (bdev->bd_disk->fops->release)
                bdev->bd_disk->fops->release(bdev->bd_disk, mode);
@@@ -694,7 -701,7 +701,7 @@@ static int blkdev_get_part(struct block
        struct gendisk *disk = part->bd_disk;
        int ret;
  
 -      if (part->bd_openers)
 +      if (atomic_read(&part->bd_openers))
                goto done;
  
        ret = blkdev_get_whole(bdev_whole(part), mode);
        disk->open_partitions++;
        set_init_blocksize(part);
  done:
 -      part->bd_openers++;
 +      atomic_inc(&part->bd_openers);
        return 0;
  
  out_blkdev_put:
@@@ -720,7 -727,7 +727,7 @@@ static void blkdev_put_part(struct bloc
  {
        struct block_device *whole = bdev_whole(part);
  
 -      if (--part->bd_openers)
 +      if (!atomic_dec_and_test(&part->bd_openers))
                return;
        blkdev_flush_mapping(part);
        whole->bd_disk->open_partitions--;
@@@ -899,7 -906,7 +906,7 @@@ void blkdev_put(struct block_device *bd
         * of the world and we want to avoid long (could be several minute)
         * syncs while holding the mutex.
         */
 -      if (bdev->bd_openers == 1)
 +      if (atomic_read(&bdev->bd_openers) == 1)
                sync_blockdev(bdev);
  
        mutex_lock(&disk->open_mutex);
@@@ -1044,7 -1051,7 +1051,7 @@@ void sync_bdevs(bool wait
                bdev = I_BDEV(inode);
  
                mutex_lock(&bdev->bd_disk->open_mutex);
 -              if (!bdev->bd_openers) {
 +              if (!atomic_read(&bdev->bd_openers)) {
                        ; /* skip */
                } else if (wait) {
                        /*
diff --combined fs/exfat/super.c
index be0788ecaf20e894bf00fb7373e3e83f4a09a41e,3e0f67b2103e20d65d0d7e26f93800ed9e77d9f3..6a4dfe9f31eeeb5bddeeb75d38421548e70f7153
@@@ -170,7 -170,9 +170,9 @@@ static int exfat_show_options(struct se
                seq_puts(m, ",discard");
        if (opts->keep_last_dots)
                seq_puts(m, ",keep_last_dots");
-       if (opts->time_offset)
+       if (opts->sys_tz)
+               seq_puts(m, ",sys_tz");
+       else if (opts->time_offset)
                seq_printf(m, ",time_offset=%d", opts->time_offset);
        return 0;
  }
@@@ -214,6 -216,7 +216,7 @@@ enum 
        Opt_errors,
        Opt_discard,
        Opt_keep_last_dots,
+       Opt_sys_tz,
        Opt_time_offset,
  
        /* Deprecated options */
@@@ -241,6 -244,7 +244,7 @@@ static const struct fs_parameter_spec e
        fsparam_enum("errors",                  Opt_errors, exfat_param_enums),
        fsparam_flag("discard",                 Opt_discard),
        fsparam_flag("keep_last_dots",          Opt_keep_last_dots),
+       fsparam_flag("sys_tz",                  Opt_sys_tz),
        fsparam_s32("time_offset",              Opt_time_offset),
        __fsparam(NULL, "utf8",                 Opt_utf8, fs_param_deprecated,
                  NULL),
@@@ -298,6 -302,9 +302,9 @@@ static int exfat_parse_param(struct fs_
        case Opt_keep_last_dots:
                opts->keep_last_dots = 1;
                break;
+       case Opt_sys_tz:
+               opts->sys_tz = 1;
+               break;
        case Opt_time_offset:
                /*
                 * Make the limit 24 just in case someone invents something
@@@ -627,9 -634,13 +634,9 @@@ static int exfat_fill_super(struct supe
        if (opts->allow_utime == (unsigned short)-1)
                opts->allow_utime = ~opts->fs_dmask & 0022;
  
 -      if (opts->discard) {
 -              struct request_queue *q = bdev_get_queue(sb->s_bdev);
 -
 -              if (!blk_queue_discard(q)) {
 -                      exfat_warn(sb, "mounting with \"discard\" option, but the device does not support discard");
 -                      opts->discard = 0;
 -              }
 +      if (opts->discard && !bdev_max_discard_sectors(sb->s_bdev)) {
 +              exfat_warn(sb, "mounting with \"discard\" option, but the device does not support discard");
 +              opts->discard = 0;
        }
  
        sb->s_flags |= SB_NODIRATIME;
diff --combined include/linux/blkdev.h
index 5bdf2ac9142c95a8d66d4fbd75eba47acd77142b,331cc6918ee9b1d5599f5cb4a14def43fdccb38c..1b24c1fb3bb1ee9f1e06f0d049d1947994a86530
@@@ -176,21 -176,6 +176,21 @@@ static inline bool disk_live(struct gen
        return !inode_unhashed(disk->part0->bd_inode);
  }
  
 +/**
 + * disk_openers - returns how many openers are there for a disk
 + * @disk: disk to check
 + *
 + * This returns the number of openers for a disk.  Note that this value is only
 + * stable if disk->open_mutex is held.
 + *
 + * Note: Due to a quirk in the block layer open code, each open partition is
 + * only counted once even if there are multiple openers.
 + */
 +static inline unsigned int disk_openers(struct gendisk *disk)
 +{
 +      return atomic_read(&disk->part0->bd_openers);
 +}
 +
  /*
   * The gendisk is refcounted by the part0 block_device, and the bd_device
   * therein is also used for device model presentation in sysfs.
@@@ -263,7 -248,6 +263,7 @@@ struct queue_limits 
        unsigned int            io_opt;
        unsigned int            max_discard_sectors;
        unsigned int            max_hw_discard_sectors;
 +      unsigned int            max_secure_erase_sectors;
        unsigned int            max_write_zeroes_sectors;
        unsigned int            max_zone_append_sectors;
        unsigned int            discard_granularity;
@@@ -556,8 -540,10 +556,8 @@@ struct request_queue 
  #define QUEUE_FLAG_NONROT     6       /* non-rotational device (SSD) */
  #define QUEUE_FLAG_VIRT               QUEUE_FLAG_NONROT /* paravirt device */
  #define QUEUE_FLAG_IO_STAT    7       /* do disk/partitions IO accounting */
 -#define QUEUE_FLAG_DISCARD    8       /* supports DISCARD */
  #define QUEUE_FLAG_NOXMERGES  9       /* No extended merges */
  #define QUEUE_FLAG_ADD_RANDOM 10      /* Contributes to random pool */
 -#define QUEUE_FLAG_SECERASE   11      /* supports secure erase */
  #define QUEUE_FLAG_SAME_FORCE 12      /* force complete on same CPU */
  #define QUEUE_FLAG_DEAD               13      /* queue tear-down finished */
  #define QUEUE_FLAG_INIT_DONE  14      /* queue is initialized */
@@@ -596,8 -582,11 +596,8 @@@ bool blk_queue_flag_test_and_set(unsign
        test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
  #define blk_queue_io_stat(q)  test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
  #define blk_queue_add_random(q)       test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
 -#define blk_queue_discard(q)  test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
  #define blk_queue_zone_resetall(q)    \
        test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
 -#define blk_queue_secure_erase(q) \
 -      (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
  #define blk_queue_dax(q)      test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
  #define blk_queue_pci_p2pdma(q)       \
        test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
                             REQ_FAILFAST_DRIVER))
  #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
  #define blk_queue_pm_only(q)  atomic_read(&(q)->pm_only)
 -#define blk_queue_fua(q)      test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
  #define blk_queue_registered(q)       test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
  #define blk_queue_nowait(q)   test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
  
@@@ -960,8 -950,6 +960,8 @@@ extern void blk_queue_chunk_sectors(str
  extern void blk_queue_max_segments(struct request_queue *, unsigned short);
  extern void blk_queue_max_discard_segments(struct request_queue *,
                unsigned short);
 +void blk_queue_max_secure_erase_sectors(struct request_queue *q,
 +              unsigned int max_sectors);
  extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
  extern void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors);
@@@ -1102,12 -1090,13 +1102,12 @@@ static inline long nr_blockdev_pages(vo
  
  extern void blk_io_schedule(void);
  
 -#define BLKDEV_DISCARD_SECURE (1 << 0)        /* issue a secure erase */
 -
 -extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 -              sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 -extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 -              sector_t nr_sects, gfp_t gfp_mask, int flags,
 -              struct bio **biop);
 +int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 +              sector_t nr_sects, gfp_t gfp_mask);
 +int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 +              sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
 +int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
 +              sector_t nr_sects, gfp_t gfp);
  
  #define BLKDEV_ZERO_NOUNMAP   (1 << 0)  /* do not free blocks */
  #define BLKDEV_ZERO_NOFALLBACK        (1 << 1)  /* don't write explicit zeroes */
@@@ -1126,7 -1115,7 +1126,7 @@@ static inline int sb_issue_discard(stru
                                              SECTOR_SHIFT),
                                    nr_blocks << (sb->s_blocksize_bits -
                                                  SECTOR_SHIFT),
 -                                  gfp_mask, flags);
 +                                  gfp_mask);
  }
  static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
                sector_t nr_blocks, gfp_t gfp_mask)
@@@ -1200,12 -1189,6 +1200,12 @@@ static inline unsigned int queue_max_zo
        return min(l->max_zone_append_sectors, l->max_sectors);
  }
  
 +static inline unsigned int
 +bdev_max_zone_append_sectors(struct block_device *bdev)
 +{
 +      return queue_max_zone_append_sectors(bdev_get_queue(bdev));
 +}
 +
  static inline unsigned queue_logical_block_size(const struct request_queue *q)
  {
        int retval = 512;
@@@ -1263,54 -1246,84 +1263,54 @@@ bdev_zone_write_granularity(struct bloc
        return queue_zone_write_granularity(bdev_get_queue(bdev));
  }
  
 -static inline int queue_alignment_offset(const struct request_queue *q)
 -{
 -      if (q->limits.misaligned)
 -              return -1;
 +int bdev_alignment_offset(struct block_device *bdev);
 +unsigned int bdev_discard_alignment(struct block_device *bdev);
  
 -      return q->limits.alignment_offset;
 +static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
 +{
 +      return bdev_get_queue(bdev)->limits.max_discard_sectors;
  }
  
 -static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
 +static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
  {
 -      unsigned int granularity = max(lim->physical_block_size, lim->io_min);
 -      unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
 -              << SECTOR_SHIFT;
 +      return bdev_get_queue(bdev)->limits.discard_granularity;
 +}
  
 -      return (granularity + lim->alignment_offset - alignment) % granularity;
 +static inline unsigned int
 +bdev_max_secure_erase_sectors(struct block_device *bdev)
 +{
 +      return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
  }
  
 -static inline int bdev_alignment_offset(struct block_device *bdev)
 +static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
  {
        struct request_queue *q = bdev_get_queue(bdev);
  
 -      if (q->limits.misaligned)
 -              return -1;
 -      if (bdev_is_partition(bdev))
 -              return queue_limit_alignment_offset(&q->limits,
 -                              bdev->bd_start_sect);
 -      return q->limits.alignment_offset;
 +      if (q)
 +              return q->limits.max_write_zeroes_sectors;
 +
 +      return 0;
  }
  
 -static inline int queue_discard_alignment(const struct request_queue *q)
 +static inline bool bdev_nonrot(struct block_device *bdev)
  {
 -      if (q->limits.discard_misaligned)
 -              return -1;
 -
 -      return q->limits.discard_alignment;
 +      return blk_queue_nonrot(bdev_get_queue(bdev));
  }
  
 -static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
 +static inline bool bdev_stable_writes(struct block_device *bdev)
  {
 -      unsigned int alignment, granularity, offset;
 -
 -      if (!lim->max_discard_sectors)
 -              return 0;
 -
 -      /* Why are these in bytes, not sectors? */
 -      alignment = lim->discard_alignment >> SECTOR_SHIFT;
 -      granularity = lim->discard_granularity >> SECTOR_SHIFT;
 -      if (!granularity)
 -              return 0;
 -
 -      /* Offset of the partition start in 'granularity' sectors */
 -      offset = sector_div(sector, granularity);
 -
 -      /* And why do we do this modulus *again* in blkdev_issue_discard()? */
 -      offset = (granularity + alignment - offset) % granularity;
 -
 -      /* Turn it back into bytes, gaah */
 -      return offset << SECTOR_SHIFT;
 +      return test_bit(QUEUE_FLAG_STABLE_WRITES,
 +                      &bdev_get_queue(bdev)->queue_flags);
  }
  
 -static inline int bdev_discard_alignment(struct block_device *bdev)
 +static inline bool bdev_write_cache(struct block_device *bdev)
  {
 -      struct request_queue *q = bdev_get_queue(bdev);
 -
 -      if (bdev_is_partition(bdev))
 -              return queue_limit_discard_alignment(&q->limits,
 -                              bdev->bd_start_sect);
 -      return q->limits.discard_alignment;
 +      return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
  }
  
 -static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
 +static inline bool bdev_fua(struct block_device *bdev)
  {
 -      struct request_queue *q = bdev_get_queue(bdev);
 -
 -      if (q)
 -              return q->limits.max_write_zeroes_sectors;
 -
 -      return 0;
 +      return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
  }
  
  static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
@@@ -1478,10 -1491,9 +1478,10 @@@ static inline void blk_wake_io_task(str
                wake_up_process(waiter);
  }
  
 -unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
 -              unsigned int op);
 -void disk_end_io_acct(struct gendisk *disk, unsigned int op,
 +unsigned long bdev_start_io_acct(struct block_device *bdev,
 +                               unsigned int sectors, unsigned int op,
 +                               unsigned long start_time);
 +void bdev_end_io_acct(struct block_device *bdev, unsigned int op,
                unsigned long start_time);
  
  void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
@@@ -1535,6 -1547,7 +1535,7 @@@ int truncate_bdev_range(struct block_de
  #ifdef CONFIG_BLOCK
  void invalidate_bdev(struct block_device *bdev);
  int sync_blockdev(struct block_device *bdev);
+ int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
  int sync_blockdev_nowait(struct block_device *bdev);
  void sync_bdevs(bool wait);
  void printk_all_partitions(void);