Merge branch 'stable/for-linus-5.12' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 26 Feb 2021 21:59:32 +0000 (13:59 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 26 Feb 2021 21:59:32 +0000 (13:59 -0800)
Pull swiotlb updates from Konrad Rzeszutek Wilk:
 "Two memory encryption related patches (SWIOTLB is enabled by default
  for AMD-SEV):

   - Add support for alignment so that NVMe can work properly

   - Keep track of the requested DMA buffer length, as underlying
     hardware devices can otherwise trip SWIOTLB into bouncing too much
     and crashing the kernel

  And a tiny fix to use proper APIs in drivers"

* 'stable/for-linus-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb: Validate bounce size in the sync/unmap path
  nvme-pci: set min_align_mask
  swiotlb: respect min_align_mask
  swiotlb: don't modify orig_addr in swiotlb_tbl_sync_single
  swiotlb: refactor swiotlb_tbl_map_single
  swiotlb: clean up swiotlb_tbl_unmap_single
  swiotlb: factor out a nr_slots helper
  swiotlb: factor out an io_tlb_offset helper
  swiotlb: add a IO_TLB_SIZE define
  driver core: add a min_align_mask field to struct device_dma_parameters
  sdhci: stop poking into swiotlb internals

drivers/nvme/host/pci.c
include/linux/device.h
include/linux/dma-mapping.h
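Before the diffs, a note on what the new min_align_mask guarantee means in
practice. The snippet below is an illustrative sketch, not verbatim swiotlb
code; check_min_align(), orig_addr, and tlb_addr are hypothetical names
standing for a buffer's original address and its bounce-buffer counterpart:

    /* With "swiotlb: respect min_align_mask" applied, a bounced buffer
     * keeps the same offset modulo (min_align_mask + 1) as the original
     * buffer, so a device that derives an offset from the DMA address
     * (NVMe PRPs work in NVME_CTRL_PAGE_SIZE units) still computes the
     * offset it expects. */
    static void check_min_align(struct device *dev,
                                phys_addr_t orig_addr, dma_addr_t tlb_addr)
    {
            unsigned int mask = dma_get_min_align_mask(dev);

            WARN_ON((orig_addr & mask) != (tlb_addr & mask));
    }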

diff --combined drivers/nvme/host/pci.c
index 7b6632c00ffd01072d5b3aff63f175d57278938a,83303e4e301924c28d65dae01afbd387b66e10c8..38b0d694dfc9102b4c40d6cdec00e2ed8018dfea
@@@ -1357,7 -1357,7 +1357,7 @@@ static enum blk_eh_timer_return nvme_ti
        }
  
        abort_req->end_io_data = NULL;
 -      blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
 +      blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio);
  
        /*
         * The aborted req will be completed on receiving the abort req.
@@@ -2281,7 -2281,7 +2281,7 @@@ static int nvme_delete_queue(struct nvm
        req->end_io_data = nvmeq;
  
        init_completion(&nvmeq->delete_done);
 -      blk_execute_rq_nowait(q, NULL, req, false,
 +      blk_execute_rq_nowait(NULL, req, false,
                        opcode == nvme_admin_delete_cq ?
                                nvme_del_cq_end : nvme_del_queue_end);
        return 0;
@@@ -2362,16 -2362,13 +2362,16 @@@ static int nvme_pci_enable(struct nvme_
  {
        int result = -ENOMEM;
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 +      int dma_address_bits = 64;
  
        if (pci_enable_device_mem(pdev))
                return result;
  
        pci_set_master(pdev);
  
 -      if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)))
 +      if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
 +              dma_address_bits = 48;
 +      if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
                goto disable;
  
        if (readl(dev->bar + NVME_REG_CSTS) == -1) {
@@@ -2632,6 -2629,7 +2632,7 @@@ static void nvme_reset_work(struct work
         * Don't limit the IOMMU merged segment size.
         */
        dma_set_max_seg_size(dev->dev, 0xffffffff);
+       dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
  
        mutex_unlock(&dev->shutdown_lock);
  
@@@ -3245,8 -3243,6 +3246,8 @@@ static const struct pci_device_id nvme_
        { PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 +      { PCI_DEVICE(0x1987, 0x5016),   /* Phison E16 */
 +              .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x1d1d, 0x1f1f),   /* LighNVM qemu device */
                .driver_data = NVME_QUIRK_LIGHTNVM, },
        { PCI_DEVICE(0x1d1d, 0x2807),   /* CNEX WL */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x1d97, 0x2263),   /* SPCC */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
 +      { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
 +              .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 +      { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
 +              .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 +      { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
 +              .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 +      { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
 +              .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 +      { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
 +              .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 +      { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
 +              .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
 +      { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
 +              .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
                .driver_data = NVME_QUIRK_SINGLE_VECTOR },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
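For reference, the effect of NVME_QUIRK_DMA_ADDRESS_BITS_48 in
nvme_pci_enable() above is simply a narrower DMA mask for these Amazon
controllers; the worked values follow from the DMA_BIT_MASK() definition
in include/linux/dma-mapping.h:

    /* #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) */
    DMA_BIT_MASK(64) == 0xffffffffffffffffULL   /* default mask */
    DMA_BIT_MASK(48) == 0x0000ffffffffffffULL   /* with the quirk */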
diff --combined include/linux/device.h
index 7619a84f8ce4abef94579bb1881bb95b8c08e834,7960bf516dd7fead084a0cdf52a5f6fb58f04d2a..ba660731bd258279d8c9b5513a90015b22fad3e0
@@@ -291,6 -291,7 +291,7 @@@ struct device_dma_parameters 
         * sg limitations.
         */
        unsigned int max_segment_size;
+       unsigned int min_align_mask;
        unsigned long segment_boundary_mask;
  };
  
@@@ -323,7 -324,6 +324,7 @@@ enum device_link_state 
   * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
   * MANAGED: The core tracks presence of supplier/consumer drivers (internal).
   * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
 + * INFERRED: Inferred from data (eg: firmware) and not from driver actions.
   */
  #define DL_FLAG_STATELESS             BIT(0)
  #define DL_FLAG_AUTOREMOVE_CONSUMER   BIT(1)
  #define DL_FLAG_AUTOPROBE_CONSUMER    BIT(5)
  #define DL_FLAG_MANAGED                       BIT(6)
  #define DL_FLAG_SYNC_STATE_ONLY               BIT(7)
 +#define DL_FLAG_INFERRED              BIT(8)
  
  /**
   * enum dl_dev_state - Device driver presence tracking information.
diff --combined include/linux/dma-mapping.h
index fbfa3f5abd9498183e19ad6ab1b809d2aca9d62a,9c26225754e71983b62a44dc20fb0da852c3ab9f..2a984cb4d1e037645c8e599223508f1b6d663abe
@@@ -263,19 -263,10 +263,19 @@@ struct page *dma_alloc_pages(struct dev
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
  void dma_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir);
 -void *dma_alloc_noncoherent(struct device *dev, size_t size,
 -              dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
 -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 -              dma_addr_t dma_handle, enum dma_data_direction dir);
 +
 +static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
 +              dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
 +{
 +      struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
 +      return page ? page_address(page) : NULL;
 +}
 +
 +static inline void dma_free_noncoherent(struct device *dev, size_t size,
 +              void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
 +{
 +      dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
 +}
  
  static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
@@@ -509,6 -500,22 +509,22 @@@ static inline int dma_set_seg_boundary(
        return -EIO;
  }
  
+ static inline unsigned int dma_get_min_align_mask(struct device *dev)
+ {
+       if (dev->dma_parms)
+               return dev->dma_parms->min_align_mask;
+       return 0;
+ }
+ static inline int dma_set_min_align_mask(struct device *dev,
+               unsigned int min_align_mask)
+ {
+       if (WARN_ON_ONCE(!dev->dma_parms))
+               return -EIO;
+       dev->dma_parms->min_align_mask = min_align_mask;
+       return 0;
+ }
  static inline int dma_get_cache_alignment(void)
  {
  #ifdef ARCH_DMA_MINALIGN
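
For readers wiring this up in a driver, a minimal sketch of the intended
usage (illustrative only: my_probe() and MY_HW_PAGE_SHIFT are hypothetical,
and dev->dma_parms must already be allocated, as the PCI core does for PCI
devices, or dma_set_min_align_mask() WARNs and returns -EIO; the real
adopter in this series is nvme-pci, which passes NVME_CTRL_PAGE_SIZE - 1):

    #define MY_HW_PAGE_SHIFT 12     /* hypothetical hardware granularity */

    static int my_probe(struct device *dev)
    {
            /* Ask the DMA layer (including swiotlb bounce buffering) to
             * preserve the low MY_HW_PAGE_SHIFT address bits, so any
             * offset the hardware derives from the DMA address stays
             * valid even when the buffer is bounced. */
            return dma_set_min_align_mask(dev, (1u << MY_HW_PAGE_SHIFT) - 1);
    }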