dax: add .recovery_write dax_operation
authorJane Chu <jane.chu@oracle.com>
Fri, 22 Apr 2022 22:45:06 +0000 (16:45 -0600)
committerDan Williams <dan.j.williams@intel.com>
Mon, 16 May 2022 20:37:59 +0000 (13:37 -0700)
Introduce dax_recovery_write() operation. The function is used to
recover a dax range that contains poison. The typical use case is
when a user process receives a SIGBUS with si_code BUS_MCEERR_AR
indicating poison(s) in a dax range. In response, the user process
issues a pwrite() to the page-aligned dax range, thereby clearing
the poison and putting valid data in the range.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jane Chu <jane.chu@oracle.com>
Link: https://lore.kernel.org/r/20220422224508.440670-6-jane.chu@oracle.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
drivers/dax/super.c
drivers/md/dm-linear.c
drivers/md/dm-log-writes.c
drivers/md/dm-stripe.c
drivers/md/dm.c
drivers/nvdimm/pmem.c
fs/dax.c
include/linux/dax.h
include/linux/device-mapper.h

index 5405eb553430fd80546ff4a3e05e87587e7185b4..50a08b2ec24749cf8525905822f48e296981e49b 100644 (file)
@@ -195,6 +195,15 @@ int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
 }
 EXPORT_SYMBOL_GPL(dax_zero_page_range);
 
+size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+               void *addr, size_t bytes, struct iov_iter *iter)
+{
+       if (!dax_dev->ops->recovery_write)
+               return 0;
+       return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter);
+}
+EXPORT_SYMBOL_GPL(dax_recovery_write);
+
 #ifdef CONFIG_ARCH_HAS_PMEM_API
 void arch_wb_cache_pmem(void *addr, size_t size);
 void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
index 13e263299c9c97112375dc2b7a344937a1f5da74..cdf48bc8c5b05ff6c02b31101ac87ba57691dd51 100644 (file)
@@ -188,9 +188,18 @@ static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
        return dax_zero_page_range(dax_dev, pgoff, nr_pages);
 }
 
+static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
+               void *addr, size_t bytes, struct iov_iter *i)
+{
+       struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
+       return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
+}
+
 #else
 #define linear_dax_direct_access NULL
 #define linear_dax_zero_page_range NULL
+#define linear_dax_recovery_write NULL
 #endif
 
 static struct target_type linear_target = {
@@ -208,6 +217,7 @@ static struct target_type linear_target = {
        .iterate_devices = linear_iterate_devices,
        .direct_access = linear_dax_direct_access,
        .dax_zero_page_range = linear_dax_zero_page_range,
+       .dax_recovery_write = linear_dax_recovery_write,
 };
 
 int __init dm_linear_init(void)
index 06bdbed65eb149cd0518f2a2a82febfc08847033..22739dccdd173b6b394b97b70f190011cb22c3fb 100644 (file)
@@ -905,9 +905,18 @@ static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
        return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT);
 }
 
+static size_t log_writes_dax_recovery_write(struct dm_target *ti,
+               pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+       struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
+
+       return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
+}
+
 #else
 #define log_writes_dax_direct_access NULL
 #define log_writes_dax_zero_page_range NULL
+#define log_writes_dax_recovery_write NULL
 #endif
 
 static struct target_type log_writes_target = {
@@ -925,6 +934,7 @@ static struct target_type log_writes_target = {
        .io_hints = log_writes_io_hints,
        .direct_access = log_writes_dax_direct_access,
        .dax_zero_page_range = log_writes_dax_zero_page_range,
+       .dax_recovery_write = log_writes_dax_recovery_write,
 };
 
 static int __init dm_log_writes_init(void)
index 77d72900e997b21f3dda2c456af70c45341034f3..baa085cc67bdefbe4317b8840f3eaca136a344da 100644 (file)
@@ -331,9 +331,18 @@ static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
        return dax_zero_page_range(dax_dev, pgoff, nr_pages);
 }
 
+static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
+               void *addr, size_t bytes, struct iov_iter *i)
+{
+       struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
+
+       return dax_recovery_write(dax_dev, pgoff, addr, bytes, i);
+}
+
 #else
 #define stripe_dax_direct_access NULL
 #define stripe_dax_zero_page_range NULL
+#define stripe_dax_recovery_write NULL
 #endif
 
 /*
@@ -470,6 +479,7 @@ static struct target_type stripe_target = {
        .io_hints = stripe_io_hints,
        .direct_access = stripe_dax_direct_access,
        .dax_zero_page_range = stripe_dax_zero_page_range,
+       .dax_recovery_write = stripe_dax_recovery_write,
 };
 
 int __init dm_stripe_init(void)
index 9c452641c3d5ed613066fe0131f1e1dd1f03e129..3fe76ab20069c048d4ef3da44da71b60e2f03ef2 100644 (file)
@@ -1147,6 +1147,25 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
        return ret;
 }
 
+static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+               void *addr, size_t bytes, struct iov_iter *i)
+{
+       struct mapped_device *md = dax_get_private(dax_dev);
+       sector_t sector = pgoff * PAGE_SECTORS;
+       struct dm_target *ti;
+       int srcu_idx;
+       long ret = 0;
+
+       ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+       if (!ti || !ti->type->dax_recovery_write)
+               goto out;
+
+       ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
+out:
+       dm_put_live_table(md, srcu_idx);
+       return ret;
+}
+
 /*
  * A target may call dm_accept_partial_bio only from the map routine.  It is
  * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
@@ -3147,6 +3166,7 @@ static const struct block_device_operations dm_rq_blk_dops = {
 static const struct dax_operations dm_dax_ops = {
        .direct_access = dm_dax_direct_access,
        .zero_page_range = dm_dax_zero_page_range,
+       .recovery_write = dm_dax_recovery_write,
 };
 
 /*
index 47f34c50f944114887168b2361fc9c9f7ea69e11..e5e288135af7af26a86808b80960c9f75361a78c 100644 (file)
@@ -287,9 +287,16 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
        return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
 }
 
+static size_t pmem_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+               void *addr, size_t bytes, struct iov_iter *i)
+{
+       return 0;
+}
+
 static const struct dax_operations pmem_dax_ops = {
        .direct_access = pmem_dax_direct_access,
        .zero_page_range = pmem_dax_zero_page_range,
+       .recovery_write = pmem_recovery_write,
 };
 
 static ssize_t write_cache_show(struct device *dev,
index ef310310710461c6e82c6f054c12fc41670f0f9d..a1e4b45cbf55b16ace2b7eb77b7ace871a48e2aa 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1240,6 +1240,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
                const size_t size = ALIGN(length + offset, PAGE_SIZE);
                pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
                ssize_t map_len;
+               bool recovery = false;
                void *kaddr;
 
                if (fatal_signal_pending(current)) {
@@ -1249,6 +1250,13 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
 
                map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
                                DAX_ACCESS, &kaddr, NULL);
+               if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
+                       map_len = dax_direct_access(dax_dev, pgoff,
+                                       PHYS_PFN(size), DAX_RECOVERY_WRITE,
+                                       &kaddr, NULL);
+                       if (map_len > 0)
+                               recovery = true;
+               }
                if (map_len < 0) {
                        ret = map_len;
                        break;
@@ -1260,7 +1268,10 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
                if (map_len > end - pos)
                        map_len = end - pos;
 
-               if (iov_iter_rw(iter) == WRITE)
+               if (recovery)
+                       xfer = dax_recovery_write(dax_dev, pgoff, kaddr,
+                                       map_len, iter);
+               else if (iov_iter_rw(iter) == WRITE)
                        xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
                                        map_len, iter);
                else
index 3f1339bce3c0c553f10f13022f4d841052a4f65b..e7b81634c52add9c673da3597703a04f0f055701 100644 (file)
@@ -35,6 +35,12 @@ struct dax_operations {
                        sector_t, sector_t);
        /* zero_page_range: required operation. Zero page range   */
        int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
+       /*
+        * recovery_write: recover a poisoned range by DAX device driver
+        * capable of clearing poison.
+        */
+       size_t (*recovery_write)(struct dax_device *dax_dev, pgoff_t pgoff,
+                       void *addr, size_t bytes, struct iov_iter *iter);
 };
 
 #if IS_ENABLED(CONFIG_DAX)
@@ -45,6 +51,8 @@ void dax_write_cache(struct dax_device *dax_dev, bool wc);
 bool dax_write_cache_enabled(struct dax_device *dax_dev);
 bool dax_synchronous(struct dax_device *dax_dev);
 void set_dax_synchronous(struct dax_device *dax_dev);
+size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+               void *addr, size_t bytes, struct iov_iter *i);
 /*
  * Check if given mapping is supported by the file / underlying device.
  */
@@ -92,6 +100,11 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
 {
        return !(vma->vm_flags & VM_SYNC);
 }
+static inline size_t dax_recovery_write(struct dax_device *dax_dev,
+               pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
+{
+       return 0;
+}
 #endif
 
 void set_dax_nocache(struct dax_device *dax_dev);
index acdedda0d12bc51c9b6909176363a35218bde908..47a01c7cffdf3694ebcfc2653cf468e01a8698fc 100644 (file)
@@ -152,6 +152,14 @@ typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
 typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
                size_t nr_pages);
 
+/*
+ * Returns:
+ * != 0 : number of bytes transferred
+ * 0    : recovery write failed
+ */
+typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
+               void *addr, size_t bytes, struct iov_iter *i);
+
 void dm_error(const char *message);
 
 struct dm_dev {
@@ -201,6 +209,7 @@ struct target_type {
        dm_io_hints_fn io_hints;
        dm_dax_direct_access_fn direct_access;
        dm_dax_zero_page_range_fn dax_zero_page_range;
+       dm_dax_recovery_write_fn dax_recovery_write;
 
        /* For internal device-mapper use. */
        struct list_head list;