Merge branch 'for-4.18/dax' into libnvdimm-for-next
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 9d714926ecf525e90e6f15892da8c931cdb4151c..bf2dd2a4a5e60050f092ee3c2f14b01b76781b53 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -164,11 +164,6 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
        return rc;
 }
 
-/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
-#ifndef REQ_FLUSH
-#define REQ_FLUSH REQ_PREFLUSH
-#endif
-
 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 {
        blk_status_t rc = 0;
@@ -179,7 +174,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
        struct pmem_device *pmem = q->queuedata;
        struct nd_region *nd_region = to_region(pmem);
 
-       if (bio->bi_opf & REQ_FLUSH)
+       if (bio->bi_opf & REQ_PREFLUSH)
                nvdimm_flush(nd_region);
 
        do_acct = nd_iostat_start(bio, &start);
@@ -294,12 +289,33 @@ static void pmem_release_disk(void *__pmem)
        put_disk(pmem->disk);
 }
 
+static void pmem_release_pgmap_ops(void *__pgmap)
+{
+       dev_pagemap_put_ops();
+}
+
+static void fsdax_pagefree(struct page *page, void *data)
+{
+       wake_up_var(&page->_refcount);
+}
+
+static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
+{
+       dev_pagemap_get_ops();
+       if (devm_add_action_or_reset(dev, pmem_release_pgmap_ops, pgmap))
+               return -ENOMEM;
+       pgmap->type = MEMORY_DEVICE_FS_DAX;
+       pgmap->page_free = fsdax_pagefree;
+
+       return 0;
+}
+
 static int pmem_attach_disk(struct device *dev,
                struct nd_namespace_common *ndns)
 {
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
-       int nid = dev_to_node(dev), fua, wbc;
+       int nid = dev_to_node(dev), fua;
        struct resource *res = &nsio->res;
        struct resource bb_res;
        struct nd_pfn *nd_pfn = NULL;
@@ -335,7 +351,6 @@ static int pmem_attach_disk(struct device *dev,
                dev_warn(dev, "unable to guarantee persistence of writes\n");
                fua = 0;
        }
-       wbc = nvdimm_has_cache(nd_region);
 
        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(&ndns->dev))) {
@@ -353,6 +368,8 @@ static int pmem_attach_disk(struct device *dev,
        pmem->pfn_flags = PFN_DEV;
        pmem->pgmap.ref = &q->q_usage_counter;
        if (is_nd_pfn(dev)) {
+               if (setup_pagemap_fsdax(dev, &pmem->pgmap))
+                       return -ENOMEM;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pfn_sb = nd_pfn->pfn_sb;
                pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -364,6 +381,8 @@ static int pmem_attach_disk(struct device *dev,
        } else if (pmem_should_map_pages(dev)) {
                memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
                pmem->pgmap.altmap_valid = false;
+               if (setup_pagemap_fsdax(dev, &pmem->pgmap))
+                       return -ENOMEM;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pmem->pfn_flags |= PFN_MAP;
                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
@@ -382,7 +401,7 @@ static int pmem_attach_disk(struct device *dev,
                return PTR_ERR(addr);
        pmem->virt_addr = addr;
 
-       blk_queue_write_cache(q, wbc, fua);
+       blk_queue_write_cache(q, true, fua);
        blk_queue_make_request(q, pmem_make_request);
        blk_queue_physical_block_size(q, PAGE_SIZE);
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
@@ -413,7 +432,7 @@ static int pmem_attach_disk(struct device *dev,
                put_disk(disk);
                return -ENOMEM;
        }
-       dax_write_cache(dax_dev, wbc);
+       dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
        pmem->dax_dev = dax_dev;
 
        gendev = disk_to_dev(disk);