/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

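/*
 * Ask the nvdimm bus to clear poison (media errors) over the given
 * physical range, then drop any successfully cleared sectors from the
 * badblocks list and wake up 'badblocks' sysfs pollers.  The cache
 * lines covering the range are invalidated so subsequent reads see the
 * newly scrubbed media contents.
 */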
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		cleared /= 512;
		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

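/*
 * Copy a bio page (possibly highmem) to pmem in PAGE_SIZE chunks:
 * kmap_atomic() maps each source page and memcpy_flushcache() ensures
 * the stores are not left dirty in the CPU cache.
 */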
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += PAGE_SIZE;
	}
}

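/*
 * The read side mirrors write_pmem(), but uses memcpy_mcsafe() so that
 * a machine check raised by consuming poisoned media is turned into a
 * return code (mapped to BLK_STS_IOERR) instead of a kernel crash.
 */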
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	int rc;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE);
		rc = memcpy_mcsafe(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rc)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += PAGE_SIZE;
	}
	return BLK_STS_OK;
}

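/*
 * Common per-segment worker for the bio and rw_page paths: translate
 * the sector to a device offset, consult the badblocks list, and hand
 * off to read_pmem()/write_pmem().
 */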
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif

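/*
 * pmem is a bio-based driver: memory-speed media needs no I/O
 * scheduler and every bio completes synchronously in the submitting
 * context.  REQ_FLUSH is honored before the data transfer and REQ_FUA
 * after, both via nvdimm_flush() on the parent region.
 */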
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	blk_status_t rc;

	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
			  0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return blk_status_to_errno(rc);
}

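/*
 * Core of the dax_operations ->direct_access() contract: resolve a
 * page offset into the device to a kernel virtual address and pfn,
 * failing ranges that intersect known poison.  The return value is
 * the number of pages available for access at *kaddr.
 */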
225 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
226 __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
227 long nr_pages, void **kaddr, pfn_t *pfn)
229 resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
231 if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
232 PFN_PHYS(nr_pages))))
234 *kaddr = pmem->virt_addr + offset;
235 *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
238 * If badblocks are present, limit known good range to the
241 if (unlikely(pmem->bb.count))
243 return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter_flushcache(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.copy_from_iter = pmem_copy_from_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_freeze_queue(void *q)
{
	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

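/*
 * Build the block device and dax_device for a namespace.  The data
 * range is mapped one of three ways: per a pfn info block (which may
 * reserve capacity for the page map), via devm_memremap_pages() for a
 * namespace that wants struct page backing, or with a plain
 * devm_memremap() when no page map is needed.
 */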
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua, wbc;
	struct resource *res = &nsio->res;
	struct resource bb_res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	wbc = nvdimm_has_cache(nd_region);

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			resource_size(&pmem->pgmap.res);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
		bb_res.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
		pmem->pgmap.altmap_valid = false;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be frozen before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, wbc, fua);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, wbc);
	pmem->dax_dev = dax_dev;

	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes device_lock() context to not race
		 * nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}

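/*
 * NVDIMM_REVALIDATE_POISON: the bus discovered new media errors, so
 * re-populate the badblocks list (adjusting for btt/pfn offsets) and
 * notify the 'badblocks' sysfs dirent.
 */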
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

535 MODULE_ALIAS("pmem");
536 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
537 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
538 static struct nd_device_driver nd_pmem_driver = {
539 .probe = nd_pmem_probe,
540 .remove = nd_pmem_remove,
541 .notify = nd_pmem_notify,
542 .shutdown = nd_pmem_shutdown,
546 .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
549 static int __init pmem_init(void)
551 return nd_driver_register(&nd_pmem_driver);
553 module_init(pmem_init);
555 static void pmem_exit(void)
557 driver_unregister(&nd_pmem_driver.drv);
559 module_exit(pmem_exit);
561 MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
562 MODULE_LICENSE("GPL v2");