// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"

static struct device *to_dev(struct pmem_device *pmem)
{
        /*
         * nvdimm bus services need a 'dev' parameter, and we record the device
         * at init in bb.dev.
         */
        return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
        return to_nd_region(to_dev(pmem)->parent);
}

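/*
 * Clear the memory-failure state for the pages backing a range that the
 * nvdimm bus has just successfully cleared, so the kernel stops treating
 * those pfns as poisoned.
 */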
static void hwpoison_clear(struct pmem_device *pmem,
                phys_addr_t phys, unsigned int len)
{
        unsigned long pfn_start, pfn_end, pfn;

        /* only pmem in the linear map supports HWPoison */
        if (is_vmalloc_addr(pmem->virt_addr))
                return;

        pfn_start = PHYS_PFN(phys);
        pfn_end = pfn_start + PHYS_PFN(len);
        for (pfn = pfn_start; pfn < pfn_end; pfn++) {
                struct page *page = pfn_to_page(pfn);

                /*
                 * Note, no need to hold a get_dev_pagemap() reference
                 * here since we're in the driver I/O path and
                 * outstanding I/O requests pin the dev_pagemap.
                 */
                if (test_and_clear_pmem_poison(page))
                        clear_mce_nospec(pfn);
        }
}

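/*
 * Ask the nvdimm bus to clear poison in the given range, then bring the
 * page-level poison state and the badblocks list back in sync and notify
 * 'badblocks' sysfs watchers.  Returns BLK_STS_IOERR if the full range
 * could not be cleared.
 */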
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
                phys_addr_t offset, unsigned int len)
{
        struct device *dev = to_dev(pmem);
        sector_t sector;
        long cleared;
        blk_status_t rc = BLK_STS_OK;

        sector = (offset - pmem->data_offset) / 512;

        cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
        if (cleared < len)
                rc = BLK_STS_IOERR;
        if (cleared > 0 && cleared / 512) {
                hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
                cleared /= 512;
                dev_dbg(dev, "%#llx clear %ld sector%s\n",
                                (unsigned long long) sector, cleared,
                                cleared > 1 ? "s" : "");
                badblocks_clear(&pmem->bb, sector, cleared);
                if (pmem->bb_state)
                        sysfs_notify_dirent(pmem->bb_state);
        }

        arch_invalidate_pmem(pmem->virt_addr + offset, len);

        return rc;
}

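/*
 * Copy from a (possibly highmem, possibly multi-page) source page to pmem
 * with cache-flushing stores, mapping and copying at most one page per
 * iteration.
 */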
static void write_pmem(void *pmem_addr, struct page *page,
                unsigned int off, unsigned int len)
{
        unsigned int chunk;
        void *mem;

        while (len) {
                mem = kmap_atomic(page);
                chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                memcpy_flushcache(pmem_addr, mem + off, chunk);
                kunmap_atomic(mem);
                len -= chunk;
                off = 0;
                page++;
                pmem_addr += chunk;
        }
}

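/*
 * Machine-check-safe copy from pmem into a page: memcpy_mcsafe() returns
 * the number of bytes it failed to copy when it consumes poison, which is
 * reported to the caller as BLK_STS_IOERR.
 */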
static blk_status_t read_pmem(struct page *page, unsigned int off,
                void *pmem_addr, unsigned int len)
{
        unsigned int chunk;
        unsigned long rem;
        void *mem;

        while (len) {
                mem = kmap_atomic(page);
                chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
                kunmap_atomic(mem);
                if (rem)
                        return BLK_STS_IOERR;
                len -= chunk;
                off = 0;
                page++;
                pmem_addr += chunk;
        }
        return BLK_STS_OK;
}

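/* Perform one read or write of @len bytes at @sector, @off into @page */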
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                        unsigned int len, unsigned int off, unsigned int op,
                        sector_t sector)
{
        blk_status_t rc = BLK_STS_OK;
        bool bad_pmem = false;
        phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
        void *pmem_addr = pmem->virt_addr + pmem_off;

        if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
                bad_pmem = true;

        if (!op_is_write(op)) {
                if (unlikely(bad_pmem))
                        rc = BLK_STS_IOERR;
                else {
                        rc = read_pmem(page, off, pmem_addr, len);
                        flush_dcache_page(page);
                }
        } else {
                /*
                 * Note that we write the data both before and after
                 * clearing poison.  The write before clear poison
                 * handles situations where the latest written data is
                 * preserved and the clear poison operation simply marks
                 * the address range as valid without changing the data.
                 * In this case application software can assume that an
                 * interrupted write will either return the new good
                 * data or an error.
                 *
                 * However, if pmem_clear_poison() leaves the data in an
                 * indeterminate state we need to perform the write
                 * after clear poison.
                 */
                flush_dcache_page(page);
                write_pmem(pmem_addr, page, off, len);
                if (unlikely(bad_pmem)) {
                        rc = pmem_clear_poison(pmem, pmem_off, len);
                        write_pmem(pmem_addr, page, off, len);
                }
        }

        return rc;
}

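/*
 * Bio-based I/O entry point: honor REQ_PREFLUSH before touching media,
 * copy each segment synchronously, and honor REQ_FUA with another flush
 * before completing the bio.
 */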
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
        int ret = 0;
        blk_status_t rc = 0;
        bool do_acct;
        unsigned long start;
        struct bio_vec bvec;
        struct bvec_iter iter;
        struct pmem_device *pmem = q->queuedata;
        struct nd_region *nd_region = to_region(pmem);

        if (bio->bi_opf & REQ_PREFLUSH)
                ret = nvdimm_flush(nd_region, bio);

        do_acct = nd_iostat_start(bio, &start);
        bio_for_each_segment(bvec, bio, iter) {
                rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
                                bvec.bv_offset, bio_op(bio), iter.bi_sector);
                if (rc) {
                        bio->bi_status = rc;
                        break;
                }
        }
        if (do_acct)
                nd_iostat_end(bio, start);

        if (bio->bi_opf & REQ_FUA)
                ret = nvdimm_flush(nd_region, bio);

        if (ret)
                bio->bi_status = errno_to_blk_status(ret);

        bio_endio(bio);
        return BLK_QC_T_NONE;
}

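/* Synchronous single-page I/O for the block layer's ->rw_page() interface */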
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, unsigned int op)
{
        struct pmem_device *pmem = bdev->bd_queue->queuedata;
        blk_status_t rc;

        rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
                          0, op, sector);

        /*
         * The ->rw_page interface is subtle and tricky.  The core
         * retries on any error, so we can only invoke page_endio() in
         * the successful completion case.  Otherwise, we'll see crashes
         * caused by double completion.
         */
        if (rc == 0)
                page_endio(page, op_is_write(op), 0);

        return blk_status_to_errno(rc);
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
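/*
 * Translate a page offset in the namespace to a kernel virtual address
 * and pfn for DAX, refusing ranges with known poison and limiting the
 * returned extent to the requested range when any badblocks are present.
 */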
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn)
{
        resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

        if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
                                        PFN_PHYS(nr_pages))))
                return -EIO;

        if (kaddr)
                *kaddr = pmem->virt_addr + offset;
        if (pfn)
                *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

        /*
         * If badblocks are present, limit known good range to the
         * requested range.
         */
        if (unlikely(pmem->bb.count))
                return nr_pages;
        return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
        .owner =                THIS_MODULE,
        .rw_page =              pmem_rw_page,
        .revalidate_disk =      nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
                pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct pmem_device *pmem = dax_get_private(dax_dev);

        return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor().
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return _copy_to_iter_mcsafe(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
        .direct_access = pmem_dax_direct_access,
        .dax_supported = generic_fsdax_supported,
        .copy_from_iter = pmem_copy_from_iter,
        .copy_to_iter = pmem_copy_to_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
        &dax_attribute_group,
        NULL,
};

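/*
 * The pagemap reference is the request_queue usage counter, so pagemap
 * kill / cleanup map directly onto starting a queue freeze and tearing
 * the queue down.
 */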
static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
        struct request_queue *q =
                container_of(pgmap->ref, struct request_queue, q_usage_counter);

        blk_cleanup_queue(q);
}

static void pmem_release_queue(void *pgmap)
{
        pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
        struct request_queue *q =
                container_of(pgmap->ref, struct request_queue, q_usage_counter);

        blk_freeze_queue_start(q);
}

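/* devm teardown action: stop DAX before taking down the gendisk it fronts */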
static void pmem_release_disk(void *__pmem)
{
        struct pmem_device *pmem = __pmem;

        kill_dax(pmem->dax_dev);
        put_dax(pmem->dax_dev);
        del_gendisk(pmem->disk);
        put_disk(pmem->disk);
}

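/* Wake any thread waiting on the page refcount for this fsdax page to go idle */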
static void pmem_pagemap_page_free(struct page *page)
{
        wake_up_var(&page->_refcount);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
        .page_free              = pmem_pagemap_page_free,
        .kill                   = pmem_pagemap_kill,
        .cleanup                = pmem_pagemap_cleanup,
};

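/*
 * Set up the block device and DAX front-ends for a pmem namespace: map
 * the media (via struct pages when DAX is possible), size the disk around
 * any pfn metadata, populate badblocks, and register the gendisk and
 * dax_device.
 */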
static int pmem_attach_disk(struct device *dev,
                struct nd_namespace_common *ndns)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
        int nid = dev_to_node(dev), fua;
        struct resource *res = &nsio->res;
        struct resource bb_res;
        struct nd_pfn *nd_pfn = NULL;
        struct dax_device *dax_dev;
        struct nd_pfn_sb *pfn_sb;
        struct pmem_device *pmem;
        struct request_queue *q;
        struct device *gendev;
        struct gendisk *disk;
        void *addr;
        int rc;
        unsigned long flags = 0UL;

        pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
        if (!pmem)
                return -ENOMEM;

        /* while nsio_rw_bytes is active, parse a pfn info block if present */
        if (is_nd_pfn(dev)) {
                nd_pfn = to_nd_pfn(dev);
                rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
                if (rc)
                        return rc;
        }

        /* we're attaching a block device, disable raw namespace access */
        devm_nsio_disable(dev, nsio);

        dev_set_drvdata(dev, pmem);
        pmem->phys_addr = res->start;
        pmem->size = resource_size(res);
        fua = nvdimm_has_flush(nd_region);
        if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
                dev_warn(dev, "unable to guarantee persistence of writes\n");
                fua = 0;
        }

        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(&ndns->dev))) {
                dev_warn(dev, "could not reserve region %pR\n", res);
                return -EBUSY;
        }

        q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
        if (!q)
                return -ENOMEM;

        pmem->pfn_flags = PFN_DEV;
        pmem->pgmap.ref = &q->q_usage_counter;
        if (is_nd_pfn(dev)) {
                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
                pmem->pgmap.ops = &fsdax_pagemap_ops;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pfn_sb = nd_pfn->pfn_sb;
                pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
                pmem->pfn_pad = resource_size(res) -
                        resource_size(&pmem->pgmap.res);
                pmem->pfn_flags |= PFN_MAP;
                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
                bb_res.start += pmem->data_offset;
        } else if (pmem_should_map_pages(dev)) {
                memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
                pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
                pmem->pgmap.ops = &fsdax_pagemap_ops;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pmem->pfn_flags |= PFN_MAP;
                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
        } else {
                if (devm_add_action_or_reset(dev, pmem_release_queue,
                                        &pmem->pgmap))
                        return -ENOMEM;
                addr = devm_memremap(dev, pmem->phys_addr,
                                pmem->size, ARCH_MEMREMAP_PMEM);
                memcpy(&bb_res, &nsio->res, sizeof(bb_res));
        }

        if (IS_ERR(addr))
                return PTR_ERR(addr);
        pmem->virt_addr = addr;

        blk_queue_write_cache(q, true, fua);
        blk_queue_make_request(q, pmem_make_request);
        blk_queue_physical_block_size(q, PAGE_SIZE);
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        if (pmem->pfn_flags & PFN_MAP)
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;

        disk = alloc_disk_node(0, nid);
        if (!disk)
                return -ENOMEM;
        pmem->disk = disk;

        disk->fops              = &pmem_fops;
        disk->queue             = q;
        disk->flags             = GENHD_FL_EXT_DEVT;
        disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
        nvdimm_namespace_disk_name(ndns, disk->disk_name);
        set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
                        / 512);
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;
        nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
        disk->bb = &pmem->bb;

        if (is_nvdimm_sync(nd_region))
                flags = DAXDEV_F_SYNC;
        dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
        if (!dax_dev) {
                put_disk(disk);
                return -ENOMEM;
        }
        dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
        pmem->dax_dev = dax_dev;
        gendev = disk_to_dev(disk);
        gendev->groups = pmem_attribute_groups;

        device_add_disk(dev, disk, NULL);
        if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
                return -ENOMEM;

        revalidate_disk(disk);

        pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
                                          "badblocks");
        if (!pmem->bb_state)
                dev_warn(dev, "'badblocks' notification disabled\n");

        return 0;
}

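/*
 * Probe decides which personality owns the namespace: an explicit btt/pfn
 * device attaches directly, otherwise a valid info-block re-registers the
 * namespace as that personality, and a bare namespace becomes a raw pmem
 * disk.
 */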
static int nd_pmem_probe(struct device *dev)
{
        struct nd_namespace_common *ndns;

        ndns = nvdimm_namespace_common_probe(dev);
        if (IS_ERR(ndns))
                return PTR_ERR(ndns);

        if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
                return -ENXIO;

        if (is_nd_btt(dev))
                return nvdimm_namespace_attach_btt(ndns);

        if (is_nd_pfn(dev))
                return pmem_attach_disk(dev, ndns);

        /* if we find a valid info-block we'll come back as that personality */
        if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
                        || nd_dax_probe(dev, ndns) == 0)
                return -ENXIO;

        /* ...otherwise we're just a raw pmem device */
        return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
        struct pmem_device *pmem = dev_get_drvdata(dev);

        if (is_nd_btt(dev))
                nvdimm_namespace_detach_btt(to_nd_btt(dev));
        else {
                /*
                 * Note, this assumes nd_device_lock() context to not
                 * race nd_pmem_notify()
                 */
                sysfs_put(pmem->bb_state);
                pmem->bb_state = NULL;
        }
        nvdimm_flush(to_nd_region(dev->parent), NULL);

        return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
        nvdimm_flush(to_nd_region(dev->parent), NULL);
}

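/*
 * On poison revalidation, recompute the resource span that backs this
 * device (accounting for btt/pfn metadata offsets) and refresh its
 * badblocks list.
 */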
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
        struct nd_region *nd_region;
        resource_size_t offset = 0, end_trunc = 0;
        struct nd_namespace_common *ndns;
        struct nd_namespace_io *nsio;
        struct resource res;
        struct badblocks *bb;
        struct kernfs_node *bb_state;

        if (event != NVDIMM_REVALIDATE_POISON)
                return;

        if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                ndns = nd_btt->ndns;
                nd_region = to_nd_region(ndns->dev.parent);
                nsio = to_nd_namespace_io(&ndns->dev);
                bb = &nsio->bb;
                bb_state = NULL;
        } else {
                struct pmem_device *pmem = dev_get_drvdata(dev);

                nd_region = to_region(pmem);
                bb = &pmem->bb;
                bb_state = pmem->bb_state;

                if (is_nd_pfn(dev)) {
                        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
                        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

                        ndns = nd_pfn->ndns;
                        offset = pmem->data_offset +
                                        __le32_to_cpu(pfn_sb->start_pad);
                        end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
                } else {
                        ndns = to_ndns(dev);
                }

                nsio = to_nd_namespace_io(&ndns->dev);
        }

        res.start = nsio->res.start + offset;
        res.end = nsio->res.end - end_trunc;
        nvdimm_badblocks_populate(nd_region, bb, &res);
        if (bb_state)
                sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
        .probe = nd_pmem_probe,
        .remove = nd_pmem_remove,
        .notify = nd_pmem_notify,
        .shutdown = nd_pmem_shutdown,
        .drv = {
                .name = "nd_pmem",
        },
        .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");