drivers/dax/pmem.c
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/percpu-refcount.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include "../nvdimm/pfn.h"
#include "../nvdimm/nd.h"
#include "device-dax.h"

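/*
 * Per-device driver data: @ref counts outstanding references to the
 * memremap'd pages, and @cmp is completed by the ref's release callback,
 * letting teardown wait until all references are gone.
 */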
struct dax_pmem {
	struct device *dev;
	struct percpu_ref ref;
	struct completion cmp;
};

static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
{
	return container_of(ref, struct dax_pmem, ref);
}

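/*
 * percpu_ref release callback: fires once the reference count reaches
 * zero after the ref has been killed; wake up dax_pmem_percpu_exit().
 */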
static void dax_pmem_percpu_release(struct percpu_ref *ref)
{
	struct dax_pmem *dax_pmem = to_dax_pmem(ref);

	dev_dbg(dax_pmem->dev, "%s\n", __func__);
	complete(&dax_pmem->cmp);
}

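/*
 * devm action: wait for the release callback (i.e. for all page
 * references to drop) before tearing down the percpu_ref.
 */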
static void dax_pmem_percpu_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct dax_pmem *dax_pmem = to_dax_pmem(ref);

	dev_dbg(dax_pmem->dev, "%s\n", __func__);
	wait_for_completion(&dax_pmem->cmp);
	percpu_ref_exit(ref);
}

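/* devm action: start draining the percpu_ref when the device is unbound */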
static void dax_pmem_percpu_kill(void *data)
{
	struct percpu_ref *ref = data;
	struct dax_pmem *dax_pmem = to_dax_pmem(ref);

	dev_dbg(dax_pmem->dev, "%s\n", __func__);
	percpu_ref_kill(ref);
}

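/*
 * Probe a pmem namespace configured for device-dax: read the 'pfn' info
 * block, reserve the namespace resource, map the persistent memory with
 * struct pages via devm_memremap_pages(), then register a dax_region and
 * a single dev_dax child device on top of it.
 */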
static int dax_pmem_probe(struct device *dev)
{
	void *addr;
	struct resource res;
	int rc, id, region_id;
	struct nd_pfn_sb *pfn_sb;
	struct dev_dax *dev_dax;
	struct dax_pmem *dax_pmem;
	struct nd_namespace_io *nsio;
	struct dax_region *dax_region;
	struct nd_namespace_common *ndns;
	struct nd_dax *nd_dax = to_nd_dax(dev);
	struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
	struct vmem_altmap __altmap, *altmap = NULL;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);
	nsio = to_nd_namespace_io(&ndns->dev);

	/* parse the 'pfn' info block via ->rw_bytes */
	rc = devm_nsio_enable(dev, nsio);
	if (rc)
		return rc;
	altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
	if (IS_ERR(altmap))
		return PTR_ERR(altmap);
	devm_nsio_disable(dev, nsio);

	pfn_sb = nd_pfn->pfn_sb;

	if (!devm_request_mem_region(dev, nsio->res.start,
				resource_size(&nsio->res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
		return -EBUSY;
	}

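	/*
	 * Set up the percpu_ref that gates teardown of the page mapping,
	 * and register a devm action so the ref is drained and exited
	 * when the device is unbound.
	 */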
	dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
	if (!dax_pmem)
		return -ENOMEM;

	dax_pmem->dev = dev;
	init_completion(&dax_pmem->cmp);
	rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0,
			GFP_KERNEL);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit,
							&dax_pmem->ref);
	if (rc)
		return rc;

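	/*
	 * Map the namespace with struct pages (carved out of pmem itself
	 * if the info block provided an altmap); the percpu_ref tracks
	 * outstanding references to those pages.
	 */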
	addr = devm_memremap_pages(dev, &res, &dax_pmem->ref, altmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
							&dax_pmem->ref);
	if (rc)
		return rc;

	/* adjust the dax_region resource to the start of data */
	res.start += le64_to_cpu(pfn_sb->dataoff);

	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
	if (rc != 2)
		return -EINVAL;

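	/*
	 * Create the dax_region over the adjusted resource, using the
	 * alignment recorded in the pfn info block.
	 */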
	dax_region = alloc_dax_region(dev, region_id, &res,
			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
	if (!dax_region)
		return -ENOMEM;

	/* TODO: support for subdividing a dax region... */
	dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);

	/* child dev_dax instances now own the lifetime of the dax_region */
	dax_region_put(dax_region);

	return PTR_ERR_OR_ZERO(dev_dax);
}

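/*
 * nvdimm bus driver: binds to namespaces published as ND_DRIVER_DAX_PMEM
 * devices.
 */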
static struct nd_device_driver dax_pmem_driver = {
	.probe = dax_pmem_probe,
	.drv = {
		.name = "dax_pmem",
	},
	.type = ND_DRIVER_DAX_PMEM,
};

static int __init dax_pmem_init(void)
{
	return nd_driver_register(&dax_pmem_driver);
}
module_init(dax_pmem_init);

static void __exit dax_pmem_exit(void)
{
	driver_unregister(&dax_pmem_driver.drv);
}
module_exit(dax_pmem_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);