drivers/acpi/nfit/core.c
1 /*
2  * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  */
13 #include <linux/list_sort.h>
14 #include <linux/libnvdimm.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/ndctl.h>
18 #include <linux/sysfs.h>
19 #include <linux/delay.h>
20 #include <linux/list.h>
21 #include <linux/acpi.h>
22 #include <linux/sort.h>
23 #include <linux/io.h>
24 #include <linux/nd.h>
25 #include <asm/cacheflush.h>
26 #include <acpi/nfit.h>
27 #include "intel.h"
28 #include "nfit.h"
29
30 /*
31  * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
32  * irrelevant.
33  */
34 #include <linux/io-64-nonatomic-hi-lo.h>
35
36 static bool force_enable_dimms;
37 module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
38 MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");
39
40 static bool disable_vendor_specific;
41 module_param(disable_vendor_specific, bool, S_IRUGO);
42 MODULE_PARM_DESC(disable_vendor_specific,
43                 "Limit commands to the publicly specified set");
44
45 static unsigned long override_dsm_mask;
46 module_param(override_dsm_mask, ulong, S_IRUGO);
47 MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");
48
49 static int default_dsm_family = -1;
50 module_param(default_dsm_family, int, S_IRUGO);
51 MODULE_PARM_DESC(default_dsm_family,
52                 "Try this DSM type first when identifying NVDIMM family");
53
54 static bool no_init_ars;
55 module_param(no_init_ars, bool, 0644);
56 MODULE_PARM_DESC(no_init_ars, "Skip ARS run at nfit init time");
57
58 LIST_HEAD(acpi_descs);
59 DEFINE_MUTEX(acpi_desc_lock);
60
61 static struct workqueue_struct *nfit_wq;
62
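/* NFIT tables seen by a previous scan; consulted by the add_* helpers below */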
63 struct nfit_table_prev {
64         struct list_head spas;
65         struct list_head memdevs;
66         struct list_head dcrs;
67         struct list_head bdws;
68         struct list_head idts;
69         struct list_head flushes;
70 };
71
72 static guid_t nfit_uuid[NFIT_UUID_MAX];
73
74 const guid_t *to_nfit_uuid(enum nfit_uuids id)
75 {
76         return &nfit_uuid[id];
77 }
78 EXPORT_SYMBOL(to_nfit_uuid);
79
80 static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
81 {
82         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
83
84         /*
85          * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
86          * acpi_device.
87          */
88         if (!nd_desc->provider_name
89                         || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
90                 return NULL;
91
92         return to_acpi_device(acpi_desc->dev);
93 }
94
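/*
 * Translate the firmware status (16-bit status, 16-bit extended status)
 * returned by bus-scope commands (ARS cap/start/status, clear error) into
 * an errno, or into the ARS-specific -EBUSY / -EAGAIN / -ENOSPC results.
 */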
95 static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
96 {
97         struct nd_cmd_clear_error *clear_err;
98         struct nd_cmd_ars_status *ars_status;
99         u16 flags;
100
101         switch (cmd) {
102         case ND_CMD_ARS_CAP:
103                 if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
104                         return -ENOTTY;
105
106                 /* Command failed */
107                 if (status & 0xffff)
108                         return -EIO;
109
110                 /* No supported scan types for this range */
111                 flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
112                 if ((status >> 16 & flags) == 0)
113                         return -ENOTTY;
114                 return 0;
115         case ND_CMD_ARS_START:
116                 /* ARS is in progress */
117                 if ((status & 0xffff) == NFIT_ARS_START_BUSY)
118                         return -EBUSY;
119
120                 /* Command failed */
121                 if (status & 0xffff)
122                         return -EIO;
123                 return 0;
124         case ND_CMD_ARS_STATUS:
125                 ars_status = buf;
126                 /* Command failed */
127                 if (status & 0xffff)
128                         return -EIO;
129                 /* Check extended status (Upper two bytes) */
130                 if (status == NFIT_ARS_STATUS_DONE)
131                         return 0;
132
133                 /* ARS is in progress */
134                 if (status == NFIT_ARS_STATUS_BUSY)
135                         return -EBUSY;
136
137                 /* No ARS performed for the current boot */
138                 if (status == NFIT_ARS_STATUS_NONE)
139                         return -EAGAIN;
140
141                 /*
142                  * ARS interrupted, either we overflowed or some other
143                  * agent wants the scan to stop.  If we didn't overflow
144                  * then just continue with the returned results.
145                  */
146                 if (status == NFIT_ARS_STATUS_INTR) {
147                         if (ars_status->out_length >= 40 && (ars_status->flags
148                                                 & NFIT_ARS_F_OVERFLOW))
149                                 return -ENOSPC;
150                         return 0;
151                 }
152
153                 /* Unknown status */
154                 if (status >> 16)
155                         return -EIO;
156                 return 0;
157         case ND_CMD_CLEAR_ERROR:
158                 clear_err = buf;
159                 if (status & 0xffff)
160                         return -EIO;
161                 if (!clear_err->cleared)
162                         return -EIO;
163                 if (clear_err->length > clear_err->cleared)
164                         return clear_err->cleared;
165                 return 0;
166         default:
167                 break;
168         }
169
170         /* all other non-zero status results in an error */
171         if (status)
172                 return -EIO;
173         return 0;
174 }
175
176 #define ACPI_LABELS_LOCKED 3
177
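/*
 * Translate per-DIMM command status: a "labels locked" indication on the
 * config-size/read/write commands becomes -EACCES, any other non-zero
 * status becomes -EIO.
 */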
178 static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
179                 u32 status)
180 {
181         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
182
183         switch (cmd) {
184         case ND_CMD_GET_CONFIG_SIZE:
185                 /*
186                  * In the _LSI, _LSR, _LSW case the locked status is
187                  * communicated via the read/write commands
188                  */
189                 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
190                         break;
191
192                 if (status >> 16 & ND_CONFIG_LOCKED)
193                         return -EACCES;
194                 break;
195         case ND_CMD_GET_CONFIG_DATA:
196                 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
197                                 && status == ACPI_LABELS_LOCKED)
198                         return -EACCES;
199                 break;
200         case ND_CMD_SET_CONFIG_DATA:
201                 if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
202                                 && status == ACPI_LABELS_LOCKED)
203                         return -EACCES;
204                 break;
205         default:
206                 break;
207         }
208
209         /* all other non-zero status results in an error */
210         if (status)
211                 return -EIO;
212         return 0;
213 }
214
215 static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
216                 u32 status)
217 {
218         if (!nvdimm)
219                 return xlat_bus_status(buf, cmd, status);
220         return xlat_nvdimm_status(nvdimm, buf, cmd, status);
221 }
222
223 /* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
224 static union acpi_object *pkg_to_buf(union acpi_object *pkg)
225 {
226         int i;
227         void *dst;
228         size_t size = 0;
229         union acpi_object *buf = NULL;
230
231         if (pkg->type != ACPI_TYPE_PACKAGE) {
232                 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
233                                 pkg->type);
234                 goto err;
235         }
236
237         for (i = 0; i < pkg->package.count; i++) {
238                 union acpi_object *obj = &pkg->package.elements[i];
239
240                 if (obj->type == ACPI_TYPE_INTEGER)
241                         size += 4;
242                 else if (obj->type == ACPI_TYPE_BUFFER)
243                         size += obj->buffer.length;
244                 else {
245                         WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
246                                         obj->type);
247                         goto err;
248                 }
249         }
250
251         buf = ACPI_ALLOCATE(sizeof(*buf) + size);
252         if (!buf)
253                 goto err;
254
255         dst = buf + 1;
256         buf->type = ACPI_TYPE_BUFFER;
257         buf->buffer.length = size;
258         buf->buffer.pointer = dst;
259         for (i = 0; i < pkg->package.count; i++) {
260                 union acpi_object *obj = &pkg->package.elements[i];
261
262                 if (obj->type == ACPI_TYPE_INTEGER) {
263                         memcpy(dst, &obj->integer.value, 4);
264                         dst += 4;
265                 } else if (obj->type == ACPI_TYPE_BUFFER) {
266                         memcpy(dst, obj->buffer.pointer, obj->buffer.length);
267                         dst += obj->buffer.length;
268                 }
269         }
270 err:
271         ACPI_FREE(pkg);
272         return buf;
273 }
274
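/* convert an _LSW status integer to the buffer object acpi_nfit_ctl expects */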
275 static union acpi_object *int_to_buf(union acpi_object *integer)
276 {
277         union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
278         void *dst = NULL;
279
280         if (!buf)
281                 goto err;
282
283         if (integer->type != ACPI_TYPE_INTEGER) {
284                 WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
285                                 integer->type);
286                 goto err;
287         }
288
289         dst = buf + 1;
290         buf->type = ACPI_TYPE_BUFFER;
291         buf->buffer.length = 4;
292         buf->buffer.pointer = dst;
293         memcpy(dst, &integer->integer.value, 4);
294 err:
295         ACPI_FREE(integer);
296         return buf;
297 }
298
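/* evaluate _LSW (Label Storage Write) for the given offset/length/data */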
299 static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
300                 u32 len, void *data)
301 {
302         acpi_status rc;
303         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
304         struct acpi_object_list input = {
305                 .count = 3,
306                 .pointer = (union acpi_object []) {
307                         [0] = {
308                                 .integer.type = ACPI_TYPE_INTEGER,
309                                 .integer.value = offset,
310                         },
311                         [1] = {
312                                 .integer.type = ACPI_TYPE_INTEGER,
313                                 .integer.value = len,
314                         },
315                         [2] = {
316                                 .buffer.type = ACPI_TYPE_BUFFER,
317                                 .buffer.pointer = data,
318                                 .buffer.length = len,
319                         },
320                 },
321         };
322
323         rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
324         if (ACPI_FAILURE(rc))
325                 return NULL;
326         return int_to_buf(buf.pointer);
327 }
328
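/* evaluate _LSR (Label Storage Read) for the given offset/length */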
329 static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
330                 u32 len)
331 {
332         acpi_status rc;
333         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
334         struct acpi_object_list input = {
335                 .count = 2,
336                 .pointer = (union acpi_object []) {
337                         [0] = {
338                                 .integer.type = ACPI_TYPE_INTEGER,
339                                 .integer.value = offset,
340                         },
341                         [1] = {
342                                 .integer.type = ACPI_TYPE_INTEGER,
343                                 .integer.value = len,
344                         },
345                 },
346         };
347
348         rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
349         if (ACPI_FAILURE(rc))
350                 return NULL;
351         return pkg_to_buf(buf.pointer);
352 }
353
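/* evaluate _LSI (Label Storage Information) for the label area properties */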
354 static union acpi_object *acpi_label_info(acpi_handle handle)
355 {
356         acpi_status rc;
357         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
358
359         rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
360         if (ACPI_FAILURE(rc))
361                 return NULL;
362         return pkg_to_buf(buf.pointer);
363 }
364
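/*
 * Look up the _DSM revision id to use for a family/function pair.  The
 * default is revision 1; the table marks the Intel-family functions that
 * are issued with revision 2.  Out-of-range family/function returns 0.
 */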
365 static u8 nfit_dsm_revid(unsigned family, unsigned func)
366 {
367         static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
368                 [NVDIMM_FAMILY_INTEL] = {
369                         [NVDIMM_INTEL_GET_MODES] = 2,
370                         [NVDIMM_INTEL_GET_FWINFO] = 2,
371                         [NVDIMM_INTEL_START_FWUPDATE] = 2,
372                         [NVDIMM_INTEL_SEND_FWUPDATE] = 2,
373                         [NVDIMM_INTEL_FINISH_FWUPDATE] = 2,
374                         [NVDIMM_INTEL_QUERY_FWUPDATE] = 2,
375                         [NVDIMM_INTEL_SET_THRESHOLD] = 2,
376                         [NVDIMM_INTEL_INJECT_ERROR] = 2,
377                         [NVDIMM_INTEL_GET_SECURITY_STATE] = 2,
378                         [NVDIMM_INTEL_SET_PASSPHRASE] = 2,
379                         [NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2,
380                         [NVDIMM_INTEL_UNLOCK_UNIT] = 2,
381                         [NVDIMM_INTEL_FREEZE_LOCK] = 2,
382                         [NVDIMM_INTEL_SECURE_ERASE] = 2,
383                         [NVDIMM_INTEL_OVERWRITE] = 2,
384                         [NVDIMM_INTEL_QUERY_OVERWRITE] = 2,
385                         [NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2,
386                         [NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2,
387                 },
388         };
389         u8 id;
390
391         if (family > NVDIMM_FAMILY_MAX)
392                 return 0;
393         if (func > 31)
394                 return 0;
395         id = revid_table[family][func];
396         if (id == 0)
397                 return 1; /* default */
398         return id;
399 }
400
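/*
 * Decide whether a command payload may be hex-dumped to the debug log.
 * Intel security command payloads (passphrases etc.) are suppressed
 * unless CONFIG_NFIT_SECURITY_DEBUG is enabled.
 */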
401 static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func)
402 {
403         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
404
405         if (nfit_mem && nfit_mem->family == NVDIMM_FAMILY_INTEL
406                         && func >= NVDIMM_INTEL_GET_SECURITY_STATE
407                         && func <= NVDIMM_INTEL_MASTER_SECURE_ERASE)
408                 return IS_ENABLED(CONFIG_NFIT_SECURITY_DEBUG);
409         return true;
410 }
411
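/**
 * acpi_nfit_ctl - dispatch a bus- or DIMM-scoped command to platform firmware
 * @nd_desc: NVDIMM bus descriptor embedded in struct acpi_nfit_desc
 * @nvdimm: target DIMM, or NULL for bus-scoped commands
 * @cmd: ND_CMD_* number, or ND_CMD_CALL for a passthrough nd_cmd_pkg
 * @buf: command input/output payload
 * @buf_len: size of @buf
 * @cmd_rc: optional out-parameter for the translated firmware status
 *
 * Prefers the named label methods (_LSI/_LSR/_LSW) over _DSM when the DIMM
 * advertises them.  Returns 0, or the number of bytes left unfilled in @buf,
 * on success; a negative errno otherwise.
 */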
412 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
413                 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
414 {
415         struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
416         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
417         union acpi_object in_obj, in_buf, *out_obj;
418         const struct nd_cmd_desc *desc = NULL;
419         struct device *dev = acpi_desc->dev;
420         struct nd_cmd_pkg *call_pkg = NULL;
421         const char *cmd_name, *dimm_name;
422         unsigned long cmd_mask, dsm_mask;
423         u32 offset, fw_status = 0;
424         acpi_handle handle;
425         unsigned int func;
426         const guid_t *guid;
427         int rc, i;
428
429         if (cmd_rc)
430                 *cmd_rc = -EINVAL;
431         func = cmd;
432         if (cmd == ND_CMD_CALL) {
433                 call_pkg = buf;
434                 func = call_pkg->nd_command;
435
436                 for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
437                         if (call_pkg->nd_reserved2[i])
438                                 return -EINVAL;
439         }
440
441         if (nvdimm) {
442                 struct acpi_device *adev = nfit_mem->adev;
443
444                 if (!adev)
445                         return -ENOTTY;
446                 if (call_pkg && nfit_mem->family != call_pkg->nd_family)
447                         return -ENOTTY;
448
449                 dimm_name = nvdimm_name(nvdimm);
450                 cmd_name = nvdimm_cmd_name(cmd);
451                 cmd_mask = nvdimm_cmd_mask(nvdimm);
452                 dsm_mask = nfit_mem->dsm_mask;
453                 desc = nd_cmd_dimm_desc(cmd);
454                 guid = to_nfit_uuid(nfit_mem->family);
455                 handle = adev->handle;
456         } else {
457                 struct acpi_device *adev = to_acpi_dev(acpi_desc);
458
459                 cmd_name = nvdimm_bus_cmd_name(cmd);
460                 cmd_mask = nd_desc->cmd_mask;
461                 dsm_mask = cmd_mask;
462                 if (cmd == ND_CMD_CALL)
463                         dsm_mask = nd_desc->bus_dsm_mask;
464                 desc = nd_cmd_bus_desc(cmd);
465                 guid = to_nfit_uuid(NFIT_DEV_BUS);
466                 handle = adev->handle;
467                 dimm_name = "bus";
468         }
469
470         if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
471                 return -ENOTTY;
472
473         if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
474                 return -ENOTTY;
475
476         in_obj.type = ACPI_TYPE_PACKAGE;
477         in_obj.package.count = 1;
478         in_obj.package.elements = &in_buf;
479         in_buf.type = ACPI_TYPE_BUFFER;
480         in_buf.buffer.pointer = buf;
481         in_buf.buffer.length = 0;
482
483         /* libnvdimm has already validated the input envelope */
484         for (i = 0; i < desc->in_num; i++)
485                 in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
486                                 i, buf);
487
488         if (call_pkg) {
489                 /* skip over package wrapper */
490                 in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
491                 in_buf.buffer.length = call_pkg->nd_size_in;
492         }
493
494         dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n",
495                 dimm_name, cmd, func, in_buf.buffer.length);
496         if (payload_dumpable(nvdimm, func))
497                 print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
498                                 in_buf.buffer.pointer,
499                                 min_t(u32, 256, in_buf.buffer.length), true);
500
501         /* call the BIOS, prefer the named methods over _DSM if available */
502         if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
503                         && test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
504                 out_obj = acpi_label_info(handle);
505         else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
506                         && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
507                 struct nd_cmd_get_config_data_hdr *p = buf;
508
509                 out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
510         } else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
511                         && test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
512                 struct nd_cmd_set_config_hdr *p = buf;
513
514                 out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
515                                 p->in_buf);
516         } else {
517                 u8 revid;
518
519                 if (nvdimm)
520                         revid = nfit_dsm_revid(nfit_mem->family, func);
521                 else
522                         revid = 1;
523                 out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
524         }
525
526         if (!out_obj) {
527                 dev_dbg(dev, "%s _DSM failed cmd: %s\n", dimm_name, cmd_name);
528                 return -EINVAL;
529         }
530
531         if (call_pkg) {
532                 call_pkg->nd_fw_size = out_obj->buffer.length;
533                 memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
534                         out_obj->buffer.pointer,
535                         min(call_pkg->nd_fw_size, call_pkg->nd_size_out));
536
537                 ACPI_FREE(out_obj);
538                 /*
539                  * Need to support FW function w/o known size in advance.
540                  * Caller can determine required size based upon nd_fw_size.
541                  * If we return an error (like elsewhere) then caller wouldn't
542                  * be able to rely upon data returned to make calculation.
543                  */
544                 if (cmd_rc)
545                         *cmd_rc = 0;
546                 return 0;
547         }
548
549         if (out_obj->type != ACPI_TYPE_BUFFER) {
550                 dev_dbg(dev, "%s unexpected output object type cmd: %s type: %d\n",
551                                 dimm_name, cmd_name, out_obj->type);
552                 rc = -EINVAL;
553                 goto out;
554         }
555
556         dev_dbg(dev, "%s cmd: %s output length: %d\n", dimm_name,
557                         cmd_name, out_obj->buffer.length);
558         print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
559                         out_obj->buffer.pointer,
560                         min_t(u32, 128, out_obj->buffer.length), true);
561
562         for (i = 0, offset = 0; i < desc->out_num; i++) {
563                 u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
564                                 (u32 *) out_obj->buffer.pointer,
565                                 out_obj->buffer.length - offset);
566
567                 if (offset + out_size > out_obj->buffer.length) {
568                         dev_dbg(dev, "%s output object underflow cmd: %s field: %d\n",
569                                         dimm_name, cmd_name, i);
570                         break;
571                 }
572
573                 if (in_buf.buffer.length + offset + out_size > buf_len) {
574                         dev_dbg(dev, "%s output overrun cmd: %s field: %d\n",
575                                         dimm_name, cmd_name, i);
576                         rc = -ENXIO;
577                         goto out;
578                 }
579                 memcpy(buf + in_buf.buffer.length + offset,
580                                 out_obj->buffer.pointer + offset, out_size);
581                 offset += out_size;
582         }
583
584         /*
585          * Set fw_status for all the commands with a known format to be
586          * later interpreted by xlat_status().
587          */
588         if (i >= 1 && ((!nvdimm && cmd >= ND_CMD_ARS_CAP
589                                         && cmd <= ND_CMD_CLEAR_ERROR)
590                                 || (nvdimm && cmd >= ND_CMD_SMART
591                                         && cmd <= ND_CMD_VENDOR)))
592                 fw_status = *(u32 *) out_obj->buffer.pointer;
593
594         if (offset + in_buf.buffer.length < buf_len) {
595                 if (i >= 1) {
596                         /*
597                          * status valid, return the number of bytes left
598                          * unfilled in the output buffer
599                          */
600                         rc = buf_len - offset - in_buf.buffer.length;
601                         if (cmd_rc)
602                                 *cmd_rc = xlat_status(nvdimm, buf, cmd,
603                                                 fw_status);
604                 } else {
605                         dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
606                                         __func__, dimm_name, cmd_name, buf_len,
607                                         offset);
608                         rc = -ENXIO;
609                 }
610         } else {
611                 rc = 0;
612                 if (cmd_rc)
613                         *cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
614         }
615
616  out:
617         ACPI_FREE(out_obj);
618
619         return rc;
620 }
621 EXPORT_SYMBOL_GPL(acpi_nfit_ctl);
622
623 static const char *spa_type_name(u16 type)
624 {
625         static const char *to_name[] = {
626                 [NFIT_SPA_VOLATILE] = "volatile",
627                 [NFIT_SPA_PM] = "pmem",
628                 [NFIT_SPA_DCR] = "dimm-control-region",
629                 [NFIT_SPA_BDW] = "block-data-window",
630                 [NFIT_SPA_VDISK] = "volatile-disk",
631                 [NFIT_SPA_VCD] = "volatile-cd",
632                 [NFIT_SPA_PDISK] = "persistent-disk",
633                 [NFIT_SPA_PCD] = "persistent-cd",
634
635         };
636
637         if (type > NFIT_SPA_PCD)
638                 return "unknown";
639
640         return to_name[type];
641 }
642
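/* map a SPA range GUID to its NFIT_SPA_* index, or -1 if unrecognized */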
643 int nfit_spa_type(struct acpi_nfit_system_address *spa)
644 {
645         int i;
646
647         for (i = 0; i < NFIT_UUID_MAX; i++)
648                 if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
649                         return i;
650         return -1;
651 }
652
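/*
 * The add_* helpers below are used when (re)parsing the NFIT.  Each first
 * looks for an identical table on the 'prev' lists and moves it back to
 * the live acpi_desc lists, so only genuinely new tables are allocated.
 */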
653 static bool add_spa(struct acpi_nfit_desc *acpi_desc,
654                 struct nfit_table_prev *prev,
655                 struct acpi_nfit_system_address *spa)
656 {
657         struct device *dev = acpi_desc->dev;
658         struct nfit_spa *nfit_spa;
659
660         if (spa->header.length != sizeof(*spa))
661                 return false;
662
663         list_for_each_entry(nfit_spa, &prev->spas, list) {
664                 if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
665                         list_move_tail(&nfit_spa->list, &acpi_desc->spas);
666                         return true;
667                 }
668         }
669
670         nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
671                         GFP_KERNEL);
672         if (!nfit_spa)
673                 return false;
674         INIT_LIST_HEAD(&nfit_spa->list);
675         memcpy(nfit_spa->spa, spa, sizeof(*spa));
676         list_add_tail(&nfit_spa->list, &acpi_desc->spas);
677         dev_dbg(dev, "spa index: %d type: %s\n",
678                         spa->range_index,
679                         spa_type_name(nfit_spa_type(spa)));
680         return true;
681 }
682
683 static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
684                 struct nfit_table_prev *prev,
685                 struct acpi_nfit_memory_map *memdev)
686 {
687         struct device *dev = acpi_desc->dev;
688         struct nfit_memdev *nfit_memdev;
689
690         if (memdev->header.length != sizeof(*memdev))
691                 return false;
692
693         list_for_each_entry(nfit_memdev, &prev->memdevs, list)
694                 if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
695                         list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
696                         return true;
697                 }
698
699         nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
700                         GFP_KERNEL);
701         if (!nfit_memdev)
702                 return false;
703         INIT_LIST_HEAD(&nfit_memdev->list);
704         memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
705         list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
706         dev_dbg(dev, "memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
707                         memdev->device_handle, memdev->range_index,
708                         memdev->region_index, memdev->flags);
709         return true;
710 }
711
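/**
 * nfit_get_smbios_id - look up the SMBIOS handle for an NFIT device handle
 * @device_handle: NFIT device handle to search for
 * @flags: out-parameter for the matching memdev's flags
 *
 * Returns the memdev's physical_id (SMBIOS handle) on success, or -ENODEV
 * if no registered NFIT instance knows the device handle.
 */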
712 int nfit_get_smbios_id(u32 device_handle, u16 *flags)
713 {
714         struct acpi_nfit_memory_map *memdev;
715         struct acpi_nfit_desc *acpi_desc;
716         struct nfit_mem *nfit_mem;
717         u16 physical_id;
718
719         mutex_lock(&acpi_desc_lock);
720         list_for_each_entry(acpi_desc, &acpi_descs, list) {
721                 mutex_lock(&acpi_desc->init_mutex);
722                 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
723                         memdev = __to_nfit_memdev(nfit_mem);
724                         if (memdev->device_handle == device_handle) {
725                                 *flags = memdev->flags;
726                                 physical_id = memdev->physical_id;
727                                 mutex_unlock(&acpi_desc->init_mutex);
728                                 mutex_unlock(&acpi_desc_lock);
729                                 return physical_id;
730                         }
731                 }
732                 mutex_unlock(&acpi_desc->init_mutex);
733         }
734         mutex_unlock(&acpi_desc_lock);
735
736         return -ENODEV;
737 }
738 EXPORT_SYMBOL_GPL(nfit_get_smbios_id);
739
740 /*
741  * An implementation may provide a truncated control region if no block windows
742  * are defined.
743  */
744 static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
745 {
746         if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
747                                 window_size))
748                 return 0;
749         if (dcr->windows)
750                 return sizeof(*dcr);
751         return offsetof(struct acpi_nfit_control_region, window_size);
752 }
753
754 static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
755                 struct nfit_table_prev *prev,
756                 struct acpi_nfit_control_region *dcr)
757 {
758         struct device *dev = acpi_desc->dev;
759         struct nfit_dcr *nfit_dcr;
760
761         if (!sizeof_dcr(dcr))
762                 return false;
763
764         list_for_each_entry(nfit_dcr, &prev->dcrs, list)
765                 if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
766                         list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
767                         return true;
768                 }
769
770         nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
771                         GFP_KERNEL);
772         if (!nfit_dcr)
773                 return false;
774         INIT_LIST_HEAD(&nfit_dcr->list);
775         memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
776         list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
777         dev_dbg(dev, "dcr index: %d windows: %d\n",
778                         dcr->region_index, dcr->windows);
779         return true;
780 }
781
782 static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
783                 struct nfit_table_prev *prev,
784                 struct acpi_nfit_data_region *bdw)
785 {
786         struct device *dev = acpi_desc->dev;
787         struct nfit_bdw *nfit_bdw;
788
789         if (bdw->header.length != sizeof(*bdw))
790                 return false;
791         list_for_each_entry(nfit_bdw, &prev->bdws, list)
792                 if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
793                         list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
794                         return true;
795                 }
796
797         nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
798                         GFP_KERNEL);
799         if (!nfit_bdw)
800                 return false;
801         INIT_LIST_HEAD(&nfit_bdw->list);
802         memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
803         list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
804         dev_dbg(dev, "bdw dcr: %d windows: %d\n",
805                         bdw->region_index, bdw->windows);
806         return true;
807 }
808
809 static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
810 {
811         if (idt->header.length < sizeof(*idt))
812                 return 0;
813         return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
814 }
815
816 static bool add_idt(struct acpi_nfit_desc *acpi_desc,
817                 struct nfit_table_prev *prev,
818                 struct acpi_nfit_interleave *idt)
819 {
820         struct device *dev = acpi_desc->dev;
821         struct nfit_idt *nfit_idt;
822
823         if (!sizeof_idt(idt))
824                 return false;
825
826         list_for_each_entry(nfit_idt, &prev->idts, list) {
827                 if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
828                         continue;
829
830                 if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
831                         list_move_tail(&nfit_idt->list, &acpi_desc->idts);
832                         return true;
833                 }
834         }
835
836         nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
837                         GFP_KERNEL);
838         if (!nfit_idt)
839                 return false;
840         INIT_LIST_HEAD(&nfit_idt->list);
841         memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
842         list_add_tail(&nfit_idt->list, &acpi_desc->idts);
843         dev_dbg(dev, "idt index: %d num_lines: %d\n",
844                         idt->interleave_index, idt->line_count);
845         return true;
846 }
847
848 static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
849 {
850         if (flush->header.length < sizeof(*flush))
851                 return 0;
852         return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
853 }
854
855 static bool add_flush(struct acpi_nfit_desc *acpi_desc,
856                 struct nfit_table_prev *prev,
857                 struct acpi_nfit_flush_address *flush)
858 {
859         struct device *dev = acpi_desc->dev;
860         struct nfit_flush *nfit_flush;
861
862         if (!sizeof_flush(flush))
863                 return false;
864
865         list_for_each_entry(nfit_flush, &prev->flushes, list) {
866                 if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
867                         continue;
868
869                 if (memcmp(nfit_flush->flush, flush,
870                                         sizeof_flush(flush)) == 0) {
871                         list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
872                         return true;
873                 }
874         }
875
876         nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
877                         + sizeof_flush(flush), GFP_KERNEL);
878         if (!nfit_flush)
879                 return false;
880         INIT_LIST_HEAD(&nfit_flush->list);
881         memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
882         list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
883         dev_dbg(dev, "nfit_flush handle: %d hint_count: %d\n",
884                         flush->device_handle, flush->hint_count);
885         return true;
886 }
887
888 static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
889                 struct acpi_nfit_capabilities *pcap)
890 {
891         struct device *dev = acpi_desc->dev;
892         u32 mask;
893
894         mask = (1 << (pcap->highest_capability + 1)) - 1;
895         acpi_desc->platform_cap = pcap->capabilities & mask;
896         dev_dbg(dev, "cap: %#x\n", acpi_desc->platform_cap);
897         return true;
898 }
899
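/*
 * Dispatch one NFIT sub-table to its add_* helper.  Returns the address of
 * the next table, NULL at the end of the NFIT (or on a zero-length table),
 * or ERR_PTR(-ENOMEM) if an allocation failed.
 */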
900 static void *add_table(struct acpi_nfit_desc *acpi_desc,
901                 struct nfit_table_prev *prev, void *table, const void *end)
902 {
903         struct device *dev = acpi_desc->dev;
904         struct acpi_nfit_header *hdr;
905         void *err = ERR_PTR(-ENOMEM);
906
907         if (table >= end)
908                 return NULL;
909
910         hdr = table;
911         if (!hdr->length) {
912                 dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
913                         hdr->type);
914                 return NULL;
915         }
916
917         switch (hdr->type) {
918         case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
919                 if (!add_spa(acpi_desc, prev, table))
920                         return err;
921                 break;
922         case ACPI_NFIT_TYPE_MEMORY_MAP:
923                 if (!add_memdev(acpi_desc, prev, table))
924                         return err;
925                 break;
926         case ACPI_NFIT_TYPE_CONTROL_REGION:
927                 if (!add_dcr(acpi_desc, prev, table))
928                         return err;
929                 break;
930         case ACPI_NFIT_TYPE_DATA_REGION:
931                 if (!add_bdw(acpi_desc, prev, table))
932                         return err;
933                 break;
934         case ACPI_NFIT_TYPE_INTERLEAVE:
935                 if (!add_idt(acpi_desc, prev, table))
936                         return err;
937                 break;
938         case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
939                 if (!add_flush(acpi_desc, prev, table))
940                         return err;
941                 break;
942         case ACPI_NFIT_TYPE_SMBIOS:
943                 dev_dbg(dev, "smbios\n");
944                 break;
945         case ACPI_NFIT_TYPE_CAPABILITIES:
946                 if (!add_platform_cap(acpi_desc, table))
947                         return err;
948                 break;
949         default:
950                 dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
951                 break;
952         }
953
954         return table + hdr->length;
955 }
956
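/*
 * Find the block-data-window SPA range mapped to this DIMM's control
 * region; clear nfit_mem->bdw if no matching SPA-BDW is found.
 */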
957 static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
958                 struct nfit_mem *nfit_mem)
959 {
960         u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
961         u16 dcr = nfit_mem->dcr->region_index;
962         struct nfit_spa *nfit_spa;
963
964         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
965                 u16 range_index = nfit_spa->spa->range_index;
966                 int type = nfit_spa_type(nfit_spa->spa);
967                 struct nfit_memdev *nfit_memdev;
968
969                 if (type != NFIT_SPA_BDW)
970                         continue;
971
972                 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
973                         if (nfit_memdev->memdev->range_index != range_index)
974                                 continue;
975                         if (nfit_memdev->memdev->device_handle != device_handle)
976                                 continue;
977                         if (nfit_memdev->memdev->region_index != dcr)
978                                 continue;
979
980                         nfit_mem->spa_bdw = nfit_spa->spa;
981                         return;
982                 }
983         }
984
985         dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
986                         nfit_mem->spa_dcr->range_index);
987         nfit_mem->bdw = NULL;
988 }
989
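/*
 * Associate a DIMM with its block-data-window resources: the BDW table
 * matching its control region, the SPA-BDW range backing it, the
 * corresponding memdev, and any interleave table for that range.
 */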
990 static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
991                 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
992 {
993         u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
994         struct nfit_memdev *nfit_memdev;
995         struct nfit_bdw *nfit_bdw;
996         struct nfit_idt *nfit_idt;
997         u16 idt_idx, range_index;
998
999         list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
1000                 if (nfit_bdw->bdw->region_index != dcr)
1001                         continue;
1002                 nfit_mem->bdw = nfit_bdw->bdw;
1003                 break;
1004         }
1005
1006         if (!nfit_mem->bdw)
1007                 return;
1008
1009         nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
1010
1011         if (!nfit_mem->spa_bdw)
1012                 return;
1013
1014         range_index = nfit_mem->spa_bdw->range_index;
1015         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1016                 if (nfit_memdev->memdev->range_index != range_index ||
1017                                 nfit_memdev->memdev->region_index != dcr)
1018                         continue;
1019                 nfit_mem->memdev_bdw = nfit_memdev->memdev;
1020                 idt_idx = nfit_memdev->memdev->interleave_index;
1021                 list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
1022                         if (nfit_idt->idt->interleave_index != idt_idx)
1023                                 continue;
1024                         nfit_mem->idt_bdw = nfit_idt->idt;
1025                         break;
1026                 }
1027                 break;
1028         }
1029 }
1030
1031 static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
1032                 struct acpi_nfit_system_address *spa)
1033 {
1034         struct nfit_mem *nfit_mem, *found;
1035         struct nfit_memdev *nfit_memdev;
1036         int type = spa ? nfit_spa_type(spa) : 0;
1037
1038         switch (type) {
1039         case NFIT_SPA_DCR:
1040         case NFIT_SPA_PM:
1041                 break;
1042         default:
1043                 if (spa)
1044                         return 0;
1045         }
1046
1047         /*
1048          * This loop runs in two modes: when a dimm is mapped the loop
1049          * adds memdev associations to an existing dimm, or creates a
1050          * dimm. In the unmapped dimm case this loop sweeps for memdev
1051          * instances with an invalid / zero range_index and adds those
1052          * dimms without spa associations.
1053          */
1054         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1055                 struct nfit_flush *nfit_flush;
1056                 struct nfit_dcr *nfit_dcr;
1057                 u32 device_handle;
1058                 u16 dcr;
1059
1060                 if (spa && nfit_memdev->memdev->range_index != spa->range_index)
1061                         continue;
1062                 if (!spa && nfit_memdev->memdev->range_index)
1063                         continue;
1064                 found = NULL;
1065                 dcr = nfit_memdev->memdev->region_index;
1066                 device_handle = nfit_memdev->memdev->device_handle;
1067                 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1068                         if (__to_nfit_memdev(nfit_mem)->device_handle
1069                                         == device_handle) {
1070                                 found = nfit_mem;
1071                                 break;
1072                         }
1073
1074                 if (found)
1075                         nfit_mem = found;
1076                 else {
1077                         nfit_mem = devm_kzalloc(acpi_desc->dev,
1078                                         sizeof(*nfit_mem), GFP_KERNEL);
1079                         if (!nfit_mem)
1080                                 return -ENOMEM;
1081                         INIT_LIST_HEAD(&nfit_mem->list);
1082                         nfit_mem->acpi_desc = acpi_desc;
1083                         list_add(&nfit_mem->list, &acpi_desc->dimms);
1084                 }
1085
1086                 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1087                         if (nfit_dcr->dcr->region_index != dcr)
1088                                 continue;
1089                         /*
1090                          * Record the control region for the dimm.  For
1091                          * the ACPI 6.1 case, where there are separate
1092                          * control regions for the pmem vs blk
1093                          * interfaces, be sure to record the extended
1094                          * blk details.
1095                          */
1096                         if (!nfit_mem->dcr)
1097                                 nfit_mem->dcr = nfit_dcr->dcr;
1098                         else if (nfit_mem->dcr->windows == 0
1099                                         && nfit_dcr->dcr->windows)
1100                                 nfit_mem->dcr = nfit_dcr->dcr;
1101                         break;
1102                 }
1103
1104                 list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
1105                         struct acpi_nfit_flush_address *flush;
1106                         u16 i;
1107
1108                         if (nfit_flush->flush->device_handle != device_handle)
1109                                 continue;
1110                         nfit_mem->nfit_flush = nfit_flush;
1111                         flush = nfit_flush->flush;
1112                         nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
1113                                         flush->hint_count,
1114                                         sizeof(struct resource),
1115                                         GFP_KERNEL);
1116                         if (!nfit_mem->flush_wpq)
1117                                 return -ENOMEM;
1118                         for (i = 0; i < flush->hint_count; i++) {
1119                                 struct resource *res = &nfit_mem->flush_wpq[i];
1120
1121                                 res->start = flush->hint_address[i];
1122                                 res->end = res->start + 8 - 1;
1123                         }
1124                         break;
1125                 }
1126
1127                 if (dcr && !nfit_mem->dcr) {
1128                         dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
1129                                         spa->range_index, dcr);
1130                         return -ENODEV;
1131                 }
1132
1133                 if (type == NFIT_SPA_DCR) {
1134                         struct nfit_idt *nfit_idt;
1135                         u16 idt_idx;
1136
1137                         /* multiple dimms may share a SPA when interleaved */
1138                         nfit_mem->spa_dcr = spa;
1139                         nfit_mem->memdev_dcr = nfit_memdev->memdev;
1140                         idt_idx = nfit_memdev->memdev->interleave_index;
1141                         list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
1142                                 if (nfit_idt->idt->interleave_index != idt_idx)
1143                                         continue;
1144                                 nfit_mem->idt_dcr = nfit_idt->idt;
1145                                 break;
1146                         }
1147                         nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
1148                 } else if (type == NFIT_SPA_PM) {
1149                         /*
1150                          * A single dimm may belong to multiple SPA-PM
1151                          * ranges, record at least one in addition to
1152                          * any SPA-DCR range.
1153                          */
1154                         nfit_mem->memdev_pmem = nfit_memdev->memdev;
1155                 } else
1156                         nfit_mem->memdev_dcr = nfit_memdev->memdev;
1157         }
1158
1159         return 0;
1160 }
1161
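/* order the dimm list by NFIT device handle */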
1162 static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
1163 {
1164         struct nfit_mem *a = container_of(_a, typeof(*a), list);
1165         struct nfit_mem *b = container_of(_b, typeof(*b), list);
1166         u32 handleA, handleB;
1167
1168         handleA = __to_nfit_memdev(a)->device_handle;
1169         handleB = __to_nfit_memdev(b)->device_handle;
1170         if (handleA < handleB)
1171                 return -1;
1172         else if (handleA > handleB)
1173                 return 1;
1174         return 0;
1175 }
1176
1177 static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
1178 {
1179         struct nfit_spa *nfit_spa;
1180         int rc;
1181
1182
1183         /*
1184          * For each SPA-DCR or SPA-PMEM address range find its
1185          * corresponding MEMDEV(s).  From each MEMDEV find the
1186          * corresponding DCR.  Then, if we're operating on a SPA-DCR,
1187          * try to find a SPA-BDW and a corresponding BDW that references
1188          * the DCR.  Throw it all into an nfit_mem object.  Note, that
1189          * BDWs are optional.
1190          */
1191         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
1192                 rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
1193                 if (rc)
1194                         return rc;
1195         }
1196
1197         /*
1198          * If a DIMM has failed to be mapped into SPA there will be no
1199          * SPA entries above. Find and register all the unmapped DIMMs
1200          * for reporting and recovery purposes.
1201          */
1202         rc = __nfit_mem_init(acpi_desc, NULL);
1203         if (rc)
1204                 return rc;
1205
1206         list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);
1207
1208         return 0;
1209 }
1210
1211 static ssize_t bus_dsm_mask_show(struct device *dev,
1212                 struct device_attribute *attr, char *buf)
1213 {
1214         struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1215         struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1216
1217         return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
1218 }
1219 static struct device_attribute dev_attr_bus_dsm_mask =
1220                 __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);
1221
1222 static ssize_t revision_show(struct device *dev,
1223                 struct device_attribute *attr, char *buf)
1224 {
1225         struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1226         struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1227         struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1228
1229         return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
1230 }
1231 static DEVICE_ATTR_RO(revision);
1232
1233 static ssize_t hw_error_scrub_show(struct device *dev,
1234                 struct device_attribute *attr, char *buf)
1235 {
1236         struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1237         struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1238         struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1239
1240         return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
1241 }
1242
1243 /*
1244  * The 'hw_error_scrub' attribute can have the following values written to it:
1245  * '0': Switch to the default mode where an exception will only insert
1246  *      the address of the memory error into the poison and badblocks lists.
1247  * '1': Enable a full scrub to happen if an exception for a memory error is
1248  *      received.
1249  */
1250 static ssize_t hw_error_scrub_store(struct device *dev,
1251                 struct device_attribute *attr, const char *buf, size_t size)
1252 {
1253         struct nvdimm_bus_descriptor *nd_desc;
1254         ssize_t rc;
1255         long val;
1256
1257         rc = kstrtol(buf, 0, &val);
1258         if (rc)
1259                 return rc;
1260
1261         device_lock(dev);
1262         nd_desc = dev_get_drvdata(dev);
1263         if (nd_desc) {
1264                 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1265
1266                 switch (val) {
1267                 case HW_ERROR_SCRUB_ON:
1268                         acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
1269                         break;
1270                 case HW_ERROR_SCRUB_OFF:
1271                         acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
1272                         break;
1273                 default:
1274                         rc = -EINVAL;
1275                         break;
1276                 }
1277         }
1278         device_unlock(dev);
1279         if (rc)
1280                 return rc;
1281         return size;
1282 }
1283 static DEVICE_ATTR_RW(hw_error_scrub);
1284
1285 /*
1286  * This shows the number of full Address Range Scrubs that have been
1287  * completed since driver load time. Userspace can wait on this using
1288  * select/poll etc. A '+' at the end indicates an ARS is in progress
1289  */
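/*
 * Minimal userspace sketch (error handling and #includes omitted) of
 * waiting for a scrub to complete; the attribute is typically found at
 * /sys/bus/nd/devices/ndbusX/nfit/scrub:
 *
 *      int fd = open("/sys/bus/nd/devices/ndbus0/nfit/scrub", O_RDONLY);
 *      struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };
 *      char val[16];
 *
 *      read(fd, val, sizeof(val));     // current count, maybe with '+'
 *      poll(&pfd, 1, -1);              // wakes when the scrub count changes
 *      lseek(fd, 0, SEEK_SET);
 *      read(fd, val, sizeof(val));     // re-read; no '+' => scrub idle
 */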
1290 static ssize_t scrub_show(struct device *dev,
1291                 struct device_attribute *attr, char *buf)
1292 {
1293         struct nvdimm_bus_descriptor *nd_desc;
1294         ssize_t rc = -ENXIO;
1295
1296         device_lock(dev);
1297         nd_desc = dev_get_drvdata(dev);
1298         if (nd_desc) {
1299                 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1300
1301                 mutex_lock(&acpi_desc->init_mutex);
1302                 rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
1303                                 acpi_desc->scrub_busy
1304                                 && !acpi_desc->cancel ? "+\n" : "\n");
1305                 mutex_unlock(&acpi_desc->init_mutex);
1306         }
1307         device_unlock(dev);
1308         return rc;
1309 }
1310
1311 static ssize_t scrub_store(struct device *dev,
1312                 struct device_attribute *attr, const char *buf, size_t size)
1313 {
1314         struct nvdimm_bus_descriptor *nd_desc;
1315         ssize_t rc;
1316         long val;
1317
1318         rc = kstrtol(buf, 0, &val);
1319         if (rc)
1320                 return rc;
1321         if (val != 1)
1322                 return -EINVAL;
1323
1324         device_lock(dev);
1325         nd_desc = dev_get_drvdata(dev);
1326         if (nd_desc) {
1327                 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1328
1329                 rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
1330         }
1331         device_unlock(dev);
1332         if (rc)
1333                 return rc;
1334         return size;
1335 }
1336 static DEVICE_ATTR_RW(scrub);
1337
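/* ARS is only usable when the bus supports all three ARS commands */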
1338 static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
1339 {
1340         struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1341         const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
1342                 | 1 << ND_CMD_ARS_STATUS;
1343
1344         return (nd_desc->cmd_mask & mask) == mask;
1345 }
1346
1347 static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
1348 {
1349         struct device *dev = container_of(kobj, struct device, kobj);
1350         struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
1351
1352         if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
1353                 return 0;
1354         return a->mode;
1355 }
1356
1357 static struct attribute *acpi_nfit_attributes[] = {
1358         &dev_attr_revision.attr,
1359         &dev_attr_scrub.attr,
1360         &dev_attr_hw_error_scrub.attr,
1361         &dev_attr_bus_dsm_mask.attr,
1362         NULL,
1363 };
1364
1365 static const struct attribute_group acpi_nfit_attribute_group = {
1366         .name = "nfit",
1367         .attrs = acpi_nfit_attributes,
1368         .is_visible = nfit_visible,
1369 };
1370
1371 static const struct attribute_group *acpi_nfit_attribute_groups[] = {
1372         &nvdimm_bus_attribute_group,
1373         &acpi_nfit_attribute_group,
1374         NULL,
1375 };
1376
1377 static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
1378 {
1379         struct nvdimm *nvdimm = to_nvdimm(dev);
1380         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1381
1382         return __to_nfit_memdev(nfit_mem);
1383 }
1384
1385 static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
1386 {
1387         struct nvdimm *nvdimm = to_nvdimm(dev);
1388         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1389
1390         return nfit_mem->dcr;
1391 }
1392
1393 static ssize_t handle_show(struct device *dev,
1394                 struct device_attribute *attr, char *buf)
1395 {
1396         struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
1397
1398         return sprintf(buf, "%#x\n", memdev->device_handle);
1399 }
1400 static DEVICE_ATTR_RO(handle);
1401
1402 static ssize_t phys_id_show(struct device *dev,
1403                 struct device_attribute *attr, char *buf)
1404 {
1405         struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);
1406
1407         return sprintf(buf, "%#x\n", memdev->physical_id);
1408 }
1409 static DEVICE_ATTR_RO(phys_id);
1410
1411 static ssize_t vendor_show(struct device *dev,
1412                 struct device_attribute *attr, char *buf)
1413 {
1414         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1415
1416         return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
1417 }
1418 static DEVICE_ATTR_RO(vendor);
1419
1420 static ssize_t rev_id_show(struct device *dev,
1421                 struct device_attribute *attr, char *buf)
1422 {
1423         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1424
1425         return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
1426 }
1427 static DEVICE_ATTR_RO(rev_id);
1428
1429 static ssize_t device_show(struct device *dev,
1430                 struct device_attribute *attr, char *buf)
1431 {
1432         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1433
1434         return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
1435 }
1436 static DEVICE_ATTR_RO(device);
1437
1438 static ssize_t subsystem_vendor_show(struct device *dev,
1439                 struct device_attribute *attr, char *buf)
1440 {
1441         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1442
1443         return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
1444 }
1445 static DEVICE_ATTR_RO(subsystem_vendor);
1446
1447 static ssize_t subsystem_rev_id_show(struct device *dev,
1448                 struct device_attribute *attr, char *buf)
1449 {
1450         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1451
1452         return sprintf(buf, "0x%04x\n",
1453                         be16_to_cpu(dcr->subsystem_revision_id));
1454 }
1455 static DEVICE_ATTR_RO(subsystem_rev_id);
1456
1457 static ssize_t subsystem_device_show(struct device *dev,
1458                 struct device_attribute *attr, char *buf)
1459 {
1460         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1461
1462         return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
1463 }
1464 static DEVICE_ATTR_RO(subsystem_device);
1465
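/*
 * Count the interface codes published for this DIMM: one for a pmem
 * mapping and one for a block-window (bdw) mapping.
 */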
1466 static int num_nvdimm_formats(struct nvdimm *nvdimm)
1467 {
1468         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1469         int formats = 0;
1470
1471         if (nfit_mem->memdev_pmem)
1472                 formats++;
1473         if (nfit_mem->memdev_bdw)
1474                 formats++;
1475         return formats;
1476 }
1477
1478 static ssize_t format_show(struct device *dev,
1479                 struct device_attribute *attr, char *buf)
1480 {
1481         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1482
1483         return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
1484 }
1485 static DEVICE_ATTR_RO(format);
1486
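/*
 * Report the secondary interface code for DIMMs that publish both a
 * pmem and a block-window format; see num_nvdimm_formats().
 */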
1487 static ssize_t format1_show(struct device *dev,
1488                 struct device_attribute *attr, char *buf)
1489 {
1490         u32 handle;
1491         ssize_t rc = -ENXIO;
1492         struct nfit_mem *nfit_mem;
1493         struct nfit_memdev *nfit_memdev;
1494         struct acpi_nfit_desc *acpi_desc;
1495         struct nvdimm *nvdimm = to_nvdimm(dev);
1496         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1497
1498         nfit_mem = nvdimm_provider_data(nvdimm);
1499         acpi_desc = nfit_mem->acpi_desc;
1500         handle = to_nfit_memdev(dev)->device_handle;
1501
1502         /* assumes DIMMs have at most 2 published interface codes */
1503         mutex_lock(&acpi_desc->init_mutex);
1504         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1505                 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
1506                 struct nfit_dcr *nfit_dcr;
1507
1508                 if (memdev->device_handle != handle)
1509                         continue;
1510
1511                 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1512                         if (nfit_dcr->dcr->region_index != memdev->region_index)
1513                                 continue;
1514                         if (nfit_dcr->dcr->code == dcr->code)
1515                                 continue;
1516                         rc = sprintf(buf, "0x%04x\n",
1517                                         le16_to_cpu(nfit_dcr->dcr->code));
1518                         break;
1519                 }
1520                 if (rc != -ENXIO)
1521                         break;
1522         }
1523         mutex_unlock(&acpi_desc->init_mutex);
1524         return rc;
1525 }
1526 static DEVICE_ATTR_RO(format1);
1527
1528 static ssize_t formats_show(struct device *dev,
1529                 struct device_attribute *attr, char *buf)
1530 {
1531         struct nvdimm *nvdimm = to_nvdimm(dev);
1532
1533         return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
1534 }
1535 static DEVICE_ATTR_RO(formats);
1536
1537 static ssize_t serial_show(struct device *dev,
1538                 struct device_attribute *attr, char *buf)
1539 {
1540         struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);
1541
1542         return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
1543 }
1544 static DEVICE_ATTR_RO(serial);
1545
1546 static ssize_t family_show(struct device *dev,
1547                 struct device_attribute *attr, char *buf)
1548 {
1549         struct nvdimm *nvdimm = to_nvdimm(dev);
1550         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1551
1552         if (nfit_mem->family < 0)
1553                 return -ENXIO;
1554         return sprintf(buf, "%d\n", nfit_mem->family);
1555 }
1556 static DEVICE_ATTR_RO(family);
1557
1558 static ssize_t dsm_mask_show(struct device *dev,
1559                 struct device_attribute *attr, char *buf)
1560 {
1561         struct nvdimm *nvdimm = to_nvdimm(dev);
1562         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1563
1564         if (nfit_mem->family < 0)
1565                 return -ENXIO;
1566         return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
1567 }
1568 static DEVICE_ATTR_RO(dsm_mask);
1569
1570 static ssize_t flags_show(struct device *dev,
1571                 struct device_attribute *attr, char *buf)
1572 {
1573         struct nvdimm *nvdimm = to_nvdimm(dev);
1574         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1575         u16 flags = __to_nfit_memdev(nfit_mem)->flags;
1576
1577         if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
1578                 flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
1579
1580         return sprintf(buf, "%s%s%s%s%s%s%s\n",
1581                 flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
1582                 flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
1583                 flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
1584                 flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
1585                 flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
1586                 flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
1587                 flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
1588 }
1589 static DEVICE_ATTR_RO(flags);
1590
1591 static ssize_t id_show(struct device *dev,
1592                 struct device_attribute *attr, char *buf)
1593 {
1594         struct nvdimm *nvdimm = to_nvdimm(dev);
1595         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1596
1597         return sprintf(buf, "%s\n", nfit_mem->id);
1598 }
1599 static DEVICE_ATTR_RO(id);
1600
1601 static ssize_t dirty_shutdown_show(struct device *dev,
1602                 struct device_attribute *attr, char *buf)
1603 {
1604         struct nvdimm *nvdimm = to_nvdimm(dev);
1605         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1606
1607         return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
1608 }
1609 static DEVICE_ATTR_RO(dirty_shutdown);
1610
1611 static struct attribute *acpi_nfit_dimm_attributes[] = {
1612         &dev_attr_handle.attr,
1613         &dev_attr_phys_id.attr,
1614         &dev_attr_vendor.attr,
1615         &dev_attr_device.attr,
1616         &dev_attr_rev_id.attr,
1617         &dev_attr_subsystem_vendor.attr,
1618         &dev_attr_subsystem_device.attr,
1619         &dev_attr_subsystem_rev_id.attr,
1620         &dev_attr_format.attr,
1621         &dev_attr_formats.attr,
1622         &dev_attr_format1.attr,
1623         &dev_attr_serial.attr,
1624         &dev_attr_flags.attr,
1625         &dev_attr_id.attr,
1626         &dev_attr_family.attr,
1627         &dev_attr_dsm_mask.attr,
1628         &dev_attr_dirty_shutdown.attr,
1629         NULL,
1630 };
1631
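/*
 * Attribute visibility: without a control region only the memdev-derived
 * attributes are exposed, format1 is hidden for single-format DIMMs, and
 * dirty_shutdown is hidden when the DIMM did not report a shutdown count.
 */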
1632 static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
1633                 struct attribute *a, int n)
1634 {
1635         struct device *dev = container_of(kobj, struct device, kobj);
1636         struct nvdimm *nvdimm = to_nvdimm(dev);
1637         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
1638
1639         if (!to_nfit_dcr(dev)) {
1640                 /* Without a dcr only the memdev attributes can be surfaced */
1641                 if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
1642                                 || a == &dev_attr_flags.attr
1643                                 || a == &dev_attr_family.attr
1644                                 || a == &dev_attr_dsm_mask.attr)
1645                         return a->mode;
1646                 return 0;
1647         }
1648
1649         if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
1650                 return 0;
1651
1652         if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
1653                         && a == &dev_attr_dirty_shutdown.attr)
1654                 return 0;
1655
1656         return a->mode;
1657 }
1658
1659 static const struct attribute_group acpi_nfit_dimm_attribute_group = {
1660         .name = "nfit",
1661         .attrs = acpi_nfit_dimm_attributes,
1662         .is_visible = acpi_nfit_dimm_attr_visible,
1663 };
1664
1665 static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
1666         &nvdimm_attribute_group,
1667         &nd_device_attribute_group,
1668         &acpi_nfit_dimm_attribute_group,
1669         NULL,
1670 };
1671
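/* Find a previously registered nvdimm by its NFIT device handle. */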
1672 static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
1673                 u32 device_handle)
1674 {
1675         struct nfit_mem *nfit_mem;
1676
1677         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1678                 if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
1679                         return nfit_mem->nvdimm;
1680
1681         return NULL;
1682 }
1683
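/*
 * DIMM-scope notification: on a health event, poke the cached sysfs
 * "flags" dirent so userspace pollers re-read the DIMM state.
 */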
1684 void __acpi_nvdimm_notify(struct device *dev, u32 event)
1685 {
1686         struct nfit_mem *nfit_mem;
1687         struct acpi_nfit_desc *acpi_desc;
1688
1689         dev_dbg(dev->parent, "%s: event: %d\n", dev_name(dev),
1690                         event);
1691
1692         if (event != NFIT_NOTIFY_DIMM_HEALTH) {
1693                 dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
1694                                 event);
1695                 return;
1696         }
1697
1698         acpi_desc = dev_get_drvdata(dev->parent);
1699         if (!acpi_desc)
1700                 return;
1701
1702         /*
1703          * If we successfully retrieved acpi_desc, then we know nfit_mem data
1704          * is still valid.
1705          */
1706         nfit_mem = dev_get_drvdata(dev);
1707         if (nfit_mem && nfit_mem->flags_attr)
1708                 sysfs_notify_dirent(nfit_mem->flags_attr);
1709 }
1710 EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);
1711
1712 static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
1713 {
1714         struct acpi_device *adev = data;
1715         struct device *dev = &adev->dev;
1716
1717         device_lock(dev->parent);
1718         __acpi_nvdimm_notify(dev, event);
1719         device_unlock(dev->parent);
1720 }
1721
1722 static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
1723 {
1724         acpi_handle handle;
1725         acpi_status status;
1726
1727         status = acpi_get_handle(adev->handle, method, &handle);
1728
1729         if (ACPI_SUCCESS(status))
1730                 return true;
1731         return false;
1732 }
1733
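/*
 * Issue the Intel SMART DSM and latch the dirty-shutdown state and
 * count into nfit_mem. Declared __weak so it can be overridden, e.g.
 * by test infrastructure.
 */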
1734 __weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
1735 {
1736         struct nd_intel_smart smart = { 0 };
1737         union acpi_object in_buf = {
1738                 .type = ACPI_TYPE_BUFFER,
1739                 .buffer.pointer = (char *) &smart,
1740                 .buffer.length = sizeof(smart),
1741         };
1742         union acpi_object in_obj = {
1743                 .type = ACPI_TYPE_PACKAGE,
1744                 .package.count = 1,
1745                 .package.elements = &in_buf,
1746         };
1747         const u8 func = ND_INTEL_SMART;
1748         const guid_t *guid = to_nfit_uuid(nfit_mem->family);
1749         u8 revid = nfit_dsm_revid(nfit_mem->family, func);
1750         struct acpi_device *adev = nfit_mem->adev;
1751         acpi_handle handle = adev->handle;
1752         union acpi_object *out_obj;
1753
1754         if ((nfit_mem->dsm_mask & (1 << func)) == 0)
1755                 return;
1756
1757         out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
1758         if (!out_obj)
1759                 return;
1760
1761         if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
1762                 if (smart.shutdown_state)
1763                         set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
1764         }
1765
1766         if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
1767                 set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
1768                 nfit_mem->dirty_shutdown = smart.shutdown_count;
1769         }
1770         ACPI_FREE(out_obj);
1771 }
1772
1773 static void populate_shutdown_status(struct nfit_mem *nfit_mem)
1774 {
1775         /*
1776          * For DIMMs that provide a dynamic facility to retrieve a
1777          * dirty-shutdown status and/or a dirty-shutdown count, cache
1778          * these values in nfit_mem.
1779          */
1780         if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
1781                 nfit_intel_shutdown_status(nfit_mem);
1782 }
1783
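/*
 * Bind a nfit_mem to its companion ACPI device, construct its unique
 * id string, detect which DSM family and functions the DIMM
 * implements, and register for device notifications.
 */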
1784 static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
1785                 struct nfit_mem *nfit_mem, u32 device_handle)
1786 {
1787         struct acpi_device *adev, *adev_dimm;
1788         struct device *dev = acpi_desc->dev;
1789         unsigned long dsm_mask, label_mask;
1790         const guid_t *guid;
1791         int i;
1792         int family = -1;
1793         struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
1794
1795         /* nfit test assumes 1:1 relationship between commands and dsms */
1796         nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
1797         nfit_mem->family = NVDIMM_FAMILY_INTEL;
1798
1799         if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
1800                 sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x",
1801                                 be16_to_cpu(dcr->vendor_id),
1802                                 dcr->manufacturing_location,
1803                                 be16_to_cpu(dcr->manufacturing_date),
1804                                 be32_to_cpu(dcr->serial_number));
1805         else
1806                 sprintf(nfit_mem->id, "%04x-%08x",
1807                                 be16_to_cpu(dcr->vendor_id),
1808                                 be32_to_cpu(dcr->serial_number));
1809
1810         adev = to_acpi_dev(acpi_desc);
1811         if (!adev) {
1812                 /* unit test case */
1813                 populate_shutdown_status(nfit_mem);
1814                 return 0;
1815         }
1816
1817         adev_dimm = acpi_find_child_device(adev, device_handle, false);
1818         nfit_mem->adev = adev_dimm;
1819         if (!adev_dimm) {
1820                 dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
1821                                 device_handle);
1822                 return force_enable_dimms ? 0 : -ENODEV;
1823         }
1824
1825         if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
1826                 ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
1827                 dev_err(dev, "%s: notification registration failed\n",
1828                                 dev_name(&adev_dimm->dev));
1829                 return -ENXIO;
1830         }
1831         /*
1832          * Record nfit_mem for the notification path to track back to
1833          * the nfit sysfs attributes for this dimm device object.
1834          */
1835         dev_set_drvdata(&adev_dimm->dev, nfit_mem);
1836
1837         /*
1838          * Until standardization materializes we need to consider 4
1839          * different command sets.  Note that checking for function0 (bit0)
1840          * tells us if any commands are reachable through this GUID.
1841          */
1842         for (i = 0; i <= NVDIMM_FAMILY_MAX; i++)
1843                 if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
1844                         if (family < 0 || i == default_dsm_family)
1845                                 family = i;
1846
1847         /* limit the supported commands to those that are publicly documented */
1848         nfit_mem->family = family;
1849         if (override_dsm_mask && !disable_vendor_specific)
1850                 dsm_mask = override_dsm_mask;
1851         else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1852                 dsm_mask = NVDIMM_INTEL_CMDMASK;
1853                 if (disable_vendor_specific)
1854                         dsm_mask &= ~(1 << ND_CMD_VENDOR);
1855         } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
1856                 dsm_mask = 0x1c3c76;
1857         } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
1858                 dsm_mask = 0x1fe;
1859                 if (disable_vendor_specific)
1860                         dsm_mask &= ~(1 << 8);
1861         } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
1862                 dsm_mask = 0xffffffff;
1863         } else {
1864                 dev_dbg(dev, "unknown dimm command family\n");
1865                 nfit_mem->family = -1;
1866                 /* DSMs are optional, continue loading the driver... */
1867                 return 0;
1868         }
1869
1870         guid = to_nfit_uuid(nfit_mem->family);
1871         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
1872                 if (acpi_check_dsm(adev_dimm->handle, guid,
1873                                         nfit_dsm_revid(nfit_mem->family, i),
1874                                         1ULL << i))
1875                         set_bit(i, &nfit_mem->dsm_mask);
1876
1877         /*
1878          * Prefer the NVDIMM_FAMILY_INTEL label read commands if present
1879          * due to their better semantics for handling locked capacity.
1880          */
1881         label_mask = 1 << ND_CMD_GET_CONFIG_SIZE | 1 << ND_CMD_GET_CONFIG_DATA
1882                 | 1 << ND_CMD_SET_CONFIG_DATA;
1883         if (family == NVDIMM_FAMILY_INTEL
1884                         && (dsm_mask & label_mask) == label_mask)
1885                 return 0;
1886
1887         if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
1888                         && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
1889                 dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
1890                 set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
1891         }
1892
1893         if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
1894                         && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
1895                 dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
1896                 set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
1897         }
1898
1899         populate_shutdown_status(nfit_mem);
1900
1901         return 0;
1902 }
1903
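/*
 * devm teardown action: drop the sysfs "flags" references and remove
 * the per-DIMM notify handlers installed by acpi_nfit_add_dimm().
 */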
1904 static void shutdown_dimm_notify(void *data)
1905 {
1906         struct acpi_nfit_desc *acpi_desc = data;
1907         struct nfit_mem *nfit_mem;
1908
1909         mutex_lock(&acpi_desc->init_mutex);
1910         /*
1911          * Clear out the nfit_mem->flags_attr and shut down dimm event
1912          * notifications.
1913          */
1914         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1915                 struct acpi_device *adev_dimm = nfit_mem->adev;
1916
1917                 if (nfit_mem->flags_attr) {
1918                         sysfs_put(nfit_mem->flags_attr);
1919                         nfit_mem->flags_attr = NULL;
1920                 }
1921                 if (adev_dimm) {
1922                         acpi_remove_notify_handler(adev_dimm->handle,
1923                                         ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
1924                         dev_set_drvdata(&adev_dimm->dev, NULL);
1925                 }
1926         }
1927         mutex_unlock(&acpi_desc->init_mutex);
1928 }
1929
1930 static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family)
1931 {
1932         switch (family) {
1933         case NVDIMM_FAMILY_INTEL:
1934                 return intel_security_ops;
1935         default:
1936                 return NULL;
1937         }
1938 }
1939
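/*
 * Create an nvdimm device for every nfit_mem entry, validate the DIMM
 * count with the bus, then enable health-event notification for the
 * registered devices.
 */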
1940 static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
1941 {
1942         struct nfit_mem *nfit_mem;
1943         int dimm_count = 0, rc;
1944         struct nvdimm *nvdimm;
1945
1946         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1947                 struct acpi_nfit_flush_address *flush;
1948                 unsigned long flags = 0, cmd_mask;
1949                 struct nfit_memdev *nfit_memdev;
1950                 u32 device_handle;
1951                 u16 mem_flags;
1952
1953                 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
1954                 nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
1955                 if (nvdimm) {
1956                         dimm_count++;
1957                         continue;
1958                 }
1959
1960                 if (nfit_mem->bdw && nfit_mem->memdev_pmem)
1961                         set_bit(NDD_ALIASING, &flags);
1962
1963                 /* collate flags across all memdevs for this dimm */
1964                 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1965                         struct acpi_nfit_memory_map *dimm_memdev;
1966
1967                         dimm_memdev = __to_nfit_memdev(nfit_mem);
1968                         if (dimm_memdev->device_handle
1969                                         != nfit_memdev->memdev->device_handle)
1970                                 continue;
1971                         dimm_memdev->flags |= nfit_memdev->memdev->flags;
1972                 }
1973
1974                 mem_flags = __to_nfit_memdev(nfit_mem)->flags;
1975                 if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
1976                         set_bit(NDD_UNARMED, &flags);
1977
1978                 rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
1979                 if (rc)
1980                         continue;
1981
1982                 /*
1983                  * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
1984                  * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
1985                  * userspace interface.
1986                  */
1987                 cmd_mask = 1UL << ND_CMD_CALL;
1988                 if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
1989                         /*
1990                          * These commands have a 1:1 correspondence
1991                          * between DSM payload and libnvdimm ioctl
1992                          * payload format.
1993                          */
1994                         cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
1995                 }
1996
1997                 if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
1998                         set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
1999                         set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
2000                 }
2001                 if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
2002                         set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
2003
2004                 flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
2005                         : NULL;
2006                 nvdimm = __nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
2007                                 acpi_nfit_dimm_attribute_groups,
2008                                 flags, cmd_mask, flush ? flush->hint_count : 0,
2009                                 nfit_mem->flush_wpq, &nfit_mem->id[0],
2010                                 acpi_nfit_get_security_ops(nfit_mem->family));
2011                 if (!nvdimm)
2012                         return -ENOMEM;
2013
2014                 nfit_mem->nvdimm = nvdimm;
2015                 dimm_count++;
2016
2017                 if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
2018                         continue;
2019
2020                 dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
2021                                 nvdimm_name(nvdimm),
2022                   mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
2023                   mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
2024                   mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
2025                   mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
2026                   mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");
2027
2028         }
2029
2030         rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
2031         if (rc)
2032                 return rc;
2033
2034         /*
2035          * Now that dimms are successfully registered, and async registration
2036          * is flushed, attempt to enable event notification.
2037          */
2038         list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
2039                 struct kernfs_node *nfit_kernfs;
2040
2041                 nvdimm = nfit_mem->nvdimm;
2042                 if (!nvdimm)
2043                         continue;
2044
2045                 rc = nvdimm_security_setup_events(nvdimm);
2046                 if (rc < 0)
2047                         dev_warn(acpi_desc->dev,
2048                                 "security event setup failed: %d\n", rc);
2049
2050                 nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
2051                 if (nfit_kernfs)
2052                         nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
2053                                         "flags");
2054                 sysfs_put(nfit_kernfs);
2055                 if (!nfit_mem->flags_attr)
2056                         dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
2057                                         nvdimm_name(nvdimm));
2058         }
2059
2060         return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
2061                         acpi_desc);
2062 }
2063
2064 /*
2065  * These constants are private because there are no kernel consumers of
2066  * these commands.
2067  */
2068 enum nfit_aux_cmds {
2069         NFIT_CMD_TRANSLATE_SPA = 5,
2070         NFIT_CMD_ARS_INJECT_SET = 7,
2071         NFIT_CMD_ARS_INJECT_CLEAR = 8,
2072         NFIT_CMD_ARS_INJECT_GET = 9,
2073 };
2074
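/*
 * Probe the bus-scope _DSM functions (ARS, clear-error, translate-SPA,
 * and error injection) and record the supported set in cmd_mask and
 * bus_dsm_mask.
 */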
2075 static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
2076 {
2077         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2078         const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
2079         struct acpi_device *adev;
2080         unsigned long dsm_mask;
2081         int i;
2082
2083         nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
2084         nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
2085         adev = to_acpi_dev(acpi_desc);
2086         if (!adev)
2087                 return;
2088
2089         for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
2090                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2091                         set_bit(i, &nd_desc->cmd_mask);
2092         set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);
2093
2094         dsm_mask =
2095                 (1 << ND_CMD_ARS_CAP) |
2096                 (1 << ND_CMD_ARS_START) |
2097                 (1 << ND_CMD_ARS_STATUS) |
2098                 (1 << ND_CMD_CLEAR_ERROR) |
2099                 (1 << NFIT_CMD_TRANSLATE_SPA) |
2100                 (1 << NFIT_CMD_ARS_INJECT_SET) |
2101                 (1 << NFIT_CMD_ARS_INJECT_CLEAR) |
2102                 (1 << NFIT_CMD_ARS_INJECT_GET);
2103         for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
2104                 if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
2105                         set_bit(i, &nd_desc->bus_dsm_mask);
2106 }
2107
2108 static ssize_t range_index_show(struct device *dev,
2109                 struct device_attribute *attr, char *buf)
2110 {
2111         struct nd_region *nd_region = to_nd_region(dev);
2112         struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
2113
2114         return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
2115 }
2116 static DEVICE_ATTR_RO(range_index);
2117
2118 static struct attribute *acpi_nfit_region_attributes[] = {
2119         &dev_attr_range_index.attr,
2120         NULL,
2121 };
2122
2123 static const struct attribute_group acpi_nfit_region_attribute_group = {
2124         .name = "nfit",
2125         .attrs = acpi_nfit_region_attributes,
2126 };
2127
2128 static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
2129         &nd_region_attribute_group,
2130         &nd_mapping_attribute_group,
2131         &nd_device_attribute_group,
2132         &nd_numa_attribute_group,
2133         &acpi_nfit_region_attribute_group,
2134         NULL,
2135 };
2136
2137 /* enough info to uniquely specify an interleave set */
2138 struct nfit_set_info {
2139         struct nfit_set_info_map {
2140                 u64 region_offset;
2141                 u32 serial_number;
2142                 u32 pad;
2143         } mapping[0];
2144 };
2145
2146 struct nfit_set_info2 {
2147         struct nfit_set_info_map2 {
2148                 u64 region_offset;
2149                 u32 serial_number;
2150                 u16 vendor_id;
2151                 u16 manufacturing_date;
2152                 u8  manufacturing_location;
2153                 u8  reserved[31];
2154         } mapping[0];
2155 };
2156
2157 static size_t sizeof_nfit_set_info(int num_mappings)
2158 {
2159         return sizeof(struct nfit_set_info)
2160                 + num_mappings * sizeof(struct nfit_set_info_map);
2161 }
2162
2163 static size_t sizeof_nfit_set_info2(int num_mappings)
2164 {
2165         return sizeof(struct nfit_set_info2)
2166                 + num_mappings * sizeof(struct nfit_set_info_map2);
2167 }
2168
2169 static int cmp_map_compat(const void *m0, const void *m1)
2170 {
2171         const struct nfit_set_info_map *map0 = m0;
2172         const struct nfit_set_info_map *map1 = m1;
2173
2174         return memcmp(&map0->region_offset, &map1->region_offset,
2175                         sizeof(u64));
2176 }
2177
2178 static int cmp_map(const void *m0, const void *m1)
2179 {
2180         const struct nfit_set_info_map *map0 = m0;
2181         const struct nfit_set_info_map *map1 = m1;
2182
2183         if (map0->region_offset < map1->region_offset)
2184                 return -1;
2185         else if (map0->region_offset > map1->region_offset)
2186                 return 1;
2187         return 0;
2188 }
2189
2190 static int cmp_map2(const void *m0, const void *m1)
2191 {
2192         const struct nfit_set_info_map2 *map0 = m0;
2193         const struct nfit_set_info_map2 *map1 = m1;
2194
2195         if (map0->region_offset < map1->region_offset)
2196                 return -1;
2197         else if (map0->region_offset > map1->region_offset)
2198                 return 1;
2199         return 0;
2200 }
2201
2202 /* Retrieve the nth entry referencing this spa */
2203 static struct acpi_nfit_memory_map *memdev_from_spa(
2204                 struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
2205 {
2206         struct nfit_memdev *nfit_memdev;
2207
2208         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
2209                 if (nfit_memdev->memdev->range_index == range_index)
2210                         if (n-- == 0)
2211                                 return nfit_memdev->memdev;
2212         return NULL;
2213 }
2214
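/*
 * Compute the interleave-set cookies (v1.1, v1.2, and the v1.1 compat
 * sort order) from each mapping's region offset and DIMM identity, and
 * record each DIMM's position within the set.
 */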
2215 static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
2216                 struct nd_region_desc *ndr_desc,
2217                 struct acpi_nfit_system_address *spa)
2218 {
2219         struct device *dev = acpi_desc->dev;
2220         struct nd_interleave_set *nd_set;
2221         u16 nr = ndr_desc->num_mappings;
2222         struct nfit_set_info2 *info2;
2223         struct nfit_set_info *info;
2224         int i;
2225
2226         nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
2227         if (!nd_set)
2228                 return -ENOMEM;
2229         guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);
2230
2231         info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
2232         if (!info)
2233                 return -ENOMEM;
2234
2235         info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
2236         if (!info2)
2237                 return -ENOMEM;
2238
2239         for (i = 0; i < nr; i++) {
2240                 struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
2241                 struct nfit_set_info_map *map = &info->mapping[i];
2242                 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2243                 struct nvdimm *nvdimm = mapping->nvdimm;
2244                 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2245                 struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
2246                                 spa->range_index, i);
2247                 struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2248
2249                 if (!memdev || !nfit_mem->dcr) {
2250                         dev_err(dev, "%s: failed to find DCR\n", __func__);
2251                         return -ENODEV;
2252                 }
2253
2254                 map->region_offset = memdev->region_offset;
2255                 map->serial_number = dcr->serial_number;
2256
2257                 map2->region_offset = memdev->region_offset;
2258                 map2->serial_number = dcr->serial_number;
2259                 map2->vendor_id = dcr->vendor_id;
2260                 map2->manufacturing_date = dcr->manufacturing_date;
2261                 map2->manufacturing_location = dcr->manufacturing_location;
2262         }
2263
2264         /* v1.1 namespaces */
2265         sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2266                         cmp_map, NULL);
2267         nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2268
2269         /* v1.2 namespaces */
2270         sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
2271                         cmp_map2, NULL);
2272         nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);
2273
2274         /* support v1.1 namespaces created with the wrong sort order */
2275         sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
2276                         cmp_map_compat, NULL);
2277         nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
2278
2279         /* record the result of the sort for the mapping position */
2280         for (i = 0; i < nr; i++) {
2281                 struct nfit_set_info_map2 *map2 = &info2->mapping[i];
2282                 int j;
2283
2284                 for (j = 0; j < nr; j++) {
2285                         struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
2286                         struct nvdimm *nvdimm = mapping->nvdimm;
2287                         struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
2288                         struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
2289
2290                         if (map2->serial_number == dcr->serial_number &&
2291                             map2->vendor_id == dcr->vendor_id &&
2292                             map2->manufacturing_date == dcr->manufacturing_date &&
2293                             map2->manufacturing_location
2294                                     == dcr->manufacturing_location) {
2295                                 mapping->position = i;
2296                                 break;
2297                         }
2298                 }
2299         }
2300
2301         ndr_desc->nd_set = nd_set;
2302         devm_kfree(dev, info);
2303         devm_kfree(dev, info2);
2304
2305         return 0;
2306 }
2307
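/*
 * Translate a linear offset within a block window into the actual
 * mmio offset, accounting for the interleave described by the
 * interleave descriptor table (IDT).
 */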
2308 static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
2309 {
2310         struct acpi_nfit_interleave *idt = mmio->idt;
2311         u32 sub_line_offset, line_index, line_offset;
2312         u64 line_no, table_skip_count, table_offset;
2313
2314         line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
2315         table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
2316         line_offset = idt->line_offset[line_index]
2317                 * mmio->line_size;
2318         table_offset = table_skip_count * mmio->table_size;
2319
2320         return mmio->base_offset + line_offset + table_offset + sub_line_offset;
2321 }
2322
2323 static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
2324 {
2325         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2326         u64 offset = nfit_blk->stat_offset + mmio->size * bw;
2327         const u32 STATUS_MASK = 0x80000037;
2328
2329         if (mmio->num_lines)
2330                 offset = to_interleave_offset(offset, mmio);
2331
2332         return readl(mmio->addr.base + offset) & STATUS_MASK;
2333 }
2334
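/*
 * Program the block-window command register: encode the DPA (in
 * cache-line units), transfer length, and read/write direction, then
 * flush, reading the register back when the DIMM requires a latch.
 */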
2335 static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
2336                 resource_size_t dpa, unsigned int len, unsigned int write)
2337 {
2338         u64 cmd, offset;
2339         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
2340
2341         enum {
2342                 BCW_OFFSET_MASK = (1ULL << 48)-1,
2343                 BCW_LEN_SHIFT = 48,
2344                 BCW_LEN_MASK = (1ULL << 8) - 1,
2345                 BCW_CMD_SHIFT = 56,
2346         };
2347
2348         cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
2349         len = len >> L1_CACHE_SHIFT;
2350         cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
2351         cmd |= ((u64) write) << BCW_CMD_SHIFT;
2352
2353         offset = nfit_blk->cmd_offset + mmio->size * bw;
2354         if (mmio->num_lines)
2355                 offset = to_interleave_offset(offset, mmio);
2356
2357         writeq(cmd, mmio->addr.base + offset);
2358         nvdimm_flush(nfit_blk->nd_region);
2359
2360         if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
2361                 readq(mmio->addr.base + offset);
2362 }
2363
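/*
 * Perform one block-window transfer on the given lane: program the
 * control register, then copy through the aperture, line by line when
 * the window is interleaved.
 */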
2364 static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
2365                 resource_size_t dpa, void *iobuf, size_t len, int rw,
2366                 unsigned int lane)
2367 {
2368         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2369         unsigned int copied = 0;
2370         u64 base_offset;
2371         int rc;
2372
2373         base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
2374                 + lane * mmio->size;
2375         write_blk_ctl(nfit_blk, lane, dpa, len, rw);
2376         while (len) {
2377                 unsigned int c;
2378                 u64 offset;
2379
2380                 if (mmio->num_lines) {
2381                         u32 line_offset;
2382
2383                         offset = to_interleave_offset(base_offset + copied,
2384                                         mmio);
2385                         div_u64_rem(offset, mmio->line_size, &line_offset);
2386                         c = min_t(size_t, len, mmio->line_size - line_offset);
2387                 } else {
2388                         offset = base_offset + nfit_blk->bdw_offset;
2389                         c = len;
2390                 }
2391
2392                 if (rw)
2393                         memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
2394                 else {
2395                         if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
2396                                 arch_invalidate_pmem((void __force *)
2397                                         mmio->addr.aperture + offset, c);
2398
2399                         memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
2400                 }
2401
2402                 copied += c;
2403                 len -= c;
2404         }
2405
2406         if (rw)
2407                 nvdimm_flush(nfit_blk->nd_region);
2408
2409         rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
2410         return rc;
2411 }
2412
2413 static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
2414                 resource_size_t dpa, void *iobuf, u64 len, int rw)
2415 {
2416         struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
2417         struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
2418         struct nd_region *nd_region = nfit_blk->nd_region;
2419         unsigned int lane, copied = 0;
2420         int rc = 0;
2421
2422         lane = nd_region_acquire_lane(nd_region);
2423         while (len) {
2424                 u64 c = min(len, mmio->size);
2425
2426                 rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
2427                                 iobuf + copied, c, rw, lane);
2428                 if (rc)
2429                         break;
2430
2431                 copied += c;
2432                 len -= c;
2433         }
2434         nd_region_release_lane(nd_region, lane);
2435
2436         return rc;
2437 }
2438
2439 static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
2440                 struct acpi_nfit_interleave *idt, u16 interleave_ways)
2441 {
2442         if (idt) {
2443                 mmio->num_lines = idt->line_count;
2444                 mmio->line_size = idt->line_size;
2445                 if (interleave_ways == 0)
2446                         return -ENXIO;
2447                 mmio->table_size = mmio->num_lines * interleave_ways
2448                         * mmio->line_size;
2449         }
2450
2451         return 0;
2452 }
2453
2454 static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
2455                 struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
2456 {
2457         struct nd_cmd_dimm_flags flags;
2458         int rc;
2459
2460         memset(&flags, 0, sizeof(flags));
2461         rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
2462                         sizeof(flags), NULL);
2463
2464         if (rc >= 0 && flags.status == 0)
2465                 nfit_blk->dimm_flags = flags.flags;
2466         else if (rc == -ENOTTY) {
2467                 /* fall back to a conservative default */
2468                 nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
2469                 rc = 0;
2470         } else
2471                 rc = -ENXIO;
2472
2473         return rc;
2474 }
2475
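/*
 * Map the block aperture and control/status registers, set up the
 * interleave geometry for both, and fetch the DIMM flags that govern
 * flush/latch behavior before any block I/O is allowed.
 */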
2476 static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
2477                 struct device *dev)
2478 {
2479         struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
2480         struct nd_blk_region *ndbr = to_nd_blk_region(dev);
2481         struct nfit_blk_mmio *mmio;
2482         struct nfit_blk *nfit_blk;
2483         struct nfit_mem *nfit_mem;
2484         struct nvdimm *nvdimm;
2485         int rc;
2486
2487         nvdimm = nd_blk_region_to_dimm(ndbr);
2488         nfit_mem = nvdimm_provider_data(nvdimm);
2489         if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
2490                 dev_dbg(dev, "missing%s%s%s\n",
2491                                 nfit_mem ? "" : " nfit_mem",
2492                                 (nfit_mem && nfit_mem->dcr) ? "" : " dcr",
2493                                 (nfit_mem && nfit_mem->bdw) ? "" : " bdw");
2494                 return -ENXIO;
2495         }
2496
2497         nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
2498         if (!nfit_blk)
2499                 return -ENOMEM;
2500         nd_blk_region_set_provider_data(ndbr, nfit_blk);
2501         nfit_blk->nd_region = to_nd_region(dev);
2502
2503         /* map block aperture memory */
2504         nfit_blk->bdw_offset = nfit_mem->bdw->offset;
2505         mmio = &nfit_blk->mmio[BDW];
2506         mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
2507                         nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
2508         if (!mmio->addr.base) {
2509                 dev_dbg(dev, "%s failed to map bdw\n",
2510                                 nvdimm_name(nvdimm));
2511                 return -ENOMEM;
2512         }
2513         mmio->size = nfit_mem->bdw->size;
2514         mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
2515         mmio->idt = nfit_mem->idt_bdw;
2516         mmio->spa = nfit_mem->spa_bdw;
2517         rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
2518                         nfit_mem->memdev_bdw->interleave_ways);
2519         if (rc) {
2520                 dev_dbg(dev, "%s failed to init bdw interleave\n",
2521                                 nvdimm_name(nvdimm));
2522                 return rc;
2523         }
2524
2525         /* map block control memory */
2526         nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
2527         nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
2528         mmio = &nfit_blk->mmio[DCR];
2529         mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
2530                         nfit_mem->spa_dcr->length);
2531         if (!mmio->addr.base) {
2532                 dev_dbg(dev, "%s failed to map dcr\n",
2533                                 nvdimm_name(nvdimm));
2534                 return -ENOMEM;
2535         }
2536         mmio->size = nfit_mem->dcr->window_size;
2537         mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
2538         mmio->idt = nfit_mem->idt_dcr;
2539         mmio->spa = nfit_mem->spa_dcr;
2540         rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
2541                         nfit_mem->memdev_dcr->interleave_ways);
2542         if (rc) {
2543                 dev_dbg(dev, "%s failed to init dcr interleave\n",
2544                                 nvdimm_name(nvdimm));
2545                 return rc;
2546         }
2547
2548         rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
2549         if (rc < 0) {
2550                 dev_dbg(dev, "%s failed get DIMM flags\n",
2551                                 nvdimm_name(nvdimm));
2552                 return rc;
2553         }
2554
2555         if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
2556                 dev_warn(dev, "unable to guarantee persistence of writes\n");
2557
2558         if (mmio->line_size == 0)
2559                 return 0;
2560
2561         if ((u32) nfit_blk->cmd_offset % mmio->line_size
2562                         + 8 > mmio->line_size) {
2563                 dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
2564                 return -ENXIO;
2565         } else if ((u32) nfit_blk->stat_offset % mmio->line_size
2566                         + 8 > mmio->line_size) {
2567                 dev_dbg(dev, "stat_offset crosses interleave boundary\n");
2568                 return -ENXIO;
2569         }
2570
2571         return 0;
2572 }
2573
2574 static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
2575                 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
2576 {
2577         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2578         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2579         int cmd_rc, rc;
2580
2581         cmd->address = spa->address;
2582         cmd->length = spa->length;
2583         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
2584                         sizeof(*cmd), &cmd_rc);
2585         if (rc < 0)
2586                 return rc;
2587         return cmd_rc;
2588 }
2589
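/*
 * Kick off a scrub of the given SPA range; a short request asks the
 * platform to return previously collected results rather than start a
 * full new scan.
 */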
2590 static int ars_start(struct acpi_nfit_desc *acpi_desc,
2591                 struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
2592 {
2593         int rc;
2594         int cmd_rc;
2595         struct nd_cmd_ars_start ars_start;
2596         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2597         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2598
2599         memset(&ars_start, 0, sizeof(ars_start));
2600         ars_start.address = spa->address;
2601         ars_start.length = spa->length;
2602         if (req_type == ARS_REQ_SHORT)
2603                 ars_start.flags = ND_ARS_RETURN_PREV_DATA;
2604         if (nfit_spa_type(spa) == NFIT_SPA_PM)
2605                 ars_start.type = ND_ARS_PERSISTENT;
2606         else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
2607                 ars_start.type = ND_ARS_VOLATILE;
2608         else
2609                 return -ENOTTY;
2610
2611         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2612                         sizeof(ars_start), &cmd_rc);
2613
2614         if (rc < 0)
2615                 return rc;
2616         return cmd_rc;
2617 }
2618
2619 static int ars_continue(struct acpi_nfit_desc *acpi_desc)
2620 {
2621         int rc, cmd_rc;
2622         struct nd_cmd_ars_start ars_start;
2623         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2624         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2625
2626         memset(&ars_start, 0, sizeof(ars_start));
2627         ars_start.address = ars_status->restart_address;
2628         ars_start.length = ars_status->restart_length;
2629         ars_start.type = ars_status->type;
2630         ars_start.flags = acpi_desc->ars_start_flags;
2631         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
2632                         sizeof(ars_start), &cmd_rc);
2633         if (rc < 0)
2634                 return rc;
2635         return cmd_rc;
2636 }
2637
2638 static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
2639 {
2640         struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
2641         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2642         int rc, cmd_rc;
2643
2644         rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
2645                         acpi_desc->max_ars, &cmd_rc);
2646         if (rc < 0)
2647                 return rc;
2648         return cmd_rc;
2649 }
2650
2651 static void ars_complete(struct acpi_nfit_desc *acpi_desc,
2652                 struct nfit_spa *nfit_spa)
2653 {
2654         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2655         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2656         struct nd_region *nd_region = nfit_spa->nd_region;
2657         struct device *dev;
2658
2659         lockdep_assert_held(&acpi_desc->init_mutex);
2660         /*
2661          * Only advance the ARS state for ARS runs initiated by the
2662          * kernel; ignore ARS results from BIOS-initiated runs for scrub
2663          * completion tracking.
2664          */
2665         if (acpi_desc->scrub_spa != nfit_spa)
2666                 return;
2667
2668         if ((ars_status->address >= spa->address && ars_status->address
2669                                 < spa->address + spa->length)
2670                         || (ars_status->address < spa->address)) {
2671                 /*
2672                  * Assume that if a scrub starts at an offset from the
2673                  * start of nfit_spa that we are in the continuation
2674                  * case.
2675                  *
2676                  * Otherwise, if the scrub covers the spa range, mark
2677                  * any pending request complete.
2678                  */
2679                 if (ars_status->address + ars_status->length
2680                                 >= spa->address + spa->length)
2681                                 /* complete */;
2682                 else
2683                         return;
2684         } else
2685                 return;
2686
2687         acpi_desc->scrub_spa = NULL;
2688         if (nd_region) {
2689                 dev = nd_region_dev(nd_region);
2690                 nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
2691         } else
2692                 dev = acpi_desc->dev;
2693         dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
2694 }
2695
2696 static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
2697 {
2698         struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
2699         struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
2700         int rc;
2701         u32 i;
2702
2703         /*
2704          * The first record starts at a 44-byte offset from the start of the
2705          * payload.
2706          */
2707         if (ars_status->out_length < 44)
2708                 return 0;
2709         for (i = 0; i < ars_status->num_records; i++) {
2710                 /* only process full records */
2711                 if (ars_status->out_length
2712                                 < 44 + sizeof(struct nd_ars_record) * (i + 1))
2713                         break;
2714                 rc = nvdimm_bus_add_badrange(nvdimm_bus,
2715                                 ars_status->records[i].err_address,
2716                                 ars_status->records[i].length);
2717                 if (rc)
2718                         return rc;
2719         }
2720         if (i < ars_status->num_records)
2721                 dev_warn(acpi_desc->dev, "detected truncated ars results\n");
2722
2723         return 0;
2724 }
2725
2726 static void acpi_nfit_remove_resource(void *data)
2727 {
2728         struct resource *res = data;
2729
2730         remove_resource(res);
2731 }
2732
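/*
 * Advertise the range as "Persistent Memory" in the iomem resource
 * tree if it is not already marked as such, and register a devm action
 * to remove the resource on teardown.
 */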
2733 static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
2734                 struct nd_region_desc *ndr_desc)
2735 {
2736         struct resource *res, *nd_res = ndr_desc->res;
2737         int is_pmem, ret;
2738
2739         /* No operation if the region is already registered as PMEM */
2740         is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
2741                                 IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
2742         if (is_pmem == REGION_INTERSECTS)
2743                 return 0;
2744
2745         res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
2746         if (!res)
2747                 return -ENOMEM;
2748
2749         res->name = "Persistent Memory";
2750         res->start = nd_res->start;
2751         res->end = nd_res->end;
2752         res->flags = IORESOURCE_MEM;
2753         res->desc = IORES_DESC_PERSISTENT_MEMORY;
2754
2755         ret = insert_resource(&iomem_resource, res);
2756         if (ret)
2757                 return ret;
2758
2759         ret = devm_add_action_or_reset(acpi_desc->dev,
2760                                         acpi_nfit_remove_resource,
2761                                         res);
2762         if (ret)
2763                 return ret;
2764
2765         return 0;
2766 }
2767
2768 static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
2769                 struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
2770                 struct acpi_nfit_memory_map *memdev,
2771                 struct nfit_spa *nfit_spa)
2772 {
2773         struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
2774                         memdev->device_handle);
2775         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2776         struct nd_blk_region_desc *ndbr_desc;
2777         struct nfit_mem *nfit_mem;
2778         int rc;
2779
2780         if (!nvdimm) {
2781                 dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
2782                                 spa->range_index, memdev->device_handle);
2783                 return -ENODEV;
2784         }
2785
2786         mapping->nvdimm = nvdimm;
2787         switch (nfit_spa_type(spa)) {
2788         case NFIT_SPA_PM:
2789         case NFIT_SPA_VOLATILE:
2790                 mapping->start = memdev->address;
2791                 mapping->size = memdev->region_size;
2792                 break;
2793         case NFIT_SPA_DCR:
2794                 nfit_mem = nvdimm_provider_data(nvdimm);
2795                 if (!nfit_mem || !nfit_mem->bdw) {
2796                         dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
2797                                         spa->range_index, nvdimm_name(nvdimm));
2798                         break;
2799                 }
2800
2801                 mapping->size = nfit_mem->bdw->capacity;
2802                 mapping->start = nfit_mem->bdw->start_address;
2803                 ndr_desc->num_lanes = nfit_mem->bdw->windows;
2804                 ndr_desc->mapping = mapping;
2805                 ndr_desc->num_mappings = 1;
2806                 ndbr_desc = to_blk_region_desc(ndr_desc);
2807                 ndbr_desc->enable = acpi_nfit_blk_region_enable;
2808                 ndbr_desc->do_io = acpi_desc->blk_do_io;
2809                 rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2810                 if (rc)
2811                         return rc;
2812                 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
2813                                 ndr_desc);
2814                 if (!nfit_spa->nd_region)
2815                         return -ENOMEM;
2816                 break;
2817         }
2818
2819         return 0;
2820 }
2821
2822 static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
2823 {
2824         return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2825                 nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2826                 nfit_spa_type(spa) == NFIT_SPA_PDISK ||
2827                 nfit_spa_type(spa) == NFIT_SPA_PCD);
2828 }
2829
2830 static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
2831 {
2832         return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
2833                 nfit_spa_type(spa) == NFIT_SPA_VCD   ||
2834                 nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
2835 }
2836
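/*
 * Build an nd_region_desc for one SPA range: gather the memdev mappings
 * that reference the range, set the NUMA node and persistence-domain
 * flags, and register a pmem, volatile, or virtual-disk region with the
 * nvdimm bus according to the SPA type.
 */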
2837 static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2838                 struct nfit_spa *nfit_spa)
2839 {
2840         static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
2841         struct acpi_nfit_system_address *spa = nfit_spa->spa;
2842         struct nd_blk_region_desc ndbr_desc;
2843         struct nd_region_desc *ndr_desc;
2844         struct nfit_memdev *nfit_memdev;
2845         struct nvdimm_bus *nvdimm_bus;
2846         struct resource res;
2847         int count = 0, rc;
2848
2849         if (nfit_spa->nd_region)
2850                 return 0;
2851
2852         if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
2853                 dev_dbg(acpi_desc->dev, "detected invalid spa index\n");
2854                 return 0;
2855         }
2856
2857         memset(&res, 0, sizeof(res));
2858         memset(&mappings, 0, sizeof(mappings));
2859         memset(&ndbr_desc, 0, sizeof(ndbr_desc));
2860         res.start = spa->address;
2861         res.end = res.start + spa->length - 1;
2862         ndr_desc = &ndbr_desc.ndr_desc;
2863         ndr_desc->res = &res;
2864         ndr_desc->provider_data = nfit_spa;
2865         ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
2866         if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
2867                 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2868                                                 spa->proximity_domain);
2869         else
2870                 ndr_desc->numa_node = NUMA_NO_NODE;
2871
2872         /*
2873          * Persistence domain bits are hierarchical: if
2874          * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
2875          * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
2876          */
2877         if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
2878                 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
2879         else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
2880                 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
2881
2882         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
2883                 struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
2884                 struct nd_mapping_desc *mapping;
2885
2886                 if (memdev->range_index != spa->range_index)
2887                         continue;
2888                 if (count >= ND_MAX_MAPPINGS) {
2889                         dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
2890                                         spa->range_index, ND_MAX_MAPPINGS);
2891                         return -ENXIO;
2892                 }
2893                 mapping = &mappings[count++];
2894                 rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
2895                                 memdev, nfit_spa);
2896                 if (rc)
2897                         goto out;
2898         }
2899
2900         ndr_desc->mapping = mappings;
2901         ndr_desc->num_mappings = count;
2902         rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
2903         if (rc)
2904                 goto out;
2905
2906         nvdimm_bus = acpi_desc->nvdimm_bus;
2907         if (nfit_spa_type(spa) == NFIT_SPA_PM) {
2908                 rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
2909                 if (rc) {
2910                         dev_warn(acpi_desc->dev,
2911                                 "failed to insert pmem resource to iomem: %d\n",
2912                                 rc);
2913                         goto out;
2914                 }
2915
2916                 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2917                                 ndr_desc);
2918                 if (!nfit_spa->nd_region)
2919                         rc = -ENOMEM;
2920         } else if (nfit_spa_is_volatile(spa)) {
2921                 nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
2922                                 ndr_desc);
2923                 if (!nfit_spa->nd_region)
2924                         rc = -ENOMEM;
2925         } else if (nfit_spa_is_virtual(spa)) {
2926                 nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
2927                                 ndr_desc);
2928                 if (!nfit_spa->nd_region)
2929                         rc = -ENOMEM;
2930         }
2931
2932  out:
2933         if (rc)
2934                 dev_err(acpi_desc->dev, "failed to register spa range %d\n",
2935                                 nfit_spa->spa->range_index);
2936         return rc;
2937 }
2938
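/*
 * Reuse a single max_ars-sized buffer for ARS status output, zeroing it
 * on every reuse so stale records are never parsed.
 */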
2939 static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc)
2940 {
2941         struct device *dev = acpi_desc->dev;
2942         struct nd_cmd_ars_status *ars_status;
2943
2944         if (acpi_desc->ars_status) {
2945                 memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
2946                 return 0;
2947         }
2948
2949         ars_status = devm_kzalloc(dev, acpi_desc->max_ars, GFP_KERNEL);
2950         if (!ars_status)
2951                 return -ENOMEM;
2952         acpi_desc->ars_status = ars_status;
2953         return 0;
2954 }
2955
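/*
 * Retrieve the latest ARS status and feed any error records into the
 * nvdimm badrange tracking.  -ENOSPC is passed back to the caller so
 * the scrub can be continued to retrieve the remaining records.
 */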
2956 static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
2957 {
2958         int rc;
2959
2960         if (ars_status_alloc(acpi_desc))
2961                 return -ENOMEM;
2962
2963         rc = ars_get_status(acpi_desc);
2964
2965         if (rc < 0 && rc != -ENOSPC)
2966                 return rc;
2967
2968         if (ars_status_process_records(acpi_desc))
2969                 dev_err(acpi_desc->dev, "Failed to process ARS records\n");
2970
2971         return rc;
2972 }
2973
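/*
 * Register a region and, unless init-time ARS is disabled or the range
 * has already failed ARS, kick off an initial short scrub.  Long scrubs
 * and retries are left to the scrub workqueue.
 */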
2974 static int ars_register(struct acpi_nfit_desc *acpi_desc,
2975                 struct nfit_spa *nfit_spa)
2976 {
2977         int rc;
2978
2979         if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
2980                 return acpi_nfit_register_region(acpi_desc, nfit_spa);
2981
2982         set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
2983         set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
2984
2985         switch (acpi_nfit_query_poison(acpi_desc)) {
2986         case 0:
2987         case -EAGAIN:
2988                 rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
2989                 /* shouldn't happen, try again later */
2990                 if (rc == -EBUSY)
2991                         break;
2992                 if (rc) {
2993                         set_bit(ARS_FAILED, &nfit_spa->ars_state);
2994                         break;
2995                 }
2996                 clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
2997                 rc = acpi_nfit_query_poison(acpi_desc);
2998                 if (rc)
2999                         break;
3000                 acpi_desc->scrub_spa = nfit_spa;
3001                 ars_complete(acpi_desc, nfit_spa);
3002                 /*
3003                  * If ars_complete() says we didn't complete the
3004                  * short scrub, we'll try again with a long
3005                  * request.
3006                  */
3007                 acpi_desc->scrub_spa = NULL;
3008                 break;
3009         case -EBUSY:
3010         case -ENOMEM:
3011         case -ENOSPC:
3012                 /*
3013                  * BIOS was using ARS, wait for it to complete (or
3014                  * resources to become available) and then perform our
3015                  * own scrubs.
3016                  */
3017                 break;
3018         default:
3019                 set_bit(ARS_FAILED, &nfit_spa->ars_state);
3020                 break;
3021         }
3022
3023         return acpi_nfit_register_region(acpi_desc, nfit_spa);
3024 }
3025
3026 static void ars_complete_all(struct acpi_nfit_desc *acpi_desc)
3027 {
3028         struct nfit_spa *nfit_spa;
3029
3030         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3031                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3032                         continue;
3033                 ars_complete(acpi_desc, nfit_spa);
3034         }
3035 }
3036
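/*
 * Core of the scrub state machine: react to the last ARS status query
 * (busy, continue, or failure), retire completed scrubs, and start the
 * next pending short or long ARS request.  Returns the number of
 * seconds until the workqueue should run again, or 0 when idle.
 */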
3037 static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
3038                 int query_rc)
3039 {
3040         unsigned int tmo = acpi_desc->scrub_tmo;
3041         struct device *dev = acpi_desc->dev;
3042         struct nfit_spa *nfit_spa;
3043
3044         lockdep_assert_held(&acpi_desc->init_mutex);
3045
3046         if (acpi_desc->cancel)
3047                 return 0;
3048
3049         if (query_rc == -EBUSY) {
3050                 dev_dbg(dev, "ARS: ARS busy\n");
3051                 return min(30U * 60U, tmo * 2);
3052         }
3053         if (query_rc == -ENOSPC) {
3054                 dev_dbg(dev, "ARS: ARS continue\n");
3055                 ars_continue(acpi_desc);
3056                 return 1;
3057         }
3058         if (query_rc && query_rc != -EAGAIN) {
3059                 unsigned long long addr, end;
3060
3061                 addr = acpi_desc->ars_status->address;
3062                 end = addr + acpi_desc->ars_status->length;
3063                 dev_dbg(dev, "ARS: %llx-%llx failed (%d)\n", addr, end,
3064                                 query_rc);
3065         }
3066
3067         ars_complete_all(acpi_desc);
3068         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3069                 enum nfit_ars_state req_type;
3070                 int rc;
3071
3072                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3073                         continue;
3074
3075                 /* prefer short ARS requests first */
3076                 if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
3077                         req_type = ARS_REQ_SHORT;
3078                 else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
3079                         req_type = ARS_REQ_LONG;
3080                 else
3081                         continue;
3082                 rc = ars_start(acpi_desc, nfit_spa, req_type);
3083
3084                 dev = nd_region_dev(nfit_spa->nd_region);
3085                 dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
3086                                 nfit_spa->spa->range_index,
3087                                 req_type == ARS_REQ_SHORT ? "short" : "long",
3088                                 rc);
3089                 /*
3090                  * We raced someone else starting ARS; try again in
3091                  * a bit.
3092                  */
3093                 if (rc == -EBUSY)
3094                         return 1;
3095                 if (rc == 0) {
3096                         dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
3097                                         "scrub start while range %d active\n",
3098                                         acpi_desc->scrub_spa->spa->range_index);
3099                         clear_bit(req_type, &nfit_spa->ars_state);
3100                         acpi_desc->scrub_spa = nfit_spa;
3101                         /*
3102                          * Consider this spa last for future scrub
3103                          * requests
3104                          */
3105                         list_move_tail(&nfit_spa->list, &acpi_desc->spas);
3106                         return 1;
3107                 }
3108
3109                 dev_err(dev, "ARS: range %d ARS failed (%d)\n",
3110                                 nfit_spa->spa->range_index, rc);
3111                 set_bit(ARS_FAILED, &nfit_spa->ars_state);
3112         }
3113         return 0;
3114 }
3115
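/*
 * (Re)arm the scrub delayed work.  @tmo is in seconds; a value of 0
 * queues the work immediately without updating the stored timeout.
 */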
3116 static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
3117 {
3118         lockdep_assert_held(&acpi_desc->init_mutex);
3119
3120         acpi_desc->scrub_busy = 1;
3121         /* note this should only be set from within the workqueue */
3122         if (tmo)
3123                 acpi_desc->scrub_tmo = tmo;
3124         queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
3125 }
3126
3127 static void sched_ars(struct acpi_nfit_desc *acpi_desc)
3128 {
3129         __sched_ars(acpi_desc, 0);
3130 }
3131
3132 static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
3133 {
3134         lockdep_assert_held(&acpi_desc->init_mutex);
3135
3136         acpi_desc->scrub_busy = 0;
3137         acpi_desc->scrub_count++;
3138         if (acpi_desc->scrub_count_state)
3139                 sysfs_notify_dirent(acpi_desc->scrub_count_state);
3140 }
3141
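/*
 * Delayed-work callback that drives ARS: query poison, advance the
 * scrub state machine, then either re-arm the work or report completion
 * through the sysfs scrub count.
 */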
3142 static void acpi_nfit_scrub(struct work_struct *work)
3143 {
3144         struct acpi_nfit_desc *acpi_desc;
3145         unsigned int tmo;
3146         int query_rc;
3147
3148         acpi_desc = container_of(work, typeof(*acpi_desc), dwork.work);
3149         mutex_lock(&acpi_desc->init_mutex);
3150         query_rc = acpi_nfit_query_poison(acpi_desc);
3151         tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
3152         if (tmo)
3153                 __sched_ars(acpi_desc, tmo);
3154         else
3155                 notify_ars_done(acpi_desc);
3156         memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
3157         mutex_unlock(&acpi_desc->init_mutex);
3158 }
3159
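/*
 * Probe ARS capabilities for a SPA range.  The range starts out marked
 * ARS_FAILED and the flag is only cleared when the platform reports a
 * supported scrub type matching the SPA type (volatile vs persistent).
 */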
3160 static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
3161                 struct nfit_spa *nfit_spa)
3162 {
3163         int type = nfit_spa_type(nfit_spa->spa);
3164         struct nd_cmd_ars_cap ars_cap;
3165         int rc;
3166
3167         set_bit(ARS_FAILED, &nfit_spa->ars_state);
3168         memset(&ars_cap, 0, sizeof(ars_cap));
3169         rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
3170         if (rc < 0)
3171                 return;
3172         /* check that the supported scrub types match the spa type */
3173         if (type == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16)
3174                                 & ND_ARS_VOLATILE) == 0)
3175                 return;
3176         if (type == NFIT_SPA_PM && ((ars_cap.status >> 16)
3177                                 & ND_ARS_PERSISTENT) == 0)
3178                 return;
3179
3180         nfit_spa->max_ars = ars_cap.max_ars_out;
3181         nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
3182         acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
3183         clear_bit(ARS_FAILED, &nfit_spa->ars_state);
3184 }
3185
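/*
 * Walk all parsed SPA ranges: probe ARS support for volatile and pmem
 * ranges, register every known range type with the nvdimm bus, and
 * schedule the background scrub.
 */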
3186 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
3187 {
3188         struct nfit_spa *nfit_spa;
3189         int rc;
3190
3191         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3192                 switch (nfit_spa_type(nfit_spa->spa)) {
3193                 case NFIT_SPA_VOLATILE:
3194                 case NFIT_SPA_PM:
3195                         acpi_nfit_init_ars(acpi_desc, nfit_spa);
3196                         break;
3197                 }
3198         }
3199
3200         list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
3201                 switch (nfit_spa_type(nfit_spa->spa)) {
3202                 case NFIT_SPA_VOLATILE:
3203                 case NFIT_SPA_PM:
3204                         /* register regions and kick off initial ARS run */
3205                         rc = ars_register(acpi_desc, nfit_spa);
3206                         if (rc)
3207                                 return rc;
3208                         break;
3209                 case NFIT_SPA_BDW:
3210                         /* nothing to register */
3211                         break;
3212                 case NFIT_SPA_DCR:
3213                 case NFIT_SPA_VDISK:
3214                 case NFIT_SPA_VCD:
3215                 case NFIT_SPA_PDISK:
3216                 case NFIT_SPA_PCD:
3217                         /* register known regions that don't support ARS */
3218                         rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
3219                         if (rc)
3220                                 return rc;
3221                         break;
3222                 default:
3223                         /* don't register unknown regions */
3224                         break;
3225                 }
3226
3227         sched_ars(acpi_desc);
3228         return 0;
3229 }
3230
3231 static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
3232                 struct nfit_table_prev *prev)
3233 {
3234         struct device *dev = acpi_desc->dev;
3235
3236         if (!list_empty(&prev->spas) ||
3237                         !list_empty(&prev->memdevs) ||
3238                         !list_empty(&prev->dcrs) ||
3239                         !list_empty(&prev->bdws) ||
3240                         !list_empty(&prev->idts) ||
3241                         !list_empty(&prev->flushes)) {
3242                 dev_err(dev, "new nfit deletes entries (unsupported)\n");
3243                 return -ENXIO;
3244         }
3245         return 0;
3246 }
3247
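/*
 * Cache the kernfs node of the bus-level nfit/scrub attribute so that
 * scrub completions can be signalled via sysfs_notify_dirent().
 */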
3248 static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
3249 {
3250         struct device *dev = acpi_desc->dev;
3251         struct kernfs_node *nfit;
3252         struct device *bus_dev;
3253
3254         if (!ars_supported(acpi_desc->nvdimm_bus))
3255                 return 0;
3256
3257         bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3258         nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
3259         if (!nfit) {
3260                 dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
3261                 return -ENODEV;
3262         }
3263         acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
3264         sysfs_put(nfit);
3265         if (!acpi_desc->scrub_count_state) {
3266                 dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
3267                 return -ENODEV;
3268         }
3269
3270         return 0;
3271 }
3272
3273 static void acpi_nfit_unregister(void *data)
3274 {
3275         struct acpi_nfit_desc *acpi_desc = data;
3276
3277         nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
3278 }
3279
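/*
 * Parse an NFIT (or _FIT) payload.  The first call also registers the
 * nvdimm bus; on re-parse, previously seen tables are moved to a 'prev'
 * list so additions can be merged and deletions rejected.
 */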
3280 int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
3281 {
3282         struct device *dev = acpi_desc->dev;
3283         struct nfit_table_prev prev;
3284         const void *end;
3285         int rc;
3286
3287         if (!acpi_desc->nvdimm_bus) {
3288                 acpi_nfit_init_dsms(acpi_desc);
3289
3290                 acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
3291                                 &acpi_desc->nd_desc);
3292                 if (!acpi_desc->nvdimm_bus)
3293                         return -ENOMEM;
3294
3295                 rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
3296                                 acpi_desc);
3297                 if (rc)
3298                         return rc;
3299
3300                 rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
3301                 if (rc)
3302                         return rc;
3303
3304                 /* register this acpi_desc for mce notifications */
3305                 mutex_lock(&acpi_desc_lock);
3306                 list_add_tail(&acpi_desc->list, &acpi_descs);
3307                 mutex_unlock(&acpi_desc_lock);
3308         }
3309
3310         mutex_lock(&acpi_desc->init_mutex);
3311
3312         INIT_LIST_HEAD(&prev.spas);
3313         INIT_LIST_HEAD(&prev.memdevs);
3314         INIT_LIST_HEAD(&prev.dcrs);
3315         INIT_LIST_HEAD(&prev.bdws);
3316         INIT_LIST_HEAD(&prev.idts);
3317         INIT_LIST_HEAD(&prev.flushes);
3318
3319         list_cut_position(&prev.spas, &acpi_desc->spas,
3320                                 acpi_desc->spas.prev);
3321         list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
3322                                 acpi_desc->memdevs.prev);
3323         list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
3324                                 acpi_desc->dcrs.prev);
3325         list_cut_position(&prev.bdws, &acpi_desc->bdws,
3326                                 acpi_desc->bdws.prev);
3327         list_cut_position(&prev.idts, &acpi_desc->idts,
3328                                 acpi_desc->idts.prev);
3329         list_cut_position(&prev.flushes, &acpi_desc->flushes,
3330                                 acpi_desc->flushes.prev);
3331
3332         end = data + sz;
3333         while (!IS_ERR_OR_NULL(data))
3334                 data = add_table(acpi_desc, &prev, data, end);
3335
3336         if (IS_ERR(data)) {
3337                 dev_dbg(dev, "nfit table parsing error: %ld\n", PTR_ERR(data));
3338                 rc = PTR_ERR(data);
3339                 goto out_unlock;
3340         }
3341
3342         rc = acpi_nfit_check_deletions(acpi_desc, &prev);
3343         if (rc)
3344                 goto out_unlock;
3345
3346         rc = nfit_mem_init(acpi_desc);
3347         if (rc)
3348                 goto out_unlock;
3349
3350         rc = acpi_nfit_register_dimms(acpi_desc);
3351         if (rc)
3352                 goto out_unlock;
3353
3354         rc = acpi_nfit_register_regions(acpi_desc);
3355
3356  out_unlock:
3357         mutex_unlock(&acpi_desc->init_mutex);
3358         return rc;
3359 }
3360 EXPORT_SYMBOL_GPL(acpi_nfit_init);
3361
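/*
 * nvdimm_bus_descriptor ->flush_probe() callback: bounce the device lock
 * and init_mutex so that any in-flight NFIT parsing and initial region
 * registration completes before the caller proceeds.
 */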
3362 static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
3363 {
3364         struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3365         struct device *dev = acpi_desc->dev;
3366
3367         /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
3368         device_lock(dev);
3369         device_unlock(dev);
3370
3371         /* Bounce the init_mutex to complete initial registration */
3372         mutex_lock(&acpi_desc->init_mutex);
3373         mutex_unlock(&acpi_desc->init_mutex);
3374
3375         return 0;
3376 }
3377
3378 static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3379                 struct nvdimm *nvdimm, unsigned int cmd)
3380 {
3381         struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
3382
3383         if (nvdimm)
3384                 return 0;
3385         if (cmd != ND_CMD_ARS_START)
3386                 return 0;
3387
3388         /*
3389          * The kernel and userspace may race to initiate a scrub, but
3390          * the scrub thread is prepared to lose that initial race.  It
3391          * just needs guarantees that any ARS it initiates are not
3392          * interrupted by any intervening start requests from userspace.
3393          */
3394         if (work_busy(&acpi_desc->dwork.work))
3395                 return -EBUSY;
3396
3397         return 0;
3398 }
3399
3400 /* prevent security commands from being issued via ioctl */
3401 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
3402                 struct nvdimm *nvdimm, unsigned int cmd, void *buf)
3403 {
3404         struct nd_cmd_pkg *call_pkg = buf;
3405         unsigned int func;
3406
3407         if (nvdimm && cmd == ND_CMD_CALL &&
3408                         call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
3409                 func = call_pkg->nd_command;
3410                 if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
3411                         return -EOPNOTSUPP;
3412         }
3413
3414         return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd);
3415 }
3416
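/*
 * Request a new short or long ARS pass on all scrub-capable ranges.
 * Returns 0 if a scrub was scheduled (or teardown is in progress),
 * -EBUSY if every candidate range already has the request pending, and
 * -ENOTTY if no range supports ARS.
 */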
3417 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
3418                 enum nfit_ars_state req_type)
3419 {
3420         struct device *dev = acpi_desc->dev;
3421         int scheduled = 0, busy = 0;
3422         struct nfit_spa *nfit_spa;
3423
3424         mutex_lock(&acpi_desc->init_mutex);
3425         if (acpi_desc->cancel) {
3426                 mutex_unlock(&acpi_desc->init_mutex);
3427                 return 0;
3428         }
3429
3430         list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
3431                 int type = nfit_spa_type(nfit_spa->spa);
3432
3433                 if (type != NFIT_SPA_PM && type != NFIT_SPA_VOLATILE)
3434                         continue;
3435                 if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
3436                         continue;
3437
3438                 if (test_and_set_bit(req_type, &nfit_spa->ars_state))
3439                         busy++;
3440                 else
3441                         scheduled++;
3442         }
3443         if (scheduled) {
3444                 sched_ars(acpi_desc);
3445                 dev_dbg(dev, "ars_scan triggered\n");
3446         }
3447         mutex_unlock(&acpi_desc->init_mutex);
3448
3449         if (scheduled)
3450                 return 0;
3451         if (busy)
3452                 return -EBUSY;
3453         return -ENOTTY;
3454 }
3455
3456 void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
3457 {
3458         struct nvdimm_bus_descriptor *nd_desc;
3459
3460         dev_set_drvdata(dev, acpi_desc);
3461         acpi_desc->dev = dev;
3462         acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
3463         nd_desc = &acpi_desc->nd_desc;
3464         nd_desc->provider_name = "ACPI.NFIT";
3465         nd_desc->module = THIS_MODULE;
3466         nd_desc->ndctl = acpi_nfit_ctl;
3467         nd_desc->flush_probe = acpi_nfit_flush_probe;
3468         nd_desc->clear_to_send = acpi_nfit_clear_to_send;
3469         nd_desc->attr_groups = acpi_nfit_attribute_groups;
3470
3471         INIT_LIST_HEAD(&acpi_desc->spas);
3472         INIT_LIST_HEAD(&acpi_desc->dcrs);
3473         INIT_LIST_HEAD(&acpi_desc->bdws);
3474         INIT_LIST_HEAD(&acpi_desc->idts);
3475         INIT_LIST_HEAD(&acpi_desc->flushes);
3476         INIT_LIST_HEAD(&acpi_desc->memdevs);
3477         INIT_LIST_HEAD(&acpi_desc->dimms);
3478         INIT_LIST_HEAD(&acpi_desc->list);
3479         mutex_init(&acpi_desc->init_mutex);
3480         acpi_desc->scrub_tmo = 1;
3481         INIT_DELAYED_WORK(&acpi_desc->dwork, acpi_nfit_scrub);
3482 }
3483 EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);
3484
3485 static void acpi_nfit_put_table(void *table)
3486 {
3487         acpi_put_table(table);
3488 }
3489
3490 void acpi_nfit_shutdown(void *data)
3491 {
3492         struct acpi_nfit_desc *acpi_desc = data;
3493         struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
3494
3495         /*
3496          * Delete from the global list under acpi_desc_lock so that
3497          * nfit_handle_mce() does not race teardown.
3498          */
3499         mutex_lock(&acpi_desc_lock);
3500         list_del(&acpi_desc->list);
3501         mutex_unlock(&acpi_desc_lock);
3502
3503         mutex_lock(&acpi_desc->init_mutex);
3504         acpi_desc->cancel = 1;
3505         cancel_delayed_work_sync(&acpi_desc->dwork);
3506         mutex_unlock(&acpi_desc->init_mutex);
3507
3508         /*
3509          * Bounce the nvdimm bus lock to make sure any in-flight
3510          * acpi_nfit_ars_rescan() submissions have had a chance to
3511          * either submit or see ->cancel set.
3512          */
3513         device_lock(bus_dev);
3514         device_unlock(bus_dev);
3515
3516         flush_workqueue(nfit_wq);
3517 }
3518 EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);
3519
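/*
 * ACPI0012 root device probe: locate the static NFIT, prefer a _FIT
 * payload when one is provided, and hand the result to acpi_nfit_init()
 * for parsing and nvdimm bus registration.
 */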
3520 static int acpi_nfit_add(struct acpi_device *adev)
3521 {
3522         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
3523         struct acpi_nfit_desc *acpi_desc;
3524         struct device *dev = &adev->dev;
3525         struct acpi_table_header *tbl;
3526         acpi_status status = AE_OK;
3527         acpi_size sz;
3528         int rc = 0;
3529
3530         status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
3531         if (ACPI_FAILURE(status)) {
3532                 /* The NVDIMM root device allows the OS to trigger enumeration of
3533                  * NVDIMMs through the NFIT at boot time and re-enumeration at the
3534                  * root level via the _FIT method during runtime.
3535                  * Returning 0 here is fine: an nvdimm may be hotplugged later
3536                  * and the _FIT method evaluated, which returns data in the
3537                  * format of a series of NFIT structures.
3538                  */
3539                 dev_dbg(dev, "failed to find NFIT at startup\n");
3540                 return 0;
3541         }
3542
3543         rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
3544         if (rc)
3545                 return rc;
3546         sz = tbl->length;
3547
3548         acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
3549         if (!acpi_desc)
3550                 return -ENOMEM;
3551         acpi_nfit_desc_init(acpi_desc, &adev->dev);
3552
3553         /* Save the acpi header for exporting the revision via sysfs */
3554         acpi_desc->acpi_header = *tbl;
3555
3556         /* Evaluate _FIT and override with that if present */
3557         status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
3558         if (ACPI_SUCCESS(status) && buf.length > 0) {
3559                 union acpi_object *obj = buf.pointer;
3560
3561                 if (obj->type == ACPI_TYPE_BUFFER)
3562                         rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3563                                         obj->buffer.length);
3564                 else
3565                         dev_dbg(dev, "invalid type %d, ignoring _FIT\n",
3566                                 (int) obj->type);
3567                 kfree(buf.pointer);
3568         } else
3569                 /* skip over the lead-in header table */
3570                 rc = acpi_nfit_init(acpi_desc, (void *) tbl
3571                                 + sizeof(struct acpi_table_nfit),
3572                                 sz - sizeof(struct acpi_table_nfit));
3573
3574         if (rc)
3575                 return rc;
3576         return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
3577 }
3578
3579 static int acpi_nfit_remove(struct acpi_device *adev)
3580 {
3581         /* see acpi_nfit_unregister */
3582         return 0;
3583 }
3584
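/*
 * Handle NFIT_NOTIFY_UPDATE: re-evaluate _FIT and merge any new NFIT
 * structures into the existing description.
 */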
3585 static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
3586 {
3587         struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3588         struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
3589         union acpi_object *obj;
3590         acpi_status status;
3591         int ret;
3592
3593         if (!dev->driver) {
3594                 /* dev->driver may be null if we're being removed */
3595                 dev_dbg(dev, "no driver found for dev\n");
3596                 return;
3597         }
3598
3599         if (!acpi_desc) {
3600                 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
3601                 if (!acpi_desc)
3602                         return;
3603                 acpi_nfit_desc_init(acpi_desc, dev);
3604         } else {
3605                 /*
3606                  * Finish previous registration before considering new
3607                  * regions.
3608                  */
3609                 flush_workqueue(nfit_wq);
3610         }
3611
3612         /* Evaluate _FIT */
3613         status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
3614         if (ACPI_FAILURE(status)) {
3615                 dev_err(dev, "failed to evaluate _FIT\n");
3616                 return;
3617         }
3618
3619         obj = buf.pointer;
3620         if (obj->type == ACPI_TYPE_BUFFER) {
3621                 ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
3622                                 obj->buffer.length);
3623                 if (ret)
3624                         dev_err(dev, "failed to merge updated NFIT\n");
3625         } else
3626                 dev_err(dev, "Invalid _FIT\n");
3627         kfree(buf.pointer);
3628 }
3629
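/*
 * Handle NFIT_NOTIFY_UC_MEMORY_ERROR by scheduling an ARS pass; the
 * scrub_mode setting selects a long or short scrub.
 */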
3630 static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
3631 {
3632         struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
3633
3634         if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
3635                 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
3636         else
3637                 acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
3638 }
3639
3640 void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
3641 {
3642         dev_dbg(dev, "event: 0x%x\n", event);
3643
3644         switch (event) {
3645         case NFIT_NOTIFY_UPDATE:
3646                 return acpi_nfit_update_notify(dev, handle);
3647         case NFIT_NOTIFY_UC_MEMORY_ERROR:
3648                 return acpi_nfit_uc_error_notify(dev, handle);
3649         default:
3650                 return;
3651         }
3652 }
3653 EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
3654
3655 static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
3656 {
3657         device_lock(&adev->dev);
3658         __acpi_nfit_notify(&adev->dev, adev->handle, event);
3659         device_unlock(&adev->dev);
3660 }
3661
3662 static const struct acpi_device_id acpi_nfit_ids[] = {
3663         { "ACPI0012", 0 },
3664         { "", 0 },
3665 };
3666 MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);
3667
3668 static struct acpi_driver acpi_nfit_driver = {
3669         .name = KBUILD_MODNAME,
3670         .ids = acpi_nfit_ids,
3671         .ops = {
3672                 .add = acpi_nfit_add,
3673                 .remove = acpi_nfit_remove,
3674                 .notify = acpi_nfit_notify,
3675         },
3676 };
3677
3678 static __init int nfit_init(void)
3679 {
3680         int ret;
3681
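        /* sanity check the sizes of the fixed-layout NFIT structures */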
3682         BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
3683         BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
3684         BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
3685         BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
3686         BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
3687         BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
3688         BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
3689         BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);
3690
3691         guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
3692         guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
3693         guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
3694         guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
3695         guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
3696         guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
3697         guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
3698         guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
3699         guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
3700         guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
3701         guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
3702         guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
3703         guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);
3704
3705         nfit_wq = create_singlethread_workqueue("nfit");
3706         if (!nfit_wq)
3707                 return -ENOMEM;
3708
3709         nfit_mce_register();
3710         ret = acpi_bus_register_driver(&acpi_nfit_driver);
3711         if (ret) {
3712                 nfit_mce_unregister();
3713                 destroy_workqueue(nfit_wq);
3714         }
3715
3716         return ret;
3717
3718 }
3719
3720 static __exit void nfit_exit(void)
3721 {
3722         nfit_mce_unregister();
3723         acpi_bus_unregister_driver(&acpi_nfit_driver);
3724         destroy_workqueue(nfit_wq);
3725         WARN_ON(!list_empty(&acpi_descs));
3726 }
3727
3728 module_init(nfit_init);
3729 module_exit(nfit_exit);
3730 MODULE_LICENSE("GPL v2");
3731 MODULE_AUTHOR("Intel Corporation");