2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/platform_device.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/workqueue.h>
17 #include <linux/libnvdimm.h>
18 #include <linux/vmalloc.h>
19 #include <linux/device.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/ndctl.h>
23 #include <linux/sizes.h>
24 #include <linux/list.h>
25 #include <linux/slab.h>
29 #include "nfit_test.h"
32 * Generate an NFIT table to describe the following topology:
34 * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
36 * (a) (b) DIMM BLK-REGION
37 * +----------+--------------+----------+---------+
38 * +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2
39 * | imc0 +--+- - - - - region0 - - - -+----------+ +
40 * +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3
41 * | +----------+--------------v----------v v
45 * | +-------------------------^----------^ ^
46 * +--+---+ | blk4.0 | pm1.0 | 2 region4
47 * | imc1 +--+-------------------------+----------+ +
48 * +------+ | blk5.0 | pm1.0 | 3 region5
49 * +-------------------------+----------+-+-------+
53 * +--+---+ (Hotplug DIMM)
54 * | +----------------------------------------------+
55 * +--+---+ | blk6.0/pm7.0 | 4 region6/7
56 * | imc0 +--+----------------------------------------------+
60 * *) In this layout we have four dimms and two memory controllers in one
61 * socket. Each unique interface (BLK or PMEM) to DPA space
62 * is identified by a region device with a dynamically assigned id.
64 * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
65 * A single PMEM namespace "pm0.0" is created using half of the
66 * REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespace
67 * allocate from the bottom of a region. The unallocated
68 * portion of REGION0 aliases with REGION2 and REGION3. That
69 * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
70 * "blk3.0") starting at the base of each DIMM to offset (a) in those
71 * DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
72 * names that can be assigned to a namespace.
74 * *) In the last portion of dimm0 and dimm1 we have an interleaved
75 * SPA range, REGION1, that spans those two dimms as well as dimm2
76 * and dimm3. Some of REGION1 is allocated to a PMEM namespace named
77 * "pm1.0" the rest is reclaimed in 4 BLK namespaces (for each
78 * dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
81 * *) The portion of dimm2 and dimm3 that do not participate in the
82 * REGION1 interleaved SPA range (i.e. the DPA address below offset
83 * (b) are also included in the "blk4.0" and "blk5.0" namespaces.
84 * Note, that BLK namespaces need not be contiguous in DPA-space, and
85 * can consume aliased capacity from multiple interleave sets.
87 * BUS1: Legacy NVDIMM (single contiguous range)
90 * +---------------------+
91 * |---------------------|
93 * |---------------------|
94 * +---------------------+
96 * *) A NFIT-table may describe a simple system-physical-address range
97 * with no BLK aliasing. This type of region may optionally
98 * reference an NVDIMM.
105 NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
106 NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
108 LABEL_SIZE = SZ_128K,
109 SPA_VCD_SIZE = SZ_4M,
110 SPA0_SIZE = DIMM_SIZE,
111 SPA1_SIZE = DIMM_SIZE*2,
112 SPA2_SIZE = DIMM_SIZE,
115 NUM_NFITS = 2, /* permit testing multiple NFITs per system */
118 struct nfit_test_dcr {
121 __u8 aperature[BDW_SIZE];
/*
 * Build an ACPI NFIT device handle from topology coordinates:
 * node[27:16] | socket[15:12] | imc[11:8] | chan[7:4] | dimm[3:0].
 * Every argument is fully parenthesized so that expression arguments
 * (e.g. "a | b") expand with the intended precedence.
 */
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	((((node) & 0xfff) << 16) | (((socket) & 0xf) << 12) \
	 | (((imc) & 0xf) << 8) | (((chan) & 0xf) << 4) | ((dimm) & 0xf))
128 static u32 handle[] = {
129 [0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
130 [1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
131 [2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
132 [3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
133 [4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
134 [5] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 0),
135 [6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
138 static unsigned long dimm_fail_cmd_flags[NUM_DCR];
141 struct acpi_nfit_desc acpi_desc;
142 struct platform_device pdev;
143 struct list_head resources;
151 dma_addr_t *dimm_dma;
153 dma_addr_t *flush_dma;
155 dma_addr_t *label_dma;
157 dma_addr_t *spa_set_dma;
158 struct nfit_test_dcr **dcr;
160 int (*alloc)(struct nfit_test *t);
161 void (*setup)(struct nfit_test *t);
163 union acpi_object **_fit;
166 struct nd_cmd_ars_status *ars_status;
167 unsigned long deadline;
170 struct device *dimm_dev[NUM_DCR];
171 struct badrange badrange;
172 struct work_struct work;
/* Workqueue that delivers asynchronous uncorrectable-error notifications. */
static struct workqueue_struct *nfit_wq;
177 static struct nfit_test *to_nfit_test(struct device *dev)
179 struct platform_device *pdev = to_platform_device(dev);
181 return container_of(pdev, struct nfit_test, pdev);
184 static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
185 unsigned int buf_len)
187 if (buf_len < sizeof(*nd_cmd))
191 nd_cmd->config_size = LABEL_SIZE;
192 nd_cmd->max_xfer = SZ_4K;
197 static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
198 *nd_cmd, unsigned int buf_len, void *label)
200 unsigned int len, offset = nd_cmd->in_offset;
203 if (buf_len < sizeof(*nd_cmd))
205 if (offset >= LABEL_SIZE)
207 if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
211 len = min(nd_cmd->in_length, LABEL_SIZE - offset);
212 memcpy(nd_cmd->out_buf, label + offset, len);
213 rc = buf_len - sizeof(*nd_cmd) - len;
218 static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
219 unsigned int buf_len, void *label)
221 unsigned int len, offset = nd_cmd->in_offset;
225 if (buf_len < sizeof(*nd_cmd))
227 if (offset >= LABEL_SIZE)
229 if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
232 status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
234 len = min(nd_cmd->in_length, LABEL_SIZE - offset);
235 memcpy(label + offset, nd_cmd->in_buf, len);
236 rc = buf_len - sizeof(*nd_cmd) - (len + 4);
/* Alignment unit (bytes) required of clear-error address/length arguments. */
#define NFIT_TEST_CLEAR_ERR_UNIT	256
243 static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
244 unsigned int buf_len)
248 if (buf_len < sizeof(*nd_cmd))
251 /* for testing, only store up to n records that fit within 4k */
252 ars_recs = SZ_4K / sizeof(struct nd_ars_record);
254 nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
255 + ars_recs * sizeof(struct nd_ars_record);
256 nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
257 nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
262 static void post_ars_status(struct ars_state *ars_state,
263 struct badrange *badrange, u64 addr, u64 len)
265 struct nd_cmd_ars_status *ars_status;
266 struct nd_ars_record *ars_record;
267 struct badrange_entry *be;
268 u64 end = addr + len - 1;
271 ars_state->deadline = jiffies + 1*HZ;
272 ars_status = ars_state->ars_status;
273 ars_status->status = 0;
274 ars_status->address = addr;
275 ars_status->length = len;
276 ars_status->type = ND_ARS_PERSISTENT;
278 spin_lock(&badrange->lock);
279 list_for_each_entry(be, &badrange->list, list) {
280 u64 be_end = be->start + be->length - 1;
283 /* skip entries outside the range */
284 if (be_end < addr || be->start > end)
287 rstart = (be->start < addr) ? addr : be->start;
288 rend = (be_end < end) ? be_end : end;
289 ars_record = &ars_status->records[i];
290 ars_record->handle = 0;
291 ars_record->err_address = rstart;
292 ars_record->length = rend - rstart + 1;
295 spin_unlock(&badrange->lock);
296 ars_status->num_records = i;
297 ars_status->out_length = sizeof(struct nd_cmd_ars_status)
298 + i * sizeof(struct nd_ars_record);
301 static int nfit_test_cmd_ars_start(struct nfit_test *t,
302 struct ars_state *ars_state,
303 struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
306 if (buf_len < sizeof(*ars_start))
309 spin_lock(&ars_state->lock);
310 if (time_before(jiffies, ars_state->deadline)) {
311 ars_start->status = NFIT_ARS_START_BUSY;
314 ars_start->status = 0;
315 ars_start->scrub_time = 1;
316 post_ars_status(ars_state, &t->badrange, ars_start->address,
320 spin_unlock(&ars_state->lock);
325 static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
326 struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
329 if (buf_len < ars_state->ars_status->out_length)
332 spin_lock(&ars_state->lock);
333 if (time_before(jiffies, ars_state->deadline)) {
334 memset(ars_status, 0, buf_len);
335 ars_status->status = NFIT_ARS_STATUS_BUSY;
336 ars_status->out_length = sizeof(*ars_status);
339 memcpy(ars_status, ars_state->ars_status,
340 ars_state->ars_status->out_length);
343 spin_unlock(&ars_state->lock);
347 static int nfit_test_cmd_clear_error(struct nfit_test *t,
348 struct nd_cmd_clear_error *clear_err,
349 unsigned int buf_len, int *cmd_rc)
351 const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
352 if (buf_len < sizeof(*clear_err))
355 if ((clear_err->address & mask) || (clear_err->length & mask))
358 badrange_forget(&t->badrange, clear_err->address, clear_err->length);
359 clear_err->status = 0;
360 clear_err->cleared = clear_err->length;
365 struct region_search_spa {
367 struct nd_region *region;
370 static int is_region_device(struct device *dev)
372 return !strncmp(dev->kobj.name, "region", 6);
375 static int nfit_test_search_region_spa(struct device *dev, void *data)
377 struct region_search_spa *ctx = data;
378 struct nd_region *nd_region;
379 resource_size_t ndr_end;
381 if (!is_region_device(dev))
384 nd_region = to_nd_region(dev);
385 ndr_end = nd_region->ndr_start + nd_region->ndr_size;
387 if (ctx->addr >= nd_region->ndr_start && ctx->addr < ndr_end) {
388 ctx->region = nd_region;
395 static int nfit_test_search_spa(struct nvdimm_bus *bus,
396 struct nd_cmd_translate_spa *spa)
399 struct nd_region *nd_region = NULL;
400 struct nvdimm *nvdimm = NULL;
401 struct nd_mapping *nd_mapping = NULL;
402 struct region_search_spa ctx = {
408 ret = device_for_each_child(&bus->dev, &ctx,
409 nfit_test_search_region_spa);
414 nd_region = ctx.region;
416 dpa = ctx.addr - nd_region->ndr_start;
419 * last dimm is selected for test
421 nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];
422 nvdimm = nd_mapping->nvdimm;
424 spa->devices[0].nfit_device_handle = handle[nvdimm->id];
425 spa->num_nvdimms = 1;
426 spa->devices[0].dpa = dpa;
431 static int nfit_test_cmd_translate_spa(struct nvdimm_bus *bus,
432 struct nd_cmd_translate_spa *spa, unsigned int buf_len)
434 if (buf_len < spa->translate_length)
437 if (nfit_test_search_spa(bus, spa) < 0 || !spa->num_nvdimms)
443 static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
445 static const struct nd_smart_payload smart_data = {
446 .flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
447 | ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
448 | ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
449 .health = ND_SMART_NON_CRITICAL_HEALTH,
450 .temperature = 23 * 16,
452 .alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
458 if (buf_len < sizeof(*smart))
460 memcpy(smart->data, &smart_data, sizeof(smart_data));
464 static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
465 unsigned int buf_len)
467 static const struct nd_smart_threshold_payload smart_t_data = {
468 .alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
469 .temperature = 40 * 16,
473 if (buf_len < sizeof(*smart_t))
475 memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
479 static void uc_error_notify(struct work_struct *work)
481 struct nfit_test *t = container_of(work, typeof(*t), work);
483 __acpi_nfit_notify(&t->pdev.dev, t, NFIT_NOTIFY_UC_MEMORY_ERROR);
486 static int nfit_test_cmd_ars_error_inject(struct nfit_test *t,
487 struct nd_cmd_ars_err_inj *err_inj, unsigned int buf_len)
491 if (buf_len != sizeof(*err_inj)) {
496 if (err_inj->err_inj_spa_range_length <= 0) {
501 rc = badrange_add(&t->badrange, err_inj->err_inj_spa_range_base,
502 err_inj->err_inj_spa_range_length);
506 if (err_inj->err_inj_options & (1 << ND_ARS_ERR_INJ_OPT_NOTIFY))
507 queue_work(nfit_wq, &t->work);
513 err_inj->status = NFIT_ARS_INJECT_INVALID;
517 static int nfit_test_cmd_ars_inject_clear(struct nfit_test *t,
518 struct nd_cmd_ars_err_inj_clr *err_clr, unsigned int buf_len)
522 if (buf_len != sizeof(*err_clr)) {
527 if (err_clr->err_inj_clr_spa_range_length <= 0) {
532 badrange_forget(&t->badrange, err_clr->err_inj_clr_spa_range_base,
533 err_clr->err_inj_clr_spa_range_length);
539 err_clr->status = NFIT_ARS_INJECT_INVALID;
543 static int nfit_test_cmd_ars_inject_status(struct nfit_test *t,
544 struct nd_cmd_ars_err_inj_stat *err_stat,
545 unsigned int buf_len)
547 struct badrange_entry *be;
548 int max = SZ_4K / sizeof(struct nd_error_stat_query_record);
551 err_stat->status = 0;
552 spin_lock(&t->badrange.lock);
553 list_for_each_entry(be, &t->badrange.list, list) {
554 err_stat->record[i].err_inj_stat_spa_range_base = be->start;
555 err_stat->record[i].err_inj_stat_spa_range_length = be->length;
560 spin_unlock(&t->badrange.lock);
561 err_stat->inj_err_rec_count = i;
/*
 * nfit_test_ctl(): top-level command dispatcher for the emulated nvdimm
 * bus.  Per-DIMM commands (nvdimm != NULL) are routed to the label/SMART
 * handlers; bus-level commands go to the ARS/translate/inject handlers.
 * NOTE(review): this extraction is missing interleaved lines (braces,
 * breaks, returns); the code lines below are preserved byte-identically.
 */
566 static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
567 struct nvdimm *nvdimm, unsigned int cmd, void *buf,
568 unsigned int buf_len, int *cmd_rc)
570 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
571 struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
572 unsigned int func = cmd;
573 int i, rc = 0, __cmd_rc;
/* ---- per-DIMM path: nvdimm provided ---- */
580 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
581 unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
/* ND_CMD_CALL wraps a passthrough packet: unwrap payload and function */
586 if (cmd == ND_CMD_CALL) {
587 struct nd_cmd_pkg *call_pkg = buf;
589 buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
590 buf = (void *) call_pkg->nd_payload;
591 func = call_pkg->nd_command;
592 if (call_pkg->nd_family != nfit_mem->family)
/* reject commands not advertised in the DIMM's cmd/dsm masks */
596 if (!test_bit(cmd, &cmd_mask)
597 || !test_bit(func, &nfit_mem->dsm_mask))
600 /* lookup label space for the given dimm */
601 for (i = 0; i < ARRAY_SIZE(handle); i++)
602 if (__to_nfit_memdev(nfit_mem)->device_handle ==
605 if (i >= ARRAY_SIZE(handle))
/* honor the fail_cmd sysfs override for this DIMM */
608 if ((1 << func) & dimm_fail_cmd_flags[i])
612 case ND_CMD_GET_CONFIG_SIZE:
613 rc = nfit_test_cmd_get_config_size(buf, buf_len);
615 case ND_CMD_GET_CONFIG_DATA:
616 rc = nfit_test_cmd_get_config_data(buf, buf_len,
617 t->label[i - t->dcr_idx]);
619 case ND_CMD_SET_CONFIG_DATA:
620 rc = nfit_test_cmd_set_config_data(buf, buf_len,
621 t->label[i - t->dcr_idx]);
624 rc = nfit_test_cmd_smart(buf, buf_len);
626 case ND_CMD_SMART_THRESHOLD:
627 rc = nfit_test_cmd_smart_threshold(buf, buf_len);
/* simulate a health event notification after threshold reads */
628 device_lock(&t->pdev.dev);
629 __acpi_nvdimm_notify(t->dimm_dev[i], 0x81);
630 device_unlock(&t->pdev.dev);
/* ---- bus-level path: no nvdimm provided ---- */
636 struct ars_state *ars_state = &t->ars_state;
637 struct nd_cmd_pkg *call_pkg = buf;
642 if (cmd == ND_CMD_CALL) {
643 func = call_pkg->nd_command;
645 buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
646 buf = (void *) call_pkg->nd_payload;
649 case NFIT_CMD_TRANSLATE_SPA:
650 rc = nfit_test_cmd_translate_spa(
651 acpi_desc->nvdimm_bus, buf, buf_len);
653 case NFIT_CMD_ARS_INJECT_SET:
654 rc = nfit_test_cmd_ars_error_inject(t, buf,
657 case NFIT_CMD_ARS_INJECT_CLEAR:
658 rc = nfit_test_cmd_ars_inject_clear(t, buf,
661 case NFIT_CMD_ARS_INJECT_GET:
662 rc = nfit_test_cmd_ars_inject_status(t, buf,
/* non-passthrough bus commands must be in the descriptor's cmd_mask */
670 if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
675 rc = nfit_test_cmd_ars_cap(buf, buf_len);
677 case ND_CMD_ARS_START:
678 rc = nfit_test_cmd_ars_start(t, ars_state, buf,
681 case ND_CMD_ARS_STATUS:
682 rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
685 case ND_CMD_CLEAR_ERROR:
686 rc = nfit_test_cmd_clear_error(t, buf, buf_len, cmd_rc);
696 static DEFINE_SPINLOCK(nfit_test_lock);
697 static struct nfit_test *instances[NUM_NFITS];
699 static void release_nfit_res(void *data)
701 struct nfit_test_resource *nfit_res = data;
703 spin_lock(&nfit_test_lock);
704 list_del(&nfit_res->list);
705 spin_unlock(&nfit_test_lock);
707 vfree(nfit_res->buf);
711 static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
714 struct device *dev = &t->pdev.dev;
715 struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
719 if (!buf || !nfit_res)
721 rc = devm_add_action(dev, release_nfit_res, nfit_res);
724 INIT_LIST_HEAD(&nfit_res->list);
725 memset(buf, 0, size);
728 nfit_res->res.start = *dma;
729 nfit_res->res.end = *dma + size - 1;
730 nfit_res->res.name = "NFIT";
731 spin_lock_init(&nfit_res->lock);
732 INIT_LIST_HEAD(&nfit_res->requests);
733 spin_lock(&nfit_test_lock);
734 list_add(&nfit_res->list, &t->resources);
735 spin_unlock(&nfit_test_lock);
737 return nfit_res->buf;
745 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
747 void *buf = vmalloc(size);
749 *dma = (unsigned long) buf;
750 return __test_alloc(t, size, dma, buf);
753 static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
757 for (i = 0; i < ARRAY_SIZE(instances); i++) {
758 struct nfit_test_resource *n, *nfit_res = NULL;
759 struct nfit_test *t = instances[i];
763 spin_lock(&nfit_test_lock);
764 list_for_each_entry(n, &t->resources, list) {
765 if (addr >= n->res.start && (addr < n->res.start
766 + resource_size(&n->res))) {
769 } else if (addr >= (unsigned long) n->buf
770 && (addr < (unsigned long) n->buf
771 + resource_size(&n->res))) {
776 spin_unlock(&nfit_test_lock);
784 static int ars_state_init(struct device *dev, struct ars_state *ars_state)
786 /* for testing, only store up to n records that fit within 4k */
787 ars_state->ars_status = devm_kzalloc(dev,
788 sizeof(struct nd_cmd_ars_status) + SZ_4K, GFP_KERNEL);
789 if (!ars_state->ars_status)
791 spin_lock_init(&ars_state->lock);
795 static void put_dimms(void *data)
797 struct device **dimm_dev = data;
800 for (i = 0; i < NUM_DCR; i++)
802 device_unregister(dimm_dev[i]);
/* Class for the "test_dimmN" devices that expose handle/fail_cmd sysfs attrs. */
static struct class *nfit_test_dimm;
807 static int dimm_name_to_id(struct device *dev)
811 if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1
812 || dimm >= NUM_DCR || dimm < 0)
818 static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
821 int dimm = dimm_name_to_id(dev);
826 return sprintf(buf, "%#x", handle[dimm]);
828 DEVICE_ATTR_RO(handle);
830 static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
833 int dimm = dimm_name_to_id(dev);
838 return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
841 static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
842 const char *buf, size_t size)
844 int dimm = dimm_name_to_id(dev);
851 rc = kstrtol(buf, 0, &val);
855 dimm_fail_cmd_flags[dimm] = val;
858 static DEVICE_ATTR_RW(fail_cmd);
860 static struct attribute *nfit_test_dimm_attributes[] = {
861 &dev_attr_fail_cmd.attr,
862 &dev_attr_handle.attr,
866 static struct attribute_group nfit_test_dimm_attribute_group = {
867 .attrs = nfit_test_dimm_attributes,
870 static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
871 &nfit_test_dimm_attribute_group,
875 static int nfit_test0_alloc(struct nfit_test *t)
877 size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
878 + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
879 + sizeof(struct acpi_nfit_control_region) * NUM_DCR
880 + offsetof(struct acpi_nfit_control_region,
881 window_size) * NUM_DCR
882 + sizeof(struct acpi_nfit_data_region) * NUM_BDW
883 + (sizeof(struct acpi_nfit_flush_address)
884 + sizeof(u64) * NUM_HINTS) * NUM_DCR;
887 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
890 t->nfit_size = nfit_size;
892 t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
896 t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
900 t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
904 for (i = 0; i < t->num_dcr; i++) {
905 t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
909 t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
912 sprintf(t->label[i], "label%d", i);
914 t->flush[i] = test_alloc(t, max(PAGE_SIZE,
915 sizeof(u64) * NUM_HINTS),
921 for (i = 0; i < t->num_dcr; i++) {
922 t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
927 t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
931 if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev))
933 for (i = 0; i < NUM_DCR; i++) {
934 t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
935 &t->pdev.dev, 0, NULL,
936 nfit_test_dimm_attribute_groups,
942 return ars_state_init(&t->pdev.dev, &t->ars_state);
945 static int nfit_test1_alloc(struct nfit_test *t)
947 size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
948 + sizeof(struct acpi_nfit_memory_map) * 2
949 + offsetof(struct acpi_nfit_control_region, window_size) * 2;
952 t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
955 t->nfit_size = nfit_size;
957 t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
961 for (i = 0; i < t->num_dcr; i++) {
962 t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
965 sprintf(t->label[i], "label%d", i);
968 t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
972 return ars_state_init(&t->pdev.dev, &t->ars_state);
975 static void dcr_common_init(struct acpi_nfit_control_region *dcr)
977 dcr->vendor_id = 0xabcd;
979 dcr->revision_id = 1;
980 dcr->valid_fields = 1;
981 dcr->manufacturing_location = 0xa;
982 dcr->manufacturing_date = cpu_to_be16(2016);
985 static void nfit_test0_setup(struct nfit_test *t)
987 const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
988 + (sizeof(u64) * NUM_HINTS);
989 struct acpi_nfit_desc *acpi_desc;
990 struct acpi_nfit_memory_map *memdev;
991 void *nfit_buf = t->nfit_buf;
992 struct acpi_nfit_system_address *spa;
993 struct acpi_nfit_control_region *dcr;
994 struct acpi_nfit_data_region *bdw;
995 struct acpi_nfit_flush_address *flush;
996 unsigned int offset, i;
999 * spa0 (interleave first half of dimm0 and dimm1, note storage
1000 * does not actually alias the related block-data-window
1004 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1005 spa->header.length = sizeof(*spa);
1006 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1007 spa->range_index = 0+1;
1008 spa->address = t->spa_set_dma[0];
1009 spa->length = SPA0_SIZE;
1012 * spa1 (interleave last half of the 4 DIMMS, note storage
1013 * does not actually alias the related block-data-window
1016 spa = nfit_buf + sizeof(*spa);
1017 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1018 spa->header.length = sizeof(*spa);
1019 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1020 spa->range_index = 1+1;
1021 spa->address = t->spa_set_dma[1];
1022 spa->length = SPA1_SIZE;
1024 /* spa2 (dcr0) dimm0 */
1025 spa = nfit_buf + sizeof(*spa) * 2;
1026 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1027 spa->header.length = sizeof(*spa);
1028 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1029 spa->range_index = 2+1;
1030 spa->address = t->dcr_dma[0];
1031 spa->length = DCR_SIZE;
1033 /* spa3 (dcr1) dimm1 */
1034 spa = nfit_buf + sizeof(*spa) * 3;
1035 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1036 spa->header.length = sizeof(*spa);
1037 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1038 spa->range_index = 3+1;
1039 spa->address = t->dcr_dma[1];
1040 spa->length = DCR_SIZE;
1042 /* spa4 (dcr2) dimm2 */
1043 spa = nfit_buf + sizeof(*spa) * 4;
1044 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1045 spa->header.length = sizeof(*spa);
1046 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1047 spa->range_index = 4+1;
1048 spa->address = t->dcr_dma[2];
1049 spa->length = DCR_SIZE;
1051 /* spa5 (dcr3) dimm3 */
1052 spa = nfit_buf + sizeof(*spa) * 5;
1053 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1054 spa->header.length = sizeof(*spa);
1055 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1056 spa->range_index = 5+1;
1057 spa->address = t->dcr_dma[3];
1058 spa->length = DCR_SIZE;
1060 /* spa6 (bdw for dcr0) dimm0 */
1061 spa = nfit_buf + sizeof(*spa) * 6;
1062 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1063 spa->header.length = sizeof(*spa);
1064 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1065 spa->range_index = 6+1;
1066 spa->address = t->dimm_dma[0];
1067 spa->length = DIMM_SIZE;
1069 /* spa7 (bdw for dcr1) dimm1 */
1070 spa = nfit_buf + sizeof(*spa) * 7;
1071 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1072 spa->header.length = sizeof(*spa);
1073 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1074 spa->range_index = 7+1;
1075 spa->address = t->dimm_dma[1];
1076 spa->length = DIMM_SIZE;
1078 /* spa8 (bdw for dcr2) dimm2 */
1079 spa = nfit_buf + sizeof(*spa) * 8;
1080 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1081 spa->header.length = sizeof(*spa);
1082 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1083 spa->range_index = 8+1;
1084 spa->address = t->dimm_dma[2];
1085 spa->length = DIMM_SIZE;
1087 /* spa9 (bdw for dcr3) dimm3 */
1088 spa = nfit_buf + sizeof(*spa) * 9;
1089 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1090 spa->header.length = sizeof(*spa);
1091 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1092 spa->range_index = 9+1;
1093 spa->address = t->dimm_dma[3];
1094 spa->length = DIMM_SIZE;
1096 offset = sizeof(*spa) * 10;
1097 /* mem-region0 (spa0, dimm0) */
1098 memdev = nfit_buf + offset;
1099 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1100 memdev->header.length = sizeof(*memdev);
1101 memdev->device_handle = handle[0];
1102 memdev->physical_id = 0;
1103 memdev->region_id = 0;
1104 memdev->range_index = 0+1;
1105 memdev->region_index = 4+1;
1106 memdev->region_size = SPA0_SIZE/2;
1107 memdev->region_offset = 1;
1108 memdev->address = 0;
1109 memdev->interleave_index = 0;
1110 memdev->interleave_ways = 2;
1112 /* mem-region1 (spa0, dimm1) */
1113 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
1114 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1115 memdev->header.length = sizeof(*memdev);
1116 memdev->device_handle = handle[1];
1117 memdev->physical_id = 1;
1118 memdev->region_id = 0;
1119 memdev->range_index = 0+1;
1120 memdev->region_index = 5+1;
1121 memdev->region_size = SPA0_SIZE/2;
1122 memdev->region_offset = (1 << 8);
1123 memdev->address = 0;
1124 memdev->interleave_index = 0;
1125 memdev->interleave_ways = 2;
1126 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1128 /* mem-region2 (spa1, dimm0) */
1129 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
1130 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1131 memdev->header.length = sizeof(*memdev);
1132 memdev->device_handle = handle[0];
1133 memdev->physical_id = 0;
1134 memdev->region_id = 1;
1135 memdev->range_index = 1+1;
1136 memdev->region_index = 4+1;
1137 memdev->region_size = SPA1_SIZE/4;
1138 memdev->region_offset = (1 << 16);
1139 memdev->address = SPA0_SIZE/2;
1140 memdev->interleave_index = 0;
1141 memdev->interleave_ways = 4;
1142 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1144 /* mem-region3 (spa1, dimm1) */
1145 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
1146 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1147 memdev->header.length = sizeof(*memdev);
1148 memdev->device_handle = handle[1];
1149 memdev->physical_id = 1;
1150 memdev->region_id = 1;
1151 memdev->range_index = 1+1;
1152 memdev->region_index = 5+1;
1153 memdev->region_size = SPA1_SIZE/4;
1154 memdev->region_offset = (1 << 24);
1155 memdev->address = SPA0_SIZE/2;
1156 memdev->interleave_index = 0;
1157 memdev->interleave_ways = 4;
1159 /* mem-region4 (spa1, dimm2) */
1160 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
1161 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1162 memdev->header.length = sizeof(*memdev);
1163 memdev->device_handle = handle[2];
1164 memdev->physical_id = 2;
1165 memdev->region_id = 0;
1166 memdev->range_index = 1+1;
1167 memdev->region_index = 6+1;
1168 memdev->region_size = SPA1_SIZE/4;
1169 memdev->region_offset = (1ULL << 32);
1170 memdev->address = SPA0_SIZE/2;
1171 memdev->interleave_index = 0;
1172 memdev->interleave_ways = 4;
1173 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1175 /* mem-region5 (spa1, dimm3) */
1176 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
1177 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1178 memdev->header.length = sizeof(*memdev);
1179 memdev->device_handle = handle[3];
1180 memdev->physical_id = 3;
1181 memdev->region_id = 0;
1182 memdev->range_index = 1+1;
1183 memdev->region_index = 7+1;
1184 memdev->region_size = SPA1_SIZE/4;
1185 memdev->region_offset = (1ULL << 40);
1186 memdev->address = SPA0_SIZE/2;
1187 memdev->interleave_index = 0;
1188 memdev->interleave_ways = 4;
1190 /* mem-region6 (spa/dcr0, dimm0) */
1191 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
1192 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1193 memdev->header.length = sizeof(*memdev);
1194 memdev->device_handle = handle[0];
1195 memdev->physical_id = 0;
1196 memdev->region_id = 0;
1197 memdev->range_index = 2+1;
1198 memdev->region_index = 0+1;
1199 memdev->region_size = 0;
1200 memdev->region_offset = 0;
1201 memdev->address = 0;
1202 memdev->interleave_index = 0;
1203 memdev->interleave_ways = 1;
1205 /* mem-region7 (spa/dcr1, dimm1) */
1206 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
1207 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1208 memdev->header.length = sizeof(*memdev);
1209 memdev->device_handle = handle[1];
1210 memdev->physical_id = 1;
1211 memdev->region_id = 0;
1212 memdev->range_index = 3+1;
1213 memdev->region_index = 1+1;
1214 memdev->region_size = 0;
1215 memdev->region_offset = 0;
1216 memdev->address = 0;
1217 memdev->interleave_index = 0;
1218 memdev->interleave_ways = 1;
1220 /* mem-region8 (spa/dcr2, dimm2) */
1221 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
1222 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1223 memdev->header.length = sizeof(*memdev);
1224 memdev->device_handle = handle[2];
1225 memdev->physical_id = 2;
1226 memdev->region_id = 0;
1227 memdev->range_index = 4+1;
1228 memdev->region_index = 2+1;
1229 memdev->region_size = 0;
1230 memdev->region_offset = 0;
1231 memdev->address = 0;
1232 memdev->interleave_index = 0;
1233 memdev->interleave_ways = 1;
1235 /* mem-region9 (spa/dcr3, dimm3) */
1236 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
1237 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1238 memdev->header.length = sizeof(*memdev);
1239 memdev->device_handle = handle[3];
1240 memdev->physical_id = 3;
1241 memdev->region_id = 0;
1242 memdev->range_index = 5+1;
1243 memdev->region_index = 3+1;
1244 memdev->region_size = 0;
1245 memdev->region_offset = 0;
1246 memdev->address = 0;
1247 memdev->interleave_index = 0;
1248 memdev->interleave_ways = 1;
1250 /* mem-region10 (spa/bdw0, dimm0) */
1251 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
1252 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1253 memdev->header.length = sizeof(*memdev);
1254 memdev->device_handle = handle[0];
1255 memdev->physical_id = 0;
1256 memdev->region_id = 0;
1257 memdev->range_index = 6+1;
1258 memdev->region_index = 0+1;
1259 memdev->region_size = 0;
1260 memdev->region_offset = 0;
1261 memdev->address = 0;
1262 memdev->interleave_index = 0;
1263 memdev->interleave_ways = 1;
1265 /* mem-region11 (spa/bdw1, dimm1) */
1266 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
1267 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1268 memdev->header.length = sizeof(*memdev);
1269 memdev->device_handle = handle[1];
1270 memdev->physical_id = 1;
1271 memdev->region_id = 0;
1272 memdev->range_index = 7+1;
1273 memdev->region_index = 1+1;
1274 memdev->region_size = 0;
1275 memdev->region_offset = 0;
1276 memdev->address = 0;
1277 memdev->interleave_index = 0;
1278 memdev->interleave_ways = 1;
1280 /* mem-region12 (spa/bdw2, dimm2) */
1281 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
1282 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1283 memdev->header.length = sizeof(*memdev);
1284 memdev->device_handle = handle[2];
1285 memdev->physical_id = 2;
1286 memdev->region_id = 0;
1287 memdev->range_index = 8+1;
1288 memdev->region_index = 2+1;
1289 memdev->region_size = 0;
1290 memdev->region_offset = 0;
1291 memdev->address = 0;
1292 memdev->interleave_index = 0;
1293 memdev->interleave_ways = 1;
1295 /* mem-region13 (spa/dcr3, dimm3) */
1296 memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
1297 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1298 memdev->header.length = sizeof(*memdev);
1299 memdev->device_handle = handle[3];
1300 memdev->physical_id = 3;
1301 memdev->region_id = 0;
1302 memdev->range_index = 9+1;
1303 memdev->region_index = 3+1;
1304 memdev->region_size = 0;
1305 memdev->region_offset = 0;
1306 memdev->address = 0;
1307 memdev->interleave_index = 0;
1308 memdev->interleave_ways = 1;
1309 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1311 offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
1312 /* dcr-descriptor0: blk */
1313 dcr = nfit_buf + offset;
1314 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1315 dcr->header.length = sizeof(struct acpi_nfit_control_region);
1316 dcr->region_index = 0+1;
1317 dcr_common_init(dcr);
1318 dcr->serial_number = ~handle[0];
1319 dcr->code = NFIT_FIC_BLK;
1321 dcr->window_size = DCR_SIZE;
1322 dcr->command_offset = 0;
1323 dcr->command_size = 8;
1324 dcr->status_offset = 8;
1325 dcr->status_size = 4;
1327 /* dcr-descriptor1: blk */
1328 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
1329 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1330 dcr->header.length = sizeof(struct acpi_nfit_control_region);
1331 dcr->region_index = 1+1;
1332 dcr_common_init(dcr);
1333 dcr->serial_number = ~handle[1];
1334 dcr->code = NFIT_FIC_BLK;
1336 dcr->window_size = DCR_SIZE;
1337 dcr->command_offset = 0;
1338 dcr->command_size = 8;
1339 dcr->status_offset = 8;
1340 dcr->status_size = 4;
1342 /* dcr-descriptor2: blk */
1343 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
1344 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1345 dcr->header.length = sizeof(struct acpi_nfit_control_region);
1346 dcr->region_index = 2+1;
1347 dcr_common_init(dcr);
1348 dcr->serial_number = ~handle[2];
1349 dcr->code = NFIT_FIC_BLK;
1351 dcr->window_size = DCR_SIZE;
1352 dcr->command_offset = 0;
1353 dcr->command_size = 8;
1354 dcr->status_offset = 8;
1355 dcr->status_size = 4;
1357 /* dcr-descriptor3: blk */
1358 dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
1359 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1360 dcr->header.length = sizeof(struct acpi_nfit_control_region);
1361 dcr->region_index = 3+1;
1362 dcr_common_init(dcr);
1363 dcr->serial_number = ~handle[3];
1364 dcr->code = NFIT_FIC_BLK;
1366 dcr->window_size = DCR_SIZE;
1367 dcr->command_offset = 0;
1368 dcr->command_size = 8;
1369 dcr->status_offset = 8;
1370 dcr->status_size = 4;
1372 offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
1373 /* dcr-descriptor0: pmem */
1374 dcr = nfit_buf + offset;
1375 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1376 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1378 dcr->region_index = 4+1;
1379 dcr_common_init(dcr);
1380 dcr->serial_number = ~handle[0];
1381 dcr->code = NFIT_FIC_BYTEN;
1384 /* dcr-descriptor1: pmem */
1385 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1387 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1388 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1390 dcr->region_index = 5+1;
1391 dcr_common_init(dcr);
1392 dcr->serial_number = ~handle[1];
1393 dcr->code = NFIT_FIC_BYTEN;
1396 /* dcr-descriptor2: pmem */
1397 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1399 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1400 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1402 dcr->region_index = 6+1;
1403 dcr_common_init(dcr);
1404 dcr->serial_number = ~handle[2];
1405 dcr->code = NFIT_FIC_BYTEN;
1408 /* dcr-descriptor3: pmem */
1409 dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
1411 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1412 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1414 dcr->region_index = 7+1;
1415 dcr_common_init(dcr);
1416 dcr->serial_number = ~handle[3];
1417 dcr->code = NFIT_FIC_BYTEN;
1420 offset = offset + offsetof(struct acpi_nfit_control_region,
1422 /* bdw0 (spa/dcr0, dimm0) */
1423 bdw = nfit_buf + offset;
1424 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1425 bdw->header.length = sizeof(struct acpi_nfit_data_region);
1426 bdw->region_index = 0+1;
1429 bdw->size = BDW_SIZE;
1430 bdw->capacity = DIMM_SIZE;
1431 bdw->start_address = 0;
1433 /* bdw1 (spa/dcr1, dimm1) */
1434 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
1435 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1436 bdw->header.length = sizeof(struct acpi_nfit_data_region);
1437 bdw->region_index = 1+1;
1440 bdw->size = BDW_SIZE;
1441 bdw->capacity = DIMM_SIZE;
1442 bdw->start_address = 0;
1444 /* bdw2 (spa/dcr2, dimm2) */
1445 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
1446 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1447 bdw->header.length = sizeof(struct acpi_nfit_data_region);
1448 bdw->region_index = 2+1;
1451 bdw->size = BDW_SIZE;
1452 bdw->capacity = DIMM_SIZE;
1453 bdw->start_address = 0;
1455 /* bdw3 (spa/dcr3, dimm3) */
1456 bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
1457 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1458 bdw->header.length = sizeof(struct acpi_nfit_data_region);
1459 bdw->region_index = 3+1;
1462 bdw->size = BDW_SIZE;
1463 bdw->capacity = DIMM_SIZE;
1464 bdw->start_address = 0;
1466 offset = offset + sizeof(struct acpi_nfit_data_region) * 4;
1467 /* flush0 (dimm0) */
1468 flush = nfit_buf + offset;
1469 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1470 flush->header.length = flush_hint_size;
1471 flush->device_handle = handle[0];
1472 flush->hint_count = NUM_HINTS;
1473 for (i = 0; i < NUM_HINTS; i++)
1474 flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);
1476 /* flush1 (dimm1) */
1477 flush = nfit_buf + offset + flush_hint_size * 1;
1478 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1479 flush->header.length = flush_hint_size;
1480 flush->device_handle = handle[1];
1481 flush->hint_count = NUM_HINTS;
1482 for (i = 0; i < NUM_HINTS; i++)
1483 flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);
1485 /* flush2 (dimm2) */
1486 flush = nfit_buf + offset + flush_hint_size * 2;
1487 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1488 flush->header.length = flush_hint_size;
1489 flush->device_handle = handle[2];
1490 flush->hint_count = NUM_HINTS;
1491 for (i = 0; i < NUM_HINTS; i++)
1492 flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);
1494 /* flush3 (dimm3) */
1495 flush = nfit_buf + offset + flush_hint_size * 3;
1496 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1497 flush->header.length = flush_hint_size;
1498 flush->device_handle = handle[3];
1499 flush->hint_count = NUM_HINTS;
1500 for (i = 0; i < NUM_HINTS; i++)
1501 flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
1503 if (t->setup_hotplug) {
1504 offset = offset + flush_hint_size * 4;
1505 /* dcr-descriptor4: blk */
1506 dcr = nfit_buf + offset;
1507 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1508 dcr->header.length = sizeof(struct acpi_nfit_control_region);
1509 dcr->region_index = 8+1;
1510 dcr_common_init(dcr);
1511 dcr->serial_number = ~handle[4];
1512 dcr->code = NFIT_FIC_BLK;
1514 dcr->window_size = DCR_SIZE;
1515 dcr->command_offset = 0;
1516 dcr->command_size = 8;
1517 dcr->status_offset = 8;
1518 dcr->status_size = 4;
1520 offset = offset + sizeof(struct acpi_nfit_control_region);
1521 /* dcr-descriptor4: pmem */
1522 dcr = nfit_buf + offset;
1523 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1524 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1526 dcr->region_index = 9+1;
1527 dcr_common_init(dcr);
1528 dcr->serial_number = ~handle[4];
1529 dcr->code = NFIT_FIC_BYTEN;
1532 offset = offset + offsetof(struct acpi_nfit_control_region,
1534 /* bdw4 (spa/dcr4, dimm4) */
1535 bdw = nfit_buf + offset;
1536 bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
1537 bdw->header.length = sizeof(struct acpi_nfit_data_region);
1538 bdw->region_index = 8+1;
1541 bdw->size = BDW_SIZE;
1542 bdw->capacity = DIMM_SIZE;
1543 bdw->start_address = 0;
1545 offset = offset + sizeof(struct acpi_nfit_data_region);
1546 /* spa10 (dcr4) dimm4 */
1547 spa = nfit_buf + offset;
1548 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1549 spa->header.length = sizeof(*spa);
1550 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
1551 spa->range_index = 10+1;
1552 spa->address = t->dcr_dma[4];
1553 spa->length = DCR_SIZE;
1556 * spa11 (single-dimm interleave for hotplug, note storage
1557 * does not actually alias the related block-data-window
1560 spa = nfit_buf + offset + sizeof(*spa);
1561 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1562 spa->header.length = sizeof(*spa);
1563 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1564 spa->range_index = 11+1;
1565 spa->address = t->spa_set_dma[2];
1566 spa->length = SPA0_SIZE;
1568 /* spa12 (bdw for dcr4) dimm4 */
1569 spa = nfit_buf + offset + sizeof(*spa) * 2;
1570 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1571 spa->header.length = sizeof(*spa);
1572 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
1573 spa->range_index = 12+1;
1574 spa->address = t->dimm_dma[4];
1575 spa->length = DIMM_SIZE;
1577 offset = offset + sizeof(*spa) * 3;
1578 /* mem-region14 (spa/dcr4, dimm4) */
1579 memdev = nfit_buf + offset;
1580 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1581 memdev->header.length = sizeof(*memdev);
1582 memdev->device_handle = handle[4];
1583 memdev->physical_id = 4;
1584 memdev->region_id = 0;
1585 memdev->range_index = 10+1;
1586 memdev->region_index = 8+1;
1587 memdev->region_size = 0;
1588 memdev->region_offset = 0;
1589 memdev->address = 0;
1590 memdev->interleave_index = 0;
1591 memdev->interleave_ways = 1;
1593 /* mem-region15 (spa0, dimm4) */
1594 memdev = nfit_buf + offset +
1595 sizeof(struct acpi_nfit_memory_map);
1596 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1597 memdev->header.length = sizeof(*memdev);
1598 memdev->device_handle = handle[4];
1599 memdev->physical_id = 4;
1600 memdev->region_id = 0;
1601 memdev->range_index = 11+1;
1602 memdev->region_index = 9+1;
1603 memdev->region_size = SPA0_SIZE;
1604 memdev->region_offset = (1ULL << 48);
1605 memdev->address = 0;
1606 memdev->interleave_index = 0;
1607 memdev->interleave_ways = 1;
1608 memdev->flags = ACPI_NFIT_MEM_HEALTH_ENABLED;
1610 /* mem-region16 (spa/bdw4, dimm4) */
1611 memdev = nfit_buf + offset +
1612 sizeof(struct acpi_nfit_memory_map) * 2;
1613 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1614 memdev->header.length = sizeof(*memdev);
1615 memdev->device_handle = handle[4];
1616 memdev->physical_id = 4;
1617 memdev->region_id = 0;
1618 memdev->range_index = 12+1;
1619 memdev->region_index = 8+1;
1620 memdev->region_size = 0;
1621 memdev->region_offset = 0;
1622 memdev->address = 0;
1623 memdev->interleave_index = 0;
1624 memdev->interleave_ways = 1;
1626 offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;
1627 /* flush3 (dimm4) */
1628 flush = nfit_buf + offset;
1629 flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
1630 flush->header.length = flush_hint_size;
1631 flush->device_handle = handle[4];
1632 flush->hint_count = NUM_HINTS;
1633 for (i = 0; i < NUM_HINTS; i++)
1634 flush->hint_address[i] = t->flush_dma[4]
1638 post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
1641 acpi_desc = &t->acpi_desc;
1642 set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
1643 set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1644 set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
1645 set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
1646 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1647 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1648 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1649 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
1650 set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
1651 set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
1652 set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en);
1653 set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en);
1654 set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en);
1655 set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en);
/*
 * nfit_test1_setup() - populate the NFIT buffer for the second test bus.
 *
 * Builds a minimal topology: one flat PMEM SPA range with no
 * block-data-window aliasing, one virtual-CD (VCD) range, and two
 * memdev/control-region pairs whose flags exercise the driver's
 * save/restore/flush-failed, not-armed, and map-failed handling.
 *
 * NOTE(review): this extraction is missing physical lines (the opening
 * brace, the "offset" initialization, the continuation lines of the
 * offsetof(...) and post_ars_status(...) calls, and the closing brace).
 * Comments below describe only the visible statements.
 */
1658 static void nfit_test1_setup(struct nfit_test *t)
1661 void *nfit_buf = t->nfit_buf;
1662 struct acpi_nfit_memory_map *memdev;
1663 struct acpi_nfit_control_region *dcr;
1664 struct acpi_nfit_system_address *spa;
1665 struct acpi_nfit_desc *acpi_desc;
/* spa0: PM range backed by spa_set_dma[0]; range_index values are 1-based */
1668 /* spa0 (flat range with no bdw aliasing) */
1669 spa = nfit_buf + offset;
1670 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1671 spa->header.length = sizeof(*spa);
1672 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
1673 spa->range_index = 0+1;
1674 spa->address = t->spa_set_dma[0];
1675 spa->length = SPA2_SIZE;
/* VCD range: range_index 0 marks it as not part of an interleave set */
1677 /* virtual cd region */
1678 spa = nfit_buf + sizeof(*spa);
1679 spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
1680 spa->header.length = sizeof(*spa);
1681 memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
1682 spa->range_index = 0;
1683 spa->address = t->spa_set_dma[1];
1684 spa->length = SPA_VCD_SIZE;
1686 offset += sizeof(*spa) * 2;
1687 /* mem-region0 (spa0, dimm0) */
1688 memdev = nfit_buf + offset;
1689 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1690 memdev->header.length = sizeof(*memdev);
1691 memdev->device_handle = handle[5];
1692 memdev->physical_id = 0;
1693 memdev->region_id = 0;
1694 memdev->range_index = 0+1;
1695 memdev->region_index = 0+1;
1696 memdev->region_size = SPA2_SIZE;
1697 memdev->region_offset = 0;
1698 memdev->address = 0;
1699 memdev->interleave_index = 0;
1700 memdev->interleave_ways = 1;
/* deliberately unhealthy DIMM: exercises flag reporting in the driver */
1701 memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
1702 | ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
1703 | ACPI_NFIT_MEM_NOT_ARMED;
1705 offset += sizeof(*memdev);
/* control region for dimm0; header.length truncated to exclude the
 * BLK window fields (offsetof continuation line missing from view) */
1706 /* dcr-descriptor0 */
1707 dcr = nfit_buf + offset;
1708 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1709 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1711 dcr->region_index = 0+1;
1712 dcr_common_init(dcr);
1713 dcr->serial_number = ~handle[5];
1714 dcr->code = NFIT_FIC_BYTE;
/* second memdev: range_index 0 plus MAP_FAILED exercises the
 * "dimm with no mapped ranges" path */
1717 offset += dcr->header.length;
1718 memdev = nfit_buf + offset;
1719 memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
1720 memdev->header.length = sizeof(*memdev);
1721 memdev->device_handle = handle[6];
1722 memdev->physical_id = 0;
1723 memdev->region_id = 0;
1724 memdev->range_index = 0;
1725 memdev->region_index = 0+2;
1726 memdev->region_size = SPA2_SIZE;
1727 memdev->region_offset = 0;
1728 memdev->address = 0;
1729 memdev->interleave_index = 0;
1730 memdev->interleave_ways = 1;
1731 memdev->flags = ACPI_NFIT_MEM_MAP_FAILED;
1733 /* dcr-descriptor1 */
1734 offset += sizeof(*memdev);
1735 dcr = nfit_buf + offset;
1736 dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
1737 dcr->header.length = offsetof(struct acpi_nfit_control_region,
1739 dcr->region_index = 0+2;
1740 dcr_common_init(dcr);
1741 dcr->serial_number = ~handle[6];
1742 dcr->code = NFIT_FIC_BYTE;
/* seed an ARS result for this bus (argument list continues on a
 * line missing from this view) */
1745 post_ars_status(&t->ars_state, &t->badrange, t->spa_set_dma[0],
/* force-enable the bus-level ARS command set for this test bus */
1748 acpi_desc = &t->acpi_desc;
1749 set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
1750 set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
1751 set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
1752 set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
/*
 * nfit_test_blk_do_io() - BLK-region I/O callback for the test bus.
 *
 * Emulates a block-data-window transfer by memcpy()ing directly between
 * @iobuf and the mapped window at @dpa, under a region lane lock.
 *
 * NOTE(review): the lines selecting read vs. write on @rw, the return
 * statement, and the braces are missing from this extraction; only the
 * visible statements are annotated.
 */
1755 static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
1756 void *iobuf, u64 len, int rw)
1758 struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
1759 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
1760 struct nd_region *nd_region = &ndbr->nd_region;
/* serialize access to the shared block window */
1763 lane = nd_region_acquire_lane(nd_region);
/* write path: copy caller buffer into the window */
1765 memcpy(mmio->addr.base + dpa, iobuf, len);
/* read path: copy window contents back to the caller */
1767 memcpy(iobuf, mmio->addr.base + dpa, len);
1769 /* give us some coverage of the arch_invalidate_pmem() API */
1770 arch_invalidate_pmem(mmio->addr.base + dpa, len);
1772 nd_region_release_lane(nd_region, lane);
/* opaque token used as the fake ACPI handle for nfit_ctl_test() */
1777 static unsigned long nfit_ctl_handle;
/* canned _DSM result returned by nfit_test_evaluate_dsm(); populated
 * by setup_result() below */
1779 union acpi_object *result;
/*
 * nfit_test_evaluate_dsm() - mock ACPI _DSM evaluation for nfit_ctl_test().
 *
 * Rejects any handle other than &nfit_ctl_handle; otherwise hands back
 * the pre-staged 'result' object (the return statement is on a line
 * missing from this extraction).
 */
1781 static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
1782 const guid_t *guid, u64 rev, u64 func, union acpi_object *argv4)
1784 if (handle != &nfit_ctl_handle)
1785 return ERR_PTR(-ENXIO);
/*
 * setup_result() - stage a canned _DSM buffer result of @size bytes.
 *
 * Allocates one acpi_object with @size bytes of trailing payload, copies
 * @buf into the payload, then zeroes @buf so the subsequent ioctl round
 * trip must repopulate it from the staged result.
 *
 * NOTE(review): the allocation-failure check and the final return are on
 * lines missing from this extraction.
 */
1790 static int setup_result(void *buf, size_t size)
1792 result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
/* NOTE(review): trailing comma here is the comma operator, not a typo
 * that changes behavior -- but a ';' would be conventional */
1795 result->package.type = ACPI_TYPE_BUFFER,
/* payload lives immediately after the acpi_object header */
1796 result->buffer.pointer = (void *) (result + 1);
1797 result->buffer.length = size;
1798 memcpy(result->buffer.pointer, buf, size);
1799 memset(buf, 0, size);
/*
 * nfit_ctl_test() - unit tests for acpi_nfit_ctl() marshaling.
 *
 * Fabricates a fake acpi_device / acpi_nfit_desc / nvdimm on the heap
 * (devm-allocated against @dev) and drives acpi_nfit_ctl() through a
 * series of command payloads, checking rc / cmd_rc / output translation
 * for each case. Returns 0 on success (the return statements and the
 * 'cmds' union declaration are on lines missing from this extraction).
 */
1803 static int nfit_ctl_test(struct device *dev)
1806 struct nvdimm *nvdimm;
1807 struct acpi_device *adev;
1808 struct nfit_mem *nfit_mem;
1809 struct nd_ars_record *record;
1810 struct acpi_nfit_desc *acpi_desc;
/* sentinel pattern verified after each ars_status round trip */
1811 const u64 test_val = 0x0123456789abcdefULL;
1812 unsigned long mask, cmd_size, offset;
1814 struct nd_cmd_get_config_size cfg_size;
1815 struct nd_cmd_clear_error clear_err;
1816 struct nd_cmd_ars_status ars_stat;
1817 struct nd_cmd_ars_cap ars_cap;
/* buffer large enough for an ars_status with one record */
1818 char buf[sizeof(struct nd_cmd_ars_status)
1819 + sizeof(struct nd_ars_record)];
/* fake ACPI device whose handle routes to nfit_test_evaluate_dsm() */
1822 adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
1825 *adev = (struct acpi_device) {
1826 .handle = &nfit_ctl_handle,
1828 .init_name = "test-adev",
/* bus descriptor advertising the ARS + CALL command set */
1832 acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
1835 *acpi_desc = (struct acpi_nfit_desc) {
1837 .cmd_mask = 1UL << ND_CMD_ARS_CAP
1838 | 1UL << ND_CMD_ARS_START
1839 | 1UL << ND_CMD_ARS_STATUS
1840 | 1UL << ND_CMD_CLEAR_ERROR
1841 | 1UL << ND_CMD_CALL,
1842 .module = THIS_MODULE,
1843 .provider_name = "ACPI.NFIT",
1844 .ndctl = acpi_nfit_ctl,
1845 .bus_dsm_mask = 1UL << NFIT_CMD_TRANSLATE_SPA
1846 | 1UL << NFIT_CMD_ARS_INJECT_SET
1847 | 1UL << NFIT_CMD_ARS_INJECT_CLEAR
1848 | 1UL << NFIT_CMD_ARS_INJECT_GET,
/* fake DIMM exposing the Intel-family per-dimm command set */
1853 nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
1857 mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
1858 | 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
1859 | 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
1860 | 1UL << ND_CMD_VENDOR;
1861 *nfit_mem = (struct nfit_mem) {
1863 .family = NVDIMM_FAMILY_INTEL,
1867 nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
1870 *nvdimm = (struct nvdimm) {
1871 .provider_data = nfit_mem,
1874 .init_name = "test-dimm",
1879 /* basic checkout of a typical 'get config size' command */
1880 cmd_size = sizeof(cmds.cfg_size);
1881 cmds.cfg_size = (struct nd_cmd_get_config_size) {
1883 .config_size = SZ_128K,
1886 rc = setup_result(cmds.buf, cmd_size);
1889 rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
1890 cmds.buf, cmd_size, &cmd_rc);
/* expect the staged config_size/max_xfer to round-trip unchanged */
1892 if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
1893 || cmds.cfg_size.config_size != SZ_128K
1894 || cmds.cfg_size.max_xfer != SZ_4K) {
1895 dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1896 __func__, __LINE__, rc, cmd_rc);
1901 /* test ars_status with zero output */
1902 cmd_size = offsetof(struct nd_cmd_ars_status, address);
1903 cmds.ars_stat = (struct nd_cmd_ars_status) {
1906 rc = setup_result(cmds.buf, cmd_size);
1909 rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
1910 cmds.buf, cmd_size, &cmd_rc);
1912 if (rc < 0 || cmd_rc) {
1913 dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1914 __func__, __LINE__, rc, cmd_rc);
1919 /* test ars_cap with benign extended status */
1920 cmd_size = sizeof(cmds.ars_cap);
1921 cmds.ars_cap = (struct nd_cmd_ars_cap) {
1922 .status = ND_ARS_PERSISTENT << 16,
/* stage only the fields after 'status' so the pre-set extended
 * status survives into the command result */
1924 offset = offsetof(struct nd_cmd_ars_cap, status);
1925 rc = setup_result(cmds.buf + offset, cmd_size - offset);
1928 rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
1929 cmds.buf, cmd_size, &cmd_rc);
1931 if (rc < 0 || cmd_rc) {
1932 dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1933 __func__, __LINE__, rc, cmd_rc);
1938 /* test ars_status with 'status' trimmed from 'out_length' */
1939 cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
1940 cmds.ars_stat = (struct nd_cmd_ars_status) {
1941 .out_length = cmd_size - 4,
1943 record = &cmds.ars_stat.records[0];
1944 *record = (struct nd_ars_record) {
1947 rc = setup_result(cmds.buf, cmd_size);
1950 rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
1951 cmds.buf, cmd_size, &cmd_rc);
/* the record must survive the out_length interpretation quirk */
1953 if (rc < 0 || cmd_rc || record->length != test_val) {
1954 dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1955 __func__, __LINE__, rc, cmd_rc);
1960 /* test ars_status with 'Output (Size)' including 'status' */
1961 cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
1962 cmds.ars_stat = (struct nd_cmd_ars_status) {
1963 .out_length = cmd_size,
1965 record = &cmds.ars_stat.records[0];
1966 *record = (struct nd_ars_record) {
1969 rc = setup_result(cmds.buf, cmd_size);
1972 rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
1973 cmds.buf, cmd_size, &cmd_rc);
1975 if (rc < 0 || cmd_rc || record->length != test_val) {
1976 dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1977 __func__, __LINE__, rc, cmd_rc);
1982 /* test extended status for get_config_size results in failure */
1983 cmd_size = sizeof(cmds.cfg_size);
1984 cmds.cfg_size = (struct nd_cmd_get_config_size) {
1987 rc = setup_result(cmds.buf, cmd_size);
1990 rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
1991 cmds.buf, cmd_size, &cmd_rc);
/* here a *negative* cmd_rc is the expected (pass) outcome */
1993 if (rc < 0 || cmd_rc >= 0) {
1994 dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
1995 __func__, __LINE__, rc, cmd_rc);
1999 /* test clear error */
2000 cmd_size = sizeof(cmds.clear_err);
2001 cmds.clear_err = (struct nd_cmd_clear_error) {
2005 rc = setup_result(cmds.buf, cmd_size);
2008 rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_CLEAR_ERROR,
2009 cmds.buf, cmd_size, &cmd_rc);
2010 if (rc < 0 || cmd_rc) {
2011 dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
2012 __func__, __LINE__, rc, cmd_rc);
/*
 * nfit_test_probe() - platform-driver probe for a fake NFIT bus instance.
 *
 * Runs the acpi_nfit_ctl() unit tests on instance 0, allocates the
 * per-instance DCR/PM backing arrays, builds the NFIT via the instance's
 * setup() callback, registers it with the core via acpi_nfit_init(),
 * then replays setup with hotplug enabled and injects a 0x80 notify to
 * exercise FIT re-evaluation.
 *
 * NOTE(review): error-return lines, braces, and some allocation
 * continuation lines are missing from this extraction.
 */
2019 static int nfit_test_probe(struct platform_device *pdev)
2021 struct nvdimm_bus_descriptor *nd_desc;
2022 struct acpi_nfit_desc *acpi_desc;
2023 struct device *dev = &pdev->dev;
2024 struct nfit_test *nfit_test;
2025 struct nfit_mem *nfit_mem;
2026 union acpi_object *obj;
/* run the ioctl marshaling self-tests only on the first instance */
2029 if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
2030 rc = nfit_ctl_test(&pdev->dev);
2035 nfit_test = to_nfit_test(&pdev->dev);
/* allocate per-DIMM backing stores (labels, flush pages, DCRs) */
2038 if (nfit_test->num_dcr) {
2039 int num = nfit_test->num_dcr;
2041 nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
2043 nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
2045 nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
2047 nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
2049 nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
2051 nfit_test->label_dma = devm_kcalloc(dev, num,
2052 sizeof(dma_addr_t), GFP_KERNEL);
2053 nfit_test->dcr = devm_kcalloc(dev, num,
2054 sizeof(struct nfit_test_dcr *), GFP_KERNEL);
2055 nfit_test->dcr_dma = devm_kcalloc(dev, num,
2056 sizeof(dma_addr_t), GFP_KERNEL);
/* all-or-nothing check; devm frees any partial allocations on failure */
2057 if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
2058 && nfit_test->label_dma && nfit_test->dcr
2059 && nfit_test->dcr_dma && nfit_test->flush
2060 && nfit_test->flush_dma)
/* allocate per-PMEM-range (SPA set) backing stores */
2066 if (nfit_test->num_pm) {
2067 int num = nfit_test->num_pm;
2069 nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
2071 nfit_test->spa_set_dma = devm_kcalloc(dev, num,
2072 sizeof(dma_addr_t), GFP_KERNEL);
2073 if (nfit_test->spa_set && nfit_test->spa_set_dma)
2079 /* per-nfit specific alloc */
2080 if (nfit_test->alloc(nfit_test))
/* build the NFIT tables and hand them to the real ACPI NFIT core */
2083 nfit_test->setup(nfit_test);
2084 acpi_desc = &nfit_test->acpi_desc;
2085 acpi_nfit_desc_init(acpi_desc, &pdev->dev);
2086 acpi_desc->blk_do_io = nfit_test_blk_do_io;
2087 nd_desc = &acpi_desc->nd_desc;
2088 nd_desc->provider_name = NULL;
2089 nd_desc->module = THIS_MODULE;
2090 nd_desc->ndctl = nfit_test_ctl;
2092 rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
2093 nfit_test->nfit_size);
/* tie NFIT teardown to device lifetime */
2097 rc = devm_add_action_or_reset(&pdev->dev, acpi_nfit_shutdown, acpi_desc);
/* only bus 0 exercises the hotplug / notify path below */
2101 if (nfit_test->setup != nfit_test0_setup)
2104 nfit_test->setup_hotplug = 1;
2105 nfit_test->setup(nfit_test);
/* fake a _FIT buffer and deliver an ACPI 0x80 notification so the
 * driver re-reads the (now larger) NFIT */
2107 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
2110 obj->type = ACPI_TYPE_BUFFER;
2111 obj->buffer.length = nfit_test->nfit_size;
2112 obj->buffer.pointer = nfit_test->nfit_buf;
2113 *(nfit_test->_fit) = obj;
2114 __acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);
2116 /* associate dimm devices with nfit_mem data for notification testing */
2117 mutex_lock(&acpi_desc->init_mutex);
2118 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
2119 u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
2122 for (i = 0; i < NUM_DCR; i++)
2123 if (nfit_handle == handle[i])
2124 dev_set_drvdata(nfit_test->dimm_dev[i],
2127 mutex_unlock(&acpi_desc->init_mutex);
/* nfit_test_remove() - platform-driver remove hook; body (on lines
 * missing from this extraction) undoes probe-time state */
2132 static int nfit_test_remove(struct platform_device *pdev)
/*
 * nfit_test_release() - device release callback; frees the nfit_test
 * instance allocated in nfit_test_init() (the kfree and closing brace
 * are on lines missing from this extraction).
 */
2137 static void nfit_test_release(struct device *dev)
2139 struct nfit_test *nfit_test = to_nfit_test(dev);
/* id table matching the platform devices registered in nfit_test_init() */
2144 static const struct platform_device_id nfit_test_id[] = {
/* platform driver binding the fake NFIT bus instances to probe/remove */
2149 static struct platform_driver nfit_test_driver = {
2150 .probe = nfit_test_probe,
2151 .remove = nfit_test_remove,
2153 .name = KBUILD_MODNAME,
2155 .id_table = nfit_test_id,
/*
 * nfit_test_init() - module init: install the nfit_test ACPI shims,
 * create the workqueue and dimm class, register NUM_NFITS fake platform
 * devices (instance 0 = aliased BLK/PMEM topology, instance 1 = flat
 * PMEM), then register the platform driver.
 *
 * NOTE(review): error-path labels, braces, and the instance-selection
 * if/else lines are missing from this extraction; the tail (2229-2236)
 * is the unwind path for a failed driver registration.
 */
2158 static __init int nfit_test_init(void)
/* route ACPI lookups/_DSM evaluations through the test shims */
2162 nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
2164 nfit_wq = create_singlethread_workqueue("nfit");
2168 nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
2169 if (IS_ERR(nfit_test_dimm)) {
2170 rc = PTR_ERR(nfit_test_dimm);
2174 for (i = 0; i < NUM_NFITS; i++) {
2175 struct nfit_test *nfit_test;
2176 struct platform_device *pdev;
2178 nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
2183 INIT_LIST_HEAD(&nfit_test->resources);
2184 badrange_init(&nfit_test->badrange);
/* instance 0: full topology from the file-header diagram */
2187 nfit_test->num_pm = NUM_PM;
2188 nfit_test->dcr_idx = 0;
2189 nfit_test->num_dcr = NUM_DCR;
2190 nfit_test->alloc = nfit_test0_alloc;
2191 nfit_test->setup = nfit_test0_setup;
/* instance 1: flat PMEM bus with two DIMMs */
2194 nfit_test->num_pm = 2;
2195 nfit_test->dcr_idx = NUM_DCR;
2196 nfit_test->num_dcr = 2;
2197 nfit_test->alloc = nfit_test1_alloc;
2198 nfit_test->setup = nfit_test1_setup;
2204 pdev = &nfit_test->pdev;
2205 pdev->name = KBUILD_MODNAME;
2207 pdev->dev.release = nfit_test_release;
2208 rc = platform_device_register(pdev);
/* on registration failure the embedded device ref must be dropped */
2210 put_device(&pdev->dev);
/* hold a reference for the instances[] array */
2213 get_device(&pdev->dev);
2215 rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2219 instances[i] = nfit_test;
2220 INIT_WORK(&nfit_test->work, uc_error_notify);
2223 rc = platform_driver_register(&nfit_test_driver);
/* unwind path: tear down everything registered above */
2229 destroy_workqueue(nfit_wq);
2230 for (i = 0; i < NUM_NFITS; i++)
2232 platform_device_unregister(&instances[i]->pdev);
2233 nfit_test_teardown();
2234 for (i = 0; i < NUM_NFITS; i++)
2236 put_device(&instances[i]->pdev.dev);
/*
 * nfit_test_exit() - module exit: drain and destroy the workqueue,
 * unregister all fake platform devices and the driver, detach the ACPI
 * shims, drop the instance device references, and remove the dimm class.
 * Ordering mirrors the reverse of nfit_test_init().
 */
2241 static __exit void nfit_test_exit(void)
2245 flush_workqueue(nfit_wq);
2246 destroy_workqueue(nfit_wq);
2247 for (i = 0; i < NUM_NFITS; i++)
2248 platform_device_unregister(&instances[i]->pdev);
2249 platform_driver_unregister(&nfit_test_driver);
2250 nfit_test_teardown();
/* references taken by get_device() during init */
2252 for (i = 0; i < NUM_NFITS; i++)
2253 put_device(&instances[i]->pdev.dev);
2254 class_destroy(nfit_test_dimm);
/* standard module entry/exit and metadata */
2257 module_init(nfit_test_init);
2258 module_exit(nfit_test_exit);
2259 MODULE_LICENSE("GPL v2");
2260 MODULE_AUTHOR("Intel Corporation");