Merge branch 'for-4.16/nfit' into libnvdimm-for-next
author	Ross Zwisler <ross.zwisler@linux.intel.com>
Sat, 3 Feb 2018 07:26:26 +0000 (00:26 -0700)
committer	Ross Zwisler <ross.zwisler@linux.intel.com>
Sat, 3 Feb 2018 07:26:26 +0000 (00:26 -0700)
drivers/acpi/nfit/core.c
drivers/nvdimm/pmem.c
mm/page_alloc.c
mm/sparse.c
tools/testing/nvdimm/test/nfit.c

diff --combined drivers/acpi/nfit/core.c
index b28ce440a06f0879df027e9ea761ddc9adfbb2ab,aa9d00db763a2383663e36f20a3a8509c08907da..bbe48ad20886c8530fe525ffe9f35725d1df1ddc
@@@ -838,6 -838,18 +838,18 @@@ static bool add_flush(struct acpi_nfit_
        return true;
  }
  
+ static bool add_platform_cap(struct acpi_nfit_desc *acpi_desc,
+               struct acpi_nfit_capabilities *pcap)
+ {
+       struct device *dev = acpi_desc->dev;
+       u32 mask;
+
+       mask = (1 << (pcap->highest_capability + 1)) - 1;
+       acpi_desc->platform_cap = pcap->capabilities & mask;
+       dev_dbg(dev, "%s: cap: %#x\n", __func__, acpi_desc->platform_cap);
+       return true;
+ }
+
  static void *add_table(struct acpi_nfit_desc *acpi_desc,
                struct nfit_table_prev *prev, void *table, const void *end)
  {
        case ACPI_NFIT_TYPE_SMBIOS:
                dev_dbg(dev, "%s: smbios\n", __func__);
                break;
+       case ACPI_NFIT_TYPE_CAPABILITIES:
+               if (!add_platform_cap(acpi_desc, table))
+                       return err;
+               break;
        default:
                dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
                break;
@@@ -1867,9 -1883,6 +1883,9 @@@ static int acpi_nfit_register_dimms(str
                struct kernfs_node *nfit_kernfs;
  
                nvdimm = nfit_mem->nvdimm;
 +              if (!nvdimm)
 +                      continue;
 +
                nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
                if (nfit_kernfs)
                        nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
@@@ -2659,6 -2672,12 +2675,12 @@@ static int acpi_nfit_register_region(st
        else
                ndr_desc->numa_node = NUMA_NO_NODE;
  
+       if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
+               set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
+
+       if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
+               set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
+
        list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
                struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
                struct nd_mapping_desc *mapping;
@@@ -3467,6 -3486,7 +3489,7 @@@ static __init int nfit_init(void
        BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);
+       BUILD_BUG_ON(sizeof(struct acpi_nfit_capabilities) != 16);
  
        guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
        guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
diff --combined drivers/nvdimm/pmem.c
index cf074b1ce219e3f2d7c1e134beeae3fa37689e1a,8aa542398db4799ebecf8cca6ea5378281bc9abb..10041ac4032c038db09109b8f757a8719d26f902
@@@ -35,6 -35,7 +35,7 @@@
  #include "pmem.h"
  #include "pfn.h"
  #include "nd.h"
+ #include "nd-core.h"
  
  static struct device *to_dev(struct pmem_device *pmem)
  {
@@@ -298,34 -299,34 +299,34 @@@ static int pmem_attach_disk(struct devi
  {
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
 -      struct vmem_altmap __altmap, *altmap = NULL;
        int nid = dev_to_node(dev), fua, wbc;
        struct resource *res = &nsio->res;
 +      struct resource bb_res;
        struct nd_pfn *nd_pfn = NULL;
        struct dax_device *dax_dev;
        struct nd_pfn_sb *pfn_sb;
        struct pmem_device *pmem;
 -      struct resource pfn_res;
        struct request_queue *q;
        struct device *gendev;
        struct gendisk *disk;
        void *addr;
 +      int rc;
 +
 +      pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
 +      if (!pmem)
 +              return -ENOMEM;
  
        /* while nsio_rw_bytes is active, parse a pfn info block if present */
        if (is_nd_pfn(dev)) {
                nd_pfn = to_nd_pfn(dev);
 -              altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
 -              if (IS_ERR(altmap))
 -                      return PTR_ERR(altmap);
 +              rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
 +              if (rc)
 +                      return rc;
        }
  
        /* we're attaching a block device, disable raw namespace access */
        devm_nsio_disable(dev, nsio);
  
 -      pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
 -      if (!pmem)
 -              return -ENOMEM;
 -
        dev_set_drvdata(dev, pmem);
        pmem->phys_addr = res->start;
        pmem->size = resource_size(res);
                dev_warn(dev, "unable to guarantee persistence of writes\n");
                fua = 0;
        }
-       wbc = nvdimm_has_cache(nd_region);
+       wbc = nvdimm_has_cache(nd_region) &&
+               !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
  
        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(&ndns->dev))) {
                return -ENOMEM;
  
        pmem->pfn_flags = PFN_DEV;
 +      pmem->pgmap.ref = &q->q_usage_counter;
        if (is_nd_pfn(dev)) {
 -              addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
 -                              altmap);
 +              addr = devm_memremap_pages(dev, &pmem->pgmap);
                pfn_sb = nd_pfn->pfn_sb;
                pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
 -              pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
 +              pmem->pfn_pad = resource_size(res) -
 +                      resource_size(&pmem->pgmap.res);
                pmem->pfn_flags |= PFN_MAP;
 -              res = &pfn_res; /* for badblocks populate */
 -              res->start += pmem->data_offset;
 +              memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
 +              bb_res.start += pmem->data_offset;
        } else if (pmem_should_map_pages(dev)) {
 -              addr = devm_memremap_pages(dev, &nsio->res,
 -                              &q->q_usage_counter, NULL);
 +              memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
 +              pmem->pgmap.altmap_valid = false;
 +              addr = devm_memremap_pages(dev, &pmem->pgmap);
                pmem->pfn_flags |= PFN_MAP;
 +              memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
        } else
                addr = devm_memremap(dev, pmem->phys_addr,
                                pmem->size, ARCH_MEMREMAP_PMEM);
                        / 512);
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;
 -      nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
 +      nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
        disk->bb = &pmem->bb;
  
        dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
diff --combined mm/page_alloc.c
index 1748dd4a4b1bc064dcb7399888b32f0ea36cf240,76c9688b6a0a75fc1c28e90920a8c9498c5e6d06..2bb7f163baca178a2295e60401e8f3f9730d44a3
@@@ -5314,9 -5314,9 +5314,9 @@@ void __ref build_all_zonelists(pg_data_
   * done. Non-atomic initialization, single-pass.
   */
  void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 -              unsigned long start_pfn, enum memmap_context context)
 +              unsigned long start_pfn, enum memmap_context context,
 +              struct vmem_altmap *altmap)
  {
 -      struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
        unsigned long end_pfn = start_pfn + size;
        pg_data_t *pgdat = NODE_DATA(nid);
        unsigned long pfn;
@@@ -5417,7 -5417,7 +5417,7 @@@ static void __meminit zone_init_free_li
  
  #ifndef __HAVE_ARCH_MEMMAP_INIT
  #define memmap_init(size, nid, zone, start_pfn) \
 -      memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 +      memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
  #endif
  
  static int zone_batchsize(struct zone *zone)
@@@ -6260,6 -6260,8 +6260,8 @@@ void __paginginit zero_resv_unavail(voi
        pgcnt = 0;
        for_each_resv_unavail_range(i, &start, &end) {
                for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
+                       if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
+                               continue;
                        mm_zero_struct_page(pfn_to_page(pfn));
                        pgcnt++;
                }
diff --combined mm/sparse.c
index 06130c13dc99221ba421ff3483ba24346cc712b4,2609aba121e89cc5c8b656f4ef3539545c434091..2583174b1d625692f8f4235b5353a2e1308f7e98
@@@ -211,7 -211,7 +211,7 @@@ void __init memory_present(int nid, uns
        if (unlikely(!mem_section)) {
                unsigned long size, align;
  
-               size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
+               size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
                align = 1 << (INTERNODE_CACHE_SHIFT);
                mem_section = memblock_virt_alloc(size, align);
        }
@@@ -417,8 -417,7 +417,8 @@@ static void __init sparse_early_usemaps
  }
  
  #ifndef CONFIG_SPARSEMEM_VMEMMAP
 -struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 +struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
 +              struct vmem_altmap *altmap)
  {
        struct page *map;
        unsigned long size;
@@@ -473,7 -472,7 +473,7 @@@ void __init sparse_mem_maps_populate_no
  
                if (!present_section_nr(pnum))
                        continue;
 -              map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
 +              map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
@@@ -501,7 -500,7 +501,7 @@@ static struct page __init *sparse_early
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);
  
 -      map = sparse_mem_map_populate(pnum, nid);
 +      map = sparse_mem_map_populate(pnum, nid, NULL);
        if (map)
                return map;
  
@@@ -679,19 -678,17 +679,19 @@@ void offline_mem_sections(unsigned lon
  #endif
  
  #ifdef CONFIG_SPARSEMEM_VMEMMAP
 -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 +static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
 +              struct vmem_altmap *altmap)
  {
        /* This will make the necessary allocations eventually. */
 -      return sparse_mem_map_populate(pnum, nid);
 +      return sparse_mem_map_populate(pnum, nid, altmap);
  }
 -static void __kfree_section_memmap(struct page *memmap)
 +static void __kfree_section_memmap(struct page *memmap,
 +              struct vmem_altmap *altmap)
  {
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
  
 -      vmemmap_free(start, end);
 +      vmemmap_free(start, end, altmap);
  }
  #ifdef CONFIG_MEMORY_HOTREMOVE
  static void free_map_bootmem(struct page *memmap)
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
  
 -      vmemmap_free(start, end);
 +      vmemmap_free(start, end, NULL);
  }
  #endif /* CONFIG_MEMORY_HOTREMOVE */
  #else
@@@ -724,14 -721,12 +724,14 @@@ got_map_ptr
        return ret;
  }
  
 -static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 +static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
 +              struct vmem_altmap *altmap)
  {
        return __kmalloc_section_memmap();
  }
  
 -static void __kfree_section_memmap(struct page *memmap)
 +static void __kfree_section_memmap(struct page *memmap,
 +              struct vmem_altmap *altmap)
  {
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
@@@ -778,8 -773,7 +778,8 @@@ static void free_map_bootmem(struct pag
   * set.  If this is <=0, then that means that the passed-in
   * map was not consumed and must be freed.
   */
 -int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn)
 +int __meminit sparse_add_one_section(struct pglist_data *pgdat,
 +              unsigned long start_pfn, struct vmem_altmap *altmap)
  {
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct mem_section *ms;
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
 -      memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
 +      memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
 -              __kfree_section_memmap(memmap);
 +              __kfree_section_memmap(memmap, altmap);
                return -ENOMEM;
        }
  
@@@ -822,7 -816,7 +822,7 @@@ out
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
 -              __kfree_section_memmap(memmap);
 +              __kfree_section_memmap(memmap, altmap);
        }
        return ret;
  }
@@@ -849,8 -843,7 +849,8 @@@ static inline void clear_hwpoisoned_pag
  }
  #endif
  
 -static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 +static void free_section_usemap(struct page *memmap, unsigned long *usemap,
 +              struct vmem_altmap *altmap)
  {
        struct page *usemap_page;
  
        if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
                kfree(usemap);
                if (memmap)
 -                      __kfree_section_memmap(memmap);
 +                      __kfree_section_memmap(memmap, altmap);
                return;
        }
  
  }
  
  void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 -              unsigned long map_offset)
 +              unsigned long map_offset, struct vmem_altmap *altmap)
  {
        struct page *memmap = NULL;
        unsigned long *usemap = NULL, flags;
  
        clear_hwpoisoned_pages(memmap + map_offset,
                        PAGES_PER_SECTION - map_offset);
 -      free_section_usemap(memmap, usemap);
 +      free_section_usemap(memmap, usemap, altmap);
  }
  #endif /* CONFIG_MEMORY_HOTREMOVE */
  #endif /* CONFIG_MEMORY_HOTPLUG */
diff --combined tools/testing/nvdimm/test/nfit.c
index 450b4cbba6b6813b444dffdb2ebc93831e33027d,de1373a7ed4f0412cbad07d46922340686bf23e5..620fa78b3b1b33ab7e87f20e076bc6323b68e41f
@@@ -27,7 -27,6 +27,7 @@@
  #include <nfit.h>
  #include <nd.h>
  #include "nfit_test.h"
 +#include "../watermark.h"
  
  /*
   * Generate an NFIT table to describe the following topology:
@@@ -138,14 -137,6 +138,14 @@@ static u32 handle[] = 
  
  static unsigned long dimm_fail_cmd_flags[NUM_DCR];
  
 +struct nfit_test_fw {
 +      enum intel_fw_update_state state;
 +      u32 context;
 +      u64 version;
 +      u32 size_received;
 +      u64 end_time;
 +};
 +
  struct nfit_test {
        struct acpi_nfit_desc acpi_desc;
        struct platform_device pdev;
                spinlock_t lock;
        } ars_state;
        struct device *dimm_dev[NUM_DCR];
 +      struct nd_intel_smart *smart;
 +      struct nd_intel_smart_threshold *smart_threshold;
        struct badrange badrange;
        struct work_struct work;
 +      struct nfit_test_fw *fw;
  };
  
  static struct workqueue_struct *nfit_wq;
@@@ -193,226 -181,6 +193,226 @@@ static struct nfit_test *to_nfit_test(s
        return container_of(pdev, struct nfit_test, pdev);
  }
  
 +static int nd_intel_test_get_fw_info(struct nfit_test *t,
 +              struct nd_intel_fw_info *nd_cmd, unsigned int buf_len,
 +              int idx)
 +{
 +      struct device *dev = &t->pdev.dev;
 +      struct nfit_test_fw *fw = &t->fw[idx];
 +
 +      dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p, buf_len: %u, idx: %d\n",
 +                      __func__, t, nd_cmd, buf_len, idx);
 +
 +      if (buf_len < sizeof(*nd_cmd))
 +              return -EINVAL;
 +
 +      nd_cmd->status = 0;
 +      nd_cmd->storage_size = INTEL_FW_STORAGE_SIZE;
 +      nd_cmd->max_send_len = INTEL_FW_MAX_SEND_LEN;
 +      nd_cmd->query_interval = INTEL_FW_QUERY_INTERVAL;
 +      nd_cmd->max_query_time = INTEL_FW_QUERY_MAX_TIME;
 +      nd_cmd->update_cap = 0;
 +      nd_cmd->fis_version = INTEL_FW_FIS_VERSION;
 +      nd_cmd->run_version = 0;
 +      nd_cmd->updated_version = fw->version;
 +
 +      return 0;
 +}
 +
 +static int nd_intel_test_start_update(struct nfit_test *t,
 +              struct nd_intel_fw_start *nd_cmd, unsigned int buf_len,
 +              int idx)
 +{
 +      struct device *dev = &t->pdev.dev;
 +      struct nfit_test_fw *fw = &t->fw[idx];
 +
 +      dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
 +                      __func__, t, nd_cmd, buf_len, idx);
 +
 +      if (buf_len < sizeof(*nd_cmd))
 +              return -EINVAL;
 +
 +      if (fw->state != FW_STATE_NEW) {
 +              /* extended status, FW update in progress */
 +              nd_cmd->status = 0x10007;
 +              return 0;
 +      }
 +
 +      fw->state = FW_STATE_IN_PROGRESS;
 +      fw->context++;
 +      fw->size_received = 0;
 +      nd_cmd->status = 0;
 +      nd_cmd->context = fw->context;
 +
 +      dev_dbg(dev, "%s: context issued: %#x\n", __func__, nd_cmd->context);
 +
 +      return 0;
 +}
 +
 +static int nd_intel_test_send_data(struct nfit_test *t,
 +              struct nd_intel_fw_send_data *nd_cmd, unsigned int buf_len,
 +              int idx)
 +{
 +      struct device *dev = &t->pdev.dev;
 +      struct nfit_test_fw *fw = &t->fw[idx];
 +      u32 *status = (u32 *)&nd_cmd->data[nd_cmd->length];
 +
 +      dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
 +                      __func__, t, nd_cmd, buf_len, idx);
 +
 +      if (buf_len < sizeof(*nd_cmd))
 +              return -EINVAL;
 +
 +
 +      dev_dbg(dev, "%s: cmd->status: %#x\n", __func__, *status);
 +      dev_dbg(dev, "%s: cmd->data[0]: %#x\n", __func__, nd_cmd->data[0]);
 +      dev_dbg(dev, "%s: cmd->data[%u]: %#x\n", __func__, nd_cmd->length-1,
 +                      nd_cmd->data[nd_cmd->length-1]);
 +
 +      if (fw->state != FW_STATE_IN_PROGRESS) {
 +              dev_dbg(dev, "%s: not in IN_PROGRESS state\n", __func__);
 +              *status = 0x5;
 +              return 0;
 +      }
 +
 +      if (nd_cmd->context != fw->context) {
 +              dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
 +                              __func__, nd_cmd->context, fw->context);
 +              *status = 0x10007;
 +              return 0;
 +      }
 +
 +      /*
 +       * check offset + len > size of fw storage
 +       * check length is > max send length
 +       */
 +      if (nd_cmd->offset + nd_cmd->length > INTEL_FW_STORAGE_SIZE ||
 +                      nd_cmd->length > INTEL_FW_MAX_SEND_LEN) {
 +              *status = 0x3;
 +              dev_dbg(dev, "%s: buffer boundary violation\n", __func__);
 +              return 0;
 +      }
 +
 +      fw->size_received += nd_cmd->length;
 +      dev_dbg(dev, "%s: copying %u bytes, %u bytes so far\n",
 +                      __func__, nd_cmd->length, fw->size_received);
 +      *status = 0;
 +      return 0;
 +}
 +
 +static int nd_intel_test_finish_fw(struct nfit_test *t,
 +              struct nd_intel_fw_finish_update *nd_cmd,
 +              unsigned int buf_len, int idx)
 +{
 +      struct device *dev = &t->pdev.dev;
 +      struct nfit_test_fw *fw = &t->fw[idx];
 +
 +      dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
 +                      __func__, t, nd_cmd, buf_len, idx);
 +
 +      if (fw->state == FW_STATE_UPDATED) {
 +              /* update already done, need cold boot */
 +              nd_cmd->status = 0x20007;
 +              return 0;
 +      }
 +
 +      dev_dbg(dev, "%s: context: %#x  ctrl_flags: %#x\n",
 +                      __func__, nd_cmd->context, nd_cmd->ctrl_flags);
 +
 +      switch (nd_cmd->ctrl_flags) {
 +      case 0: /* finish */
 +              if (nd_cmd->context != fw->context) {
 +                      dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
 +                                      __func__, nd_cmd->context,
 +                                      fw->context);
 +                      nd_cmd->status = 0x10007;
 +                      return 0;
 +              }
 +              nd_cmd->status = 0;
 +              fw->state = FW_STATE_VERIFY;
 +              /* set 1 second of time for firmware "update" */
 +              fw->end_time = jiffies + HZ;
 +              break;
 +
 +      case 1: /* abort */
 +              fw->size_received = 0;
 +              /* successfully aborted status */
 +              nd_cmd->status = 0x40007;
 +              fw->state = FW_STATE_NEW;
 +              dev_dbg(dev, "%s: abort successful\n", __func__);
 +              break;
 +
 +      default: /* bad control flag */
 +              dev_warn(dev, "%s: unknown control flag: %#x\n",
 +                              __func__, nd_cmd->ctrl_flags);
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
 +
 +static int nd_intel_test_finish_query(struct nfit_test *t,
 +              struct nd_intel_fw_finish_query *nd_cmd,
 +              unsigned int buf_len, int idx)
 +{
 +      struct device *dev = &t->pdev.dev;
 +      struct nfit_test_fw *fw = &t->fw[idx];
 +
 +      dev_dbg(dev, "%s(nfit_test: %p nd_cmd: %p buf_len: %u idx: %d)\n",
 +                      __func__, t, nd_cmd, buf_len, idx);
 +
 +      if (buf_len < sizeof(*nd_cmd))
 +              return -EINVAL;
 +
 +      if (nd_cmd->context != fw->context) {
 +              dev_dbg(dev, "%s: incorrect context: in: %#x correct: %#x\n",
 +                              __func__, nd_cmd->context, fw->context);
 +              nd_cmd->status = 0x10007;
 +              return 0;
 +      }
 +
 +      dev_dbg(dev, "%s context: %#x\n", __func__, nd_cmd->context);
 +
 +      switch (fw->state) {
 +      case FW_STATE_NEW:
 +              nd_cmd->updated_fw_rev = 0;
 +              nd_cmd->status = 0;
 +              dev_dbg(dev, "%s: new state\n", __func__);
 +              break;
 +
 +      case FW_STATE_IN_PROGRESS:
 +              /* sequencing error */
 +              nd_cmd->status = 0x40007;
 +              nd_cmd->updated_fw_rev = 0;
 +              dev_dbg(dev, "%s: sequence error\n", __func__);
 +              break;
 +
 +      case FW_STATE_VERIFY:
 +              if (time_is_after_jiffies64(fw->end_time)) {
 +                      nd_cmd->updated_fw_rev = 0;
 +                      nd_cmd->status = 0x20007;
 +                      dev_dbg(dev, "%s: still verifying\n", __func__);
 +                      break;
 +              }
 +
 +              dev_dbg(dev, "%s: transition out verify\n", __func__);
 +              fw->state = FW_STATE_UPDATED;
 +              /* we are going to fall through if it's "done" */
 +      case FW_STATE_UPDATED:
 +              nd_cmd->status = 0;
 +              /* bogus test version */
 +              fw->version = nd_cmd->updated_fw_rev =
 +                      INTEL_FW_FAKE_VERSION;
 +              dev_dbg(dev, "%s: updated\n", __func__);
 +              break;
 +
 +      default: /* we should never get here */
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
 +
  static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
                unsigned int buf_len)
  {
@@@ -672,66 -440,39 +672,66 @@@ static int nfit_test_cmd_translate_spa(
        return 0;
  }
  
 -static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
 +static int nfit_test_cmd_smart(struct nd_intel_smart *smart, unsigned int buf_len,
 +              struct nd_intel_smart *smart_data)
  {
 -      static const struct nd_smart_payload smart_data = {
 -              .flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
 -                      | ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
 -                      | ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
 -              .health = ND_SMART_NON_CRITICAL_HEALTH,
 -              .temperature = 23 * 16,
 -              .spares = 75,
 -              .alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
 -              .life_used = 5,
 -              .shutdown_state = 0,
 -              .vendor_size = 0,
 -      };
 -
        if (buf_len < sizeof(*smart))
                return -EINVAL;
 -      memcpy(smart->data, &smart_data, sizeof(smart_data));
 +      memcpy(smart, smart_data, sizeof(*smart));
        return 0;
  }
  
 -static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
 -              unsigned int buf_len)
 +static int nfit_test_cmd_smart_threshold(
 +              struct nd_intel_smart_threshold *out,
 +              unsigned int buf_len,
 +              struct nd_intel_smart_threshold *smart_t)
  {
 -      static const struct nd_smart_threshold_payload smart_t_data = {
 -              .alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
 -              .temperature = 40 * 16,
 -              .spares = 5,
 -      };
 -
        if (buf_len < sizeof(*smart_t))
                return -EINVAL;
 -      memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
 +      memcpy(out, smart_t, sizeof(*smart_t));
 +      return 0;
 +}
 +
 +static void smart_notify(struct device *bus_dev,
 +              struct device *dimm_dev, struct nd_intel_smart *smart,
 +              struct nd_intel_smart_threshold *thresh)
 +{
 +      dev_dbg(dimm_dev, "%s: alarm: %#x spares: %d (%d) mtemp: %d (%d) ctemp: %d (%d)\n",
 +                      __func__, thresh->alarm_control, thresh->spares,
 +                      smart->spares, thresh->media_temperature,
 +                      smart->media_temperature, thresh->ctrl_temperature,
 +                      smart->ctrl_temperature);
 +      if (((thresh->alarm_control & ND_INTEL_SMART_SPARE_TRIP)
 +                              && smart->spares
 +                              <= thresh->spares)
 +                      || ((thresh->alarm_control & ND_INTEL_SMART_TEMP_TRIP)
 +                              && smart->media_temperature
 +                              >= thresh->media_temperature)
 +                      || ((thresh->alarm_control & ND_INTEL_SMART_CTEMP_TRIP)
 +                              && smart->ctrl_temperature
 +                              >= thresh->ctrl_temperature)) {
 +              device_lock(bus_dev);
 +              __acpi_nvdimm_notify(dimm_dev, 0x81);
 +              device_unlock(bus_dev);
 +      }
 +}
 +
 +static int nfit_test_cmd_smart_set_threshold(
 +              struct nd_intel_smart_set_threshold *in,
 +              unsigned int buf_len,
 +              struct nd_intel_smart_threshold *thresh,
 +              struct nd_intel_smart *smart,
 +              struct device *bus_dev, struct device *dimm_dev)
 +{
 +      unsigned int size;
 +
 +      size = sizeof(*in) - 4;
 +      if (buf_len < size)
 +              return -EINVAL;
 +      memcpy(thresh->data, in, size);
 +      in->status = 0;
 +      smart_notify(bus_dev, dimm_dev, smart, thresh);
 +
        return 0;
  }
  
@@@ -822,52 -563,6 +822,52 @@@ static int nfit_test_cmd_ars_inject_sta
        return 0;
  }
  
 +static int nd_intel_test_cmd_set_lss_status(struct nfit_test *t,
 +              struct nd_intel_lss *nd_cmd, unsigned int buf_len)
 +{
 +      struct device *dev = &t->pdev.dev;
 +
 +      if (buf_len < sizeof(*nd_cmd))
 +              return -EINVAL;
 +
 +      switch (nd_cmd->enable) {
 +      case 0:
 +              nd_cmd->status = 0;
 +              dev_dbg(dev, "%s: Latch System Shutdown Status disabled\n",
 +                              __func__);
 +              break;
 +      case 1:
 +              nd_cmd->status = 0;
 +              dev_dbg(dev, "%s: Latch System Shutdown Status enabled\n",
 +                              __func__);
 +              break;
 +      default:
 +              dev_warn(dev, "Unknown enable value: %#x\n", nd_cmd->enable);
 +              nd_cmd->status = 0x3;
 +              break;
 +      }
 +
 +
 +      return 0;
 +}
 +
 +static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func)
 +{
 +      int i;
 +
 +      /* lookup per-dimm data */
 +      for (i = 0; i < ARRAY_SIZE(handle); i++)
 +              if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i])
 +                      break;
 +      if (i >= ARRAY_SIZE(handle))
 +              return -ENXIO;
 +
 +      if ((1 << func) & dimm_fail_cmd_flags[i])
 +              return -EIO;
 +
 +      return i;
 +}
 +
  static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd, void *buf,
                unsigned int buf_len, int *cmd_rc)
                        func = call_pkg->nd_command;
                        if (call_pkg->nd_family != nfit_mem->family)
                                return -ENOTTY;
 +
 +                      i = get_dimm(nfit_mem, func);
 +                      if (i < 0)
 +                              return i;
 +
 +                      switch (func) {
 +                      case ND_INTEL_ENABLE_LSS_STATUS:
 +                              return nd_intel_test_cmd_set_lss_status(t,
 +                                              buf, buf_len);
 +                      case ND_INTEL_FW_GET_INFO:
 +                              return nd_intel_test_get_fw_info(t, buf,
 +                                              buf_len, i - t->dcr_idx);
 +                      case ND_INTEL_FW_START_UPDATE:
 +                              return nd_intel_test_start_update(t, buf,
 +                                              buf_len, i - t->dcr_idx);
 +                      case ND_INTEL_FW_SEND_DATA:
 +                              return nd_intel_test_send_data(t, buf,
 +                                              buf_len, i - t->dcr_idx);
 +                      case ND_INTEL_FW_FINISH_UPDATE:
 +                              return nd_intel_test_finish_fw(t, buf,
 +                                              buf_len, i - t->dcr_idx);
 +                      case ND_INTEL_FW_FINISH_QUERY:
 +                              return nd_intel_test_finish_query(t, buf,
 +                                              buf_len, i - t->dcr_idx);
 +                      case ND_INTEL_SMART:
 +                              return nfit_test_cmd_smart(buf, buf_len,
 +                                              &t->smart[i - t->dcr_idx]);
 +                      case ND_INTEL_SMART_THRESHOLD:
 +                              return nfit_test_cmd_smart_threshold(buf,
 +                                              buf_len,
 +                                              &t->smart_threshold[i -
 +                                                      t->dcr_idx]);
 +                      case ND_INTEL_SMART_SET_THRESHOLD:
 +                              return nfit_test_cmd_smart_set_threshold(buf,
 +                                              buf_len,
 +                                              &t->smart_threshold[i -
 +                                                      t->dcr_idx],
 +                                              &t->smart[i - t->dcr_idx],
 +                                              &t->pdev.dev, t->dimm_dev[i]);
 +                      default:
 +                              return -ENOTTY;
 +                      }
                }
  
                if (!test_bit(cmd, &cmd_mask)
                                || !test_bit(func, &nfit_mem->dsm_mask))
                        return -ENOTTY;
  
 -              /* lookup label space for the given dimm */
 -              for (i = 0; i < ARRAY_SIZE(handle); i++)
 -                      if (__to_nfit_memdev(nfit_mem)->device_handle ==
 -                                      handle[i])
 -                              break;
 -              if (i >= ARRAY_SIZE(handle))
 -                      return -ENXIO;
 -
 -              if ((1 << func) & dimm_fail_cmd_flags[i])
 -                      return -EIO;
 +              i = get_dimm(nfit_mem, func);
 +              if (i < 0)
 +                      return i;
  
                switch (func) {
                case ND_CMD_GET_CONFIG_SIZE:
                        rc = nfit_test_cmd_set_config_data(buf, buf_len,
                                t->label[i - t->dcr_idx]);
                        break;
 -              case ND_CMD_SMART:
 -                      rc = nfit_test_cmd_smart(buf, buf_len);
 -                      break;
 -              case ND_CMD_SMART_THRESHOLD:
 -                      rc = nfit_test_cmd_smart_threshold(buf, buf_len);
 -                      device_lock(&t->pdev.dev);
 -                      __acpi_nvdimm_notify(t->dimm_dev[i], 0x81);
 -                      device_unlock(&t->pdev.dev);
 -                      break;
                default:
                        return -ENOTTY;
                }
@@@ -1203,44 -872,6 +1203,44 @@@ static const struct attribute_group *nf
        NULL,
  };
  
 +static void smart_init(struct nfit_test *t)
 +{
 +      int i;
 +      const struct nd_intel_smart_threshold smart_t_data = {
 +              .alarm_control = ND_INTEL_SMART_SPARE_TRIP
 +                      | ND_INTEL_SMART_TEMP_TRIP,
 +              .media_temperature = 40 * 16,
 +              .ctrl_temperature = 30 * 16,
 +              .spares = 5,
 +      };
 +      const struct nd_intel_smart smart_data = {
 +              .flags = ND_INTEL_SMART_HEALTH_VALID
 +                      | ND_INTEL_SMART_SPARES_VALID
 +                      | ND_INTEL_SMART_ALARM_VALID
 +                      | ND_INTEL_SMART_USED_VALID
 +                      | ND_INTEL_SMART_SHUTDOWN_VALID
 +                      | ND_INTEL_SMART_MTEMP_VALID,
 +              .health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
 +              .media_temperature = 23 * 16,
 +              .ctrl_temperature = 30 * 16,
 +              .pmic_temperature = 40 * 16,
 +              .spares = 75,
 +              .alarm_flags = ND_INTEL_SMART_SPARE_TRIP
 +                      | ND_INTEL_SMART_TEMP_TRIP,
 +              .ait_status = 1,
 +              .life_used = 5,
 +              .shutdown_state = 0,
 +              .vendor_size = 0,
 +              .shutdown_count = 100,
 +      };
 +
 +      for (i = 0; i < t->num_dcr; i++) {
 +              memcpy(&t->smart[i], &smart_data, sizeof(smart_data));
 +              memcpy(&t->smart_threshold[i], &smart_t_data,
 +                              sizeof(smart_t_data));
 +      }
 +}
 +
  static int nfit_test0_alloc(struct nfit_test *t)
  {
        size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
                                        window_size) * NUM_DCR
                        + sizeof(struct acpi_nfit_data_region) * NUM_BDW
                        + (sizeof(struct acpi_nfit_flush_address)
-                                       + sizeof(u64) * NUM_HINTS) * NUM_DCR;
+                                       + sizeof(u64) * NUM_HINTS) * NUM_DCR
+                       + sizeof(struct acpi_nfit_capabilities);
        int i;
  
        t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
                        return -ENOMEM;
        }
  
 +      smart_init(t);
        return ars_state_init(&t->pdev.dev, &t->ars_state);
  }
  
@@@ -1339,7 -970,6 +1340,7 @@@ static int nfit_test1_alloc(struct nfit
        if (!t->spa_set[1])
                return -ENOMEM;
  
 +      smart_init(t);
        return ars_state_init(&t->pdev.dev, &t->ars_state);
  }
  
@@@ -1364,6 -994,7 +1365,7 @@@ static void nfit_test0_setup(struct nfi
        struct acpi_nfit_control_region *dcr;
        struct acpi_nfit_data_region *bdw;
        struct acpi_nfit_flush_address *flush;
+       struct acpi_nfit_capabilities *pcap;
        unsigned int offset, i;
  
        /*
        for (i = 0; i < NUM_HINTS; i++)
                flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);
  
+       /* platform capabilities */
+       pcap = nfit_buf + offset + flush_hint_size * 4;
+       pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
+       pcap->header.length = sizeof(*pcap);
+       pcap->highest_capability = 1;
+       pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH |
+               ACPI_NFIT_CAPABILITY_MEM_FLUSH;
        if (t->setup_hotplug) {
-               offset = offset + flush_hint_size * 4;
+               offset = offset + flush_hint_size * 4 + sizeof(*pcap);
                /* dcr-descriptor4: blk */
                dcr = nfit_buf + offset;
                dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
        set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
        set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
        set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
 -      set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
 +      set_bit(ND_INTEL_SMART, &acpi_desc->dimm_cmd_force_en);
 +      set_bit(ND_INTEL_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
 +      set_bit(ND_INTEL_SMART_SET_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
        set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
        set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
        set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
        set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
        set_bit(ND_CMD_CALL, &acpi_desc->bus_cmd_force_en);
 -      set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
        set_bit(NFIT_CMD_TRANSLATE_SPA, &acpi_desc->bus_nfit_cmd_force_en);
        set_bit(NFIT_CMD_ARS_INJECT_SET, &acpi_desc->bus_nfit_cmd_force_en);
        set_bit(NFIT_CMD_ARS_INJECT_CLEAR, &acpi_desc->bus_nfit_cmd_force_en);
        set_bit(NFIT_CMD_ARS_INJECT_GET, &acpi_desc->bus_nfit_cmd_force_en);
 +      set_bit(ND_INTEL_FW_GET_INFO, &acpi_desc->dimm_cmd_force_en);
 +      set_bit(ND_INTEL_FW_START_UPDATE, &acpi_desc->dimm_cmd_force_en);
 +      set_bit(ND_INTEL_FW_SEND_DATA, &acpi_desc->dimm_cmd_force_en);
 +      set_bit(ND_INTEL_FW_FINISH_UPDATE, &acpi_desc->dimm_cmd_force_en);
 +      set_bit(ND_INTEL_FW_FINISH_QUERY, &acpi_desc->dimm_cmd_force_en);
 +      set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
  }
  
  static void nfit_test1_setup(struct nfit_test *t)
        set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
        set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
        set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
 +      set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en);
  }
  
  static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
@@@ -2433,18 -2064,10 +2443,18 @@@ static int nfit_test_probe(struct platf
                                sizeof(struct nfit_test_dcr *), GFP_KERNEL);
                nfit_test->dcr_dma = devm_kcalloc(dev, num,
                                sizeof(dma_addr_t), GFP_KERNEL);
 +              nfit_test->smart = devm_kcalloc(dev, num,
 +                              sizeof(struct nd_intel_smart), GFP_KERNEL);
 +              nfit_test->smart_threshold = devm_kcalloc(dev, num,
 +                              sizeof(struct nd_intel_smart_threshold),
 +                              GFP_KERNEL);
 +              nfit_test->fw = devm_kcalloc(dev, num,
 +                              sizeof(struct nfit_test_fw), GFP_KERNEL);
                if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
                                && nfit_test->label_dma && nfit_test->dcr
                                && nfit_test->dcr_dma && nfit_test->flush
 -                              && nfit_test->flush_dma)
 +                              && nfit_test->flush_dma
 +                              && nfit_test->fw)
                        /* pass */;
                else
                        return -ENOMEM;
@@@ -2546,11 -2169,6 +2556,11 @@@ static __init int nfit_test_init(void
  {
        int rc, i;
  
 +      pmem_test();
 +      libnvdimm_test();
 +      acpi_nfit_test();
 +      device_dax_test();
 +
        nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
  
        nfit_wq = create_singlethread_workqueue("nfit");