Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tools/testing/nvdimm: libnvdimm unit test infrastructure

'libnvdimm' is the first driver sub-system in the kernel to implement
mocking for unit test coverage. The nfit_test module gets built as an
external module and arranges for external module replacements of nfit,
libnvdimm, nd_pmem, and nd_blk. These replacements use the linker
--wrap option to redirect calls to ioremap() + request_mem_region() to
custom defined unit test resources. The end result is a fully
functional nvdimm_bus, as far as userspace is concerned, but with the
capability to perform otherwise destructive tests on emulated resources.

Q: Why not use QEMU for this emulation?
QEMU is not suitable for unit testing. QEMU's role is to faithfully
emulate the platform. A unit test's role is to unfaithfully implement
the platform with the goal of triggering bugs in the corners of the
sub-system implementation. As bugs are discovered in platforms, or the
sub-system itself, the unit tests are extended to backstop a fix with a
reproducer unit test.

Another problem with QEMU is that it would require coordination of 3
software projects instead of 2 (kernel + libndctl [1]) to maintain and
execute the tests. The chances for bit rot and the difficulty of
getting the tests running go up non-linearly the more components
involved.


Q: Why submit this to the kernel tree instead of external modules in
libndctl?
Simple, to alleviate the same risk that out-of-tree external modules
face. Updates to drivers/nvdimm/ can be immediately evaluated to see if
they have any impact on tools/testing/nvdimm/.


Q: What are the negative implications of merging this?
It is a unique maintenance burden because the purpose of mocking an
interface to enable a unit test is to purposefully short circuit the
semantics of a routine to enable testing. For example
__wrap_ioremap_cache() fakes the pmem driver into "ioremap()'ing" a test
resource buffer allocated by dma_alloc_coherent(). The future
maintenance burden hits when someone changes the semantics of
ioremap_cache() and wonders what the implications are for the unit test.

[1]: https://github.com/pmem/ndctl

Cc: <linux-acpi@vger.kernel.org>
Cc: Lv Zheng <lv.zheng@intel.com>
Cc: Robert Moore <robert.moore@intel.com>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

+1377 -4
+8 -4
drivers/acpi/nfit.c
··· 33 33 34 34 static u8 nfit_uuid[NFIT_UUID_MAX][16]; 35 35 36 - static const u8 *to_nfit_uuid(enum nfit_uuids id) 36 + const u8 *to_nfit_uuid(enum nfit_uuids id) 37 37 { 38 38 return nfit_uuid[id]; 39 39 } 40 + EXPORT_SYMBOL(to_nfit_uuid); 40 41 41 42 static struct acpi_nfit_desc *to_acpi_nfit_desc( 42 43 struct nvdimm_bus_descriptor *nd_desc) ··· 582 581 .attrs = acpi_nfit_attributes, 583 582 }; 584 583 585 - static const struct attribute_group *acpi_nfit_attribute_groups[] = { 584 + const struct attribute_group *acpi_nfit_attribute_groups[] = { 586 585 &nvdimm_bus_attribute_group, 587 586 &acpi_nfit_attribute_group, 588 587 NULL, 589 588 }; 589 + EXPORT_SYMBOL_GPL(acpi_nfit_attribute_groups); 590 590 591 591 static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) 592 592 { ··· 1325 1323 ndbr_desc = to_blk_region_desc(ndr_desc); 1326 1324 ndbr_desc->enable = acpi_nfit_blk_region_enable; 1327 1325 ndbr_desc->disable = acpi_nfit_blk_region_disable; 1328 - ndbr_desc->do_io = acpi_nfit_blk_region_do_io; 1326 + ndbr_desc->do_io = acpi_desc->blk_do_io; 1329 1327 if (!nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc)) 1330 1328 return -ENOMEM; 1331 1329 break; ··· 1409 1407 return 0; 1410 1408 } 1411 1409 1412 - static int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) 1410 + int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) 1413 1411 { 1414 1412 struct device *dev = acpi_desc->dev; 1415 1413 const void *end; ··· 1448 1446 1449 1447 return acpi_nfit_register_regions(acpi_desc); 1450 1448 } 1449 + EXPORT_SYMBOL_GPL(acpi_nfit_init); 1451 1450 1452 1451 static int acpi_nfit_add(struct acpi_device *adev) 1453 1452 { ··· 1473 1470 dev_set_drvdata(dev, acpi_desc); 1474 1471 acpi_desc->dev = dev; 1475 1472 acpi_desc->nfit = (struct acpi_table_nfit *) tbl; 1473 + acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; 1476 1474 nd_desc = &acpi_desc->nd_desc; 1477 1475 nd_desc->provider_name = "ACPI.NFIT"; 1478 1476 
nd_desc->ndctl = acpi_nfit_ctl;
+6
drivers/acpi/nfit.h
··· 93 93 struct nvdimm_bus *nvdimm_bus; 94 94 struct device *dev; 95 95 unsigned long dimm_dsm_force_en; 96 + int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, 97 + void *iobuf, u64 len, int rw); 96 98 }; 97 99 98 100 enum nd_blk_mmio_selector { ··· 148 146 { 149 147 return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); 150 148 } 149 + 150 + const u8 *to_nfit_uuid(enum nfit_uuids id); 151 + int acpi_nfit_init(struct acpi_nfit_desc *nfit, acpi_size sz); 152 + extern const struct attribute_group *acpi_nfit_attribute_groups[]; 151 153 #endif /* __NFIT_H__ */
+40
tools/testing/nvdimm/Kbuild
··· 1 + ldflags-y += --wrap=ioremap_cache 2 + ldflags-y += --wrap=ioremap_nocache 3 + ldflags-y += --wrap=iounmap 4 + ldflags-y += --wrap=__request_region 5 + ldflags-y += --wrap=__release_region 6 + 7 + DRIVERS := ../../../drivers 8 + NVDIMM_SRC := $(DRIVERS)/nvdimm 9 + ACPI_SRC := $(DRIVERS)/acpi 10 + 11 + obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o 12 + obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o 13 + obj-$(CONFIG_ND_BTT) += nd_btt.o 14 + obj-$(CONFIG_ND_BLK) += nd_blk.o 15 + obj-$(CONFIG_ACPI_NFIT) += nfit.o 16 + 17 + nfit-y := $(ACPI_SRC)/nfit.o 18 + nfit-y += config_check.o 19 + 20 + nd_pmem-y := $(NVDIMM_SRC)/pmem.o 21 + nd_pmem-y += config_check.o 22 + 23 + nd_btt-y := $(NVDIMM_SRC)/btt.o 24 + nd_btt-y += config_check.o 25 + 26 + nd_blk-y := $(NVDIMM_SRC)/blk.o 27 + nd_blk-y += config_check.o 28 + 29 + libnvdimm-y := $(NVDIMM_SRC)/core.o 30 + libnvdimm-y += $(NVDIMM_SRC)/bus.o 31 + libnvdimm-y += $(NVDIMM_SRC)/dimm_devs.o 32 + libnvdimm-y += $(NVDIMM_SRC)/dimm.o 33 + libnvdimm-y += $(NVDIMM_SRC)/region_devs.o 34 + libnvdimm-y += $(NVDIMM_SRC)/region.o 35 + libnvdimm-y += $(NVDIMM_SRC)/namespace_devs.o 36 + libnvdimm-y += $(NVDIMM_SRC)/label.o 37 + libnvdimm-$(CONFIG_BTT) += $(NVDIMM_SRC)/btt_devs.o 38 + libnvdimm-y += config_check.o 39 + 40 + obj-m += test/
+7
tools/testing/nvdimm/Makefile
··· 1 + KDIR ?= ../../../ 2 + 3 + default: 4 + $(MAKE) -C $(KDIR) M=$$PWD 5 + 6 + install: default 7 + $(MAKE) -C $(KDIR) M=$$PWD modules_install
+15
tools/testing/nvdimm/config_check.c
··· 1 + #include <linux/kconfig.h> 2 + #include <linux/bug.h> 3 + 4 + void check(void) 5 + { 6 + /* 7 + * These kconfig symbols must be set to "m" for nfit_test to 8 + * load and operate. 9 + */ 10 + BUILD_BUG_ON(!IS_MODULE(CONFIG_LIBNVDIMM)); 11 + BUILD_BUG_ON(!IS_MODULE(CONFIG_BLK_DEV_PMEM)); 12 + BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BTT)); 13 + BUILD_BUG_ON(!IS_MODULE(CONFIG_ND_BLK)); 14 + BUILD_BUG_ON(!IS_MODULE(CONFIG_ACPI_NFIT)); 15 + }
+8
tools/testing/nvdimm/test/Kbuild
··· 1 + ccflags-y := -I$(src)/../../../../drivers/nvdimm/ 2 + ccflags-y += -I$(src)/../../../../drivers/acpi/ 3 + 4 + obj-m += nfit_test.o 5 + obj-m += nfit_test_iomap.o 6 + 7 + nfit_test-y := nfit.o 8 + nfit_test_iomap-y := iomap.o
+151
tools/testing/nvdimm/test/iomap.c
··· 1 + /* 2 + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of version 2 of the GNU General Public License as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, but 9 + * WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 + * General Public License for more details. 12 + */ 13 + #include <linux/rculist.h> 14 + #include <linux/export.h> 15 + #include <linux/ioport.h> 16 + #include <linux/module.h> 17 + #include <linux/types.h> 18 + #include <linux/io.h> 19 + #include "nfit_test.h" 20 + 21 + static LIST_HEAD(iomap_head); 22 + 23 + static struct iomap_ops { 24 + nfit_test_lookup_fn nfit_test_lookup; 25 + struct list_head list; 26 + } iomap_ops = { 27 + .list = LIST_HEAD_INIT(iomap_ops.list), 28 + }; 29 + 30 + void nfit_test_setup(nfit_test_lookup_fn lookup) 31 + { 32 + iomap_ops.nfit_test_lookup = lookup; 33 + list_add_rcu(&iomap_ops.list, &iomap_head); 34 + } 35 + EXPORT_SYMBOL(nfit_test_setup); 36 + 37 + void nfit_test_teardown(void) 38 + { 39 + list_del_rcu(&iomap_ops.list); 40 + synchronize_rcu(); 41 + } 42 + EXPORT_SYMBOL(nfit_test_teardown); 43 + 44 + static struct nfit_test_resource *get_nfit_res(resource_size_t resource) 45 + { 46 + struct iomap_ops *ops; 47 + 48 + ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list); 49 + if (ops) 50 + return ops->nfit_test_lookup(resource); 51 + return NULL; 52 + } 53 + 54 + void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size, 55 + void __iomem *(*fallback_fn)(resource_size_t, unsigned long)) 56 + { 57 + struct nfit_test_resource *nfit_res; 58 + 59 + rcu_read_lock(); 60 + nfit_res = get_nfit_res(offset); 61 + rcu_read_unlock(); 62 + if (nfit_res) 63 + return (void __iomem *) nfit_res->buf + offset 64 + - 
nfit_res->res->start; 65 + return fallback_fn(offset, size); 66 + } 67 + 68 + void __iomem *__wrap_ioremap_cache(resource_size_t offset, unsigned long size) 69 + { 70 + return __nfit_test_ioremap(offset, size, ioremap_cache); 71 + } 72 + EXPORT_SYMBOL(__wrap_ioremap_cache); 73 + 74 + void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size) 75 + { 76 + return __nfit_test_ioremap(offset, size, ioremap_nocache); 77 + } 78 + EXPORT_SYMBOL(__wrap_ioremap_nocache); 79 + 80 + void __wrap_iounmap(volatile void __iomem *addr) 81 + { 82 + struct nfit_test_resource *nfit_res; 83 + 84 + rcu_read_lock(); 85 + nfit_res = get_nfit_res((unsigned long) addr); 86 + rcu_read_unlock(); 87 + if (nfit_res) 88 + return; 89 + return iounmap(addr); 90 + } 91 + EXPORT_SYMBOL(__wrap_iounmap); 92 + 93 + struct resource *__wrap___request_region(struct resource *parent, 94 + resource_size_t start, resource_size_t n, const char *name, 95 + int flags) 96 + { 97 + struct nfit_test_resource *nfit_res; 98 + 99 + if (parent == &iomem_resource) { 100 + rcu_read_lock(); 101 + nfit_res = get_nfit_res(start); 102 + rcu_read_unlock(); 103 + if (nfit_res) { 104 + struct resource *res = nfit_res->res + 1; 105 + 106 + if (start + n > nfit_res->res->start 107 + + resource_size(nfit_res->res)) { 108 + pr_debug("%s: start: %llx n: %llx overflow: %pr\n", 109 + __func__, start, n, 110 + nfit_res->res); 111 + return NULL; 112 + } 113 + 114 + res->start = start; 115 + res->end = start + n - 1; 116 + res->name = name; 117 + res->flags = resource_type(parent); 118 + res->flags |= IORESOURCE_BUSY | flags; 119 + pr_debug("%s: %pr\n", __func__, res); 120 + return res; 121 + } 122 + } 123 + return __request_region(parent, start, n, name, flags); 124 + } 125 + EXPORT_SYMBOL(__wrap___request_region); 126 + 127 + void __wrap___release_region(struct resource *parent, resource_size_t start, 128 + resource_size_t n) 129 + { 130 + struct nfit_test_resource *nfit_res; 131 + 132 + if (parent == 
&iomem_resource) { 133 + rcu_read_lock(); 134 + nfit_res = get_nfit_res(start); 135 + rcu_read_unlock(); 136 + if (nfit_res) { 137 + struct resource *res = nfit_res->res + 1; 138 + 139 + if (start != res->start || resource_size(res) != n) 140 + pr_info("%s: start: %llx n: %llx mismatch: %pr\n", 141 + __func__, start, n, res); 142 + else 143 + memset(res, 0, sizeof(*res)); 144 + return; 145 + } 146 + } 147 + __release_region(parent, start, n); 148 + } 149 + EXPORT_SYMBOL(__wrap___release_region); 150 + 151 + MODULE_LICENSE("GPL v2");
+1113
tools/testing/nvdimm/test/nfit.c
··· 1 + /* 2 + * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of version 2 of the GNU General Public License as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, but 9 + * WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 + * General Public License for more details. 12 + */ 13 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 14 + #include <linux/platform_device.h> 15 + #include <linux/dma-mapping.h> 16 + #include <linux/libnvdimm.h> 17 + #include <linux/vmalloc.h> 18 + #include <linux/device.h> 19 + #include <linux/module.h> 20 + #include <linux/ndctl.h> 21 + #include <linux/sizes.h> 22 + #include <linux/slab.h> 23 + #include <nfit.h> 24 + #include <nd.h> 25 + #include "nfit_test.h" 26 + 27 + /* 28 + * Generate an NFIT table to describe the following topology: 29 + * 30 + * BUS0: Interleaved PMEM regions, and aliasing with BLK regions 31 + * 32 + * (a) (b) DIMM BLK-REGION 33 + * +----------+--------------+----------+---------+ 34 + * +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2 35 + * | imc0 +--+- - - - - region0 - - - -+----------+ + 36 + * +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3 37 + * | +----------+--------------v----------v v 38 + * +--+---+ | | 39 + * | cpu0 | region1 40 + * +--+---+ | | 41 + * | +-------------------------^----------^ ^ 42 + * +--+---+ | blk4.0 | pm1.0 | 2 region4 43 + * | imc1 +--+-------------------------+----------+ + 44 + * +------+ | blk5.0 | pm1.0 | 3 region5 45 + * +-------------------------+----------+-+-------+ 46 + * 47 + * *) In this layout we have four dimms and two memory controllers in one 48 + * socket. Each unique interface (BLK or PMEM) to DPA space 49 + * is identified by a region device with a dynamically assigned id. 
50 + * 51 + * *) The first portion of dimm0 and dimm1 are interleaved as REGION0. 52 + * A single PMEM namespace "pm0.0" is created using half of the 53 + * REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespaces 54 + * allocate from the bottom of a region. The unallocated 55 + * portion of REGION0 aliases with REGION2 and REGION3. That 56 + * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and 57 + * "blk3.0") starting at the base of each DIMM to offset (a) in those 58 + * DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable 59 + * names that can be assigned to a namespace. 60 + * 61 + * *) In the last portion of dimm0 and dimm1 we have an interleaved 62 + * SPA range, REGION1, that spans those two dimms as well as dimm2 63 + * and dimm3. Some of REGION1 is allocated to a PMEM namespace named 64 + * "pm1.0", the rest is reclaimed in 4 BLK namespaces (for each 65 + * dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and 66 + * "blk5.0". 67 + * 68 + * *) The portion of dimm2 and dimm3 that does not participate in the 69 + * REGION1 interleaved SPA range (i.e. the DPA address below offset 70 + * (b)) is also included in the "blk4.0" and "blk5.0" namespaces. 71 + * Note that BLK namespaces need not be contiguous in DPA-space, and 72 + * can consume aliased capacity from multiple interleave sets. 73 + * 74 + * BUS1: Legacy NVDIMM (single contiguous range) 75 + * 76 + * region2 77 + * +---------------------+ 78 + * |---------------------| 79 + * || pm2.0 || 80 + * |---------------------| 81 + * +---------------------+ 82 + * 83 + * *) A NFIT-table may describe a simple system-physical-address range 84 + * with no BLK aliasing. This type of region may optionally 85 + * reference an NVDIMM. 
86 + */ 87 + enum { 88 + NUM_PM = 2, 89 + NUM_DCR = 4, 90 + NUM_BDW = NUM_DCR, 91 + NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW, 92 + NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */, 93 + DIMM_SIZE = SZ_32M, 94 + LABEL_SIZE = SZ_128K, 95 + SPA0_SIZE = DIMM_SIZE, 96 + SPA1_SIZE = DIMM_SIZE*2, 97 + SPA2_SIZE = DIMM_SIZE, 98 + BDW_SIZE = 64 << 8, 99 + DCR_SIZE = 12, 100 + NUM_NFITS = 2, /* permit testing multiple NFITs per system */ 101 + }; 102 + 103 + struct nfit_test_dcr { 104 + __le64 bdw_addr; 105 + __le32 bdw_status; 106 + __u8 aperature[BDW_SIZE]; 107 + }; 108 + 109 + #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \ 110 + (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \ 111 + | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf)) 112 + 113 + static u32 handle[NUM_DCR] = { 114 + [0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0), 115 + [1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1), 116 + [2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0), 117 + [3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1), 118 + }; 119 + 120 + struct nfit_test { 121 + struct acpi_nfit_desc acpi_desc; 122 + struct platform_device pdev; 123 + struct list_head resources; 124 + void *nfit_buf; 125 + dma_addr_t nfit_dma; 126 + size_t nfit_size; 127 + int num_dcr; 128 + int num_pm; 129 + void **dimm; 130 + dma_addr_t *dimm_dma; 131 + void **label; 132 + dma_addr_t *label_dma; 133 + void **spa_set; 134 + dma_addr_t *spa_set_dma; 135 + struct nfit_test_dcr **dcr; 136 + dma_addr_t *dcr_dma; 137 + int (*alloc)(struct nfit_test *t); 138 + void (*setup)(struct nfit_test *t); 139 + }; 140 + 141 + static struct nfit_test *to_nfit_test(struct device *dev) 142 + { 143 + struct platform_device *pdev = to_platform_device(dev); 144 + 145 + return container_of(pdev, struct nfit_test, pdev); 146 + } 147 + 148 + static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc, 149 + struct nvdimm *nvdimm, unsigned int cmd, void *buf, 150 + unsigned int buf_len) 151 + { 152 + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); 
153 + struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc); 154 + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); 155 + int i, rc; 156 + 157 + if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask)) 158 + return -ENXIO; 159 + 160 + /* lookup label space for the given dimm */ 161 + for (i = 0; i < ARRAY_SIZE(handle); i++) 162 + if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i]) 163 + break; 164 + if (i >= ARRAY_SIZE(handle)) 165 + return -ENXIO; 166 + 167 + switch (cmd) { 168 + case ND_CMD_GET_CONFIG_SIZE: { 169 + struct nd_cmd_get_config_size *nd_cmd = buf; 170 + 171 + if (buf_len < sizeof(*nd_cmd)) 172 + return -EINVAL; 173 + nd_cmd->status = 0; 174 + nd_cmd->config_size = LABEL_SIZE; 175 + nd_cmd->max_xfer = SZ_4K; 176 + rc = 0; 177 + break; 178 + } 179 + case ND_CMD_GET_CONFIG_DATA: { 180 + struct nd_cmd_get_config_data_hdr *nd_cmd = buf; 181 + unsigned int len, offset = nd_cmd->in_offset; 182 + 183 + if (buf_len < sizeof(*nd_cmd)) 184 + return -EINVAL; 185 + if (offset >= LABEL_SIZE) 186 + return -EINVAL; 187 + if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len) 188 + return -EINVAL; 189 + 190 + nd_cmd->status = 0; 191 + len = min(nd_cmd->in_length, LABEL_SIZE - offset); 192 + memcpy(nd_cmd->out_buf, t->label[i] + offset, len); 193 + rc = buf_len - sizeof(*nd_cmd) - len; 194 + break; 195 + } 196 + case ND_CMD_SET_CONFIG_DATA: { 197 + struct nd_cmd_set_config_hdr *nd_cmd = buf; 198 + unsigned int len, offset = nd_cmd->in_offset; 199 + u32 *status; 200 + 201 + if (buf_len < sizeof(*nd_cmd)) 202 + return -EINVAL; 203 + if (offset >= LABEL_SIZE) 204 + return -EINVAL; 205 + if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len) 206 + return -EINVAL; 207 + 208 + status = buf + nd_cmd->in_length + sizeof(*nd_cmd); 209 + *status = 0; 210 + len = min(nd_cmd->in_length, LABEL_SIZE - offset); 211 + memcpy(t->label[i] + offset, nd_cmd->in_buf, len); 212 + rc = buf_len - sizeof(*nd_cmd) - (len + 4); 213 + break; 214 + } 215 + default: 216 + 
return -ENOTTY; 217 + } 218 + 219 + return rc; 220 + } 221 + 222 + static DEFINE_SPINLOCK(nfit_test_lock); 223 + static struct nfit_test *instances[NUM_NFITS]; 224 + 225 + static void release_nfit_res(void *data) 226 + { 227 + struct nfit_test_resource *nfit_res = data; 228 + struct resource *res = nfit_res->res; 229 + 230 + spin_lock(&nfit_test_lock); 231 + list_del(&nfit_res->list); 232 + spin_unlock(&nfit_test_lock); 233 + 234 + if (is_vmalloc_addr(nfit_res->buf)) 235 + vfree(nfit_res->buf); 236 + else 237 + dma_free_coherent(nfit_res->dev, resource_size(res), 238 + nfit_res->buf, res->start); 239 + kfree(res); 240 + kfree(nfit_res); 241 + } 242 + 243 + static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma, 244 + void *buf) 245 + { 246 + struct device *dev = &t->pdev.dev; 247 + struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL); 248 + struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res), 249 + GFP_KERNEL); 250 + int rc; 251 + 252 + if (!res || !buf || !nfit_res) 253 + goto err; 254 + rc = devm_add_action(dev, release_nfit_res, nfit_res); 255 + if (rc) 256 + goto err; 257 + INIT_LIST_HEAD(&nfit_res->list); 258 + memset(buf, 0, size); 259 + nfit_res->dev = dev; 260 + nfit_res->buf = buf; 261 + nfit_res->res = res; 262 + res->start = *dma; 263 + res->end = *dma + size - 1; 264 + res->name = "NFIT"; 265 + spin_lock(&nfit_test_lock); 266 + list_add(&nfit_res->list, &t->resources); 267 + spin_unlock(&nfit_test_lock); 268 + 269 + return nfit_res->buf; 270 + err: 271 + if (buf && !is_vmalloc_addr(buf)) 272 + dma_free_coherent(dev, size, buf, *dma); 273 + else if (buf) 274 + vfree(buf); 275 + kfree(res); 276 + kfree(nfit_res); 277 + return NULL; 278 + } 279 + 280 + static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma) 281 + { 282 + void *buf = vmalloc(size); 283 + 284 + *dma = (unsigned long) buf; 285 + return __test_alloc(t, size, dma, buf); 286 + } 287 + 288 + static void *test_alloc_coherent(struct 
nfit_test *t, size_t size, 289 + dma_addr_t *dma) 290 + { 291 + struct device *dev = &t->pdev.dev; 292 + void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL); 293 + 294 + return __test_alloc(t, size, dma, buf); 295 + } 296 + 297 + static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr) 298 + { 299 + int i; 300 + 301 + for (i = 0; i < ARRAY_SIZE(instances); i++) { 302 + struct nfit_test_resource *n, *nfit_res = NULL; 303 + struct nfit_test *t = instances[i]; 304 + 305 + if (!t) 306 + continue; 307 + spin_lock(&nfit_test_lock); 308 + list_for_each_entry(n, &t->resources, list) { 309 + if (addr >= n->res->start && (addr < n->res->start 310 + + resource_size(n->res))) { 311 + nfit_res = n; 312 + break; 313 + } else if (addr >= (unsigned long) n->buf 314 + && (addr < (unsigned long) n->buf 315 + + resource_size(n->res))) { 316 + nfit_res = n; 317 + break; 318 + } 319 + } 320 + spin_unlock(&nfit_test_lock); 321 + if (nfit_res) 322 + return nfit_res; 323 + } 324 + 325 + return NULL; 326 + } 327 + 328 + static int nfit_test0_alloc(struct nfit_test *t) 329 + { 330 + size_t nfit_size = sizeof(struct acpi_table_nfit) 331 + + sizeof(struct acpi_nfit_system_address) * NUM_SPA 332 + + sizeof(struct acpi_nfit_memory_map) * NUM_MEM 333 + + sizeof(struct acpi_nfit_control_region) * NUM_DCR 334 + + sizeof(struct acpi_nfit_data_region) * NUM_BDW; 335 + int i; 336 + 337 + t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma); 338 + if (!t->nfit_buf) 339 + return -ENOMEM; 340 + t->nfit_size = nfit_size; 341 + 342 + t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]); 343 + if (!t->spa_set[0]) 344 + return -ENOMEM; 345 + 346 + t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]); 347 + if (!t->spa_set[1]) 348 + return -ENOMEM; 349 + 350 + for (i = 0; i < NUM_DCR; i++) { 351 + t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]); 352 + if (!t->dimm[i]) 353 + return -ENOMEM; 354 + 355 + t->label[i] = test_alloc(t, LABEL_SIZE, 
&t->label_dma[i]); 356 + if (!t->label[i]) 357 + return -ENOMEM; 358 + sprintf(t->label[i], "label%d", i); 359 + } 360 + 361 + for (i = 0; i < NUM_DCR; i++) { 362 + t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]); 363 + if (!t->dcr[i]) 364 + return -ENOMEM; 365 + } 366 + 367 + return 0; 368 + } 369 + 370 + static int nfit_test1_alloc(struct nfit_test *t) 371 + { 372 + size_t nfit_size = sizeof(struct acpi_table_nfit) 373 + + sizeof(struct acpi_nfit_system_address) 374 + + sizeof(struct acpi_nfit_memory_map) 375 + + sizeof(struct acpi_nfit_control_region); 376 + 377 + t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma); 378 + if (!t->nfit_buf) 379 + return -ENOMEM; 380 + t->nfit_size = nfit_size; 381 + 382 + t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]); 383 + if (!t->spa_set[0]) 384 + return -ENOMEM; 385 + 386 + return 0; 387 + } 388 + 389 + static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size) 390 + { 391 + memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4); 392 + nfit->header.length = size; 393 + nfit->header.revision = 1; 394 + memcpy(nfit->header.oem_id, "LIBND", 6); 395 + memcpy(nfit->header.oem_table_id, "TEST", 5); 396 + nfit->header.oem_revision = 1; 397 + memcpy(nfit->header.asl_compiler_id, "TST", 4); 398 + nfit->header.asl_compiler_revision = 1; 399 + } 400 + 401 + static void nfit_test0_setup(struct nfit_test *t) 402 + { 403 + struct nvdimm_bus_descriptor *nd_desc; 404 + struct acpi_nfit_desc *acpi_desc; 405 + struct acpi_nfit_memory_map *memdev; 406 + void *nfit_buf = t->nfit_buf; 407 + size_t size = t->nfit_size; 408 + struct acpi_nfit_system_address *spa; 409 + struct acpi_nfit_control_region *dcr; 410 + struct acpi_nfit_data_region *bdw; 411 + unsigned int offset; 412 + 413 + nfit_test_init_header(nfit_buf, size); 414 + 415 + /* 416 + * spa0 (interleave first half of dimm0 and dimm1, note storage 417 + * does not actually alias the related block-data-window 418 + * regions) 419 + */ 420 + spa = 
nfit_buf + sizeof(struct acpi_table_nfit); 421 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 422 + spa->header.length = sizeof(*spa); 423 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); 424 + spa->range_index = 0+1; 425 + spa->address = t->spa_set_dma[0]; 426 + spa->length = SPA0_SIZE; 427 + 428 + /* 429 + * spa1 (interleave last half of the 4 DIMMS, note storage 430 + * does not actually alias the related block-data-window 431 + * regions) 432 + */ 433 + spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa); 434 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 435 + spa->header.length = sizeof(*spa); 436 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); 437 + spa->range_index = 1+1; 438 + spa->address = t->spa_set_dma[1]; 439 + spa->length = SPA1_SIZE; 440 + 441 + /* spa2 (dcr0) dimm0 */ 442 + spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2; 443 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 444 + spa->header.length = sizeof(*spa); 445 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 446 + spa->range_index = 2+1; 447 + spa->address = t->dcr_dma[0]; 448 + spa->length = DCR_SIZE; 449 + 450 + /* spa3 (dcr1) dimm1 */ 451 + spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3; 452 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 453 + spa->header.length = sizeof(*spa); 454 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 455 + spa->range_index = 3+1; 456 + spa->address = t->dcr_dma[1]; 457 + spa->length = DCR_SIZE; 458 + 459 + /* spa4 (dcr2) dimm2 */ 460 + spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4; 461 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 462 + spa->header.length = sizeof(*spa); 463 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 464 + spa->range_index = 4+1; 465 + spa->address = t->dcr_dma[2]; 466 + spa->length = DCR_SIZE; 467 + 468 + /* spa5 (dcr3) dimm3 */ 469 + spa = nfit_buf + sizeof(struct acpi_table_nfit) + 
sizeof(*spa) * 5; 470 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 471 + spa->header.length = sizeof(*spa); 472 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); 473 + spa->range_index = 5+1; 474 + spa->address = t->dcr_dma[3]; 475 + spa->length = DCR_SIZE; 476 + 477 + /* spa6 (bdw for dcr0) dimm0 */ 478 + spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6; 479 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 480 + spa->header.length = sizeof(*spa); 481 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 482 + spa->range_index = 6+1; 483 + spa->address = t->dimm_dma[0]; 484 + spa->length = DIMM_SIZE; 485 + 486 + /* spa7 (bdw for dcr1) dimm1 */ 487 + spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7; 488 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 489 + spa->header.length = sizeof(*spa); 490 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 491 + spa->range_index = 7+1; 492 + spa->address = t->dimm_dma[1]; 493 + spa->length = DIMM_SIZE; 494 + 495 + /* spa8 (bdw for dcr2) dimm2 */ 496 + spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8; 497 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 498 + spa->header.length = sizeof(*spa); 499 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 500 + spa->range_index = 8+1; 501 + spa->address = t->dimm_dma[2]; 502 + spa->length = DIMM_SIZE; 503 + 504 + /* spa9 (bdw for dcr3) dimm3 */ 505 + spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9; 506 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 507 + spa->header.length = sizeof(*spa); 508 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); 509 + spa->range_index = 9+1; 510 + spa->address = t->dimm_dma[3]; 511 + spa->length = DIMM_SIZE; 512 + 513 + offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10; 514 + /* mem-region0 (spa0, dimm0) */ 515 + memdev = nfit_buf + offset; 516 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 517 + 
memdev->header.length = sizeof(*memdev); 518 + memdev->device_handle = handle[0]; 519 + memdev->physical_id = 0; 520 + memdev->region_id = 0; 521 + memdev->range_index = 0+1; 522 + memdev->region_index = 0+1; 523 + memdev->region_size = SPA0_SIZE/2; 524 + memdev->region_offset = t->spa_set_dma[0]; 525 + memdev->address = 0; 526 + memdev->interleave_index = 0; 527 + memdev->interleave_ways = 2; 528 + 529 + /* mem-region1 (spa0, dimm1) */ 530 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map); 531 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 532 + memdev->header.length = sizeof(*memdev); 533 + memdev->device_handle = handle[1]; 534 + memdev->physical_id = 1; 535 + memdev->region_id = 0; 536 + memdev->range_index = 0+1; 537 + memdev->region_index = 1+1; 538 + memdev->region_size = SPA0_SIZE/2; 539 + memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2; 540 + memdev->address = 0; 541 + memdev->interleave_index = 0; 542 + memdev->interleave_ways = 2; 543 + 544 + /* mem-region2 (spa1, dimm0) */ 545 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2; 546 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 547 + memdev->header.length = sizeof(*memdev); 548 + memdev->device_handle = handle[0]; 549 + memdev->physical_id = 0; 550 + memdev->region_id = 1; 551 + memdev->range_index = 1+1; 552 + memdev->region_index = 0+1; 553 + memdev->region_size = SPA1_SIZE/4; 554 + memdev->region_offset = t->spa_set_dma[1]; 555 + memdev->address = SPA0_SIZE/2; 556 + memdev->interleave_index = 0; 557 + memdev->interleave_ways = 4; 558 + 559 + /* mem-region3 (spa1, dimm1) */ 560 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3; 561 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 562 + memdev->header.length = sizeof(*memdev); 563 + memdev->device_handle = handle[1]; 564 + memdev->physical_id = 1; 565 + memdev->region_id = 1; 566 + memdev->range_index = 1+1; 567 + memdev->region_index = 1+1; 568 + memdev->region_size = 
SPA1_SIZE/4; 569 + memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4; 570 + memdev->address = SPA0_SIZE/2; 571 + memdev->interleave_index = 0; 572 + memdev->interleave_ways = 4; 573 + 574 + /* mem-region4 (spa1, dimm2) */ 575 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4; 576 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 577 + memdev->header.length = sizeof(*memdev); 578 + memdev->device_handle = handle[2]; 579 + memdev->physical_id = 2; 580 + memdev->region_id = 0; 581 + memdev->range_index = 1+1; 582 + memdev->region_index = 2+1; 583 + memdev->region_size = SPA1_SIZE/4; 584 + memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4; 585 + memdev->address = SPA0_SIZE/2; 586 + memdev->interleave_index = 0; 587 + memdev->interleave_ways = 4; 588 + 589 + /* mem-region5 (spa1, dimm3) */ 590 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5; 591 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 592 + memdev->header.length = sizeof(*memdev); 593 + memdev->device_handle = handle[3]; 594 + memdev->physical_id = 3; 595 + memdev->region_id = 0; 596 + memdev->range_index = 1+1; 597 + memdev->region_index = 3+1; 598 + memdev->region_size = SPA1_SIZE/4; 599 + memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4; 600 + memdev->address = SPA0_SIZE/2; 601 + memdev->interleave_index = 0; 602 + memdev->interleave_ways = 4; 603 + 604 + /* mem-region6 (spa/dcr0, dimm0) */ 605 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6; 606 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 607 + memdev->header.length = sizeof(*memdev); 608 + memdev->device_handle = handle[0]; 609 + memdev->physical_id = 0; 610 + memdev->region_id = 0; 611 + memdev->range_index = 2+1; 612 + memdev->region_index = 0+1; 613 + memdev->region_size = 0; 614 + memdev->region_offset = 0; 615 + memdev->address = 0; 616 + memdev->interleave_index = 0; 617 + memdev->interleave_ways = 1; 618 + 619 + /* mem-region7 (spa/dcr1, dimm1) */ 620 
+ memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7; 621 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 622 + memdev->header.length = sizeof(*memdev); 623 + memdev->device_handle = handle[1]; 624 + memdev->physical_id = 1; 625 + memdev->region_id = 0; 626 + memdev->range_index = 3+1; 627 + memdev->region_index = 1+1; 628 + memdev->region_size = 0; 629 + memdev->region_offset = 0; 630 + memdev->address = 0; 631 + memdev->interleave_index = 0; 632 + memdev->interleave_ways = 1; 633 + 634 + /* mem-region8 (spa/dcr2, dimm2) */ 635 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8; 636 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 637 + memdev->header.length = sizeof(*memdev); 638 + memdev->device_handle = handle[2]; 639 + memdev->physical_id = 2; 640 + memdev->region_id = 0; 641 + memdev->range_index = 4+1; 642 + memdev->region_index = 2+1; 643 + memdev->region_size = 0; 644 + memdev->region_offset = 0; 645 + memdev->address = 0; 646 + memdev->interleave_index = 0; 647 + memdev->interleave_ways = 1; 648 + 649 + /* mem-region9 (spa/dcr3, dimm3) */ 650 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9; 651 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 652 + memdev->header.length = sizeof(*memdev); 653 + memdev->device_handle = handle[3]; 654 + memdev->physical_id = 3; 655 + memdev->region_id = 0; 656 + memdev->range_index = 5+1; 657 + memdev->region_index = 3+1; 658 + memdev->region_size = 0; 659 + memdev->region_offset = 0; 660 + memdev->address = 0; 661 + memdev->interleave_index = 0; 662 + memdev->interleave_ways = 1; 663 + 664 + /* mem-region10 (spa/bdw0, dimm0) */ 665 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10; 666 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 667 + memdev->header.length = sizeof(*memdev); 668 + memdev->device_handle = handle[0]; 669 + memdev->physical_id = 0; 670 + memdev->region_id = 0; 671 + memdev->range_index = 6+1; 672 + 
memdev->region_index = 0+1; 673 + memdev->region_size = 0; 674 + memdev->region_offset = 0; 675 + memdev->address = 0; 676 + memdev->interleave_index = 0; 677 + memdev->interleave_ways = 1; 678 + 679 + /* mem-region11 (spa/bdw1, dimm1) */ 680 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11; 681 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 682 + memdev->header.length = sizeof(*memdev); 683 + memdev->device_handle = handle[1]; 684 + memdev->physical_id = 1; 685 + memdev->region_id = 0; 686 + memdev->range_index = 7+1; 687 + memdev->region_index = 1+1; 688 + memdev->region_size = 0; 689 + memdev->region_offset = 0; 690 + memdev->address = 0; 691 + memdev->interleave_index = 0; 692 + memdev->interleave_ways = 1; 693 + 694 + /* mem-region12 (spa/bdw2, dimm2) */ 695 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12; 696 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 697 + memdev->header.length = sizeof(*memdev); 698 + memdev->device_handle = handle[2]; 699 + memdev->physical_id = 2; 700 + memdev->region_id = 0; 701 + memdev->range_index = 8+1; 702 + memdev->region_index = 2+1; 703 + memdev->region_size = 0; 704 + memdev->region_offset = 0; 705 + memdev->address = 0; 706 + memdev->interleave_index = 0; 707 + memdev->interleave_ways = 1; 708 + 709 + /* mem-region13 (spa/dcr3, dimm3) */ 710 + memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13; 711 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 712 + memdev->header.length = sizeof(*memdev); 713 + memdev->device_handle = handle[3]; 714 + memdev->physical_id = 3; 715 + memdev->region_id = 0; 716 + memdev->range_index = 9+1; 717 + memdev->region_index = 3+1; 718 + memdev->region_size = 0; 719 + memdev->region_offset = 0; 720 + memdev->address = 0; 721 + memdev->interleave_index = 0; 722 + memdev->interleave_ways = 1; 723 + 724 + offset = offset + sizeof(struct acpi_nfit_memory_map) * 14; 725 + /* dcr-descriptor0 */ 726 + dcr = nfit_buf + offset; 727 
+ dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 728 + dcr->header.length = sizeof(struct acpi_nfit_control_region); 729 + dcr->region_index = 0+1; 730 + dcr->vendor_id = 0xabcd; 731 + dcr->device_id = 0; 732 + dcr->revision_id = 1; 733 + dcr->serial_number = ~handle[0]; 734 + dcr->windows = 1; 735 + dcr->window_size = DCR_SIZE; 736 + dcr->command_offset = 0; 737 + dcr->command_size = 8; 738 + dcr->status_offset = 8; 739 + dcr->status_size = 4; 740 + 741 + /* dcr-descriptor1 */ 742 + dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region); 743 + dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 744 + dcr->header.length = sizeof(struct acpi_nfit_control_region); 745 + dcr->region_index = 1+1; 746 + dcr->vendor_id = 0xabcd; 747 + dcr->device_id = 0; 748 + dcr->revision_id = 1; 749 + dcr->serial_number = ~handle[1]; 750 + dcr->windows = 1; 751 + dcr->window_size = DCR_SIZE; 752 + dcr->command_offset = 0; 753 + dcr->command_size = 8; 754 + dcr->status_offset = 8; 755 + dcr->status_size = 4; 756 + 757 + /* dcr-descriptor2 */ 758 + dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2; 759 + dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 760 + dcr->header.length = sizeof(struct acpi_nfit_control_region); 761 + dcr->region_index = 2+1; 762 + dcr->vendor_id = 0xabcd; 763 + dcr->device_id = 0; 764 + dcr->revision_id = 1; 765 + dcr->serial_number = ~handle[2]; 766 + dcr->windows = 1; 767 + dcr->window_size = DCR_SIZE; 768 + dcr->command_offset = 0; 769 + dcr->command_size = 8; 770 + dcr->status_offset = 8; 771 + dcr->status_size = 4; 772 + 773 + /* dcr-descriptor3 */ 774 + dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3; 775 + dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 776 + dcr->header.length = sizeof(struct acpi_nfit_control_region); 777 + dcr->region_index = 3+1; 778 + dcr->vendor_id = 0xabcd; 779 + dcr->device_id = 0; 780 + dcr->revision_id = 1; 781 + dcr->serial_number = ~handle[3]; 782 + dcr->windows = 1; 783 
+ dcr->window_size = DCR_SIZE; 784 + dcr->command_offset = 0; 785 + dcr->command_size = 8; 786 + dcr->status_offset = 8; 787 + dcr->status_size = 4; 788 + 789 + offset = offset + sizeof(struct acpi_nfit_control_region) * 4; 790 + /* bdw0 (spa/dcr0, dimm0) */ 791 + bdw = nfit_buf + offset; 792 + bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 793 + bdw->header.length = sizeof(struct acpi_nfit_data_region); 794 + bdw->region_index = 0+1; 795 + bdw->windows = 1; 796 + bdw->offset = 0; 797 + bdw->size = BDW_SIZE; 798 + bdw->capacity = DIMM_SIZE; 799 + bdw->start_address = 0; 800 + 801 + /* bdw1 (spa/dcr1, dimm1) */ 802 + bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region); 803 + bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 804 + bdw->header.length = sizeof(struct acpi_nfit_data_region); 805 + bdw->region_index = 1+1; 806 + bdw->windows = 1; 807 + bdw->offset = 0; 808 + bdw->size = BDW_SIZE; 809 + bdw->capacity = DIMM_SIZE; 810 + bdw->start_address = 0; 811 + 812 + /* bdw2 (spa/dcr2, dimm2) */ 813 + bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2; 814 + bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 815 + bdw->header.length = sizeof(struct acpi_nfit_data_region); 816 + bdw->region_index = 2+1; 817 + bdw->windows = 1; 818 + bdw->offset = 0; 819 + bdw->size = BDW_SIZE; 820 + bdw->capacity = DIMM_SIZE; 821 + bdw->start_address = 0; 822 + 823 + /* bdw3 (spa/dcr3, dimm3) */ 824 + bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3; 825 + bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION; 826 + bdw->header.length = sizeof(struct acpi_nfit_data_region); 827 + bdw->region_index = 3+1; 828 + bdw->windows = 1; 829 + bdw->offset = 0; 830 + bdw->size = BDW_SIZE; 831 + bdw->capacity = DIMM_SIZE; 832 + bdw->start_address = 0; 833 + 834 + acpi_desc = &t->acpi_desc; 835 + set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en); 836 + set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en); 837 + set_bit(ND_CMD_SET_CONFIG_DATA, 
&acpi_desc->dimm_dsm_force_en); 838 + nd_desc = &acpi_desc->nd_desc; 839 + nd_desc->ndctl = nfit_test_ctl; 840 + } 841 + 842 + static void nfit_test1_setup(struct nfit_test *t) 843 + { 844 + size_t size = t->nfit_size, offset; 845 + void *nfit_buf = t->nfit_buf; 846 + struct acpi_nfit_memory_map *memdev; 847 + struct acpi_nfit_control_region *dcr; 848 + struct acpi_nfit_system_address *spa; 849 + 850 + nfit_test_init_header(nfit_buf, size); 851 + 852 + offset = sizeof(struct acpi_table_nfit); 853 + /* spa0 (flat range with no bdw aliasing) */ 854 + spa = nfit_buf + offset; 855 + spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 856 + spa->header.length = sizeof(*spa); 857 + memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); 858 + spa->range_index = 0+1; 859 + spa->address = t->spa_set_dma[0]; 860 + spa->length = SPA2_SIZE; 861 + 862 + offset += sizeof(*spa); 863 + /* mem-region0 (spa0, dimm0) */ 864 + memdev = nfit_buf + offset; 865 + memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; 866 + memdev->header.length = sizeof(*memdev); 867 + memdev->device_handle = 0; 868 + memdev->physical_id = 0; 869 + memdev->region_id = 0; 870 + memdev->range_index = 0+1; 871 + memdev->region_index = 0+1; 872 + memdev->region_size = SPA2_SIZE; 873 + memdev->region_offset = 0; 874 + memdev->address = 0; 875 + memdev->interleave_index = 0; 876 + memdev->interleave_ways = 1; 877 + 878 + offset += sizeof(*memdev); 879 + /* dcr-descriptor0 */ 880 + dcr = nfit_buf + offset; 881 + dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION; 882 + dcr->header.length = sizeof(struct acpi_nfit_control_region); 883 + dcr->region_index = 0+1; 884 + dcr->vendor_id = 0xabcd; 885 + dcr->device_id = 0; 886 + dcr->revision_id = 1; 887 + dcr->serial_number = ~0; 888 + dcr->code = 0x201; 889 + dcr->windows = 0; 890 + dcr->window_size = 0; 891 + dcr->command_offset = 0; 892 + dcr->command_size = 0; 893 + dcr->status_offset = 0; 894 + dcr->status_size = 0; 895 + } 896 + 897 + static int 
nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, 898 + void *iobuf, u64 len, int rw) 899 + { 900 + struct nfit_blk *nfit_blk = ndbr->blk_provider_data; 901 + struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; 902 + struct nd_region *nd_region = &ndbr->nd_region; 903 + unsigned int lane; 904 + 905 + lane = nd_region_acquire_lane(nd_region); 906 + if (rw) 907 + memcpy(mmio->base + dpa, iobuf, len); 908 + else 909 + memcpy(iobuf, mmio->base + dpa, len); 910 + nd_region_release_lane(nd_region, lane); 911 + 912 + return 0; 913 + } 914 + 915 + static int nfit_test_probe(struct platform_device *pdev) 916 + { 917 + struct nvdimm_bus_descriptor *nd_desc; 918 + struct acpi_nfit_desc *acpi_desc; 919 + struct device *dev = &pdev->dev; 920 + struct nfit_test *nfit_test; 921 + int rc; 922 + 923 + nfit_test = to_nfit_test(&pdev->dev); 924 + 925 + /* common alloc */ 926 + if (nfit_test->num_dcr) { 927 + int num = nfit_test->num_dcr; 928 + 929 + nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *), 930 + GFP_KERNEL); 931 + nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t), 932 + GFP_KERNEL); 933 + nfit_test->label = devm_kcalloc(dev, num, sizeof(void *), 934 + GFP_KERNEL); 935 + nfit_test->label_dma = devm_kcalloc(dev, num, 936 + sizeof(dma_addr_t), GFP_KERNEL); 937 + nfit_test->dcr = devm_kcalloc(dev, num, 938 + sizeof(struct nfit_test_dcr *), GFP_KERNEL); 939 + nfit_test->dcr_dma = devm_kcalloc(dev, num, 940 + sizeof(dma_addr_t), GFP_KERNEL); 941 + if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label 942 + && nfit_test->label_dma && nfit_test->dcr 943 + && nfit_test->dcr_dma) 944 + /* pass */; 945 + else 946 + return -ENOMEM; 947 + } 948 + 949 + if (nfit_test->num_pm) { 950 + int num = nfit_test->num_pm; 951 + 952 + nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *), 953 + GFP_KERNEL); 954 + nfit_test->spa_set_dma = devm_kcalloc(dev, num, 955 + sizeof(dma_addr_t), GFP_KERNEL); 956 + if (nfit_test->spa_set && 
nfit_test->spa_set_dma) 957 + /* pass */; 958 + else 959 + return -ENOMEM; 960 + } 961 + 962 + /* per-nfit specific alloc */ 963 + if (nfit_test->alloc(nfit_test)) 964 + return -ENOMEM; 965 + 966 + nfit_test->setup(nfit_test); 967 + acpi_desc = &nfit_test->acpi_desc; 968 + acpi_desc->dev = &pdev->dev; 969 + acpi_desc->nfit = nfit_test->nfit_buf; 970 + acpi_desc->blk_do_io = nfit_test_blk_do_io; 971 + nd_desc = &acpi_desc->nd_desc; 972 + nd_desc->attr_groups = acpi_nfit_attribute_groups; 973 + acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc); 974 + if (!acpi_desc->nvdimm_bus) 975 + return -ENXIO; 976 + 977 + rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size); 978 + if (rc) { 979 + nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 980 + return rc; 981 + } 982 + 983 + return 0; 984 + } 985 + 986 + static int nfit_test_remove(struct platform_device *pdev) 987 + { 988 + struct nfit_test *nfit_test = to_nfit_test(&pdev->dev); 989 + struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc; 990 + 991 + nvdimm_bus_unregister(acpi_desc->nvdimm_bus); 992 + 993 + return 0; 994 + } 995 + 996 + static void nfit_test_release(struct device *dev) 997 + { 998 + struct nfit_test *nfit_test = to_nfit_test(dev); 999 + 1000 + kfree(nfit_test); 1001 + } 1002 + 1003 + static const struct platform_device_id nfit_test_id[] = { 1004 + { KBUILD_MODNAME }, 1005 + { }, 1006 + }; 1007 + 1008 + static struct platform_driver nfit_test_driver = { 1009 + .probe = nfit_test_probe, 1010 + .remove = nfit_test_remove, 1011 + .driver = { 1012 + .name = KBUILD_MODNAME, 1013 + }, 1014 + .id_table = nfit_test_id, 1015 + }; 1016 + 1017 + #ifdef CONFIG_CMA_SIZE_MBYTES 1018 + #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES 1019 + #else 1020 + #define CMA_SIZE_MBYTES 0 1021 + #endif 1022 + 1023 + static __init int nfit_test_init(void) 1024 + { 1025 + int rc, i; 1026 + 1027 + nfit_test_setup(nfit_test_lookup); 1028 + 1029 + for (i = 0; i < NUM_NFITS; i++) { 1030 + struct nfit_test *nfit_test; 1031 + 
struct platform_device *pdev; 1032 + static int once; 1033 + 1034 + nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL); 1035 + if (!nfit_test) { 1036 + rc = -ENOMEM; 1037 + goto err_register; 1038 + } 1039 + INIT_LIST_HEAD(&nfit_test->resources); 1040 + switch (i) { 1041 + case 0: 1042 + nfit_test->num_pm = NUM_PM; 1043 + nfit_test->num_dcr = NUM_DCR; 1044 + nfit_test->alloc = nfit_test0_alloc; 1045 + nfit_test->setup = nfit_test0_setup; 1046 + break; 1047 + case 1: 1048 + nfit_test->num_pm = 1; 1049 + nfit_test->alloc = nfit_test1_alloc; 1050 + nfit_test->setup = nfit_test1_setup; 1051 + break; 1052 + default: 1053 + rc = -EINVAL; 1054 + goto err_register; 1055 + } 1056 + pdev = &nfit_test->pdev; 1057 + pdev->name = KBUILD_MODNAME; 1058 + pdev->id = i; 1059 + pdev->dev.release = nfit_test_release; 1060 + rc = platform_device_register(pdev); 1061 + if (rc) { 1062 + put_device(&pdev->dev); 1063 + goto err_register; 1064 + } 1065 + 1066 + rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 1067 + if (rc) 1068 + goto err_register; 1069 + 1070 + instances[i] = nfit_test; 1071 + 1072 + if (!once++) { 1073 + dma_addr_t dma; 1074 + void *buf; 1075 + 1076 + buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma, 1077 + GFP_KERNEL); 1078 + if (!buf) { 1079 + rc = -ENOMEM; 1080 + dev_warn(&pdev->dev, "need 128M of free cma\n"); 1081 + goto err_register; 1082 + } 1083 + dma_free_coherent(&pdev->dev, SZ_128M, buf, dma); 1084 + } 1085 + } 1086 + 1087 + rc = platform_driver_register(&nfit_test_driver); 1088 + if (rc) 1089 + goto err_register; 1090 + return 0; 1091 + 1092 + err_register: 1093 + for (i = 0; i < NUM_NFITS; i++) 1094 + if (instances[i]) 1095 + platform_device_unregister(&instances[i]->pdev); 1096 + nfit_test_teardown(); 1097 + return rc; 1098 + } 1099 + 1100 + static __exit void nfit_test_exit(void) 1101 + { 1102 + int i; 1103 + 1104 + platform_driver_unregister(&nfit_test_driver); 1105 + for (i = 0; i < NUM_NFITS; i++) 1106 + 
platform_device_unregister(&instances[i]->pdev); 1107 + nfit_test_teardown(); 1108 + } 1109 + 1110 + module_init(nfit_test_init); 1111 + module_exit(nfit_test_exit); 1112 + MODULE_LICENSE("GPL v2"); 1113 + MODULE_AUTHOR("Intel Corporation");
+29
tools/testing/nvdimm/test/nfit_test.h
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __NFIT_TEST_H__
#define __NFIT_TEST_H__

/*
 * One unit-test-owned replacement for a platform memory resource.
 * Instances are kept on a list keyed by physical address so the
 * __wrap_* hooks below can substitute test-allocated buffers for real
 * ioremap() targets.
 */
struct nfit_test_resource {
	struct list_head list;	/* link in the test module's resource list */
	struct resource *res;	/* emulated physical address range */
	struct device *dev;	/* owning test platform device */
	void *buf;		/* test-allocated backing memory for @res */
};

/* Translate a physical address to its test resource, or NULL if unmocked. */
typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);

/*
 * Linker --wrap replacements: drivers built against this test
 * infrastructure have their ioremap_nocache()/iounmap() calls redirected
 * here, which hand back test buffers for mocked ranges.
 */
void __iomem *__wrap_ioremap_nocache(resource_size_t offset,
		unsigned long size);
void __wrap_iounmap(volatile void __iomem *addr);
/* Install/remove the lookup callback consulted by the __wrap_* hooks. */
void nfit_test_setup(nfit_test_lookup_fn lookup);
void nfit_test_teardown(void);
#endif