tools/testing/nvdimm/test/ndtest.c (+352 lines)
···
 	LABEL_SIZE = SZ_128K,
 	NUM_INSTANCES = 2,
 	NUM_DCR = 4,
+	NDTEST_MAX_MAPPING = 6,
 };
 
 #define NDTEST_SCM_DIMM_CMD_MASK \
···
 	},
 };
 
+static struct ndtest_mapping region0_mapping[] = {
+	{
+		.dimm = 0,
+		.position = 0,
+		.start = 0,
+		.size = SZ_16M,
+	},
+	{
+		.dimm = 1,
+		.position = 1,
+		.start = 0,
+		.size = SZ_16M,
+	}
+};
+
+static struct ndtest_mapping region1_mapping[] = {
+	{
+		.dimm = 0,
+		.position = 0,
+		.start = SZ_16M,
+		.size = SZ_16M,
+	},
+	{
+		.dimm = 1,
+		.position = 1,
+		.start = SZ_16M,
+		.size = SZ_16M,
+	},
+	{
+		.dimm = 2,
+		.position = 2,
+		.start = SZ_16M,
+		.size = SZ_16M,
+	},
+	{
+		.dimm = 3,
+		.position = 3,
+		.start = SZ_16M,
+		.size = SZ_16M,
+	},
+};
+
+static struct ndtest_mapping region2_mapping[] = {
+	{
+		.dimm = 0,
+		.position = 0,
+		.start = 0,
+		.size = DIMM_SIZE,
+	},
+};
+
+static struct ndtest_mapping region3_mapping[] = {
+	{
+		.dimm = 1,
+		.start = 0,
+		.size = DIMM_SIZE,
+	}
+};
+
+static struct ndtest_mapping region4_mapping[] = {
+	{
+		.dimm = 2,
+		.start = 0,
+		.size = DIMM_SIZE,
+	}
+};
+
+static struct ndtest_mapping region5_mapping[] = {
+	{
+		.dimm = 3,
+		.start = 0,
+		.size = DIMM_SIZE,
+	}
+};
+
+static struct ndtest_region bus0_regions[] = {
+	{
+		.type = ND_DEVICE_NAMESPACE_PMEM,
+		.num_mappings = ARRAY_SIZE(region0_mapping),
+		.mapping = region0_mapping,
+		.size = DIMM_SIZE,
+		.range_index = 1,
+	},
+	{
+		.type = ND_DEVICE_NAMESPACE_PMEM,
+		.num_mappings = ARRAY_SIZE(region1_mapping),
+		.mapping = region1_mapping,
+		.size = DIMM_SIZE * 2,
+		.range_index = 2,
+	},
+	{
+		.type = ND_DEVICE_NAMESPACE_BLK,
+		.num_mappings = ARRAY_SIZE(region2_mapping),
+		.mapping = region2_mapping,
+		.size = DIMM_SIZE,
+		.range_index = 3,
+	},
+	{
+		.type = ND_DEVICE_NAMESPACE_BLK,
+		.num_mappings = ARRAY_SIZE(region3_mapping),
+		.mapping = region3_mapping,
+		.size = DIMM_SIZE,
+		.range_index = 4,
+	},
+	{
+		.type = ND_DEVICE_NAMESPACE_BLK,
+		.num_mappings = ARRAY_SIZE(region4_mapping),
+		.mapping = region4_mapping,
+		.size = DIMM_SIZE,
+		.range_index = 5,
+	},
+	{
+		.type = ND_DEVICE_NAMESPACE_BLK,
+		.num_mappings = ARRAY_SIZE(region5_mapping),
+		.mapping = region5_mapping,
+		.size = DIMM_SIZE,
+		.range_index = 6,
+	},
+};
+
+static struct ndtest_mapping region6_mapping[] = {
+	{
+		.dimm = 0,
+		.position = 0,
+		.start = 0,
+		.size = DIMM_SIZE,
+	},
+};
+
+static struct ndtest_region bus1_regions[] = {
+	{
+		.type = ND_DEVICE_NAMESPACE_IO,
+		.num_mappings = ARRAY_SIZE(region6_mapping),
+		.mapping = region6_mapping,
+		.size = DIMM_SIZE,
+		.range_index = 1,
+	},
+};
+
 static struct ndtest_config bus_configs[NUM_INSTANCES] = {
 	/* bus 1 */
 	{
 		.dimm_start = 0,
 		.dimm_count = ARRAY_SIZE(dimm_group1),
 		.dimms = dimm_group1,
+		.regions = bus0_regions,
+		.num_regions = ARRAY_SIZE(bus0_regions),
 	},
 	/* bus 2 */
 	{
 		.dimm_start = ARRAY_SIZE(dimm_group1),
 		.dimm_count = ARRAY_SIZE(dimm_group2),
 		.dimms = dimm_group2,
+		.regions = bus1_regions,
+		.num_regions = ARRAY_SIZE(bus1_regions),
 	},
 };
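Note (not part of the patch): bus 0 ends up with two interleaved PMEM regions plus one aliasing BLK region per DIMM, while bus 1 gets a single ND_DEVICE_NAMESPACE_IO region. Because the layout is entirely table-driven, extending the test only means declaring another mapping/region pair. A minimal sketch, with made-up names and values (example_mapping, example_regions, a hypothetical extra test bus), of what such an addition would look like:

static struct ndtest_mapping example_mapping[] = {
	{
		.dimm = 0,
		.position = 0,
		.start = 0,
		.size = SZ_16M,
	},
	{
		.dimm = 1,
		.position = 1,
		.start = 0,
		.size = SZ_16M,
	},
};

static struct ndtest_region example_regions[] = {
	{
		/* two-way interleave, total size = sum of the mapping sizes */
		.type = ND_DEVICE_NAMESPACE_PMEM,
		.num_mappings = ARRAY_SIZE(example_mapping),
		.mapping = example_mapping,
		.size = SZ_16M * 2,
		.range_index = 1,
	},
};

It would then be wired up through a bus config's .regions/.num_regions fields, exactly as bus0_regions and bus1_regions are above.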
···
 	}
 
 	return 0;
+}
+
+static int ndtest_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
+		void *iobuf, u64 len, int rw)
+{
+	struct ndtest_dimm *dimm = ndbr->blk_provider_data;
+	struct ndtest_blk_mmio *mmio = dimm->mmio;
+	struct nd_region *nd_region = &ndbr->nd_region;
+	unsigned int lane;
+
+	if (!mmio)
+		return -ENOMEM;
+
+	lane = nd_region_acquire_lane(nd_region);
+	if (rw)
+		memcpy(mmio->base + dpa, iobuf, len);
+	else {
+		memcpy(iobuf, mmio->base + dpa, len);
+		arch_invalidate_pmem(mmio->base + dpa, len);
+	}
+
+	nd_region_release_lane(nd_region, lane);
+
+	return 0;
+}
+
+static int ndtest_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
+		struct device *dev)
+{
+	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
+	struct nvdimm *nvdimm;
+	struct ndtest_dimm *dimm;
+	struct ndtest_blk_mmio *mmio;
+
+	nvdimm = nd_blk_region_to_dimm(ndbr);
+	dimm = nvdimm_provider_data(nvdimm);
+
+	nd_blk_region_set_provider_data(ndbr, dimm);
+	dimm->blk_region = to_nd_region(dev);
+
+	mmio = devm_kzalloc(dev, sizeof(struct ndtest_blk_mmio), GFP_KERNEL);
+	if (!mmio)
+		return -ENOMEM;
+
+	mmio->base = (void __iomem *) devm_nvdimm_memremap(
+		dev, dimm->address, 12, nd_blk_memremap_flags(ndbr));
+	if (!mmio->base) {
+		dev_err(dev, "%s failed to map blk dimm\n", nvdimm_name(nvdimm));
+		return -ENOMEM;
+	}
+	mmio->size = dimm->size;
+	mmio->base_offset = 0;
+
+	dimm->mmio = mmio;
+
+	return 0;
+}
+
+static struct nfit_test_resource *ndtest_resource_lookup(resource_size_t addr)
+{
+	int i;
+
+	for (i = 0; i < NUM_INSTANCES; i++) {
+		struct nfit_test_resource *n, *nfit_res = NULL;
+		struct ndtest_priv *t = instances[i];
+
+		if (!t)
+			continue;
+		spin_lock(&ndtest_lock);
+		list_for_each_entry(n, &t->resources, list) {
+			if (addr >= n->res.start && (addr < n->res.start
+					+ resource_size(&n->res))) {
+				nfit_res = n;
+				break;
+			} else if (addr >= (unsigned long) n->buf
+					&& (addr < (unsigned long) n->buf
+					+ resource_size(&n->res))) {
+				nfit_res = n;
+				break;
+			}
+		}
+		spin_unlock(&ndtest_lock);
+		if (nfit_res)
+			return nfit_res;
+	}
+
+	pr_warn("Failed to get resource\n");
+
+	return NULL;
 }
 
 static void ndtest_release_resource(void *data)
···
 	kfree(res);
 
 	return NULL;
+}
+
+static ssize_t range_index_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nd_region *nd_region = to_nd_region(dev);
+	struct ndtest_region *region = nd_region_provider_data(nd_region);
+
+	return sprintf(buf, "%d\n", region->range_index);
+}
+static DEVICE_ATTR_RO(range_index);
+
+static struct attribute *ndtest_region_attributes[] = {
+	&dev_attr_range_index.attr,
+	NULL,
+};
+
+static const struct attribute_group ndtest_region_attribute_group = {
+	.name = "papr",
+	.attrs = ndtest_region_attributes,
+};
+
+static const struct attribute_group *ndtest_region_attribute_groups[] = {
+	&ndtest_region_attribute_group,
+	NULL,
+};
+
+static int ndtest_create_region(struct ndtest_priv *p,
+				struct ndtest_region *region)
+{
+	struct nd_mapping_desc mappings[NDTEST_MAX_MAPPING];
+	struct nd_blk_region_desc ndbr_desc;
+	struct nd_interleave_set *nd_set;
+	struct nd_region_desc *ndr_desc;
+	struct resource res;
+	int i, ndimm = region->mapping[0].dimm;
+	u64 uuid[2];
+
+	memset(&res, 0, sizeof(res));
+	memset(&mappings, 0, sizeof(mappings));
+	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
+	ndr_desc = &ndbr_desc.ndr_desc;
+
+	if (!ndtest_alloc_resource(p, region->size, &res.start))
+		return -ENOMEM;
+
+	res.end = res.start + region->size - 1;
+	ndr_desc->mapping = mappings;
+	ndr_desc->res = &res;
+	ndr_desc->provider_data = region;
+	ndr_desc->attr_groups = ndtest_region_attribute_groups;
+
+	if (uuid_parse(p->config->dimms[ndimm].uuid_str, (uuid_t *)uuid)) {
+		pr_err("failed to parse UUID\n");
+		return -ENXIO;
+	}
+
+	nd_set = devm_kzalloc(&p->pdev.dev, sizeof(*nd_set), GFP_KERNEL);
+	if (!nd_set)
+		return -ENOMEM;
+
+	nd_set->cookie1 = cpu_to_le64(uuid[0]);
+	nd_set->cookie2 = cpu_to_le64(uuid[1]);
+	nd_set->altcookie = nd_set->cookie1;
+	ndr_desc->nd_set = nd_set;
+
+	if (region->type == ND_DEVICE_NAMESPACE_BLK) {
+		mappings[0].start = 0;
+		mappings[0].size = DIMM_SIZE;
+		mappings[0].nvdimm = p->config->dimms[ndimm].nvdimm;
+
+		ndr_desc->mapping = &mappings[0];
+		ndr_desc->num_mappings = 1;
+		ndr_desc->num_lanes = 1;
+		ndbr_desc.enable = ndtest_blk_region_enable;
+		ndbr_desc.do_io = ndtest_blk_do_io;
+		region->region = nvdimm_blk_region_create(p->bus, ndr_desc);
+
+		goto done;
+	}
+
+	for (i = 0; i < region->num_mappings; i++) {
+		ndimm = region->mapping[i].dimm;
+		mappings[i].start = region->mapping[i].start;
+		mappings[i].size = region->mapping[i].size;
+		mappings[i].position = region->mapping[i].position;
+		mappings[i].nvdimm = p->config->dimms[ndimm].nvdimm;
+	}
+
+	ndr_desc->num_mappings = region->num_mappings;
+	region->region = nvdimm_pmem_region_create(p->bus, ndr_desc);
+
+done:
+	if (!region->region) {
+		dev_err(&p->pdev.dev, "Error registering region %pR\n",
+				ndr_desc->res);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static int ndtest_init_regions(struct ndtest_priv *p)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < p->config->num_regions; i++) {
+		ret = ndtest_create_region(p, &p->config->regions[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static void put_dimms(void *data)
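Taken together with the probe hook added below, the call flow this patch introduces is, roughly (sketch only, error paths elided):

/*
 * ndtest_probe()
 *   -> ndtest_init_regions(p)
 *        for each p->config->regions[i]:
 *          -> ndtest_create_region(p, &p->config->regions[i])
 *               ND_DEVICE_NAMESPACE_BLK   -> nvdimm_blk_region_create()
 *               otherwise (PMEM, IO)      -> nvdimm_pmem_region_create()
 */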
···
 	if (rc)
 		goto err;
 
+	rc = ndtest_init_regions(p);
+	if (rc)
+		goto err;
+
 	rc = devm_add_action_or_reset(&pdev->dev, put_dimms, p);
 	if (rc)
 		goto err;
···
 #ifdef CONFIG_DEV_DAX_PMEM_COMPAT
 	dax_pmem_compat_test();
 #endif
+
+	nfit_test_setup(ndtest_resource_lookup, NULL);
 
 	ndtest_dimm_class = class_create(THIS_MODULE, "nfit_test_dimm");
 	if (IS_ERR(ndtest_dimm_class)) {
tools/testing/nvdimm/test/ndtest.h (+26 lines)
···
 	dma_addr_t *dimm_dma;
 };
 
+struct ndtest_blk_mmio {
+	void __iomem *base;
+	u64 size;
+	u64 base_offset;
+	u32 line_size;
+	u32 num_lines;
+	u32 table_size;
+};
+
 struct ndtest_dimm {
 	struct device *dev;
 	struct nvdimm *nvdimm;
···
 	u8 no_alias;
 };
 
+struct ndtest_mapping {
+	u64 start;
+	u64 size;
+	u8 position;
+	u8 dimm;
+};
+
+struct ndtest_region {
+	struct nd_region *region;
+	struct ndtest_mapping *mapping;
+	u64 size;
+	u8 type;
+	u8 num_mappings;
+	u8 range_index;
+};
+
 struct ndtest_config {
 	struct ndtest_dimm *dimms;
+	struct ndtest_region *regions;
 	unsigned int dimm_count;
 	unsigned int dimm_start;
 	u8 num_regions;
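One caveat worth noting: ndtest_create_region() copies region->mapping[] into a fixed nd_mapping_desc mappings[NDTEST_MAX_MAPPING] array without checking num_mappings against that cap, so any region table added later must keep num_mappings <= NDTEST_MAX_MAPPING (6). A defensive check of the following shape (not in the patch, purely a suggestion) could sit at the top of ndtest_create_region():

	/* hypothetical guard, not part of this patch */
	if (region->num_mappings > NDTEST_MAX_MAPPING) {
		dev_err(&p->pdev.dev, "region %d has too many mappings\n",
			region->range_index);
		return -EINVAL;
	}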