Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nspm->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nspm->id);
	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_io(const struct device *dev);

static int is_uuid_busy(struct device *dev, void *data)
{
	uuid_t *uuid1 = data, *uuid2 = NULL;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid2 = nspm->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		uuid2 = nd_pfn->uuid;
	}

	if (uuid2 && uuid_equal(uuid1, uuid2))
		return -EBUSY;

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_region(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, uuid_t *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}
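
/*
 * Illustrative sketch (not part of this file): a caller validating a
 * candidate uuid follows the same pattern uuid_store() uses below --
 * take the bus lock, quiesce probing, then scan the hierarchy.  The
 * 'candidate' variable is hypothetical.
 *
 *	nvdimm_bus_lock(dev);
 *	wait_nvdimm_bus_probe_idle(dev);
 *	if (!nd_is_uuid_unique(dev, candidate))
 *		rc = -EINVAL;
 *	nvdimm_bus_unlock(dev);
 */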

bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_namespace_io *nsio;

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

	if (ndns->force_raw)
		return false;

	nsio = to_nd_namespace_io(dev);
	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
				IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED)
		return false;

	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);

unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (nspm->lbasize == 0 || nspm->lbasize == 512)
			/* default */;
		else if (nspm->lbasize == 4096)
			return 4096;
		else
			dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
					nspm->lbasize);
	}

	/*
	 * There is no namespace label (is_namespace_io()), or the label
	 * indicates the default sector size.
	 */
	return 512;
}
EXPORT_SYMBOL(pmem_sector_size);
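
/*
 * Illustrative mapping of label lbasize to the reported sector size
 * (per the logic above): 0 or 512 -> 512, 4096 -> 4096, anything else
 * warns and falls back to 512.  So for a hypothetical nspm:
 *
 *	nspm->lbasize = 0;	pmem_sector_size(ndns) == 512
 *	nspm->lbasize = 4096;	pmem_sector_size(ndns) == 4096
 *	nspm->lbasize = 520;	dev_WARN(), then 512
 */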

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		int nsidx = 0;

		if (is_namespace_pmem(&ndns->dev)) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(&ndns->dev);
			nsidx = nspm->id;
		}

		if (nsidx)
			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
					suffix ? suffix : "");
		else
			sprintf(name, "pmem%d%s", nd_region->id,
					suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const uuid_t *nd_dev_to_uuid(struct device *dev)
{
	if (!dev)
		return &uuid_null;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else
		return &uuid_null;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kstrndup(buf, len, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}

static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		rc = adjust_resource(res, res->start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}
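
/*
 * Worked example with hypothetical numbers: if a dimm holds two
 * extents for @label_id, 40M then 24M (resources ordered by start),
 * shrinking by n = 32M first deletes the trailing 24M extent
 * (n becomes 8M) and then adjust_resource() trims the 40M extent down
 * to 32M, leaving 32M allocated on that dimm.
 */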

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	int rc = 0;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}

/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one exists).  If
 * reserving PMEM any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	unsigned long align;

	align = nd_region->align / nd_region->ndr_mappings;
	valid->start = ALIGN(valid->start, align);
	valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;

	if (valid->start >= valid->end)
		goto invalid;

	if (is_reserve)
		return;

	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;

	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;

	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;

 invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}
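
/*
 * Alignment example with hypothetical numbers: in a 2-way interleaved
 * region with nd_region->align == 32M the per-dimm align is 16M.  A
 * free range of [16M + 4K, 48M - 1] is clamped to
 * [ALIGN(start, 16M), ALIGN_DOWN(end + 1, 16M) - 1] == [32M, 48M - 1],
 * i.e. 16M usable.  If the clamp inverts the range, or a non-reserve
 * request cannot be satisfied contiguously, @valid is truncated to
 * zero size.
 */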

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;

	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;

	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	if (n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}
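
/*
 * Note that "pmem-" prefixed label ids return early above, so merging
 * is skipped for pmem extents (scan_allocate() already keeps a pmem
 * allocation contiguous).  For any other id the walk works like this
 * hypothetical case: extents at [0, 16M - 1] and [16M, 32M - 1] share
 * a boundary, so the second is freed, the first is grown to
 * [0, 32M - 1], and the scan restarts in case the merged extent now
 * abuts a third.
 */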

int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc;

		rem = scan_allocate(nd_region, nd_mapping, label_id, rem);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}
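
/*
 * Example with hypothetical numbers: grow_dpa_allocation(nd_region,
 * &label_id, SZ_16M) asks scan_allocate() for 16M of dpa on each
 * mapping in turn (the caller has already divided the user-visible
 * size by ndr_mappings), fails with -ENXIO on any underrun, and then
 * lets merge_dpa() coalesce newly adjacent extents.
 */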

static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		WARN_ON_ONCE(1);
		size = 0;
	}

 out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}

static bool uuid_not_set(const uuid_t *uuid, struct device *dev,
		const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	uuid_t *uuid = NULL;

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		id = nspm->id;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "not associated with dimm(s)\n");
		return -ENXIO;
	}

	div_u64_rem(val, nd_region->align, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %ldK aligned\n", val,
				nd_region->align / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);
		/*
		 * All dimms in an interleave set need to be enabled
		 * for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_allocatable_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	}

	/*
	 * Try to delete the namespace if we deleted all of its
	 * allocation, this is not the seed or 0th device for the
	 * region, and it is not actively claimed by a btt, pfn, or dax
	 * instance.
	 */
	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);

	return rc;
}
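
/*
 * Example with hypothetical numbers: writing 64M to a namespace in a
 * 2-way interleaved region that already has 32M allocated becomes a
 * per-dimm comparison of val = 32M vs allocated = 16M, i.e. a 16M
 * grow_dpa_allocation() on each dimm, and the published resource then
 * spans val * ndr_mappings = 64M of system-physical address space.
 */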

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	uuid_t **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
	int i;
	bool locked = false;
	struct device *dev = &ndns->dev;
	struct nd_region *nd_region = to_nd_region(dev->parent);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
			dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
			locked = true;
		}
	}
	return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);
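
/*
 * The attributes in this file form the namespace provisioning ABI
 * under /sys/bus/nd/devices/namespaceX.Y/.  A minimal userspace sketch
 * (hypothetical device name, error handling elided) that resizes a
 * namespace; writing 0 deletes the allocation as described above:
 *
 *	int fd = open("/sys/bus/nd/devices/namespace0.0/size", O_WRONLY);
 *	dprintf(fd, "%llu", 64ULL << 20);
 *	close(fd);
 */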

static uuid_t *namespace_to_uuid(struct device *dev)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	}
	return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	uuid_t *uuid = namespace_to_uuid(dev);

	if (IS_ERR(uuid))
		return PTR_ERR(uuid);
	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, uuid_t *new_uuid,
		uuid_t **old_uuid)
{
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace.  Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, 0);
	nd_label_gen_id(&new_label_id, new_uuid, 0);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);

		mutex_lock(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			struct nd_label_id label_id;
			uuid_t uuid;

			if (!nd_label)
				continue;
			nsl_get_uuid(ndd, nd_label, &uuid);
			nd_label_gen_id(&label_id, &uuid,
					nsl_get_flags(ndd, nd_label));
			if (strcmp(old_label_id.id, label_id.id) == 0)
				set_bit(ND_LABEL_REAP, &label_ent->flags);
		}
		mutex_unlock(&nd_mapping->lock);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}
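
/*
 * Example (hypothetical uuids): renaming a not-yet-written namespace
 * from uuid A to uuid B rewrites every dpa resource named "pmem-<A>"
 * to "pmem-<B>" and tags any stale on-media labels still carrying A
 * with ND_LABEL_REAP so the next label update deletes them.
 */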

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	uuid_t *uuid = NULL;
	uuid_t **ns_uuid;
	ssize_t rc = 0;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nd_size_select_show(nspm->lbasize,
				pmem_lbasize_supported, buf);
	}
	return -ENXIO;
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const unsigned long *supported;
	unsigned long *lbasize;
	ssize_t rc = 0;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		lbasize = &nspm->lbasize;
		supported = pmem_lbasize_supported;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_size_select_store(dev, buf, lbasize, supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
			buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	uuid_t *uuid = NULL;
	int count = 0, i;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;

		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}

		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
	 * block is found, a v1.1 label for any mapping will set bit 1, and a
	 * v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, nsindex of NULL
	 * implies labels haven't been initialized yet, and when they will,
	 * they will be of the 1.2 format, so we can assume BTT2.0
	 *
	 * If 1.1 labels are found, we enforce BTT1.1, and if 1.2 labels are
	 * found, we enforce BTT2.0
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces)
	 */
	switch (loop_bitmask) {
	case 0:
	case 2:
		return NVDIMM_CCLASS_BTT;
	case 1:
	case 4:
		return NVDIMM_CCLASS_BTT2;
	default:
		return -ENXIO;
	}
}
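
/*
 * loop_bitmask outcomes, restating the rules above:
 *
 *	0: loop not entered, or a dimm without label support -> BTT1.1
 *	1: index blocks absent everywhere (uninitialized)    -> BTT2.0
 *	2: v1.1 labels on every mapping                      -> BTT1.1
 *	4: v1.2 labels on every mapping                      -> BTT2.0
 *	any other combination: mappings disagree             -> -ENXIO
 */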

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static int __holder_class_store(struct device *dev, const char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (sysfs_streq(buf, "btt")) {
		int rc = btt_claim_class(dev);

		if (rc < NVDIMM_CCLASS_NONE)
			return rc;
		ndns->claim_class = rc;
	} else if (sysfs_streq(buf, "pfn"))
		ndns->claim_class = NVDIMM_CCLASS_PFN;
	else if (sysfs_streq(buf, "dax"))
		ndns->claim_class = NVDIMM_CCLASS_DAX;
	else if (sysfs_streq(buf, ""))
		ndns->claim_class = NVDIMM_CCLASS_NONE;
	else
		return -EINVAL;

	return 0;
}

static ssize_t holder_class_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __holder_class_store(dev, buf);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t holder_class_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	if (ndns->claim_class == NVDIMM_CCLASS_NONE)
		rc = sprintf(buf, "\n");
	else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
			(ndns->claim_class == NVDIMM_CCLASS_BTT2))
		rc = sprintf(buf, "btt\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
		rc = sprintf(buf, "pfn\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
		rc = sprintf(buf, "dax\n");
	else
		rc = sprintf(buf, "<unknown>\n");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(holder_class);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;

	device_lock(dev);
	claim = ndns->claim;
	if (claim && is_nd_btt(claim))
		mode = "safe";
	else if (claim && is_nd_pfn(claim))
		mode = "memory";
	else if (claim && is_nd_dax(claim))
		mode = "dax";
	else if (!claim && pmem_should_map_pages(dev))
		mode = "memory";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	&dev_attr_holder_class.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (is_namespace_pmem(dev)) {
		if (a == &dev_attr_size.attr)
			return 0644;

		return a->mode;
	}

	/* base is_namespace_io() attributes */
	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr ||
	    a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr ||
	    a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr ||
	    a == &dev_attr_resource.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
	.groups = nd_namespace_attribute_groups,
};

static const struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
	.groups = nd_namespace_attribute_groups,
};

static bool is_namespace_pmem(const struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_io(const struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}

struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
					dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	if (nvdimm_namespace_locked(ndns))
		return ERR_PTR(-EACCES);

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small, must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Note, alignment validation for fsdax and devdax mode
	 * namespaces happens in nd_pfn_validate() where infoblock
	 * padding parameters can be applied.
	 */
	if (pmem_should_map_pages(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
		struct resource *res = &nsio->res;

		if (!IS_ALIGNED(res->start | (res->end + 1),
				memremap_compat_align())) {
			dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
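
/*
 * Typical use (sketch): a claiming personality resolves its backing
 * namespace at probe time and propagates the error pointer, e.g.:
 *
 *	ndns = nvdimm_namespace_common_probe(dev);
 *	if (IS_ERR(ndns))
 *		return PTR_ERR(ndns);
 */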

int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
		resource_size_t size)
{
	return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
}
EXPORT_SYMBOL_GPL(devm_namespace_enable);

void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
{
	devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
}
EXPORT_SYMBOL_GPL(devm_namespace_disable);

static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;

	devs[0] = dev;
	return devs;
}

static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_interleave_set *nd_set = nd_region->nd_set;
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		bool found_uuid = false;

		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			u16 position;

			if (!nd_label)
				continue;
			position = nsl_get_position(ndd, nd_label);

			if (!nsl_validate_isetcookie(ndd, nd_label, cookie))
				continue;

			if (!nsl_uuid_equal(ndd, nd_label, uuid))
				continue;

			if (!nsl_validate_type_guid(ndd, nd_label,
						&nd_set->type_guid))
				continue;

			if (found_uuid) {
				dev_dbg(ndd->dev, "duplicate entry for uuid\n");
				return false;
			}
			found_uuid = true;
			if (!nsl_validate_nlabel(nd_region, ndd, nd_label))
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found)
			break;
	}
	return found != NULL;
}

static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
{
	int i;

	if (!pmem_id)
		return -ENODEV;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_label *nd_label = NULL;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		struct nd_label_ent *label_ent;

		lockdep_assert_held(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			nd_label = label_ent->label;
			if (!nd_label)
				continue;
			if (nsl_uuid_equal(ndd, nd_label, pmem_id))
				break;
			nd_label = NULL;
		}

		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}

		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = nsl_get_dpa(ndd, nd_label);
		pmem_end = pmem_start + nsl_get_rawsize(ndd, nd_label);
		if (pmem_start >= hw_start && pmem_start < hw_end
				&& pmem_end <= hw_end && pmem_end > hw_start)
			/* pass */;
		else {
			dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
					dev_name(ndd->dev),
					nsl_uuid_raw(ndd, nd_label));
			return -EINVAL;
		}

		/* move recently validated label to the front of the list */
		list_move(&label_ent->list, &nd_mapping->labels);
	}
	return 0;
}
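
/*
 * Containment example with hypothetical numbers: a mapping publishing
 * dpa [256M, 512M - 1] accepts a label with dpa = 320M and
 * rawsize = 64M (it ends at 384M, inside the mapping), but rejects one
 * starting below 256M or running past 512M.
 */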

/**
 * create_namespace_pmem - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 * @nd_mapping: source of the label data to evaluate
 * @nd_label: target pmem namespace label to evaluate
 */
static struct device *create_namespace_pmem(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping,
		struct nd_namespace_label *nd_label)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_index *nsindex =
		to_namespace_index(ndd, ndd->ns_current);
	u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
	struct nd_label_ent *label_ent;
	struct nd_namespace_pmem *nspm;
	resource_size_t size = 0;
	struct resource *res;
	struct device *dev;
	uuid_t uuid;
	int rc = 0;
	u16 i;

	if (cookie == 0) {
		dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
		return ERR_PTR(-ENXIO);
	}

	if (!nsl_validate_isetcookie(ndd, nd_label, cookie)) {
		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
				nsl_uuid_raw(ndd, nd_label));
		if (!nsl_validate_isetcookie(ndd, nd_label, altcookie))
			return ERR_PTR(-EAGAIN);

		dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
				nsl_uuid_raw(ndd, nd_label));
	}

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return ERR_PTR(-ENOMEM);

	nspm->id = -1;
	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nsl_get_uuid(ndd, nd_label, &uuid);
		if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
			continue;
		if (has_uuid_at_pos(nd_region, &uuid, altcookie, i))
			continue;
		break;
	}

	if (i < nd_region->ndr_mappings) {
		struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;

		/*
		 * Give up if we don't find an instance of a uuid at each
		 * position (from 0 to nd_region->ndr_mappings - 1), or if we
		 * find a dimm with two instances of the same uuid.
		 */
		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
				nvdimm_name(nvdimm), nsl_uuid_raw(ndd, nd_label));
		rc = -EINVAL;
		goto err;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1].  In the process,
	 * check that the namespace aligns with interleave-set.
	 */
	nsl_get_uuid(ndd, nd_label, &uuid);
	rc = select_pmem_id(nd_region, &uuid);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_namespace_label *label0;
		struct nvdimm_drvdata *ndd;

		nd_mapping = &nd_region->mapping[i];
		label_ent = list_first_entry_or_null(&nd_mapping->labels,
				typeof(*label_ent), list);
		label0 = label_ent ? label_ent->label : NULL;

		if (!label0) {
			WARN_ON(1);
			continue;
		}

		ndd = to_ndd(nd_mapping);
		size += nsl_get_rawsize(ndd, label0);
		if (nsl_get_position(ndd, label0) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup(nsl_ref_name(ndd, label0),
				NSLABEL_NAME_LEN, GFP_KERNEL);
		nsl_get_uuid(ndd, label0, &uuid);
		nspm->uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
		nspm->lbasize = nsl_get_lbasize(ndd, label0);
		nspm->nsio.common.claim_class =
			nsl_get_claim_class(ndd, label0);
	}

	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}

	nd_namespace_pmem_set_resource(nd_region, nspm, size);

	return dev;
 err:
	namespace_pmem_release(dev);
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "invalid label(s)\n");
		break;
	case -ENODEV:
		dev_dbg(&nd_region->dev, "label not found\n");
		break;
	default:
		dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
		break;
	}
	return ERR_PTR(rc);
}

static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
{
	struct nd_namespace_pmem *nspm;
	struct resource *res;
	struct device *dev;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return NULL;

	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nspm->id < 0) {
		kfree(nspm);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
	nd_namespace_pmem_set_resource(nd_region, nspm, 0);

	return dev;
}

static struct lock_class_key nvdimm_namespace_key;

void nd_region_create_ns_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
		return;

	nd_region->ns_seed = nd_namespace_pmem_create(nd_region);

	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create namespace\n");
	else {
		device_initialize(nd_region->ns_seed);
		lockdep_set_class(&nd_region->ns_seed->mutex,
				&nvdimm_namespace_key);
		nd_device_register(nd_region->ns_seed);
	}
}

void nd_region_create_dax_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->dax_seed = nd_dax_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->dax_seed)
		dev_err(&nd_region->dev, "failed to create dax namespace\n");
}

void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->pfn_seed)
		dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->btt_seed = nd_btt_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->btt_seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
}

static int add_namespace_resource(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, struct device **devs,
		int count)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int i;

	for (i = 0; i < count; i++) {
		uuid_t *uuid = namespace_to_uuid(devs[i]);

		if (IS_ERR(uuid)) {
			WARN_ON(1);
			continue;
		}

		if (!nsl_uuid_equal(ndd, nd_label, uuid))
			continue;
		dev_err(&nd_region->dev,
				"error: conflicting extents for uuid: %pUb\n", uuid);
		return -ENXIO;
	}

	return i;
}

static int cmp_dpa(const void *a, const void *b)
{
	const struct device *dev_a = *(const struct device **) a;
	const struct device *dev_b = *(const struct device **) b;
	struct nd_namespace_pmem *nspm_a, *nspm_b;

	if (is_namespace_io(dev_a))
		return 0;

	nspm_a = to_nd_namespace_pmem(dev_a);
	nspm_b = to_nd_namespace_pmem(dev_b);

	return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
			sizeof(resource_size_t));
}

static struct device **scan_labels(struct nd_region *nd_region)
{
	int i, count = 0;
	struct device *dev, **devs = NULL;
	struct nd_label_ent *label_ent, *e;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;

	/* "safe" because create_namespace_pmem() might list_move() label_ent */
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;
		struct device **__devs;

		if (!nd_label)
			continue;

		/* skip labels that describe extents outside of the region */
		if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
		    nsl_get_dpa(ndd, nd_label) > map_end)
			continue;

		i = add_namespace_resource(nd_region, nd_label, devs, count);
		if (i < 0)
			goto err;
		if (i < count)
			continue;
		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
		if (IS_ERR(dev)) {
			switch (PTR_ERR(dev)) {
			case -EAGAIN:
				/* skip invalid labels */
				continue;
			case -ENODEV:
				/* fallthrough to seed creation */
				break;
			default:
				goto err;
			}
		} else
			devs[count++] = dev;

	}

	dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count,
			count == 1 ? "" : "s");

	if (count == 0) {
		struct nd_namespace_pmem *nspm;

		/* Publish a zero-sized namespace for userspace to configure. */
		nd_mapping_free_labels(nd_mapping);

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;

		nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
		if (!nspm)
			goto err;
		dev = &nspm->nsio.common.dev;
		dev->type = &namespace_pmem_device_type;
		nd_namespace_pmem_set_resource(nd_region, nspm, 0);
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	} else if (is_memory(&nd_region->dev)) {
		/* clean unselected labels */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct list_head *l, *e;
			LIST_HEAD(list);
			int j;

			nd_mapping = &nd_region->mapping[i];
			if (list_empty(&nd_mapping->labels)) {
				WARN_ON(1);
				continue;
			}

			j = count;
			list_for_each_safe(l, e, &nd_mapping->labels) {
				if (!j--)
					break;
				list_move_tail(l, &list);
			}
			nd_mapping_free_labels(nd_mapping);
			list_splice_init(&list, &nd_mapping->labels);
		}
	}

	if (count > 1)
		sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);

	return devs;

 err:
	if (devs) {
		for (i = 0; devs[i]; i++)
			namespace_pmem_release(devs[i]);
		kfree(devs);
	}
	return NULL;
}

static struct device **create_namespaces(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping;
	struct device **devs;
	int i;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	/* lock down all mappings while we scan labels */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		mutex_lock_nested(&nd_mapping->lock, i);
	}

	devs = scan_labels(nd_region);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		int reverse = nd_region->ndr_mappings - 1 - i;

		nd_mapping = &nd_region->mapping[reverse];
		mutex_unlock(&nd_mapping->lock);
	}

	return devs;
}

static void deactivate_labels(void *region)
{
	struct nd_region *nd_region = region;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = nd_mapping->ndd;
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		mutex_lock(&nd_mapping->lock);
		nd_mapping_free_labels(nd_mapping);
		mutex_unlock(&nd_mapping->lock);

		put_ndd(ndd);
		nd_mapping->ndd = NULL;
		if (ndd)
			atomic_dec(&nvdimm->busy);
	}
}

static int init_active_labels(struct nd_region *nd_region)
{
	int i, rc = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nd_label_ent *label_ent;
		int count, j;

		/*
		 * If the dimm is disabled then we may need to prevent
		 * the region from being activated.
		 */
		if (!ndd) {
			if (test_bit(NDD_LOCKED, &nvdimm->flags))
				/* fail, label data may be unreadable */;
			else if (test_bit(NDD_LABELING, &nvdimm->flags))
				/* fail, labels needed to disambiguate dpa */;
			else
				continue;

			dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev),
					test_bit(NDD_LOCKED, &nvdimm->flags)
					? "locked" : "disabled");
			rc = -ENXIO;
			goto out;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "count: %d\n", count);
		if (!count)
			continue;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
			if (!label_ent)
				break;
			label = nd_label_active(ndd, j);
			label_ent->label = label;

			mutex_lock(&nd_mapping->lock);
			list_add_tail(&label_ent->list, &nd_mapping->labels);
			mutex_unlock(&nd_mapping->lock);
		}

		if (j < count)
			break;
	}

	if (i < nd_region->ndr_mappings)
		rc = -ENOMEM;

out:
	if (rc) {
		deactivate_labels(nd_region);
		return rc;
	}

	return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
			nd_region);
}

int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
		devs = create_namespaces(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_PMEM) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nspm->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		device_initialize(dev);
		lockdep_set_class(&dev->mutex, &nvdimm_namespace_key);
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (*err == 0)
			rc = -ENODEV;
	}
	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}