/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pmem.h>
static void namespace_io_release(struct device *dev)
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

static void namespace_pmem_release(struct device *dev)
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	kfree(nspm->alt_name);

static void namespace_blk_release(struct device *dev)
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
static struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
};

static struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
};

static struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
};
static bool is_namespace_pmem(struct device *dev)
	return dev ? dev->type == &namespace_pmem_device_type : false;

static bool is_namespace_blk(struct device *dev)
	return dev ? dev->type == &namespace_blk_device_type : false;

static bool is_namespace_io(struct device *dev)
	return dev ? dev->type == &namespace_io_device_type : false;
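
/*
 * pmem is mapped with struct page (ZONE_DEVICE) backing only when all of
 * the checks below pass: CONFIG_ZONE_DEVICE is enabled, the region
 * advertises ND_REGION_PAGEMAP, the namespace is not claimed by a pfn or
 * btt personality, and the architecture maps pmem write-back
 * (ARCH_MEMREMAP_PMEM == MEMREMAP_WB).
 */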
bool pmem_should_map_pages(struct device *dev)
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))

	if (is_nd_pfn(dev) || is_nd_btt(dev))

#ifdef ARCH_MEMREMAP_PMEM
	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
EXPORT_SYMBOL(pmem_should_map_pages);
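
/*
 * Compose the block-device name for a namespace from its parent region
 * and namespace ids, e.g. "pmem0" for a pmem/io namespace in region 0 or
 * "ndblk0.1" for blk namespace 1 in region 0; a btt or pfn claim on the
 * namespace selects an additional suffix.
 */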
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (is_nd_btt(ndns->claim))
	else if (is_nd_pfn(ndns->claim))
		dev_WARN_ONCE(&ndns->dev, 1,
				"unknown claim type by %s\n",
				dev_name(ndns->claim));

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		if (!suffix && pmem_should_map_pages(&ndns->dev))
		sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
const u8 *nd_dev_to_uuid(struct device *dev)
	static const u8 null_uuid[16];

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
EXPORT_SYMBOL(nd_dev_to_uuid);
static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
static DEVICE_ATTR_RO(nstype);
static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
	char *input, *pos, *alt_name, **ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;

	if (dev->driver || to_ndns(dev)->claim)

	input = kmemdup(buf, len + 1, GFP_KERNEL);

	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);

	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);
EXPORT_SYMBOL(nd_namespace_blk_validate);
static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
	struct nd_region *nd_region = to_nd_region(dev->parent);

	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);

	return rc < 0 ? rc : len;
static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
static DEVICE_ATTR_RW(alt_name);
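
/*
 * DPA allocations are tracked as dimm resources whose names are the
 * label-id strings produced by nd_label_gen_id(), i.e. a "pmem-" or
 * "blk-" prefix followed by the namespace uuid, which is why the
 * allocation helpers below match resources by strcmp()/strncmp() against
 * label_id->id.
 */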
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		struct resource *res, *last;
		resource_size_t new_start;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
			new_start = res->start + n;
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = scan_free(nd_region, nd_mapping, label_id, n);
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;

	/* allocate blk from highest dpa first */
		first_dpa = nd_mapping->start + nd_mapping->size - n;
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
static bool space_valid(bool is_pmem, bool is_reserve,
		struct nd_label_id *label_id, struct resource *res)
	/*
	 * For BLK-space any space is valid, for PMEM-space, it must be
	 * contiguous with an existing allocation unless we are
	 * reserving pmem.
	 */
	if (is_reserve || !is_pmem)
	if (!res || strcmp(res->name, label_id->id) == 0)

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};
static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	const resource_size_t to_allocate = n;
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		resource_size_t allocate, available = 0, free_start, free_end;
		struct resource *next = res->sibling, *new_res = NULL;
		enum alloc_loc loc = ALLOC_ERR;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
		if (res->end < nd_mapping->start)

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			free_start = nd_mapping->start;
			available = res->start - free_start;
			if (space_valid(is_pmem, is_reserve, label_id, NULL))

		/* space between allocations */
			free_start = res->start + resource_size(res);
			free_end = min(mapping_end, next->start - 1);
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;

		/* space at the end of the mapping */
			free_start = res->start + resource_size(res);
			free_end = mapping_end;
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;

		if (!loc || !available)
		allocate = min(available, n);

			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				if (is_pmem && !is_reserve)
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";

			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				if (is_pmem && !is_reserve)
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";

			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
				free_start += available - allocate;
			else if (!is_reserve && free_start != nd_mapping->start)

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					free_start, allocate);
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
				res->flags |= DPA_RESOURCE_ADJUSTED;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass".  Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)

	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)

		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		res->flags |= DPA_RESOURCE_ADJUSTED;
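
/*
 * "pmem-reserve" is a temporary label-id: while sizing a BLK namespace
 * all currently-free PMEM capacity in the region is claimed under this
 * name so that the BLK allocation pass cannot encroach on it, and
 * release_free_pmem() drops the reservation afterwards (see
 * grow_dpa_allocation()).
 */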
static int __reserve_free_pmem(struct device *dev, void *data)
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;

	if (!is_nd_pmem(dev))

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);

		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
		release_free_pmem(nvdimm_bus, nd_mapping);
/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);

			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);

		rc = merge_dpa(nd_region, nd_mapping, label_id);
static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
	struct resource *res = &nspm->nsio.res;

	res->start = nd_region->ndr_start;
	res->end = nd_region->ndr_start + size - 1;

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
		dev_dbg(dev, "%s: uuid not set\n", where);
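
/*
 * A namespace size written via sysfs is split evenly across the dimms in
 * the region, so it must be a multiple of SZ_4K * ndr_mappings; e.g. in a
 * 2-way interleaved pmem region a 16M request grows (or shrinks) the
 * per-dimm allocation by 8M on each dimm.
 */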
static ssize_t __size_store(struct device *dev, unsigned long long val)
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;

	if (dev->driver || to_ndns(dev)->claim)

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		flags = NSLABEL_FLAG_LOCAL;

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);

	div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
		dev_dbg(dev, "%llu is not %dK aligned\n", val,
				(SZ_4K * nd_region->ndr_mappings) / SZ_1K);

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	available = nd_region_available_dpa(nd_region);

	if (val > available + allocated)

	if (val == allocated)

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_size(nd_region, nspm,
				val * nd_region->ndr_mappings);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		/*
		 * Try to delete the namespace if we deleted all of its
		 * allocation, this is not the seed device for the
		 * region, and it is not actively claimed by a btt
		 * instance.
		 */
		if (val == 0 && nd_region->ns_seed != dev
				&& !nsblk->common.claim)
			nd_device_unregister(dev, ND_ASYNC);
static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;

	rc = kstrtoull(buf, 0, &val);

	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */

	dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
			? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);

	return rc < 0 ? rc : len;
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
		WARN_ONCE(1, "unknown namespace type\n");

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);
EXPORT_SYMBOL(nvdimm_namespace_capacity);
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;

	if (!nd_is_uuid_unique(dev, new_uuid))

	if (*old_uuid == NULL)

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace.  Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (nd_mapping->labels)

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);

	*old_uuid = new_uuid;
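
/*
 * Note: a successful "rename" above rewrites the name of every tracked
 * DPA resource from the old label-id string to the new one, so subsequent
 * allocation scans keyed on label_id->id keep finding the namespace's
 * existing extents.
 */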
static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;

	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)

	rc = nd_uuid_store(dev, &uuid, buf, len);
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
		rc = nd_namespace_label_update(nd_region, dev);

	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);

	return rc < 0 ? rc : len;
static DEVICE_ATTR_RW(uuid);
static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
static DEVICE_ATTR_RO(resource);
static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

	if (!is_namespace_blk(dev))

	return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (!is_namespace_blk(dev))

	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)

	rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
			ns_lbasize_supported);
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
			rc, rc < 0 ? "tried" : "wrote", buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);

	return rc ? rc : len;
static DEVICE_ATTR_RW(sector_size);
static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		flags = NSLABEL_FLAG_LOCAL;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)

	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
static DEVICE_ATTR_RO(dpa_extents);
static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
	struct nd_namespace_common *ndns = to_ndns(dev);

	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
static DEVICE_ATTR_RO(holder);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
	int rc = strtobool(buf, &force_raw);

	to_ndns(dev)->force_raw = force_raw;

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
static DEVICE_ATTR_RW(force_raw);
static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr) {
		if (is_namespace_blk(dev))

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return S_IWUSR | S_IRUGO;

		if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_force_raw.attr)

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};
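
/*
 * The attributes above form the namespace's sysfs interface (typically
 * /sys/bus/nd/devices/namespace<region>.<id>/), where tooling such as
 * ndctl writes uuid, alt_name, sector_size and size to provision a
 * namespace; writing a size of zero deletes the allocation.
 */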
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_namespace_common *ndns;
	resource_size_t size;

	if (nd_btt || nd_pfn) {
		struct device *host = NULL;

			host = &nd_btt->dev;
			ndns = nd_btt->ndns;
		} else if (nd_pfn) {
			host = &nd_pfn->dev;
			ndns = nd_pfn->ndns;

			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(host));
			return ERR_PTR(-EBUSY);
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(host),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
		ndns = to_ndns(dev);
		dev_dbg(dev, "claimed by %s, failing probe\n",
				dev_name(ndns->claim));

		return ERR_PTR(-ENXIO);

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "%s: sector size not set\n",
					__func__);
			return ERR_PTR(-ENODEV);
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
static struct device **create_namespace_io(struct nd_region *nd_region)
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;

	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;
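
/*
 * A pmem namespace is only valid if every dimm in the interleave set has
 * exactly one label carrying the same uuid, the region's interleave-set
 * cookie, and the expected position: has_uuid_at_pos() checks a single
 * position, find_pmem_label_set() walks all of them.
 */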
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
		u64 cookie, u16 pos)
	struct nd_namespace_label *found = NULL;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *nd_label;
		bool found_uuid = false;

		for_each_label(l, nd_label, nd_mapping->labels) {
			u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
			u16 position = __le16_to_cpu(nd_label->position);
			u16 nlabel = __le16_to_cpu(nd_label->nlabel);

			if (isetcookie != cookie)

			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)

				dev_dbg(to_ndd(nd_mapping)->dev,
						"%s duplicate entry for uuid\n",
						__func__);

			if (nlabel != nd_region->ndr_mappings)
			if (position != pos)

	return found != NULL;
static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
	struct nd_namespace_label *select = NULL;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *nd_label;
		u64 hw_start, hw_end, pmem_start, pmem_end;

		for_each_label(l, nd_label, nd_mapping->labels)
			if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)

		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = __le64_to_cpu(select->dpa);
		pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
		if (pmem_start == hw_start && pmem_end <= hw_end)

		nd_mapping->labels[0] = select;
		nd_mapping->labels[1] = NULL;
/**
 * find_pmem_label_set - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 */
static int find_pmem_label_set(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm)
	u64 cookie = nd_region_interleave_set_cookie(nd_region);
	struct nd_namespace_label *nd_label;
	u8 select_id[NSLABEL_UUID_LEN];
	resource_size_t size = 0;
	int rc = -ENODEV, l;

	/*
	 * Find a complete set of labels by uuid.  By definition we can start
	 * with any mapping as the reference label
	 */
	for_each_label(l, nd_label, nd_region->mapping[0].labels) {
		u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);

		if (isetcookie != cookie)

		for (i = 0; nd_region->ndr_mappings; i++)
			if (!has_uuid_at_pos(nd_region, nd_label->uuid,
						cookie, i))

		if (i < nd_region->ndr_mappings) {
			/*
			 * Give up if we don't find an instance of a
			 * uuid at each position (from 0 to
			 * nd_region->ndr_mappings - 1), or if we find a
			 * dimm with two instances of the same uuid.
			 */
		} else if (pmem_id) {
			/*
			 * If there is more than one valid uuid set, we
			 * need userspace to clean this up.
			 */

		memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
		pmem_id = select_id;

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1].  In the process,
	 * check that the namespace aligns with interleave-set.  We know
	 * that it does not overlap with any blk namespaces by virtue of
	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
	 * succeeded).
	 */
	rc = select_pmem_id(nd_region, pmem_id);

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *label0 = nd_mapping->labels[0];

		size += __le64_to_cpu(label0->rawsize);
		if (__le16_to_cpu(label0->position) != 0)
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup((void __force *) label0->name,
				NSLABEL_NAME_LEN, GFP_KERNEL);
		nspm->uuid = kmemdup((void __force *) label0->uuid,
				NSLABEL_UUID_LEN, GFP_KERNEL);

	if (!nspm->alt_name || !nspm->uuid) {

	nd_namespace_pmem_set_size(nd_region, nspm, size);

		dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
		dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
		dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
				__func__, rc);
static struct device **create_namespace_pmem(struct nd_region *nd_region)
	struct nd_namespace_pmem *nspm;
	struct device *dev, **devs;
	struct resource *res;

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);

	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	rc = find_pmem_label_set(nd_region, nspm);
	if (rc == -ENODEV) {
		/* Pass, try to permit namespace creation... */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;

		/* Publish a zero-sized namespace for userspace to configure. */
		nd_namespace_pmem_set_size(nd_region, nspm, 0);

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);

	namespace_pmem_release(&nspm->nsio.common.dev);
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start)
	struct nd_label_id label_id;
	struct resource *res;

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	res = krealloc(nsblk->res,
			sizeof(void *) * (nsblk->num_resources + 1),
			GFP_KERNEL);

	nsblk->res = (struct resource **) res;
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0
				&& res->start == start) {
			nsblk->res[nsblk->num_resources++] = res;
static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
	struct nd_namespace_blk *nsblk;

	if (!is_nd_blk(&nd_region->dev))

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);

	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nsblk->id < 0) {

	dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
	dev->parent = &nd_region->dev;
	dev->groups = nd_namespace_attribute_groups;

	return &nsblk->common.dev;
void nd_region_create_blk_seed(struct nd_region *nd_region)
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->ns_seed = nd_namespace_blk_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create blk namespace\n");
		nd_device_register(nd_region->ns_seed);

void nd_region_create_btt_seed(struct nd_region *nd_region)
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->btt_seed = nd_btt_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->btt_seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
static struct device **create_namespace_blk(struct nd_region *nd_region)
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nd_namespace_label *nd_label;
	struct device *dev, **devs = NULL;
	struct nd_namespace_blk *nsblk;
	struct nvdimm_drvdata *ndd;
	int i, l, count = 0;
	struct resource *res;

	if (nd_region->ndr_mappings == 0)

	ndd = to_ndd(nd_mapping);
	for_each_label(l, nd_label, nd_mapping->labels) {
		u32 flags = __le32_to_cpu(nd_label->flags);
		char *name[NSLABEL_NAME_LEN];
		struct device **__devs;

		if (flags & NSLABEL_FLAG_LOCAL)

		for (i = 0; i < count; i++) {
			nsblk = to_nd_namespace_blk(devs[i]);
			if (memcmp(nsblk->uuid, nd_label->uuid,
						NSLABEL_UUID_LEN) == 0) {
				res = nsblk_add_resource(nd_region, ndd, nsblk,
						__le64_to_cpu(nd_label->dpa));
				nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
						dev_name(&nsblk->common.dev));

		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		memcpy(__devs, devs, sizeof(dev) * count);

		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
		devs[count++] = dev;

		nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
		nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
				GFP_KERNEL);
		memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
			nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
					GFP_KERNEL);
		res = nsblk_add_resource(nd_region, ndd, nsblk,
				__le64_to_cpu(nd_label->dpa));
		nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
				dev_name(&nsblk->common.dev));

	dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
			__func__, count, count == 1 ? "" : "s");

		/* Publish a zero-sized namespace for userspace to configure. */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		devs[count++] = dev;

	for (i = 0; i < count; i++) {
		nsblk = to_nd_namespace_blk(devs[i]);
		namespace_blk_release(&nsblk->common.dev);
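
/*
 * Cache each dimm's active namespace labels into nd_mapping->labels so
 * that the create_namespace_*() scans above can operate on them; if a
 * dimm that aliases DPA is disabled, the region probe fails instead.
 */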
static int init_active_labels(struct nd_region *nd_region)
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/*
		 * If the dimm is disabled then prevent the region from
		 * being activated if it aliases DPA.
		 */
			if ((nvdimm->flags & NDD_ALIASING) == 0)
			dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev));

		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "%s: %d\n", __func__, count);

		nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
				GFP_KERNEL);
		if (!nd_mapping->labels)
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label = nd_label_active(ndd, j);
			nd_mapping->labels[j] = label;
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
	struct device **devs = NULL;
	int i, rc = 0, type;

	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
		nvdimm_bus_unlock(&nd_region->dev);

	type = nd_region_to_nstype(nd_region);
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
	case ND_DEVICE_NAMESPACE_PMEM:
		devs = create_namespace_pmem(nd_region);
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespace_blk(nd_region);

	nvdimm_bus_unlock(&nd_region->dev);

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];

		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);

		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		dev->groups = nd_namespace_attribute_groups;
		nd_device_register(dev);

		nd_region->ns_seed = devs[0];

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */