/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"
static void namespace_io_release(struct device *dev)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

        kfree(nsio);
}
static void namespace_pmem_release(struct device *dev)
{
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        kfree(nspm->alt_name);
        kfree(nspm->uuid);
        kfree(nspm);
}
static void namespace_blk_release(struct device *dev)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);

        if (nsblk->id >= 0)
                ida_simple_remove(&nd_region->ns_ida, nsblk->id);
        kfree(nsblk->alt_name);
        kfree(nsblk->uuid);
        kfree(nsblk->res);
        kfree(nsblk);
}
static struct device_type namespace_io_device_type = {
        .name = "nd_namespace_io",
        .release = namespace_io_release,
};

static struct device_type namespace_pmem_device_type = {
        .name = "nd_namespace_pmem",
        .release = namespace_pmem_release,
};

static struct device_type namespace_blk_device_type = {
        .name = "nd_namespace_blk",
        .release = namespace_blk_release,
};
static bool is_namespace_pmem(struct device *dev)
{
        return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(struct device *dev)
{
        return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(struct device *dev)
{
        return dev ? dev->type == &namespace_io_device_type : false;
}
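
/*
 * A pmem namespace is mapped with struct page (ZONE_DEVICE) only when
 * the architecture maps pmem write-back and the region opted in via
 * ND_REGION_PAGEMAP; btt and pfn claimed namespaces handle their own
 * mapping.
 */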
bool pmem_should_map_pages(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);

        if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
                return false;

        if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
                return false;

        if (is_nd_pfn(dev) || is_nd_btt(dev))
                return false;

#ifdef ARCH_MEMREMAP_PMEM
        return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else
        return false;
#endif
}
EXPORT_SYMBOL(pmem_should_map_pages);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                char *name)
{
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
        const char *suffix = NULL;

        if (ndns->claim && is_nd_btt(ndns->claim))
                suffix = "s";

        if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
                sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
        } else if (is_namespace_blk(&ndns->dev)) {
                struct nd_namespace_blk *nsblk;

                nsblk = to_nd_namespace_blk(&ndns->dev);
                sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
                                suffix ? suffix : "");
        } else {
                return NULL;
        }

        return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);
const u8 *nd_dev_to_uuid(struct device *dev)
{
        static const u8 null_uuid[16];

        if (!dev)
                return null_uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                return nsblk->uuid;
        } else
                return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);
static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t __alt_name_store(struct device *dev, const char *buf,
                const size_t len)
{
        char *input, *pos, *alt_name, **ns_altname;
        ssize_t rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = &nspm->alt_name;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_altname = &nsblk->alt_name;
        } else
                return -ENXIO;

        if (dev->driver || to_ndns(dev)->claim)
                return -EBUSY;

        input = kmemdup(buf, len + 1, GFP_KERNEL);
        if (!input)
                return -ENOMEM;

        input[len] = '\0';
        pos = strim(input);
        if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
                rc = -EINVAL;
                goto out;
        }

        alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
        if (!alt_name) {
                rc = -ENOMEM;
                goto out;
        }
        kfree(*ns_altname);
        *ns_altname = alt_name;
        sprintf(*ns_altname, "%s", pos);
        rc = len;

 out:
        kfree(input);
        return rc;
}
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        resource_size_t size = 0;
        struct resource *res;

        if (!nsblk->uuid)
                return 0;
        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id.id) == 0)
                        size += resource_size(res);

        return size;
}
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        struct resource *res;
        int count, i;

        if (!nsblk->uuid || !nsblk->lbasize || !ndd)
                return false;

        count = 0;
        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        for_each_dpa_resource(ndd, res) {
                if (strcmp(res->name, label_id.id) != 0)
                        continue;
                /*
                 * Resources with unacknowledged adjustments indicate a
                 * failure to update labels
                 */
                if (res->flags & DPA_RESOURCE_ADJUSTED)
                        return false;
                count++;
        }

        /* These values match after a successful label update */
        if (count != nsblk->num_resources)
                return false;

        for (i = 0; i < nsblk->num_resources; i++) {
                struct resource *found = NULL;

                for_each_dpa_resource(ndd, res)
                        if (res == nsblk->res[i]) {
                                found = res;
                                break;
                        }
                /* stale resource */
                if (!found)
                        return false;
        }

        return true;
}
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
        resource_size_t size;

        nvdimm_bus_lock(&nsblk->common.dev);
        size = __nd_namespace_blk_validate(nsblk);
        nvdimm_bus_unlock(&nsblk->common.dev);

        return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);
static int nd_namespace_label_update(struct nd_region *nd_region,
                struct device *dev)
{
        dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
                        "namespace must be idle during label update\n");
        if (dev->driver || to_ndns(dev)->claim)
                return 0;

        /*
         * Only allow label writes that will result in a valid namespace
         * or deletion of an existing namespace.
         */
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
                resource_size_t size = resource_size(&nspm->nsio.res);

                if (size == 0 && nspm->uuid)
                        /* delete allocation */;
                else if (!nspm->uuid)
                        return 0;

                return nd_pmem_namespace_label_update(nd_region, nspm, size);
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
                resource_size_t size = nd_namespace_blk_size(nsblk);

                if (size == 0 && nsblk->uuid)
                        /* delete allocation */;
                else if (!nsblk->uuid || !nsblk->lbasize)
                        return 0;

                return nd_blk_namespace_label_update(nd_region, nsblk, size);
        } else
                return -ENXIO;
}
static ssize_t alt_name_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __alt_name_store(dev, buf, len);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
static ssize_t alt_name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        char *ns_altname;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = nspm->alt_name;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_altname = nsblk->alt_name;
        } else
                return -ENXIO;

        return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);
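
/*
 * Free @n bytes from the tail of @label_id's allocation on one dimm:
 * whole trailing extents are deleted outright, and the final extent is
 * shrunk in place with adjust_resource().
 */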
static int scan_free(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        int rc = 0;

        while (n) {
                struct resource *res, *last;
                resource_size_t new_start;

                last = NULL;
                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id->id) == 0)
                                last = res;
                res = last;
                if (!res)
                        return 0;

                if (n >= resource_size(res)) {
                        n -= resource_size(res);
                        nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
                        nvdimm_free_dpa(ndd, res);
                        /* retry with last resource deleted */
                        continue;
                }

                /*
                 * Keep BLK allocations relegated to high DPA as much as
                 * possible
                 */
                if (is_blk)
                        new_start = res->start + n;
                else
                        new_start = res->start;

                rc = adjust_resource(res, new_start, resource_size(res) - n);
                if (rc == 0)
                        res->flags |= DPA_RESOURCE_ADJUSTED;
                nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
                break;
        }

        return rc;
}
/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                int rc;

                rc = scan_free(nd_region, nd_mapping, label_id, n);
                if (rc)
                        return rc;
        }

        return 0;
}
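
/*
 * First allocation for a label_id on this dimm: carve BLK space from
 * the top of the mapping and PMEM space from the bottom, so the two
 * allocation styles encroach on each other as late as possible.
 */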
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
                struct nd_region *nd_region, struct nd_mapping *nd_mapping,
                resource_size_t n)
{
        bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        resource_size_t first_dpa;
        struct resource *res;
        int rc = 0;

        /* allocate blk from highest dpa first */
        if (is_blk)
                first_dpa = nd_mapping->start + nd_mapping->size - n;
        else
                first_dpa = nd_mapping->start;

        /* first resource allocation for this label-id or dimm */
        res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
        if (!res)
                rc = -EBUSY;

        nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
        return rc ? n : 0;
}
static bool space_valid(bool is_pmem, bool is_reserve,
                struct nd_label_id *label_id, struct resource *res)
{
        /*
         * For BLK-space any space is valid, for PMEM-space, it must be
         * contiguous with an existing allocation unless we are
         * reserving pmem.
         */
        if (is_reserve || !is_pmem)
                return true;
        if (!res || strcmp(res->name, label_id->id) == 0)
                return true;

        return false;
}
enum alloc_loc {
        ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};
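
/*
 * Walk the dpa resource tree of one mapping looking for up to @n bytes
 * of free space for @label_id.  Each free range is classified as space
 * before the first allocation, between allocations, or after the last
 * allocation, and an adjacent extent owned by the same label-id is
 * grown in preference to inserting a new one.
 */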
static resource_size_t scan_allocate(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
        bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        const resource_size_t to_allocate = n;
        struct resource *res;
        int first;

 retry:
        first = 0;
        for_each_dpa_resource(ndd, res) {
                resource_size_t allocate, available = 0, free_start, free_end;
                struct resource *next = res->sibling, *new_res = NULL;
                enum alloc_loc loc = ALLOC_ERR;
                const char *action;
                int rc = 0;

                /* ignore resources outside this nd_mapping */
                if (res->start > mapping_end)
                        continue;
                if (res->end < nd_mapping->start)
                        continue;

                /* space at the beginning of the mapping */
                if (!first++ && res->start > nd_mapping->start) {
                        free_start = nd_mapping->start;
                        available = res->start - free_start;
                        if (space_valid(is_pmem, is_reserve, label_id, NULL))
                                loc = ALLOC_BEFORE;
                }

                /* space between allocations */
                if (!loc && next) {
                        free_start = res->start + resource_size(res);
                        free_end = min(mapping_end, next->start - 1);
                        if (space_valid(is_pmem, is_reserve, label_id, res)
                                        && free_start < free_end) {
                                available = free_end + 1 - free_start;
                                loc = ALLOC_MID;
                        }
                }

                /* space at the end of the mapping */
                if (!loc && !next) {
                        free_start = res->start + resource_size(res);
                        free_end = mapping_end;
                        if (space_valid(is_pmem, is_reserve, label_id, res)
                                        && free_start < free_end) {
                                available = free_end + 1 - free_start;
                                loc = ALLOC_AFTER;
                        }
                }

                if (!loc || !available)
                        continue;
                allocate = min(available, n);
                switch (loc) {
                case ALLOC_BEFORE:
                        if (strcmp(res->name, label_id->id) == 0) {
                                /* adjust current resource up */
                                if (is_pmem && !is_reserve)
                                        return n;
                                rc = adjust_resource(res, res->start - allocate,
                                                resource_size(res) + allocate);
                                action = "cur grow up";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_MID:
                        if (strcmp(next->name, label_id->id) == 0) {
                                /* adjust next resource up */
                                if (is_pmem && !is_reserve)
                                        return n;
                                rc = adjust_resource(next, next->start
                                                - allocate, resource_size(next)
                                                + allocate);
                                new_res = next;
                                action = "next grow up";
                        } else if (strcmp(res->name, label_id->id) == 0) {
                                action = "grow down";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_AFTER:
                        if (strcmp(res->name, label_id->id) == 0)
                                action = "grow down";
                        else
                                action = "allocate";
                        break;
                default:
                        return n;
                }

                if (strcmp(action, "allocate") == 0) {
                        /* BLK allocate bottom up */
                        if (!is_pmem)
                                free_start += available - allocate;
                        else if (!is_reserve && free_start != nd_mapping->start)
                                return n;

                        new_res = nvdimm_allocate_dpa(ndd, label_id,
                                        free_start, allocate);
                        if (!new_res)
                                rc = -EBUSY;
                } else if (strcmp(action, "grow down") == 0) {
                        /* adjust current resource down */
                        rc = adjust_resource(res, res->start, resource_size(res)
                                        + allocate);
                        if (rc == 0)
                                res->flags |= DPA_RESOURCE_ADJUSTED;
                }

                if (!new_res)
                        new_res = res;

                nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
                                action, loc, rc);

                if (rc)
                        return n;

                n -= allocate;
                if (n) {
                        /*
                         * Retry scan with newly inserted resources.
                         * For example, if we did an ALLOC_BEFORE
                         * insertion there may also have been space
                         * available for an ALLOC_AFTER insertion, so we
                         * need to check this same resource again
                         */
                        goto retry;
                } else
                        return 0;
        }

        /*
         * If we allocated nothing in the BLK case it may be because we are in
         * an initial "pmem-reserve pass".  Only do an initial BLK allocation
         * when none of the DPA space is reserved.
         */
        if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
                return init_dpa_allocation(label_id, nd_region, nd_mapping, n);

        return n;
}
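
/*
 * Coalesce adjacent dpa extents with the same blk label-id into a
 * single resource; pmem label-ids are skipped since a pmem allocation
 * is already kept as a single extent per dimm.
 */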
static int merge_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;

        if (strncmp("pmem", label_id->id, 4) == 0)
                return 0;
 retry:
        for_each_dpa_resource(ndd, res) {
                int rc;
                struct resource *next = res->sibling;
                resource_size_t end = res->start + resource_size(res);

                if (!next || strcmp(res->name, label_id->id) != 0
                                || strcmp(next->name, label_id->id) != 0
                                || end != next->start)
                        continue;
                end += resource_size(next);
                nvdimm_free_dpa(ndd, next);
                rc = adjust_resource(res, res->start, end - res->start);
                nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
                if (rc)
                        return rc;
                res->flags |= DPA_RESOURCE_ADJUSTED;
                goto retry;
        }

        return 0;
}
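
/*
 * "pmem-reserve" pass: before carving BLK capacity, temporarily claim
 * all free PMEM dpa on the dimm so that a BLK allocation first consumes
 * space that does not alias PMEM.
 */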
static int __reserve_free_pmem(struct device *dev, void *data)
{
        struct nvdimm *nvdimm = data;
        struct nd_region *nd_region;
        struct nd_label_id label_id;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        if (nd_region->ndr_mappings == 0)
                return 0;

        memset(&label_id, 0, sizeof(label_id));
        strcat(label_id.id, "pmem-reserve");
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t n, rem = 0;

                if (nd_mapping->nvdimm != nvdimm)
                        continue;

                n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
                if (n == 0)
                        return 0;
                rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
                dev_WARN_ONCE(&nd_region->dev, rem,
                                "pmem reserve underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                return rem ? -ENXIO : 0;
        }

        return 0;
}
static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res, *_res;

        for_each_dpa_resource_safe(ndd, res, _res)
                if (strcmp(res->name, "pmem-reserve") == 0)
                        nvdimm_free_dpa(ndd, res);
}
static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;
        int rc;

        rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
                        __reserve_free_pmem);
        if (rc)
                release_free_pmem(nvdimm_bus, nd_mapping);

        return rc;
}
/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t rem = n;
                int rc, j;

                /*
                 * In the BLK case try once with all unallocated PMEM
                 * reserved, and once without
                 */
                for (j = is_pmem; j < 2; j++) {
                        bool blk_only = j == 0;

                        if (blk_only) {
                                rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
                                if (rc)
                                        return rc;
                        }
                        rem = scan_allocate(nd_region, nd_mapping,
                                        label_id, rem);
                        if (blk_only)
                                release_free_pmem(nvdimm_bus, nd_mapping);

                        /* try again and allow encroachments into PMEM */
                        if (rem == 0)
                                break;
                }

                dev_WARN_ONCE(&nd_region->dev, rem,
                                "allocation underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                if (rem)
                        return -ENXIO;

                rc = merge_dpa(nd_region, nd_mapping, label_id);
                if (rc)
                        return rc;
        }

        return 0;
}
static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
                struct nd_namespace_pmem *nspm, resource_size_t size)
{
        struct resource *res = &nspm->nsio.res;

        res->start = nd_region->ndr_start;
        res->end = nd_region->ndr_start + size - 1;
}
static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
        if (!uuid) {
                dev_dbg(dev, "%s: uuid not set\n", where);
                return true;
        }

        return false;
}
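
/*
 * Resize a namespace: the requested total is divided evenly across the
 * region's mappings and each dimm's dpa allocation is then shrunk or
 * grown to the new per-dimm target.
 */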
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
        resource_size_t allocated = 0, available = 0;
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_mapping *nd_mapping;
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
        u32 flags = 0, remainder;
        u8 *uuid = NULL;
        int rc, i;

        if (dev->driver || to_ndns(dev)->claim)
                return -EBUSY;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
        }

        /*
         * We need a uuid for the allocation-label and dimm(s) on which
         * to store the label.
         */
        if (uuid_not_set(uuid, dev, __func__))
                return -ENXIO;
        if (nd_region->ndr_mappings == 0) {
                dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);
                return -ENXIO;
        }

        div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
        if (remainder) {
                dev_dbg(dev, "%llu is not %dK aligned\n", val,
                                (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
                return -EINVAL;
        }

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                ndd = to_ndd(nd_mapping);

                /*
                 * All dimms in an interleave set, or the base dimm for a blk
                 * region, need to be enabled for the size to be changed.
                 */
                if (!ndd)
                        return -ENXIO;

                allocated += nvdimm_allocated_dpa(ndd, &label_id);
        }
        available = nd_region_available_dpa(nd_region);

        if (val > available + allocated)
                return -ENOSPC;

        if (val == allocated)
                return 0;

        val = div_u64(val, nd_region->ndr_mappings);
        allocated = div_u64(allocated, nd_region->ndr_mappings);
        if (val < allocated)
                rc = shrink_dpa_allocation(nd_region, &label_id,
                                allocated - val);
        else
                rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

        if (rc)
                return rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                nd_namespace_pmem_set_size(nd_region, nspm,
                                val * nd_region->ndr_mappings);
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                /*
                 * Try to delete the namespace if we deleted all of its
                 * allocation, this is not the seed device for the
                 * region, and it is not actively claimed by a btt
                 * instance.
                 */
                if (val == 0 && nd_region->ns_seed != dev
                                && !nsblk->common.claim)
                        nd_device_unregister(dev, ND_ASYNC);
        }

        return rc;
}
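
/*
 * "size" is the read-write sysfs knob for namespace provisioning.  For
 * example (illustrative device names, the region and namespace ids vary
 * by platform):
 *
 *	echo $((128 << 20)) > /sys/bus/nd/devices/region0/namespace0.0/size
 *
 * A write of 0 deletes the allocation.
 */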
static ssize_t size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        unsigned long long val;
        u8 **uuid = NULL;
        int rc;

        rc = kstrtoull(buf, 0, &val);
        if (rc)
                return rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __size_store(dev, val);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = &nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = &nsblk->uuid;
        }

        if (rc == 0 && val == 0 && uuid) {
                /* setting size zero == 'delete namespace' */
                kfree(*uuid);
                *uuid = NULL;
        }

        dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
                        ? "fail" : "success", rc);

        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        struct device *dev = &ndns->dev;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return resource_size(&nspm->nsio.res);
        } else if (is_namespace_blk(dev)) {
                return nd_namespace_blk_size(to_nd_namespace_blk(dev));
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                return resource_size(&nsio->res);
        } else
                WARN_ONCE(1, "unknown namespace type\n");

        return 0;
}
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        resource_size_t size;

        nvdimm_bus_lock(&ndns->dev);
        size = __nvdimm_namespace_capacity(ndns);
        nvdimm_bus_unlock(&ndns->dev);

        return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);
static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", (unsigned long long)
                        nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
static ssize_t uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u8 *uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
        } else
                return -ENXIO;

        if (uuid)
                return sprintf(buf, "%pUb\n", uuid);
        return sprintf(buf, "\n");
}
/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
                struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
        u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
        struct nd_label_id old_label_id;
        struct nd_label_id new_label_id;
        int i;

        if (!nd_is_uuid_unique(dev, new_uuid))
                return -EINVAL;

        if (*old_uuid == NULL)
                goto out;

        /*
         * If we've already written a label with this uuid, then it's
         * too late to rename because we can't reliably update the uuid
         * without losing the old namespace.  Userspace must delete this
         * namespace to abandon the old uuid.
         */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                /*
                 * This check by itself is sufficient because old_uuid
                 * would be NULL above if this uuid did not exist in the
                 * currently written set.
                 *
                 * FIXME: can we delete uuid with zero dpa allocated?
                 */
                if (nd_mapping->labels)
                        return -EBUSY;
        }

        nd_label_gen_id(&old_label_id, *old_uuid, flags);
        nd_label_gen_id(&new_label_id, new_uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, old_label_id.id) == 0)
                                sprintf((void *) res->name, "%s",
                                                new_label_id.id);
        }
        kfree(*old_uuid);
 out:
        *old_uuid = new_uuid;

        return 0;
}
static ssize_t uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        u8 *uuid = NULL;
        ssize_t rc = 0;
        u8 **ns_uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_uuid = &nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_uuid = &nsblk->uuid;
        } else
                return -ENXIO;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (to_ndns(dev)->claim)
                rc = -EBUSY;
        if (rc >= 0)
                rc = nd_uuid_store(dev, &uuid, buf, len);
        if (rc >= 0)
                rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        else
                kfree(uuid);
        dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
                        rc, buf, buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct resource *res;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                res = &nspm->nsio.res;
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                res = &nsio->res;
        } else
                return -ENXIO;

        /* no address to convey if the namespace has no allocation */
        if (resource_size(res) == 0)
                return -ENXIO;
        return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);
static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
        4096, 4104, 4160, 4224, 0 };
static ssize_t sector_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

        if (!is_namespace_blk(dev))
                return -ENXIO;

        return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
}
static ssize_t sector_size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc = 0;

        if (!is_namespace_blk(dev))
                return -ENXIO;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        if (to_ndns(dev)->claim)
                rc = -EBUSY;
        if (rc >= 0)
                rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
                                ns_lbasize_supported);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
                        rc, rc < 0 ? "tried" : "wrote", buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);
static ssize_t dpa_extents_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_label_id label_id;
        int count = 0, i;
        u8 *uuid = NULL;
        u32 flags = 0;

        nvdimm_bus_lock(dev);
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
        }

        if (!uuid)
                goto out;

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id.id) == 0)
                                count++;
        }
 out:
        nvdimm_bus_unlock(dev);

        return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);
static ssize_t holder_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        ssize_t rc;

        device_lock(dev);
        rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(holder);
static ssize_t mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        struct device *claim;
        char *mode;
        ssize_t rc;

        device_lock(dev);
        claim = ndns->claim;
        if (pmem_should_map_pages(dev) || (claim && is_nd_pfn(claim)))
                mode = "memory";
        else if (claim && is_nd_btt(claim))
                mode = "safe";
        else
                mode = "raw";
        rc = sprintf(buf, "%s\n", mode);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(mode);
static ssize_t force_raw_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool force_raw;
        int rc = strtobool(buf, &force_raw);

        if (rc)
                return rc;

        to_ndns(dev)->force_raw = force_raw;
        return len;
}

static ssize_t force_raw_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);
static struct attribute *nd_namespace_attributes[] = {
        &dev_attr_nstype.attr,
        &dev_attr_size.attr,
        &dev_attr_mode.attr,
        &dev_attr_uuid.attr,
        &dev_attr_holder.attr,
        &dev_attr_resource.attr,
        &dev_attr_alt_name.attr,
        &dev_attr_force_raw.attr,
        &dev_attr_sector_size.attr,
        &dev_attr_dpa_extents.attr,
        NULL,
};
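
/*
 * Restrict attribute visibility by namespace type: "resource" is hidden
 * for blk namespaces, "sector_size" is hidden for pmem namespaces, and
 * io namespaces expose only the base attributes with a read-only
 * "size".
 */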
static umode_t namespace_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);

        if (a == &dev_attr_resource.attr) {
                if (is_namespace_blk(dev))
                        return 0;
                return a->mode;
        }

        if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
                if (a == &dev_attr_size.attr)
                        return S_IWUSR | S_IRUGO;

                if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
                        return 0;

                return a->mode;
        }

        if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
                        || a == &dev_attr_holder.attr
                        || a == &dev_attr_force_raw.attr
                        || a == &dev_attr_mode.attr)
                return a->mode;

        return 0;
}
static struct attribute_group nd_namespace_attribute_group = {
        .attrs = nd_namespace_attributes,
        .is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
        &nd_device_attribute_group,
        &nd_namespace_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};
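
/**
 * nvdimm_namespace_common_probe - validate a namespace before enabling
 * @dev: the namespace device being probed, or a btt / pfn device that
 *	 claims one
 *
 * Returns the namespace on success, otherwise an ERR_PTR() when the
 * namespace is unconfigured, too small, claimed by another host, or
 * still has an active driver bound.
 */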
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
        struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
        struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
        struct nd_namespace_common *ndns;
        resource_size_t size;

        if (nd_btt || nd_pfn) {
                struct device *host = NULL;

                if (nd_btt) {
                        host = &nd_btt->dev;
                        ndns = nd_btt->ndns;
                } else if (nd_pfn) {
                        host = &nd_pfn->dev;
                        ndns = nd_pfn->ndns;
                }

                if (!ndns || !host)
                        return ERR_PTR(-ENODEV);

                /*
                 * Flush any in-progress probes / removals in the driver
                 * for the raw personality of this namespace.
                 */
                device_lock(&ndns->dev);
                device_unlock(&ndns->dev);
                if (ndns->dev.driver) {
                        dev_dbg(&ndns->dev, "is active, can't bind %s\n",
                                        dev_name(host));
                        return ERR_PTR(-EBUSY);
                }
                if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
                                        "host (%s) vs claim (%s) mismatch\n",
                                        dev_name(host),
                                        dev_name(ndns->claim)))
                        return ERR_PTR(-ENXIO);
        } else {
                ndns = to_ndns(dev);
                if (ndns->claim) {
                        dev_dbg(dev, "claimed by %s, failing probe\n",
                                        dev_name(ndns->claim));

                        return ERR_PTR(-ENXIO);
                }
        }

        size = nvdimm_namespace_capacity(ndns);
        if (size < ND_MIN_NAMESPACE_SIZE) {
                dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
                                &size, ND_MIN_NAMESPACE_SIZE);
                return ERR_PTR(-ENODEV);
        }

        if (is_namespace_pmem(&ndns->dev)) {
                struct nd_namespace_pmem *nspm;

                nspm = to_nd_namespace_pmem(&ndns->dev);
                if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
                        return ERR_PTR(-ENODEV);
        } else if (is_namespace_blk(&ndns->dev)) {
                struct nd_namespace_blk *nsblk;

                nsblk = to_nd_namespace_blk(&ndns->dev);
                if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
                        return ERR_PTR(-ENODEV);
                if (!nsblk->lbasize) {
                        dev_dbg(&ndns->dev, "%s: sector size not set\n",
                                        __func__);
                        return ERR_PTR(-ENODEV);
                }
                if (!nd_namespace_blk_validate(nsblk))
                        return ERR_PTR(-ENODEV);
        }

        return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);
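
/*
 * A label-less region gets a single io-namespace that spans the whole
 * region.
 */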
static struct device **create_namespace_io(struct nd_region *nd_region)
{
        struct nd_namespace_io *nsio;
        struct device *dev, **devs;
        struct resource *res;

        nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
        if (!nsio)
                return NULL;

        devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
        if (!devs) {
                kfree(nsio);
                return NULL;
        }

        dev = &nsio->common.dev;
        dev->type = &namespace_io_device_type;
        dev->parent = &nd_region->dev;
        res = &nsio->res;
        res->name = dev_name(&nd_region->dev);
        res->flags = IORESOURCE_MEM;
        res->start = nd_region->ndr_start;
        res->end = res->start + nd_region->ndr_size - 1;

        devs[0] = dev;
        return devs;
}
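
/*
 * Check that a label with @uuid and interleave-set @cookie exists at
 * position @pos in one of the region's mappings, and that no dimm
 * carries a duplicate label for the same uuid.
 */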
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
                u64 cookie, u16 pos)
{
        struct nd_namespace_label *found = NULL;
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nd_namespace_label *nd_label;
                bool found_uuid = false;
                int l;

                for_each_label(l, nd_label, nd_mapping->labels) {
                        u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
                        u16 position = __le16_to_cpu(nd_label->position);
                        u16 nlabel = __le16_to_cpu(nd_label->nlabel);

                        if (isetcookie != cookie)
                                continue;

                        if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
                                continue;

                        if (found_uuid) {
                                dev_dbg(to_ndd(nd_mapping)->dev,
                                                "%s duplicate entry for uuid\n",
                                                __func__);
                                return false;
                        }
                        found_uuid = true;
                        if (nlabel != nd_region->ndr_mappings)
                                continue;
                        if (position != pos)
                                continue;
                        found = nd_label;
                        break;
                }
                if (found)
                        break;
        }

        return found != NULL;
}
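
/*
 * Install the label matching @pmem_id as labels[0] of each mapping,
 * after checking that the label's dpa range lines up with the range
 * the NFIT published for that dimm.
 */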
static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
        struct nd_namespace_label *select = NULL;
        int i;

        if (!pmem_id)
                return -ENODEV;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nd_namespace_label *nd_label;
                u64 hw_start, hw_end, pmem_start, pmem_end;
                int l;

                for_each_label(l, nd_label, nd_mapping->labels)
                        if (memcmp(nd_label->uuid, pmem_id,
                                                NSLABEL_UUID_LEN) == 0)
                                break;

                if (!nd_label) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                select = nd_label;
                /*
                 * Check that this label is compliant with the dpa
                 * range published in NFIT
                 */
                hw_start = nd_mapping->start;
                hw_end = hw_start + nd_mapping->size;
                pmem_start = __le64_to_cpu(select->dpa);
                pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
                if (pmem_start == hw_start && pmem_end <= hw_end)
                        /* pass */;
                else
                        return -EINVAL;

                nd_mapping->labels[0] = select;
                nd_mapping->labels[1] = NULL;
        }

        return 0;
}
/**
 * find_pmem_label_set - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 */
static int find_pmem_label_set(struct nd_region *nd_region,
                struct nd_namespace_pmem *nspm)
{
        u64 cookie = nd_region_interleave_set_cookie(nd_region);
        struct nd_namespace_label *nd_label;
        u8 select_id[NSLABEL_UUID_LEN];
        resource_size_t size = 0;
        u8 *pmem_id = NULL;
        int rc = -ENODEV, l;
        u16 i;

        if (cookie == 0)
                return -ENXIO;

        /*
         * Find a complete set of labels by uuid.  By definition we can start
         * with any mapping as the reference label
         */
        for_each_label(l, nd_label, nd_region->mapping[0].labels) {
                u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);

                if (isetcookie != cookie)
                        continue;

                for (i = 0; i < nd_region->ndr_mappings; i++)
                        if (!has_uuid_at_pos(nd_region, nd_label->uuid,
                                                cookie, i))
                                break;
                if (i < nd_region->ndr_mappings) {
                        /*
                         * Give up if we don't find an instance of a
                         * uuid at each position (from 0 to
                         * nd_region->ndr_mappings - 1), or if we find a
                         * dimm with two instances of the same uuid.
                         */
                        rc = -EINVAL;
                        goto err;
                } else if (pmem_id) {
                        /*
                         * If there is more than one valid uuid set, we
                         * need userspace to clean this up.
                         */
                        rc = -EBUSY;
                        goto err;
                }
                memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
                pmem_id = select_id;
        }

        /*
         * Fix up each mapping's 'labels' to have the validated pmem label for
         * that position at labels[0], and NULL at labels[1].  In the process,
         * check that the namespace aligns with interleave-set.  We know
         * that it does not overlap with any blk namespaces by virtue of
         * the dimm being enabled (i.e. nd_label_reserve_dpa()
         * succeeded).
         */
        rc = select_pmem_id(nd_region, pmem_id);
        if (rc)
                goto err;

        /* Calculate total size and populate namespace properties from label0 */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nd_namespace_label *label0 = nd_mapping->labels[0];

                size += __le64_to_cpu(label0->rawsize);
                if (__le16_to_cpu(label0->position) != 0)
                        continue;
                WARN_ON(nspm->alt_name || nspm->uuid);
                nspm->alt_name = kmemdup((void __force *) label0->name,
                                NSLABEL_NAME_LEN, GFP_KERNEL);
                nspm->uuid = kmemdup((void __force *) label0->uuid,
                                NSLABEL_UUID_LEN, GFP_KERNEL);
        }

        if (!nspm->alt_name || !nspm->uuid) {
                rc = -ENOMEM;
                goto err;
        }

        nd_namespace_pmem_set_size(nd_region, nspm, size);

        return 0;
 err:
        switch (rc) {
        case -EINVAL:
                dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
                break;
        case -ENODEV:
                dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
                break;
        default:
                dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
                                __func__, rc);
                break;
        }
        return rc;
}
static struct device **create_namespace_pmem(struct nd_region *nd_region)
{
        struct nd_namespace_pmem *nspm;
        struct device *dev, **devs;
        struct resource *res;
        int rc;

        nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
        if (!nspm)
                return NULL;

        dev = &nspm->nsio.common.dev;
        dev->type = &namespace_pmem_device_type;
        dev->parent = &nd_region->dev;
        res = &nspm->nsio.res;
        res->name = dev_name(&nd_region->dev);
        res->flags = IORESOURCE_MEM;
        rc = find_pmem_label_set(nd_region, nspm);
        if (rc == -ENODEV) {
                int i;

                /* Pass, try to permit namespace creation... */
                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                        kfree(nd_mapping->labels);
                        nd_mapping->labels = NULL;
                }

                /* Publish a zero-sized namespace for userspace to configure. */
                nd_namespace_pmem_set_size(nd_region, nspm, 0);

                rc = 0;
        } else if (rc)
                goto err;

        devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
        if (!devs)
                goto err;

        devs[0] = dev;
        return devs;

 err:
        namespace_pmem_release(&nspm->nsio.common.dev);
        return NULL;
}
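
/*
 * Append the dimm dpa extent starting at @start for this blk label-id
 * to the namespace's resource array, growing the array by one slot.
 */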
struct resource *nsblk_add_resource(struct nd_region *nd_region,
                struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
                resource_size_t start)
{
        struct nd_label_id label_id;
        struct resource *res;

        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        res = krealloc(nsblk->res,
                        sizeof(void *) * (nsblk->num_resources + 1),
                        GFP_KERNEL);
        if (!res)
                return NULL;
        nsblk->res = (struct resource **) res;
        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id.id) == 0
                                && res->start == start) {
                        nsblk->res[nsblk->num_resources++] = res;
                        return res;
                }
        return NULL;
}
static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
{
        struct nd_namespace_blk *nsblk;
        struct device *dev;

        if (!is_nd_blk(&nd_region->dev))
                return NULL;

        nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
        if (!nsblk)
                return NULL;

        dev = &nsblk->common.dev;
        dev->type = &namespace_blk_device_type;
        nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
        if (nsblk->id < 0) {
                kfree(nsblk);
                return NULL;
        }
        dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
        dev->parent = &nd_region->dev;
        dev->groups = nd_namespace_attribute_groups;

        return &nsblk->common.dev;
}
void nd_region_create_blk_seed(struct nd_region *nd_region)
{
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        nd_region->ns_seed = nd_namespace_blk_create(nd_region);
        /*
         * Seed creation failures are not fatal, provisioning is simply
         * disabled until memory becomes available
         */
        if (!nd_region->ns_seed)
                dev_err(&nd_region->dev, "failed to create blk namespace\n");
        else
                nd_device_register(nd_region->ns_seed);
}
void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        nd_region->pfn_seed = nd_pfn_create(nd_region);
        /*
         * Seed creation failures are not fatal, provisioning is simply
         * disabled until memory becomes available
         */
        if (!nd_region->pfn_seed)
                dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}
void nd_region_create_btt_seed(struct nd_region *nd_region)
{
        WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
        nd_region->btt_seed = nd_btt_create(nd_region);
        /*
         * Seed creation failures are not fatal, provisioning is simply
         * disabled until memory becomes available
         */
        if (!nd_region->btt_seed)
                dev_err(&nd_region->dev, "failed to create btt namespace\n");
}
static struct device **create_namespace_blk(struct nd_region *nd_region)
{
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nd_namespace_label *nd_label;
        struct device *dev, **devs = NULL;
        struct nd_namespace_blk *nsblk;
        struct nvdimm_drvdata *ndd;
        int i, l, count = 0;
        struct resource *res;

        if (nd_region->ndr_mappings == 0)
                return NULL;

        ndd = to_ndd(nd_mapping);
        for_each_label(l, nd_label, nd_mapping->labels) {
                u32 flags = __le32_to_cpu(nd_label->flags);
                char *name[NSLABEL_NAME_LEN];
                struct device **__devs;

                if (flags & NSLABEL_FLAG_LOCAL)
                        /* pass */;
                else
                        continue;

                for (i = 0; i < count; i++) {
                        nsblk = to_nd_namespace_blk(devs[i]);
                        if (memcmp(nsblk->uuid, nd_label->uuid,
                                                NSLABEL_UUID_LEN) == 0) {
                                res = nsblk_add_resource(nd_region, ndd, nsblk,
                                                __le64_to_cpu(nd_label->dpa));
                                if (!res)
                                        goto err;
                                nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
                                        dev_name(&nsblk->common.dev));
                                break;
                        }
                }
                if (i < count)
                        continue;

                __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
                if (!__devs)
                        goto err;
                memcpy(__devs, devs, sizeof(dev) * count);
                kfree(devs);
                devs = __devs;

                nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
                if (!nsblk)
                        goto err;
                dev = &nsblk->common.dev;
                dev->type = &namespace_blk_device_type;
                dev->parent = &nd_region->dev;
                dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
                devs[count++] = dev;
                nsblk->id = -1;
                nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
                nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
                                GFP_KERNEL);
                if (!nsblk->uuid)
                        goto err;
                memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
                if (name[0])
                        nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
                                        GFP_KERNEL);
                res = nsblk_add_resource(nd_region, ndd, nsblk,
                                __le64_to_cpu(nd_label->dpa));
                if (!res)
                        goto err;
                nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
                                dev_name(&nsblk->common.dev));
        }

        dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
                        __func__, count, count == 1 ? "" : "s");

        if (count == 0) {
                /* Publish a zero-sized namespace for userspace to configure. */
                for (i = 0; i < nd_region->ndr_mappings; i++) {
                        struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                        kfree(nd_mapping->labels);
                        nd_mapping->labels = NULL;
                }

                devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
                if (!devs)
                        goto err;
                nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
                if (!nsblk)
                        goto err;
                dev = &nsblk->common.dev;
                dev->type = &namespace_blk_device_type;
                dev->parent = &nd_region->dev;
                devs[count++] = dev;
        }

        return devs;

 err:
        for (i = 0; i < count; i++) {
                nsblk = to_nd_namespace_blk(devs[i]);
                namespace_blk_release(&nsblk->common.dev);
        }
        kfree(devs);
        return NULL;
}
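
/*
 * Pin each enabled dimm in the region and cache its active namespace
 * labels in nd_mapping->labels; a disabled dimm that aliases dpa causes
 * the region probe to fail.
 */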
static int init_active_labels(struct nd_region *nd_region)
{
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
                int count, j;

                /*
                 * If the dimm is disabled then prevent the region from
                 * being activated if it aliases DPA.
                 */
                if (!ndd) {
                        if ((nvdimm->flags & NDD_ALIASING) == 0)
                                return 0;
                        dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
                                        dev_name(&nd_mapping->nvdimm->dev));
                        return -ENXIO;
                }
                nd_mapping->ndd = ndd;
                atomic_inc(&nvdimm->busy);

                count = nd_label_active_count(ndd);
                dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
                if (!count)
                        continue;
                nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
                                GFP_KERNEL);
                if (!nd_mapping->labels)
                        return -ENOMEM;
                for (j = 0; j < count; j++) {
                        struct nd_namespace_label *label;

                        label = nd_label_active(ndd, j);
                        nd_mapping->labels[j] = label;
                }
        }

        return 0;
}
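
/**
 * nd_region_register_namespaces - scan labels and register namespace devices
 * @nd_region: region to scan
 * @err: filled in with the number of namespaces that failed to register
 *
 * Returns the number of namespaces registered, or a negative error code
 * if the region could not be activated.
 */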
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
        struct device **devs = NULL;
        int i, rc = 0, type;

        *err = 0;
        nvdimm_bus_lock(&nd_region->dev);
        rc = init_active_labels(nd_region);
        if (rc) {
                nvdimm_bus_unlock(&nd_region->dev);
                return rc;
        }

        type = nd_region_to_nstype(nd_region);
        switch (type) {
        case ND_DEVICE_NAMESPACE_IO:
                devs = create_namespace_io(nd_region);
                break;
        case ND_DEVICE_NAMESPACE_PMEM:
                devs = create_namespace_pmem(nd_region);
                break;
        case ND_DEVICE_NAMESPACE_BLK:
                devs = create_namespace_blk(nd_region);
                break;
        default:
                break;
        }
        nvdimm_bus_unlock(&nd_region->dev);

        if (!devs)
                return -ENODEV;

        for (i = 0; devs[i]; i++) {
                struct device *dev = devs[i];
                int id;

                if (type == ND_DEVICE_NAMESPACE_BLK) {
                        struct nd_namespace_blk *nsblk;

                        nsblk = to_nd_namespace_blk(dev);
                        id = ida_simple_get(&nd_region->ns_ida, 0, 0,
                                        GFP_KERNEL);
                        nsblk->id = id;
                } else
                        id = i;

                if (id < 0)
                        break;
                dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
                dev->groups = nd_namespace_attribute_groups;
                nd_device_register(dev);
        }
        if (i)
                nd_region->ns_seed = devs[0];

        if (devs[i]) {
                int j;

                for (j = i; devs[j]; j++) {
                        struct device *dev = devs[j];

                        device_initialize(dev);
                        put_device(dev);
                }
                *err = j - i;
                /*
                 * All of the namespaces we tried to register failed, so
                 * fail region activation.
                 */