/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/sort.h>
/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>
static DEFINE_IDA(region_ida);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < nvdimm->num_flush; i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd->flush_wpq[dimm][j] & PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd->flush_wpq[dimm][i] = flush_page
				+ (res->start & ~PAGE_MASK);
	}

	return 0;
}
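/*
 * Illustrative note (addresses below are hypothetical, not from the original
 * source): if a DIMM reports two flush hint registers at, say, 0xfed40000 and
 * 0xfed40040, both land in the same 4K page, so the loop above reuses the
 * page mapped for the first hint and only the low offset bits differ between
 * flush_wpq[dimm][0] and flush_wpq[dimm][1].
 */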
int nd_region_activate(struct nd_region *nd_region)
{
	int i;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	return 0;
}
static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}
static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};
bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}
struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);
struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);
void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region, as well as an input to the
 * MODALIAS for namespace devices and the bit number for an nvdimm_bus to
 * match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);
static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);
static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}
static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);
static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);
static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);
static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);
static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);
static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);
static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}
static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);
static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};
static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}
struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}
/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}
void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}
static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}
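/*
 * Example output (hypothetical values): reading mapping0 for a region whose
 * first mapping is backed by DIMM "nmem0" starting at DPA 0 with a 256MiB
 * span would return "nmem0,0,268435456" per the format string above.
 */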
#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev, \
		struct device_attribute *attr, char *buf) \
{ \
	return mappingN(dev, buf, idx); \
} \
static DEVICE_ATTR_RO(mapping##idx)
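/*
 * For reference, a sketch of what REGION_MAPPING(0) expands to (assuming the
 * standard DEVICE_ATTR_RO() helper):
 *
 *	static ssize_t mapping0_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		return mappingN(dev, buf, 0);
 *	}
 *	static DEVICE_ATTR_RO(mapping0)
 *
 * i.e. dev_attr_mapping0 is defined with read-only (0444) permissions and
 * mapping0_show() as its ->show() method.
 */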
/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);
static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}
static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};
struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}
/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);
void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
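/*
 * Illustrative usage sketch (hypothetical caller, not part of this file): a
 * BLK or BTT I/O path brackets its per-lane work with acquire/release, e.g.
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	// ... program the data window / update the BTT log for this lane ...
 *
 *	nd_region_release_lane(nd_region, lane);
 *
 * The returned index selects the per-lane resources, and the pairing also
 * balances the get_cpu()/put_cpu() preemption handling done above.
 */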
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);
			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}
	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}
	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}
	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem().  The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd->flush_wpq[i][0])
			writeq(1, ndrd->flush_wpq[i][0]);
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
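/*
 * Illustrative usage sketch (hypothetical caller): a pmem block driver would
 * typically issue cache-bypassing writes to the media and then drain the
 * posted-write queues, e.g.
 *
 *	memcpy_to_pmem(pmem_addr, mem, len);
 *	nvdimm_flush(to_nd_region(dev->parent));
 *
 * where 'dev' is assumed to be a namespace device whose parent is the
 * interleaved pmem region.
 */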
/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++)
		/* flush hints present, flushing required */
		if (ndrd->flush_wpq[i][0])
			return 1;

	/*
	 * The platform defines dimm devices without hints, assume a
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
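/*
 * Illustrative usage sketch (hypothetical caller): a region driver can use
 * the return value to decide whether explicit flushing is needed, e.g.
 *
 *	if (nvdimm_has_flush(nd_region) < 0)
 *		dev_warn(dev, "unable to guarantee persistence of writes\n");
 *
 * A zero return means the platform is assumed to provide persistence (e.g.
 * ADR) without flush hints.
 */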
void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}