/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/pmem.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < nvdimm->num_flush; i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd->flush_wpq[dimm][j] & PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd->flush_wpq[dimm][i] = flush_page
			+ (res->start & ~PAGE_MASK);
	}

	return 0;
}
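/*
 * Worked example for the sharing check above (addresses are
 * illustrative, not from any real platform): flush hints at 0x1000 and
 * 0x1040 reduce to the same pfn, so the second hint reuses the first
 * hint's page mapping with only the in-page offset (0x40) differing,
 * instead of calling devm_nvdimm_ioremap() twice for the same page.
 */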

int nd_region_activate(struct nd_region *nd_region)
{
	int i, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	return 0;
}
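/*
 * Sizing sketch for the devm_kzalloc() above (counts assumed for
 * illustration): a region spanning two dimms that each advertise two
 * flush hints accumulates flush_data_size = sizeof(void *)
 * * (1 + 2 * (1 + 2)), i.e. seven pointer slots (56 bytes on a 64-bit
 * build) appended to struct nd_region_data for the flush_wpq table.
 */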

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and the bit number for an nvdimm_bus
 * to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_nd_pmem(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (nvdimm->flags & NDD_ALIASING)
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);
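/*
 * For example, a pmem region that includes at least one NDD_ALIASING
 * dimm must be subdivided via labels and reports
 * ND_DEVICE_NAMESPACE_PMEM, while a pmem region with no aliasing is
 * consumed whole as a label-less ND_DEVICE_NAMESPACE_IO namespace.
 */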

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_nd_pmem(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (is_nd_pmem(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	return sprintf(buf, "%#llx\n", nd_set->cookie);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_nd_pmem(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev)) {
			available += nd_blk_available_dpa(nd_mapping);
		}
	}

	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size.  Of course, this value is potentially invalidated once
	 * the nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_nd_pmem(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->cookie;
	return 0;
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds.  Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}

		if (is_nd_pmem(dev))
			return;
	}
	if (dev->parent && is_nd_blk(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev &&
				is_nd_blk(dev->parent))
			nd_region_create_blk_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size);
}
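/*
 * Reading one of the mappingN attributes defined below yields
 * "<dimm>,<dpa-start>,<size>", e.g. (values illustrative):
 *
 *	# cat /sys/bus/nd/devices/region0/mapping0
 *	nmem0,0,34359738368
 */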

#define REGION_MAPPING(idx) \
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while, even in the presence of socket
 * interleave a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_err(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	/* pairs with the get_cpu() in nd_region_acquire_lane() */
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
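
/*
 * Usage sketch (not called anywhere in this file): a BLK or BTT I/O
 * path brackets its per-lane work with the acquire/release pair above.
 * The empty body stands in for the actual data-window programming.
 */
static void __maybe_unused nd_region_lane_example(struct nd_region *nd_region)
{
	unsigned int lane = nd_region_acquire_lane(nd_region);

	/* ... issue I/O through the resources reserved for @lane ... */

	nd_region_release_lane(nd_region, lane);
}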

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if ((nd_mapping->start | nd_mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (nvdimm->flags & NDD_UNARMED)
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	memcpy(nd_region->mapping, ndr_desc->nd_mapping,
			sizeof(struct nd_mapping) * ndr_desc->num_mappings);
	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
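/*
 * Provider-side sketch (all "my_*" names are illustrative, not a real
 * bus implementation): a platform driver fills out a struct
 * nd_region_desc with the physical range and per-dimm mappings, then
 * registers a pmem region:
 *
 *	struct nd_region_desc ndr_desc = {
 *		.res = &my_pmem_resource,
 *		.nd_mapping = my_mappings,
 *		.num_mappings = my_mapping_count,
 *		.attr_groups = my_region_attr_groups,
 *		.numa_node = NUMA_NO_NODE,
 *	};
 *
 *	if (!nvdimm_pmem_region_create(my_nvdimm_bus, &ndr_desc))
 *		return -ENXIO;
 */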

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush.  Note that we've already arranged for pmem
	 * writes to avoid the cache via arch_memcpy_to_pmem().  The
	 * final wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd->flush_wpq[i][0])
			writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
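/*
 * Usage sketch: a pmem driver makes writes durable by combining
 * cache-bypassing copies with a posted-write-queue drain, e.g. (names
 * illustrative):
 *
 *	memcpy_to_pmem(pmem_addr, src, len);
 *	nvdimm_flush(nd_region);
 *
 * memcpy_to_pmem() (from <linux/pmem.h>, included above) already
 * avoids the cpu cache, so nvdimm_flush() only needs to drain the
 * memory controller buffers via the flush hints.
 */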

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i;

	/* no nvdimm == flushing capability unknown */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++)
		/* flush hints present, flushing required */
		if (ndrd->flush_wpq[i][0])
			return 1;

	/*
	 * The platform defines dimm devices without hints, assume
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
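/*
 * Usage sketch: callers typically warn and fall back to assuming
 * platform-managed persistence when the capability is unknown, e.g.:
 *
 *	if (nvdimm_has_flush(nd_region) < 0)
 *		dev_warn(dev, "unable to guarantee persistence of writes\n");
 */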

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}