/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}

static struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
};

static struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
};

static struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
};

static bool is_namespace_pmem(struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(struct device *dev)
{
	return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}

static int is_uuid_busy(struct device *dev, void *data)
{
	u8 *uuid1 = data, *uuid2 = NULL;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid2 = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid2 = nsblk->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		uuid2 = nd_pfn->uuid;
	}

	if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
		return -EBUSY;

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_pmem(dev) || is_nd_blk(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

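/*
 * A pmem namespace is mapped with a struct page memmap (ZONE_DEVICE)
 * only when the kernel has CONFIG_ZONE_DEVICE, the region opted in via
 * ND_REGION_PAGEMAP, the device is not a btt/pfn personality, and the
 * arch maps pmem write-back cacheable (MEMREMAP_WB).
 */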
bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

#ifdef ARCH_MEMREMAP_PMEM
	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else
	return false;
#endif
}
EXPORT_SYMBOL(pmem_should_map_pages);

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const u8 *nd_dev_to_uuid(struct device *dev)
{
	static const u8 null_uuid[16];

	if (!dev)
		return null_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

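/*
 * Common helper for the alt_name attribute: require an idle namespace
 * (no driver bound, not claimed), then replace the in-memory alternate
 * name with a trimmed copy of @buf.
 */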
static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	input[len] = '\0';
	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

out:
	kfree(input);
	return rc;
}

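/*
 * Sum the sizes of all dpa resources tagged with this namespace's blk
 * label_id on the dimm backing the blk region (mapping[0]).
 */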
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);
	return size;
}

static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);

static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

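/*
 * Free @n bytes from the tail of the dpa allocations carrying
 * @label_id on this dimm: whole trailing resources are deleted and a
 * final partial resource is shrunk via adjust_resource(). BLK
 * allocations give up their low end so they stay packed at high DPA.
 */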
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}

static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}

static bool space_valid(bool is_pmem, bool is_reserve,
		struct nd_label_id *label_id, struct resource *res)
{
	/*
	 * For BLK-space any space is valid, for PMEM-space, it must be
	 * contiguous with an existing allocation unless we are
	 * reserving pmem.
	 */
	if (is_reserve || !is_pmem)
		return true;
	if (!res || strcmp(res->name, label_id->id) == 0)
		return true;
	return false;
}

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

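/*
 * Scan this dimm's dpa resource tree for free space before, between,
 * or after existing allocations. Prefer growing an adjacent resource
 * that already carries @label_id, otherwise start a new allocation,
 * and rescan until @n bytes are placed or no hole qualifies. Returns
 * the number of bytes that could not be allocated.
 */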
static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	const resource_size_t to_allocate = n;
	struct resource *res;
	int first;

 retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t allocate, available = 0, free_start, free_end;
		struct resource *next = res->sibling, *new_res = NULL;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			free_start = nd_mapping->start;
			available = res->start - free_start;
			if (space_valid(is_pmem, is_reserve, label_id, NULL))
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			free_start = res->start + resource_size(res);
			free_end = min(mapping_end, next->start - 1);
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;
				loc = ALLOC_MID;
			}
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			free_start = res->start + resource_size(res);
			free_end = mapping_end;
			if (space_valid(is_pmem, is_reserve, label_id, res)
					&& free_start < free_end) {
				available = free_end + 1 - free_start;
				loc = ALLOC_AFTER;
			}
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				if (is_pmem && !is_reserve)
					return n;
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				if (is_pmem && !is_reserve)
					return n;
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				free_start += available - allocate;
			else if (!is_reserve && free_start != nd_mapping->start)
				return n;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					free_start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve pass".  Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

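/*
 * Coalesce physically adjacent dpa resources that share a blk
 * label_id into a single resource. Pmem allocations are kept
 * contiguous by construction, so they are skipped.
 */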
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
 retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}

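/*
 * Device-walk callback: for each pmem region that includes @data (an
 * nvdimm), claim all remaining free PMEM capacity under the synthetic
 * "pmem-reserve" label_id so a subsequent BLK allocation pass cannot
 * encroach on it.
 */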
static int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_nd_pmem(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}

static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;

	res->start = nd_region->ndr_start;
	res->end = nd_region->ndr_start + size - 1;
}

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

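/*
 * Resize the dpa allocation backing this namespace to @val total
 * bytes: the namespace must be idle and have a uuid, and @val must be
 * a multiple of SZ_4K times the number of mappings in the region.
 */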
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	u8 *uuid = NULL;
	int rc, i;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);
		return -ENXIO;
	}

	div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %dK aligned\n", val,
				(SZ_4K * nd_region->ndr_mappings) / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_available_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_size(nd_region, nspm,
				val * nd_region->ndr_mappings);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		/*
		 * Try to delete the namespace if we deleted all of its
		 * allocation, this is not the seed device for the
		 * region, and it is not actively claimed by a btt
		 * instance.
		 */
		if (val == 0 && nd_region->ns_seed != dev
				&& !nsblk->common.claim)
			nd_device_unregister(dev, ND_ASYNC);
	}

	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
			? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 *uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
	} else
		return -ENXIO;

	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace.  Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (nd_mapping->labels)
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	ssize_t rc = 0;
	u8 **ns_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
			rc, buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);

static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

	if (!is_namespace_blk(dev))
		return -ENXIO;

	return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc = 0;

	if (!is_namespace_blk(dev))
		return -ENXIO;

	device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
				ns_lbasize_supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
			rc, rc < 0 ? "tried" : "wrote", buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;

	device_lock(dev);
	claim = ndns->claim;
	if (pmem_should_map_pages(dev) || (claim && is_nd_pfn(claim)))
		mode = "memory";
	else if (claim && is_nd_btt(claim))
		mode = "safe";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	NULL,
};

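/*
 * Hide attributes that do not apply to the namespace type: blk
 * namespaces have no fixed 'resource', pmem namespaces have no
 * 'sector_size', and io namespaces expose only the non-label subset.
 */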
static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr) {
		if (is_namespace_blk(dev))
			return 0;
		return a->mode;
	}

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return S_IWUSR | S_IRUGO;

		if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
			return 0;

		return a->mode;
	}

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_force_raw.attr
			|| a == &dev_attr_mode.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

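/*
 * Resolve the namespace backing @dev: when @dev is a btt or pfn
 * personality, verify that its namespace is idle and claimed by @dev;
 * otherwise verify @dev itself is an unclaimed, fully configured
 * namespace of at least ND_MIN_NAMESPACE_SIZE.
 */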
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_namespace_common *ndns;
	resource_size_t size;

	if (nd_btt || nd_pfn) {
		struct device *host = NULL;

		if (nd_btt) {
			host = &nd_btt->dev;
			ndns = nd_btt->ndns;
		} else if (nd_pfn) {
			host = &nd_pfn->dev;
			ndns = nd_pfn->ndns;
		}

		if (!ndns || !host)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		device_lock(&ndns->dev);
		device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(host));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != host,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(host),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
					dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "%s: sector size not set\n",
					__func__);
			return ERR_PTR(-ENODEV);
		}
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;

	devs[0] = dev;
	return devs;
}

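/*
 * Check that a dimm in the interleave set carries a label matching
 * @uuid, @cookie, and set position @pos, while rejecting sets where a
 * dimm holds duplicate labels for the same uuid.
 */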
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *nd_label;
		bool found_uuid = false;
		int l;

		for_each_label(l, nd_label, nd_mapping->labels) {
			u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
			u16 position = __le16_to_cpu(nd_label->position);
			u16 nlabel = __le16_to_cpu(nd_label->nlabel);

			if (isetcookie != cookie)
				continue;

			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
				continue;

			if (found_uuid) {
				dev_dbg(to_ndd(nd_mapping)->dev,
						"%s duplicate entry for uuid\n",
						__func__);
				return false;
			}
			found_uuid = true;
			if (nlabel != nd_region->ndr_mappings)
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found)
			break;
	}
	return found != NULL;
}

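/*
 * Promote the label matching @pmem_id to labels[0] in every mapping
 * after checking that its dpa range is compliant with the range
 * published in the NFIT for that dimm.
 */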
static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
{
	struct nd_namespace_label *select = NULL;
	int i;

	if (!pmem_id)
		return -ENODEV;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *nd_label;
		u64 hw_start, hw_end, pmem_start, pmem_end;
		int l;

		for_each_label(l, nd_label, nd_mapping->labels)
			if (memcmp(nd_label->uuid, pmem_id,
						NSLABEL_UUID_LEN) == 0)
				break;

		if (!nd_label) {
			WARN_ON(1);
			return -EINVAL;
		}

		select = nd_label;
		/*
		 * Check that this label is compliant with the dpa
		 * range published in NFIT
		 */
		hw_start = nd_mapping->start;
		hw_end = hw_start + nd_mapping->size;
		pmem_start = __le64_to_cpu(select->dpa);
		pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
		if (pmem_start == hw_start && pmem_end <= hw_end)
			/* pass */;
		else
			return -EINVAL;

		nd_mapping->labels[0] = select;
		nd_mapping->labels[1] = NULL;
	}
	return 0;
}

/**
 * find_pmem_label_set - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 */
static int find_pmem_label_set(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm)
{
	u64 cookie = nd_region_interleave_set_cookie(nd_region);
	struct nd_namespace_label *nd_label;
	u8 select_id[NSLABEL_UUID_LEN];
	resource_size_t size = 0;
	u8 *pmem_id = NULL;
	int rc = -ENODEV, l;
	u16 i;

	if (cookie == 0)
		return -ENXIO;

	/*
	 * Find a complete set of labels by uuid.  By definition we can start
	 * with any mapping as the reference label
	 */
	for_each_label(l, nd_label, nd_region->mapping[0].labels) {
		u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);

		if (isetcookie != cookie)
			continue;

		for (i = 0; i < nd_region->ndr_mappings; i++)
			if (!has_uuid_at_pos(nd_region, nd_label->uuid,
						cookie, i))
				break;
		if (i < nd_region->ndr_mappings) {
			/*
			 * Give up if we don't find an instance of a
			 * uuid at each position (from 0 to
			 * nd_region->ndr_mappings - 1), or if we find a
			 * dimm with two instances of the same uuid.
			 */
			rc = -EINVAL;
			goto err;
		} else if (pmem_id) {
			/*
			 * If there is more than one valid uuid set, we
			 * need userspace to clean this up.
			 */
			rc = -EBUSY;
			goto err;
		}
		memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
		pmem_id = select_id;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1].  In the process,
	 * check that the namespace aligns with interleave-set.  We know
	 * that it does not overlap with any blk namespaces by virtue of
	 * the dimm being enabled (i.e. nd_label_reserve_dpa()
	 * succeeded).
	 */
	rc = select_pmem_id(nd_region, pmem_id);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_namespace_label *label0 = nd_mapping->labels[0];

		size += __le64_to_cpu(label0->rawsize);
		if (__le16_to_cpu(label0->position) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup((void __force *) label0->name,
				NSLABEL_NAME_LEN, GFP_KERNEL);
		nspm->uuid = kmemdup((void __force *) label0->uuid,
				NSLABEL_UUID_LEN, GFP_KERNEL);
	}

	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}

	nd_namespace_pmem_set_size(nd_region, nspm, size);

	return 0;
 err:
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
		break;
	case -ENODEV:
		dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
		break;
	default:
		dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
				__func__, rc);
		break;
	}
	return rc;
}

static struct device **create_namespace_pmem(struct nd_region *nd_region)
{
	struct nd_namespace_pmem *nspm;
	struct device *dev, **devs;
	struct resource *res;
	int rc;

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return NULL;

	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	rc = find_pmem_label_set(nd_region, nspm);
	if (rc == -ENODEV) {
		int i;

		/* Pass, try to permit namespace creation... */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
		}

		/* Publish a zero-sized namespace for userspace to configure. */
		nd_namespace_pmem_set_size(nd_region, nspm, 0);

		rc = 0;
	} else if (rc)
		goto err;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs)
		goto err;

	devs[0] = dev;
	return devs;

 err:
	namespace_pmem_release(&nspm->nsio.common.dev);
	return NULL;
}

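/*
 * Grow nsblk's resource-pointer array by one slot and point the new
 * slot at the dimm dpa resource that carries this namespace's label_id
 * and starts at @start.
 */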
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start)
{
	struct nd_label_id label_id;
	struct resource *res;

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	res = krealloc(nsblk->res,
			sizeof(void *) * (nsblk->num_resources + 1),
			GFP_KERNEL);
	if (!res)
		return NULL;
	nsblk->res = (struct resource **) res;
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0
				&& res->start == start) {
			nsblk->res[nsblk->num_resources++] = res;
			return res;
		}
	return NULL;
}

static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
{
	struct nd_namespace_blk *nsblk;
	struct device *dev;

	if (!is_nd_blk(&nd_region->dev))
		return NULL;

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return NULL;

	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nsblk->id < 0) {
		kfree(nsblk);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
	dev->parent = &nd_region->dev;
	dev->groups = nd_namespace_attribute_groups;

	return &nsblk->common.dev;
}

void nd_region_create_blk_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->ns_seed = nd_namespace_blk_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create blk namespace\n");
	else
		nd_device_register(nd_region->ns_seed);
}

void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->pfn_seed)
		dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->btt_seed = nd_btt_create(nd_region);
	/*
	 * Seed creation failures are not fatal, provisioning is simply
	 * disabled until memory becomes available
	 */
	if (!nd_region->btt_seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
}

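/*
 * Instantiate one nd_namespace_blk device per local (blk) uuid found
 * in the label set of the region's single mapping, attaching each
 * matching dpa resource to its namespace. With no labels, publish a
 * zero-sized seed namespace for userspace to configure.
 */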
static struct device **create_namespace_blk(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nd_namespace_label *nd_label;
	struct device *dev, **devs = NULL;
	struct nd_namespace_blk *nsblk;
	struct nvdimm_drvdata *ndd;
	int i, l, count = 0;
	struct resource *res;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	ndd = to_ndd(nd_mapping);
	for_each_label(l, nd_label, nd_mapping->labels) {
		u32 flags = __le32_to_cpu(nd_label->flags);
		char name[NSLABEL_NAME_LEN];
		struct device **__devs;

		if (flags & NSLABEL_FLAG_LOCAL)
			/* pass */;
		else
			continue;

		for (i = 0; i < count; i++) {
			nsblk = to_nd_namespace_blk(devs[i]);
			if (memcmp(nsblk->uuid, nd_label->uuid,
						NSLABEL_UUID_LEN) == 0) {
				res = nsblk_add_resource(nd_region, ndd, nsblk,
						__le64_to_cpu(nd_label->dpa));
				if (!res)
					goto err;
				nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
						dev_name(&nsblk->common.dev));
				break;
			}
		}
		if (i < count)
			continue;
		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		if (!nsblk)
			goto err;
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
		devs[count++] = dev;
		nsblk->id = -1;
		nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
		nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
				GFP_KERNEL);
		if (!nsblk->uuid)
			goto err;
		memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
		if (name[0])
			nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
					GFP_KERNEL);
		res = nsblk_add_resource(nd_region, ndd, nsblk,
				__le64_to_cpu(nd_label->dpa));
		if (!res)
			goto err;
		nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
				dev_name(&nsblk->common.dev));
	}

	dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
			__func__, count, count == 1 ? "" : "s");

	if (count == 0) {
		/* Publish a zero-sized namespace for userspace to configure. */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];

			kfree(nd_mapping->labels);
			nd_mapping->labels = NULL;
		}

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;
		nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
		if (!nsblk)
			goto err;
		dev = &nsblk->common.dev;
		dev->type = &namespace_blk_device_type;
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	}

	return devs;

err:
	for (i = 0; i < count; i++) {
		nsblk = to_nd_namespace_blk(devs[i]);
		namespace_blk_release(&nsblk->common.dev);
	}
	kfree(devs);
	return NULL;
}

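/*
 * Pin each enabled dimm in the region, cache its driver-data on the
 * mapping, and build the per-mapping array of labels read from the
 * dimm's label area.
 */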
static int init_active_labels(struct nd_region *nd_region)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int count, j;

		/*
		 * If the dimm is disabled then prevent the region from
		 * being activated if it aliases DPA.
		 */
		if (!ndd) {
			if ((nvdimm->flags & NDD_ALIASING) == 0)
				return 0;
			dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev));
			return -ENXIO;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
		if (!count)
			continue;
		nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
				GFP_KERNEL);
		if (!nd_mapping->labels)
			return -ENOMEM;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label = nd_label_active(ndd, j);
			nd_mapping->labels[j] = label;
		}
	}

	return 0;
}

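/*
 * Region probe entry point: read the active labels, create the
 * namespace devices appropriate to the region type, and register
 * them. Returns the number of namespaces registered; devices that
 * failed to instantiate are counted in @err.
 */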
int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
		devs = create_namespace_pmem(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespace_blk(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nsblk->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		dev->groups = nd_namespace_attribute_groups;
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (*err == 0)
			rc = -ENODEV;
	}
	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}