/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

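/* dimm_ida supplies the instance number behind each "nmemN" device name */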
static DEFINE_IDA(dimm_ida);

/*
 * Retrieve bus and dimm handle and return whether this bus supports
 * get_config_data commands
 */
static int __validate_dimm(struct nvdimm_drvdata *ndd)
{
        struct nvdimm *nvdimm;

        if (!ndd)
                return -EINVAL;

        nvdimm = to_nvdimm(ndd->dev);

        if (!nvdimm->dsm_mask)
                return -ENXIO;
        if (!test_bit(ND_CMD_GET_CONFIG_DATA, nvdimm->dsm_mask))
                return -ENXIO;

        return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
        int rc = __validate_dimm(ndd);

        if (rc && ndd)
                dev_dbg(ndd->dev, "%pf: %s error: %d\n",
                                __builtin_return_address(0), __func__, rc);
        return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data for the dimm to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
        struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);

        if (rc)
                return rc;

        if (cmd->config_size)
                return 0; /* already valid */

        memset(cmd, 0, sizeof(*cmd));
        nd_desc = nvdimm_bus->nd_desc;
        return nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd));
}

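/*
 * Illustrative sketch (not part of the original file): a bus provider
 * services ND_CMD_GET_CONFIG_SIZE from its ->ndctl() callback by filling
 * in the nd_cmd_get_config_size payload that nvdimm_init_nsarea() passes
 * down.  The callback name and the sizes below are hypothetical; the
 * payload layout comes from include/uapi/linux/ndctl.h.
 */
#if 0
static int example_ndctl(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd, void *buf,
                unsigned int buf_len)
{
        struct nd_cmd_get_config_size *size = buf;

        if (cmd != ND_CMD_GET_CONFIG_SIZE || buf_len < sizeof(*size))
                return -ENOTTY;

        size->status = 0;               /* success */
        size->config_size = SZ_128K;    /* total label storage area */
        size->max_xfer = SZ_4K;         /* per-command transfer limit */
        return 0;
}
#endif
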
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nd_cmd_get_config_data_hdr *cmd;
        struct nvdimm_bus_descriptor *nd_desc;
        int rc = validate_dimm(ndd);
        u32 max_cmd_size, config_size;
        size_t offset;

        if (rc)
                return rc;

        if (ndd->data)
                return 0;

        if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
                        || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
                dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
                                ndd->nsarea.max_xfer, ndd->nsarea.config_size);
                return -ENXIO;
        }

        ndd->data = kmalloc(ndd->nsarea.config_size, GFP_KERNEL);
        if (!ndd->data)
                ndd->data = vmalloc(ndd->nsarea.config_size);

        if (!ndd->data)
                return -ENOMEM;

        max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

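        /*
         * Read the label area in max_cmd_size chunks: each loop iteration
         * issues one ND_CMD_GET_CONFIG_DATA call and copies the returned
         * bytes into ndd->data at the current offset.
         */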
        nd_desc = nvdimm_bus->nd_desc;
        for (config_size = ndd->nsarea.config_size, offset = 0;
                        config_size; config_size -= cmd->in_length,
                        offset += cmd->in_length) {
                cmd->in_length = min(config_size, max_cmd_size);
                cmd->in_offset = offset;
                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_GET_CONFIG_DATA, cmd,
                                cmd->in_length + sizeof(*cmd));
                if (rc || cmd->status) {
                        rc = -ENXIO;
                        break;
                }
                memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
        }
        dev_dbg(ndd->dev, "%s: len: %zu rc: %d\n", __func__, offset, rc);
        kfree(cmd);

        return rc;
}

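/*
 * Write @len bytes from @buf into the dimm's label area at @offset,
 * splitting the transfer into ND_CMD_SET_CONFIG_DATA calls that respect
 * both PAGE_SIZE and the dimm's reported max_xfer limit.
 */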
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
                void *buf, size_t len)
{
        int rc = validate_dimm(ndd);
        size_t max_cmd_size, buf_offset;
        struct nd_cmd_set_config_hdr *cmd;
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
        struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

        if (rc)
                return rc;

        if (!ndd->data)
                return -ENXIO;

        if (offset + len > ndd->nsarea.config_size)
                return -ENXIO;

        max_cmd_size = min_t(u32, PAGE_SIZE, len);
        max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
        cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        for (buf_offset = 0; len; len -= cmd->in_length,
                        buf_offset += cmd->in_length) {
                size_t cmd_size;
                u32 *status;

                cmd->in_offset = offset + buf_offset;
                cmd->in_length = min(max_cmd_size, len);
                memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

                /* status is output in the last 4-bytes of the command buffer */
                cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
                status = ((void *) cmd) + cmd_size - sizeof(u32);

                rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                                ND_CMD_SET_CONFIG_DATA, cmd, cmd_size);
                if (rc || *status) {
                        rc = rc ? rc : -ENXIO;
                        break;
                }
        }
        kfree(cmd);

        return rc;
}

static void nvdimm_release(struct device *dev)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        ida_simple_remove(&dimm_ida, nvdimm->id);
        kfree(nvdimm);
}

static struct device_type nvdimm_device_type = {
        .name = "nvdimm",
        .release = nvdimm_release,
};

bool is_nvdimm(struct device *dev)
{
        return dev->type == &nvdimm_device_type;
}

struct nvdimm *to_nvdimm(struct device *dev)
{
        struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

        WARN_ON(!is_nvdimm(dev));
        return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
        struct nd_region *nd_region = &ndbr->nd_region;
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];

        return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

        return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
        struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
        struct device *dev = ndd->dev;
        struct resource *res, *_r;

        dev_dbg(dev, "%s\n", __func__);

        nvdimm_bus_lock(dev);
        for_each_dpa_resource_safe(ndd, res, _r)
                nvdimm_free_dpa(ndd, res);
        nvdimm_bus_unlock(dev);

        if (ndd->data && is_vmalloc_addr(ndd->data))
                vfree(ndd->data);
        else
                kfree(ndd->data);
        kfree(ndd);
        put_device(dev);
}

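/*
 * nvdimm_drvdata lifetime is kref-managed: take references with
 * get_ndd() and drop them with put_ndd(); the final put invokes
 * nvdimm_drvdata_release() above.
 */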
void get_ndd(struct nvdimm_drvdata *ndd)
{
        kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
        if (ndd)
                kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
        return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
        if (nvdimm)
                return nvdimm->provider_data;
        return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);
        int cmd, len = 0;

        if (!nvdimm->dsm_mask)
                return sprintf(buf, "\n");

        for_each_set_bit(cmd, nvdimm->dsm_mask, BITS_PER_LONG)
                len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
        len += sprintf(buf + len, "\n");
        return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        struct nvdimm *nvdimm = to_nvdimm(dev);

        /*
         * The state may be in the process of changing, userspace should
         * quiesce probing if it wants a static answer
         */
        nvdimm_bus_lock(dev);
        nvdimm_bus_unlock(dev);
        return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                        ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
        ssize_t rc;
        u32 nfree;

        if (!ndd)
                return -ENXIO;

        nvdimm_bus_lock(dev);
        nfree = nd_label_nfree(ndd);
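        /* nfree is unsigned, so "nfree - 1 > nfree" detects a 0 -> UINT_MAX wrap */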
        if (nfree - 1 > nfree) {
                dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
                nfree = 0;
        } else
                nfree--;
        rc = sprintf(buf, "%d\n", nfree);
        nvdimm_bus_unlock(dev);
        return rc;
}
static DEVICE_ATTR_RO(available_slots);

static struct attribute *nvdimm_attributes[] = {
        &dev_attr_state.attr,
        &dev_attr_commands.attr,
        &dev_attr_available_slots.attr,
        NULL,
};

struct attribute_group nvdimm_attribute_group = {
        .attrs = nvdimm_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);

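/**
 * nvdimm_create - register a dimm device on an nvdimm bus
 * @nvdimm_bus: parent bus for the new dimm device
 * @provider_data: opaque data later retrieved via nvdimm_provider_data()
 * @groups: optional provider-specific sysfs attribute groups
 * @flags: provider-specific dimm flags
 * @dsm_mask: bitmask of ND_CMD_* commands the dimm supports
 */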
struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data,
                const struct attribute_group **groups, unsigned long flags,
                unsigned long *dsm_mask)
{
        struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
        struct device *dev;

        if (!nvdimm)
                return NULL;

        nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
        if (nvdimm->id < 0) {
                kfree(nvdimm);
                return NULL;
        }
        nvdimm->provider_data = provider_data;
        nvdimm->flags = flags;
        nvdimm->dsm_mask = dsm_mask;
        atomic_set(&nvdimm->busy, 0);
        dev = &nvdimm->dev;
        dev_set_name(dev, "nmem%d", nvdimm->id);
        dev->parent = &nvdimm_bus->dev;
        dev->type = &nvdimm_device_type;
        dev->devt = MKDEV(nvdimm_major, nvdimm->id);
        dev->groups = groups;
        nd_device_register(dev);

        return nvdimm;
}
EXPORT_SYMBOL_GPL(nvdimm_create);

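/*
 * Illustrative sketch (not part of the original file): how a bus
 * provider might register a dimm.  The dsm mask and helper below are
 * hypothetical; a real provider (e.g. the ACPI NFIT driver) derives the
 * mask from firmware.
 */
#if 0
static unsigned long example_dsm_mask = 1UL << ND_CMD_GET_CONFIG_SIZE
        | 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA;

static struct nvdimm *example_register_dimm(struct nvdimm_bus *nvdimm_bus,
                void *provider_data)
{
        /* NULL groups and 0 flags: expose only the core dimm attributes */
        return nvdimm_create(nvdimm_bus, provider_data, NULL, 0,
                        &example_dsm_mask);
}
#endif
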
/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_mapping: container of dpa-resource-root + labels
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges.
 */
resource_size_t nd_blk_available_dpa(struct nd_mapping *nd_mapping)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        resource_size_t map_end, busy = 0, available;
        struct resource *res;

        if (!ndd)
                return 0;

        map_end = nd_mapping->start + nd_mapping->size - 1;
        for_each_dpa_resource(ndd, res)
                if (res->start >= nd_mapping->start && res->start < map_end) {
                        resource_size_t end = min(map_end, res->end);

                        busy += end - res->start + 1;
                } else if (res->end >= nd_mapping->start
                                && res->end <= map_end) {
                        busy += res->end - nd_mapping->start + 1;
                } else if (nd_mapping->start > res->start
                                && nd_mapping->start < res->end) {
                        /* total eclipse of the BLK region mapping */
                        busy += nd_mapping->size;
                }

        available = map_end - nd_mapping->start + 1;
        if (busy < available)
                return available - busy;
        return 0;
}

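/*
 * Worked example with hypothetical numbers: for a BLK mapping of
 * [0x10000, 0x1ffff], an allocated resource [0xf000, 0x10fff] straddles
 * the mapping start and hits the second branch above, charging
 * 0x10fff - 0x10000 + 1 = 0x1000 busy bytes and leaving 0xf000 of the
 * 0x10000-byte mapping available.
 */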
/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
        resource_size_t map_start, map_end, busy = 0, available, blk_start;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;
        const char *reason;

        if (!ndd)
                return 0;

        map_start = nd_mapping->start;
        map_end = map_start + nd_mapping->size - 1;
        blk_start = max(map_start, map_end + 1 - *overlap);
        for_each_dpa_resource(ndd, res)
                if (res->start >= map_start && res->start < map_end) {
                        if (strncmp(res->name, "blk", 3) == 0)
                                blk_start = min(blk_start, res->start);
                        else if (res->start != map_start) {
                                reason = "misaligned to iset";
                                goto err;
                        } else {
                                if (busy) {
                                        reason = "duplicate overlapping PMEM reservations?";
                                        goto err;
                                }
                                busy += resource_size(res);
                                continue;
                        }
                } else if (res->end >= map_start && res->end <= map_end) {
                        if (strncmp(res->name, "blk", 3) == 0) {
                                /*
                                 * If a BLK allocation overlaps the start of
                                 * PMEM the entire interleave set may now only
                                 * be used for BLK.
                                 */
                                blk_start = map_start;
                        } else {
                                reason = "misaligned to iset";
                                goto err;
                        }
                } else if (map_start > res->start && map_start < res->end) {
                        /* total eclipse of the mapping */
                        busy += nd_mapping->size;
                        blk_start = map_start;
                }

        *overlap = map_end + 1 - blk_start;
        available = blk_start - map_start;
        if (busy < available)
                return available - busy;
        return 0;

 err:
        /*
         * Something is wrong, PMEM must align with the start of the
         * interleave set, and there can only be one allocation per set.
         */
        nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
        return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        kfree(res->name);
        __release_region(&ndd->dpa, res->start, resource_size(res));
}

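/*
 * Reserve @n bytes of dimm-physical-address space at @start.  The
 * resource name is a kmemdup() of @label_id so allocations can later be
 * matched by string comparison; nvdimm_free_dpa() kfree()s it on release.
 */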
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id, resource_size_t start,
                resource_size_t n)
{
        char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
        struct resource *res;

        if (!name)
                return NULL;

        WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
        res = __request_region(&ndd->dpa, start, n, name, 0);
        if (!res)
                kfree(name);
        return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        resource_size_t allocated = 0;
        struct resource *res;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        allocated += resource_size(res);

        return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
        int *count = c;

        if (is_nvdimm(dev))
                (*count)++;
        return 0;
}

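/*
 * Used by a bus provider after registering its dimms to verify that all
 * of the asynchronous device registrations kicked off by
 * nvdimm_create() actually succeeded.
 */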
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
        int count = 0;

        /* Flush any possible dimm registration failures */
        nd_synchronize();

        device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
        dev_dbg(&nvdimm_bus->dev, "%s: count: %d\n", __func__, count);
        if (count != dimm_count)
                return -ENXIO;
        return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);