/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

/*
 * backing device capabilities for non-mappable devices (such as NAND flash)
 * - permits private mappings, copies are taken of the data
 */
static struct backing_dev_info mtd_bdi_unmappable = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

/*
 * backing device capabilities for R/O mappable devices (such as ROM)
 * - permits private mappings, copies are taken of the data
 * - permits non-writable shared mappings
 */
static struct backing_dev_info mtd_bdi_ro_mappable = {
	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};

/*
 * backing device capabilities for writable mappable devices (such as RAM)
 * - permits private mappings, copies are taken of the data
 * - permits writable shared mappings (BDI_CAP_WRITE_MAP)
 */
static struct backing_dev_info mtd_bdi_rw_mappable = {
	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
			   BDI_CAP_WRITE_MAP),
};

static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.suspend = mtd_cls_suspend,
	.resume = mtd_cls_resume,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);

#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node if needed */
	if (index)
		device_destroy(&mtd_class, index + 1);
}

static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
	NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
}
static DEVICE_ATTR(corrected_bits, S_IRUGO,
		   mtd_ecc_stats_corrected_show, NULL);

static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
}
static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);

static ssize_t mtd_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
}
static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);

static ssize_t mtd_bbtblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
}
static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or 1 on failure, which currently will only happen
 * if there is insufficient memory or a sysfs error.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_notifier *not;
	int i, error;

	if (!mtd->backing_dev_info) {
		switch (mtd->type) {
		case MTD_RAM:
			mtd->backing_dev_info = &mtd_bdi_rw_mappable;
			break;
		case MTD_ROM:
			mtd->backing_dev_info = &mtd_bdi_ro_mappable;
			break;
		default:
			mtd->backing_dev_info = &mtd_bdi_unmappable;
			break;
		}
	}

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0)
		goto fail_locked;

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
	}

	/* Caller should have set dev.parent to match the
	 * physical device.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	if (device_register(&mtd->dev) != 0)
		goto fail_added;

	if (MTD_DEVT(i))
		device_create(&mtd_class, mtd->dev.parent,
			      MTD_DEVT(i) + 1,
			      NULL, "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_added:
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return 1;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success, -ENODEV if the requested device does not
 * appear to be present in the list, or -EBUSY if it is still in use.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * It first tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found, this function tries to fall back to the information specified in
 *   @parts/@nr_parts.
 * * If any partitioning info was found, this function registers the found
 *   partitions.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int err;
	struct mtd_partition *real_parts;

	err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
	if (err <= 0 && nr_parts && parts) {
		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
				     GFP_KERNEL);
		if (!real_parts)
			err = -ENOMEM;
		else
			err = nr_parts;
	}

	if (err > 0) {
		err = add_mtd_partitions(mtd, real_parts, err);
		kfree(real_parts);
	} else if (err == 0) {
		err = add_mtd_device(mtd);
		if (err == 1)
			err = -ENODEV;
	}

	return err;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
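
/*
 * Illustrative sketch of the pattern this helper supports: a driver probe()
 * registering its chip with a static fallback partition table.  All
 * "example_" names below are hypothetical, not part of this file.
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "boot", .offset = 0,       .size = SZ_256K },
 *		{ .name = "data", .offset = SZ_256K, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = example_setup_chip(pdev);
 *
 *		return mtd_device_parse_register(mtd, NULL, NULL, example_parts,
 *						 ARRAY_SIZE(example_parts));
 *	}
 */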

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user(struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
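
/*
 * Illustrative sketch of a minimal MTD 'user'.  The names are hypothetical,
 * but real users (mtd_blkdevs, for instance) follow the same shape: the add()
 * callback fires once per existing device at registration time, then again
 * for every later arrival.
 *
 *	static void example_add_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) arrived\n", mtd->index, mtd->name);
 *	}
 *
 *	static void example_remove_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d departing\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier example_notifier = {
 *		.add	= example_add_mtd,
 *		.remove	= example_remove_mtd,
 *	};
 *
 *	register_mtd_user(&example_notifier);
 */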

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user(struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return it if it's still present. Given
 * both, return the num'th entry only if its address matches. Return an
 * error pointer if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);

int __get_mtd_device(struct mtd_info *mtd)
{
	int err;

	if (!try_module_get(mtd->owner))
		return -ENODEV;

	if (mtd->_get_device) {
		err = mtd->_get_device(mtd);

		if (err) {
			module_put(mtd->owner);
			return err;
		}
	}
	mtd->usecount++;
	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 * @name: MTD device name to open
 *
 * This function returns MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
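
/*
 * Illustrative sketch, assuming a device registered under the hypothetical
 * name "nand-boot": every successful get_mtd_device() or get_mtd_device_nm()
 * must be balanced with put_mtd_device() once the caller is done.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("nand-boot");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */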

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	--mtd->usecount;
	BUG_ON(mtd->usecount < 0);

	if (mtd->_put_device)
		mtd->_put_device(mtd);

	module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is an asynchronous operation.  Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	if (!instr->len) {
		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
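
/*
 * Illustrative sketch of the erase-and-wait pattern described above, modeled
 * loosely on mtdchar; the "example_" names are hypothetical.  The callback
 * wakes the sleeper once the driver marks the operation done or failed.
 *
 *	static void example_erase_callback(struct erase_info *instr)
 *	{
 *		wake_up((wait_queue_head_t *)instr->priv);
 *	}
 *
 *	static int example_erase_sync(struct mtd_info *mtd, uint64_t ofs)
 *	{
 *		wait_queue_head_t waitq;
 *		struct erase_info instr = {
 *			.mtd      = mtd,
 *			.addr     = ofs,
 *			.len      = mtd->erasesize,
 *			.callback = example_erase_callback,
 *		};
 *		int ret;
 *
 *		init_waitqueue_head(&waitq);
 *		instr.priv = (unsigned long)&waitq;
 *
 *		ret = mtd_erase(mtd, &instr);
 *		if (ret)
 *			return ret;
 *		wait_event(waitq, instr.state == MTD_ERASE_DONE ||
 *				  instr.state == MTD_ERASE_FAILED);
 *		return instr.state == MTD_ERASE_FAILED ? -EIO : 0;
 *	}
 */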

/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/*
 * XIP should probably only be allowed when the driver also implements
 * _unpoint; check _unpoint here (not _point) so we never call a NULL method.
 */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	if (!mtd->_get_unmapped_area)
		return -EOPNOTSUPP;
	if (offset > mtd->size || len > mtd->size - offset)
		return -EINVAL;
	return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	int ret_code;
	*retlen = 0;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	/*
	 * In the absence of an error, drivers return a non-negative integer
	 * representing the maximum number of bitflips that were corrected on
	 * any one ecc region (if applicable; zero otherwise).
	 */
	ret_code = mtd->_read(mtd, from, len, retlen, buf);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);
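
/*
 * Illustrative caller-side sketch: -EUCLEAN means the data in @buf is valid,
 * but corrected bitflips reached mtd->bitflip_threshold, so the caller should
 * consider scrubbing (re-erasing and rewriting) the block soon, as UBI does.
 * The scrub helper below is hypothetical.
 *
 *	size_t retlen;
 *	int err = mtd_read(mtd, from, len, &retlen, buf);
 *
 *	if (err == -EUCLEAN)
 *		example_schedule_scrub(mtd, from);	// data still usable
 *	else if (err)
 *		return err;				// hard failure
 */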

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to > mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);

/*
 * In blackbox flight-recorder-like scenarios we want writes to succeed even
 * in interrupt context. panic_write() is only intended to be called when it
 * is known the kernel is about to panic and we need the write to succeed.
 * Since the kernel is not going to be running for much longer, this function
 * can break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to > mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;
	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	ret_code = mtd->_read_oob(mtd, from, ops);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);

/*
 * Methods to access the protection register area, present in some flash
 * devices. The user data is one-time programmable but the factory data is
 * read only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, the OTP region is exhausted
	 * and we must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!mtd->_block_isreserved)
		return 0;
	return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
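
/*
 * Illustrative sketch: walking a device eraseblock by eraseblock with the
 * helpers above, the usual pattern before erasing or torture-testing.  Note
 * that a device without bad-block support simply reports 0 for every block.
 *
 *	loff_t ofs;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		int bad = mtd_block_isbad(mtd, ofs);
 *
 *		if (bad < 0)
 *			return bad;
 *		if (bad)
 *			pr_info("bad block at 0x%llx\n",
 *				(unsigned long long)ofs);
 *	}
 */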

/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!mtd->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);
	return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
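
/*
 * Illustrative sketch: gathering a header and a payload into a single device
 * write.  "hdr", "hdr_len", "data" and "data_len" are hypothetical.
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
 *		{ .iov_base = data, .iov_len = data_len },
 *	};
 *	size_t retlen;
 *	int err = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
 */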

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
		      __GFP_NORETRY | __GFP_NO_KSWAPD;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
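
/*
 * Illustrative sketch: request an eraseblock-sized buffer but accept less
 * under memory pressure, then work through the range in *size chunks.
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// "size" now holds the actual length, a multiple of mtd->writesize
 *	...
 *	kfree(buf);
 */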

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
	.open		= mtd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
	int ret;

	ret = bdi_init(bdi);
	if (!ret)
		ret = bdi_register(bdi, NULL, "%s", name);

	if (ret)
		bdi_destroy(bdi);

	return ret;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
	if (ret)
		goto err_bdi1;

	ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
	if (ret)
		goto err_bdi2;

	ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
	if (ret)
		goto err_bdi3;

	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
err_bdi3:
	bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
	bdi_destroy(&mtd_bdi_unmappable);
err_bdi1:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi_unmappable);
	bdi_destroy(&mtd_bdi_ro_mappable);
	bdi_destroy(&mtd_bdi_rw_mappable);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");