UBI: Fastmap: Add new module parameter fm_debug
drivers/mtd/ubi/build.c
/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If MTD devices were
 * not specified, UBI does not attach any MTD device, but it is possible to do
 * this later using the "UBI control device".
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/mtd/partitions.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/* Maximum number of comma-separated items in the 'mtd=' parameter */
#define MTD_PARAM_MAX_COUNT 4

/* Maximum value for the number of bad PEBs per 1024 PEBs */
#define MAX_MTD_UBI_BEB_LIMIT 768

#ifdef CONFIG_MTD_UBI_MODULE
#define ubi_is_module() 1
#else
#define ubi_is_module() 0
#endif

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device number
 *        string
 * @ubi_num: UBI number which should be assigned to the newly created UBI device
 * @vid_hdr_offs: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int ubi_num;
	int vid_hdr_offs;
	int max_beb_per1024;
};

/* Numbers of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert;
static bool fm_debug;
#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class,
				struct class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
	__ATTR(version, S_IRUGO, ubi_version_show, NULL);

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	int ret;
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);

	switch (ntype) {
	case UBI_VOLUME_ADDED:
	case UBI_VOLUME_REMOVED:
	case UBI_VOLUME_RESIZED:
	case UBI_VOLUME_RENAMED:
		ret = ubi_update_fastmap(ubi);
		if (ret)
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
	}

	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

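/*
 * Illustrative sketch (not part of this driver): a subscriber that wants to
 * receive the notifications sent above registers a notifier_block whose
 * callback is handed a struct ubi_notification, e.g.:
 *
 *	static int my_vol_notify(struct notifier_block *nb,
 *				 unsigned long ntype, void *ns_ptr)
 *	{
 *		struct ubi_notification *nt = ns_ptr;
 *
 *		pr_info("ubi%d vol %d: event %lu\n",
 *			nt->di.ubi_num, nt->vi.vol_id, ntype);
 *		return NOTIFY_OK;
 *	}
 *
 * The callback name and the message are hypothetical; the fields used
 * (di.ubi_num, vi.vol_id) are the ones filled in by ubi_do_get_device_info()
 * and ubi_do_get_volume_info() above.
 */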
/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since the @ubi->device is locked, and we are not going to
		 * change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns UBI device description object for UBI device number
 * @ubi_num, or %NULL if the device does not exist. This function increases the
 * device reference count to prevent removal of the device. In other words, the
 * device cannot be removed if its reference count is not zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}

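/*
 * Illustrative sketch (not part of this driver): the intended usage pattern
 * for the two helpers above is get, use, put, e.g.:
 *
 *	struct ubi_device *ubi = ubi_get_device(0);
 *
 *	if (ubi) {
 *		pr_info("%s has %d good PEBs\n",
 *			ubi->ubi_name, ubi->good_peb_count);
 *		ubi_put_device(ubi);
 *	}
 *
 * The device number and the printed fields are arbitrary examples; the point
 * is that every successful ubi_get_device() must be balanced by a
 * ubi_put_device(), otherwise the device can never be detached.
 */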
/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches UBI device number object by its major number. If UBI
 * device was not found, this function returns -ENODEV, otherwise the UBI device
 * number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear if the device was removed or not yet. Indeed, if the
	 * device was removed before we increased its reference count,
	 * 'ubi_get_device()' will return -ENODEV and we fail.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

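/*
 * For reference (not code): the attributes handled above show up as read-only
 * sysfs files, e.g. /sys/class/ubi/ubi0/eraseblock_size or
 * /sys/class/ubi/ubi0/mtd_num for UBI device 0; reading such a file ends up
 * calling dev_attribute_show() with the matching struct device_attribute.
 */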
static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

/**
 * ubi_sysfs_init - initialize sysfs for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
	int err;

	ubi->dev.release = dev_release;
	ubi->dev.devt = ubi->cdev.dev;
	ubi->dev.class = ubi_class;
	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
	err = device_register(&ubi->dev);
	if (err)
		return err;

	*ref = 1;
	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_volumes_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_ec);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bad_peb_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_vol_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_min_io_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bgt_enabled);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_mtd_num);
	return err;
}

/**
 * ubi_sysfs_close - close sysfs for an UBI device.
 * @ubi: UBI device description object
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_remove_file(&ubi->dev, &dev_mtd_num);
	device_remove_file(&ubi->dev, &dev_bgt_enabled);
	device_remove_file(&ubi->dev, &dev_min_io_size);
	device_remove_file(&ubi->dev, &dev_max_vol_count);
	device_remove_file(&ubi->dev, &dev_bad_peb_count);
	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
	device_remove_file(&ubi->dev, &dev_max_ec);
	device_remove_file(&ubi->dev, &dev_volumes_count);
	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
	device_remove_file(&ubi->dev, &dev_eraseblock_size);
	device_unregister(&ubi->dev);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated, returns an error, and @ref is set to %0. However,
 * if the initialization fails after the UBI device was registered in the
 * driver core subsystem, this function takes a reference to @ubi->dev, because
 * otherwise the release function ('dev_release()') would free whole @ubi
 * object. The @ref argument is set to %1 in this case. The caller has to put
 * this reference.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err(ubi, "cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err(ubi, "cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err(ubi, "cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err(ubi, "cannot initialize UBI %s, error %d",
		ubi->ubi_name, err);
	return err;
}

/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects (@vol->dev),
 * the memory allocated for the volumes is freed as well (in the release
 * function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * ubi_free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
void ubi_free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		kfree(ubi->volumes[i]->eba_tbl);
		kfree(ubi->volumes[i]);
	}
}

static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
{
	int limit, device_pebs;
	uint64_t device_size;

	if (!max_beb_per1024)
		return 0;

	/*
	 * Here we are using size of the entire flash chip and
	 * not just the MTD partition size because the maximum
	 * number of bad eraseblocks is a percentage of the
	 * whole device and bad eraseblocks are not fairly
	 * distributed over the flash chip. So the worst case
	 * is that all the bad eraseblocks of the chip are in
	 * the MTD partition we are attaching (ubi->mtd).
	 */
	device_size = mtd_get_device_size(ubi->mtd);
	device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
	limit = mult_frac(device_pebs, max_beb_per1024, 1024);

	/* Round it up */
	if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
		limit += 1;

	return limit;
}

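/*
 * Worked example for get_bad_peb_limit() above (hypothetical numbers): for a
 * chip with 4096 eraseblocks and max_beb_per1024 = 20, mult_frac() gives
 * 4096 * 20 / 1024 = 80 and no round-up is needed. With 4100 eraseblocks the
 * initial result is still 80 (integer division), but 80 * 1024 / 20 = 4096 is
 * less than 4100, so the limit is rounded up to 81.
 */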
/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 * o EC header is always at offset zero - this cannot be changed;
 * o VID header starts just after the EC header at the closest address
 *   aligned to @io->hdrs_min_io_size;
 * o data starts just after the VID header at the closest address aligned to
 *   @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err(ubi, "multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writebufsize;
	/*
	 * Maximum write size has to be greater or equivalent to min. I/O
	 * size, and be multiple of min. I/O size.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_gen("min_io_size      %d", ubi->min_io_size);
	dbg_gen("max_write_size   %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
	dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
	dbg_gen("leb_start        %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err(ubi, "unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEB are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_gen("max_erroneous    %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
			ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	/*
	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We should loop
	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
	 * uninitialized so far.
	 */

	return 0;
}

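/*
 * Worked example for the layout computed in io_init() above (hypothetical
 * geometry): on NAND with a 2048 byte minimal I/O unit, no sub-pages and
 * vid_hdr_offset left at 0, the defaults come out as
 *
 *	ec_hdr_alsize  = ALIGN(64, 2048) = 2048
 *	vid_hdr_offset = vid_hdr_aloffset = 2048, vid_hdr_shift = 0
 *	leb_start      = ALIGN(2048 + 64, 2048) = 4096
 *
 * so each PEB loses two minimal I/O units to the EC and VID headers and the
 * LEB size is peb_size - 4096.
 */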
/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	if (ubi->ro_mode) {
		ubi_warn(ubi, "skip auto-resize because of R/O mode");
		return 0;
	}

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		vtbl_rec = ubi->vtbl[vol_id];
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err(ubi, "cannot auto-resize volume %d",
				vol_id);
	}

	if (err)
		return err;

	ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
		vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * This function attaches MTD device @mtd to UBI and assigns @ubi_num to the
 * newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in which
 * case this function finds a vacant device number and assigns it
 * automatically. Returns the new UBI device number in case of success and a
 * negative error code in case of failure.
 *
 * Note, invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
		       int vid_hdr_offset, int max_beb_per1024)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
		return -EINVAL;

	if (!max_beb_per1024)
		max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			ubi_err(ubi, "mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * different problems like the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device. This
	 * results in inability to unload the module. And in general it makes
	 * no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
			mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			ubi_err(ubi, "only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			ubi_err(ubi, "already exists");
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_pool.used = ubi->fm_pool.size = 0;
	ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;

	/*
	 * fm_pool.max_size is 5% of the total number of PEBs but it's also
	 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
	 */
	ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
		ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
	if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
		ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;

	ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
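	/*
	 * Worked example (hypothetical geometry): with 128 KiB PEBs on a
	 * 256 MiB device there are 2048 PEBs, so the computation above
	 * yields (2048 / 100) * 5 = 100 pool slots, which is then bounded
	 * by UBI_FM_MAX_POOL_SIZE and raised to UBI_FM_MIN_POOL_SIZE if it
	 * is too small; the WL pool gets half of the resulting size.
	 */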
	ubi->fm_disabled = !fm_autoconvert;
	if (fm_debug)
		ubi_enable_dbg_chk_fastmap(ubi);

	if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
	    <= UBI_FM_MAX_START) {
		ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
			UBI_FM_MAX_START);
		ubi->fm_disabled = 1;
	}

	ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "default fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
#else
	ubi->fm_disabled = 1;
#endif
	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);
	init_rwsem(&ubi->fm_protect);
	init_rwsem(&ubi->fm_eba_sem);

	ubi_msg(ubi, "attaching mtd%d", mtd->index);

	err = io_init(ubi, max_beb_per1024);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_buf = vzalloc(ubi->fm_size);
	if (!ubi->fm_buf)
		goto out_free;
#endif
	err = ubi_attach(ubi, 0);
	if (err) {
		ubi_err(ubi, "failed to attach mtd%d, error %d",
			mtd->index, err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	err = ubi_debugfs_init_dev(ubi);
	if (err)
		goto out_uif;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err(ubi, "cannot spawn \"%s\", error %d",
			ubi->bgt_name, err);
		goto out_debugfs;
	}

	ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
		mtd->index, mtd->name, ubi->flash_size >> 20);
	ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
		ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
	ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
		ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
	ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
	ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
		ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
	ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
		ubi->vtbl_slots);
	ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
		ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
		ubi->image_seq);
	ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
		ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	ubi_devices[ubi_num] = ubi;
	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_debugfs:
	ubi_debugfs_exit_dev(ubi);
out_uif:
	get_device(&ubi->dev);
	ubi_assert(ref);
	uif_close(ubi);
out_detach:
	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	if (ref)
		put_device(&ubi->dev);
	else
		kfree(ubi);
	return err;
}

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if device reference count is not zero
 *
 * This function destroys an UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
 * exist.
 *
 * Note, invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug */
		ubi_err(ubi, "%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
#ifdef CONFIG_MTD_UBI_FASTMAP
	/* If we don't write a new fastmap at detach time we lose all
	 * EC updates that have been made since the last written fastmap.
	 * In case of fastmap debugging we omit the update to simulate an
	 * unclean shutdown. */
	if (!ubi_dbg_chk_fastmap(ubi))
		ubi_update_fastmap(ubi);
#endif
	/*
	 * Before freeing anything, we have to stop the background thread to
	 * prevent it from doing anything on this device while we are freeing.
	 */
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	/*
	 * Get a reference to the device in order to prevent 'dev_release()'
	 * from freeing the @ubi object.
	 */
	get_device(&ubi->dev);

	ubi_debugfs_exit_dev(ubi);
	uif_close(ubi);

	ubi_wl_close(ubi);
	ubi_free_internal_volumes(ubi);
	vfree(ubi->vtbl);
	put_mtd_device(ubi->mtd);
	vfree(ubi->peb_buf);
	vfree(ubi->fm_buf);
	ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
	put_device(&ubi->dev);
	return 0;
}

/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path
 *
 * This helper function opens an MTD device by its character device node path.
 * Returns MTD device description object in case of success and a negative
 * error code in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, major, minor, mode;
	struct path path;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	/* MTD device number is defined by the major / minor numbers */
	major = imajor(path.dentry->d_inode);
	minor = iminor(path.dentry->d_inode);
	mode = path.dentry->d_inode->i_mode;
	path_put(&path);
	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
		return ERR_PTR(-EINVAL);

	if (minor & 1)
		/*
		 * We do not think support for the "/dev/mtdrX" devices is
		 * needed, so do not support them to avoid doing extra work.
		 */
		return ERR_PTR(-EINVAL);

	return get_mtd_device(NULL, minor / 2);
}

/**
 * open_mtd_device - open MTD device by name, character device path, or number.
 * @mtd_dev: name, character device node path, or MTD device number
 *
 * This function tries to open an MTD device described by the @mtd_dev string,
 * which is first treated as an ASCII MTD device number, and if that fails, as
 * an MTD device name, and if that also fails, as an MTD character device node
 * path. Returns MTD device description object in case of success and a
 * negative error code in case of failure.
 */
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
	struct mtd_info *mtd;
	int mtd_num;
	char *endp;

	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
	if (*endp != '\0' || mtd_dev == endp) {
		/*
		 * This does not look like an ASCII integer, probably this is
		 * MTD device name.
		 */
		mtd = get_mtd_device_nm(mtd_dev);
		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
			/* Probably this is an MTD character device node path */
			mtd = open_mtd_by_chdev(mtd_dev);
	} else
		mtd = get_mtd_device(NULL, mtd_num);

	return mtd;
}

static int __init ubi_init(void)
{
	int err, i, k;

	/* Ensure that EC and VID headers have correct size */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);

	if (mtd_devs > UBI_MAX_DEVICES) {
		pr_err("UBI error: too many MTD devices, maximum is %d",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* Create base sysfs directory and sysfs files */
	ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
	if (IS_ERR(ubi_class)) {
		err = PTR_ERR(ubi_class);
		pr_err("UBI error: cannot create UBI class");
		goto out;
	}

	err = class_create_file(ubi_class, &ubi_version);
	if (err) {
		pr_err("UBI error: cannot create sysfs file");
		goto out_class;
	}

	err = misc_register(&ubi_ctrl_cdev);
	if (err) {
		pr_err("UBI error: cannot register device");
		goto out_version;
	}

	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
					      sizeof(struct ubi_wl_entry),
					      0, 0, NULL);
	if (!ubi_wl_entry_slab) {
		err = -ENOMEM;
		goto out_dev_unreg;
	}

	err = ubi_debugfs_init();
	if (err)
		goto out_slab;

	/* Attach MTD devices */
	for (i = 0; i < mtd_devs; i++) {
		struct mtd_dev_param *p = &mtd_dev_param[i];
		struct mtd_info *mtd;

		cond_resched();

		mtd = open_mtd_device(p->name);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			pr_err("UBI error: cannot open mtd %s, error %d",
			       p->name, err);
			/* See comment below re-ubi_is_module(). */
			if (ubi_is_module())
				goto out_detach;
			continue;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, p->ubi_num,
					 p->vid_hdr_offs, p->max_beb_per1024);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			pr_err("UBI error: cannot attach mtd%d",
			       mtd->index);
			put_mtd_device(mtd);

			/*
			 * Originally UBI stopped initializing on any error.
			 * However, later on it was found out that this
			 * behavior is not very good when UBI is compiled into
			 * the kernel and the MTD devices to attach are passed
			 * through the command line. Indeed, UBI failure
			 * stopped whole boot sequence.
			 *
			 * To fix this, we changed the behavior for the
			 * non-module case, but preserved the old behavior for
			 * the module case, just for compatibility. This is a
			 * little inconsistent, though.
			 */
			if (ubi_is_module())
				goto out_detach;
		}
	}

	err = ubiblock_init();
	if (err) {
		pr_err("UBI error: block: cannot initialize, error %d", err);

		/* See comment above re-ubi_is_module(). */
		if (ubi_is_module())
			goto out_detach;
	}

	return 0;

out_detach:
	for (k = 0; k < i; k++)
		if (ubi_devices[k]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
out_slab:
	kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
	misc_deregister(&ubi_ctrl_cdev);
out_version:
	class_remove_file(ubi_class, &ubi_version);
out_class:
	class_destroy(ubi_class);
out:
	pr_err("UBI error: cannot initialize UBI, error %d", err);
	return err;
}
late_initcall(ubi_init);

static void __exit ubi_exit(void)
{
	int i;

	ubiblock_exit();

	for (i = 0; i < UBI_MAX_DEVICES; i++)
		if (ubi_devices[i]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	ubi_debugfs_exit();
	kmem_cache_destroy(ubi_wl_entry_slab);
	misc_deregister(&ubi_ctrl_cdev);
	class_remove_file(ubi_class, &ubi_version);
	class_destroy(ubi_class);
}
module_exit(ubi_exit);

/**
 * bytes_str_to_int - convert a number of bytes string into an integer.
 * @str: the string to convert
 *
 * This function returns positive resulting integer in case of success and a
 * negative error code in case of failure.
 */
static int __init bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result;

	result = simple_strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX) {
		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}

	switch (*endp) {
	case 'G':
		result *= 1024;
	case 'M':
		result *= 1024;
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
	case '\0':
		break;
	default:
		pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
		return -EINVAL;
	}

	return result;
}

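/*
 * Worked examples for bytes_str_to_int() above: "2048" yields 2048, while
 * "4K" and "4KiB" both yield 4096 and "1M" yields 1048576, because the
 * suffix cases deliberately fall through and multiply by 1024 once per
 * level ('G' -> 'M' -> 'K').
 */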
/**
 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
 * @val: the parameter value to parse
 * @kp: not used
 *
 * This function returns zero in case of success and a negative error code in
 * case of error.
 */
static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
{
	int i, len;
	struct mtd_dev_param *p;
	char buf[MTD_PARAM_LEN_MAX];
	char *pbuf = &buf[0];
	char *tokens[MTD_PARAM_MAX_COUNT], *token;

	if (!val)
		return -EINVAL;

	if (mtd_devs == UBI_MAX_DEVICES) {
		pr_err("UBI error: too many parameters, max. is %d\n",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	len = strnlen(val, MTD_PARAM_LEN_MAX);
	if (len == MTD_PARAM_LEN_MAX) {
		pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
		       val, MTD_PARAM_LEN_MAX);
		return -EINVAL;
	}

	if (len == 0) {
		pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
		return 0;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	if (pbuf) {
		pr_err("UBI error: too many arguments at \"%s\"\n", val);
		return -EINVAL;
	}

	p = &mtd_dev_param[mtd_devs];
	strcpy(&p->name[0], tokens[0]);

	token = tokens[1];
	if (token) {
		p->vid_hdr_offs = bytes_str_to_int(token);

		if (p->vid_hdr_offs < 0)
			return p->vid_hdr_offs;
	}

	token = tokens[2];
	if (token) {
		int err = kstrtoint(token, 10, &p->max_beb_per1024);

		if (err) {
			pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
			       token);
			return -EINVAL;
		}
	}

	token = tokens[3];
	if (token) {
		int err = kstrtoint(token, 10, &p->ubi_num);

		if (err) {
			pr_err("UBI error: bad value for ubi_num parameter: %s",
			       token);
			return -EINVAL;
		}
	} else
		p->ubi_num = UBI_DEV_NUM_AUTO;

	mtd_devs += 1;
	return 0;
}

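/*
 * Illustrative mapping (hypothetical values): a parameter such as
 * "mtd=/dev/mtd1,2048,20,3" is split by the parser above into
 * name = "/dev/mtd1", vid_hdr_offs = 2048, max_beb_per1024 = 20 and
 * ubi_num = 3; omitted trailing fields keep their defaults (0, 0 and
 * UBI_DEV_NUM_AUTO respectively).
 */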
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024[,ubi_num]]].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
		      "Optional \"max_beb_per1024\" parameter specifies the maximum expected number of bad eraseblocks per 1024 eraseblocks. (default value ("
		      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
		      "Optional \"ubi_num\" parameter specifies the UBI device number which has to be assigned to the newly created UBI device (assigned automatically by default)\n"
		      "\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
		      "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling\n"
		      "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).\n"
		      "Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values for the other fields.");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
module_param(fm_debug, bool, 0);
MODULE_PARM_DESC(fm_debug, "Set this parameter to enable fastmap debugging by default. Warning, this will make fastmap slow!");
#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");