drivers/mtd/mtdchar.c
/*
 * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/config.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;

static void mtd_notify_add(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
			    NULL, "mtd%d", mtd->index);

	class_device_create(mtd_class, NULL,
			    MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
			    NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

/*
 * We use file->private_data to store a pointer to the MTD device.
 * Since alignment is at least 32 bits, we have 2 bits free for OTP
 * modes as well.
 */

#define TO_MTD(file) (struct mtd_info *)((long)((file)->private_data) & ~3L)

#define MTD_MODE_OTP_FACT	1
#define MTD_MODE_OTP_USER	2
#define MTD_MODE(file)		((long)((file)->private_data) & 3)

#define SET_MTD_MODE(file, mode) \
	do { long __p = (long)((file)->private_data); \
	     (file)->private_data = (void *)((__p & ~3L) | mode); } while (0)
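
/*
 * Example: after SET_MTD_MODE(file, MTD_MODE_OTP_USER), TO_MTD(file) still
 * yields the original struct mtd_info pointer (the low two tag bits are
 * masked off) and MTD_MODE(file) reports MTD_MODE_OTP_USER.
 */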

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_info *mtd = TO_MTD(file);

	switch (orig) {
	case 0:
		/* SEEK_SET */
		break;
	case 1:
		/* SEEK_CUR */
		offset += file->f_pos;
		break;
	case 2:
		/* SEEK_END */
		offset += mtd->size;
		break;
	default:
		return -EINVAL;
	}

	if (offset >= 0 && offset < mtd->size)
		return file->f_pos = offset;

	return -EINVAL;
}



static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
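	/*
	 * Each MTD device is exposed through two character minors (see
	 * mtd_notify_add() above): 2*index for the read/write "mtd%d" node
	 * and 2*index+1 for the read-only "mtd%dro" node, so the device
	 * number is minor >> 1 and bit 0 selects the read-only variant.
	 */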
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (!mtd)
		return -ENODEV;

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	file->private_data = mtd;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	mtd = TO_MTD(file);

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000
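/*
 * Reads and writes below go through a kernel bounce buffer of at most
 * MAX_KMALLOC_SIZE (0x20000 = 128KiB) per iteration, so a large request is
 * split into several MTD calls instead of one huge kmalloc().
 */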

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_info *mtd = TO_MTD(file);
	size_t retlen = 0;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */

	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_info *mtd = TO_MTD(file);
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	if (count > MAX_KMALLOC_SIZE)
		kbuf = kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
	else
		kbuf = kmalloc(count, GFP_KERNEL);

	if (!kbuf)
		return -ENOMEM;

	while (count) {

		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtd_write */

/*======================================================================

  IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}
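
/*
 * mtdchar_erase_callback() is installed as erase->callback by the MEMERASE
 * ioctl below; the driver calls it when the erase finishes, which wakes the
 * wait queue passed via erase->priv so the ioctl can stop sleeping and
 * inspect erase->state.
 */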

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_info *mtd = TO_MTD(file);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}
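	/*
	 * The checks above use the argument size and direction encoded in
	 * the ioctl command number to reject obviously bad user pointers
	 * early; the copy_to_user()/copy_from_user() calls below still do
	 * their own access checking.
	 */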

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				 sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
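		/*
		 * This appears to rely on struct mtd_info beginning with the
		 * same members, in the same order, as the user-visible
		 * struct mtd_info_user, so that only that prefix is copied
		 * out to userspace.
		 */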
		if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if (!(file->f_mode & 2))
			return -EPERM;

		erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset(erase, 0, sizeof(struct erase_info));
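			/*
			 * The user's struct erase_info_user (start, length)
			 * is copied straight onto erase->addr and erase->len;
			 * this relies on those two fields being laid out to
			 * match the user structure.
			 */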
			if (copy_from_user(&erase->addr, argp,
					   sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			   FIXME: Allow INTERRUPTIBLE. Which means
			   not having the wait_queue head on the stack.

			   If the wq_head is on the stack, and we
			   leave because we got interrupted, then the
			   wq_head is no longer there when the
			   callback routine tries to wake us up.
			 */
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		if (copy_from_user(databuf, buf.ptr, buf.length)) {
			kfree(databuf);
			return -EFAULT;
		}

		ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);

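		/*
		 * retlen is written back over the second uint32_t of the
		 * user's struct mtd_oob_buf, i.e. the length field, so the
		 * caller can see how many OOB bytes were actually written.
		 */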
		if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(databuf);
		break;

	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (put_user(retlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	case MEMSETOOBSEL:
	{
		if (copy_from_user(&mtd->oobinfo, argp, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETOOBSEL:
	{
		if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;
		SET_MTD_MODE(file, 0);
		switch (mode) {
		case MTD_OTP_FACTORY:
			if (!mtd->read_fact_prot_reg)
				ret = -EOPNOTSUPP;
			else
				SET_MTD_MODE(file, MTD_MODE_OTP_FACT);
			break;
		case MTD_OTP_USER:
			if (!mtd->read_user_prot_reg)
				ret = -EOPNOTSUPP;
			else
				SET_MTD_MODE(file, MTD_MODE_OTP_USER);
			break;
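		/*
		 * Note: the fall-through from "default" into MTD_OTP_OFF
		 * below is intentional; an unknown mode sets -EINVAL, while
		 * MTD_OTP_OFF simply leaves the mode cleared by the
		 * SET_MTD_MODE(file, 0) above.
		 */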
		default:
			ret = -EINVAL;
		case MTD_OTP_OFF:
			break;
		}
		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
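		/*
		 * Both region ioctls share one driver query: the driver
		 * fills this 4KiB scratch buffer with struct otp_info
		 * records and returns the number of bytes used.
		 * OTPGETREGIONCOUNT then only reports how many records fit
		 * in that length, while OTPGETREGIONINFO copies the records
		 * themselves back to userspace.
		 */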
		ret = -EOPNOTSUPP;
		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (MTD_MODE(file) != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */

static struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
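	/*
	 * Registering the notifier makes the MTD core call mtd_notify_add()
	 * for the devices that already exist as well as for any added later,
	 * so the /dev class devices above are created in both cases.
	 */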
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");