[PATCH] fix missing includes
[deliverable/linux.git] drivers/mtd/mtdchar.c
/*
 * $Id: mtdchar.c,v 1.73 2005/07/04 17:36:41 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>	/* TASK_* */
#include <asm/uaccess.h>

#include <linux/device.h>

static struct class *mtd_class;

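/*
 * Each MTD device is exposed as two character device nodes: mtd<N>,
 * read-write, at minor N*2, and mtd<N>ro, read-only, at minor N*2+1.
 * mtd_open() below relies on this pairing: the low bit of the minor
 * selects the read-only node, the remaining bits give the device number.
 */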
static void mtd_notify_add(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
			    NULL, "mtd%d", mtd->index);

	class_device_create(mtd_class, NULL,
			    MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
			    NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
	if (!mtd)
		return;

	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
	class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

/*
 * We use file->private_data to store a pointer to the MTD device.
 * Since alignment is at least 32 bits, we have 2 bits free for OTP
 * modes as well.
 */

#define TO_MTD(file) (struct mtd_info *)((long)((file)->private_data) & ~3L)

#define MTD_MODE_OTP_FACT	1
#define MTD_MODE_OTP_USER	2
#define MTD_MODE(file)		((long)((file)->private_data) & 3)

#define SET_MTD_MODE(file, mode) \
	do { long __p = (long)((file)->private_data); \
	     (file)->private_data = (void *)((__p & ~3L) | mode); } while (0)

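/*
 * The OTPSELECT ioctl below stores one of the MTD_MODE_OTP_* values in
 * those low bits with SET_MTD_MODE(); mtd_read() and mtd_write() then
 * dispatch on MTD_MODE(file) to use the factory/user protection-register
 * accessors instead of the normal read/write path.
 */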
static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_info *mtd = TO_MTD(file);

	switch (orig) {
	case 0:
		/* SEEK_SET */
		file->f_pos = offset;
		break;
	case 1:
		/* SEEK_CUR */
		file->f_pos += offset;
		break;
	case 2:
		/* SEEK_END */
		file->f_pos = mtd->size + offset;
		break;
	default:
		return -EINVAL;
	}

	if (file->f_pos < 0)
		file->f_pos = 0;
	else if (file->f_pos >= mtd->size)
		file->f_pos = mtd->size - 1;

	return file->f_pos;
}



static int mtd_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (!mtd)
		return -ENODEV;

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	file->private_data = mtd;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	mtd = TO_MTD(file);

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_info *mtd = TO_MTD(file);
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */
	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf=kmalloc(len,GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;

		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
		}
		/* Nand returns -EBADMSG on ecc errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ecc errors !
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND
		 */
		if (!ret || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_info *mtd = TO_MTD(file);
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf=kmalloc(len,GFP_KERNEL);
		if (!kbuf) {
			printk("kmalloc is null\n");
			return -ENOMEM;
		}

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_info *mtd = TO_MTD(file);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
		if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if(!(file->f_mode & 2))
			return -EPERM;

		erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset (erase,0,sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, argp,
					   sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if(!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		if (copy_from_user(databuf, buf.ptr, buf.length)) {
			kfree(databuf);
			return -EFAULT;
		}

		ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(databuf);
		break;

	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (put_user(retlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	case MEMSETOOBSEL:
	{
		if (copy_from_user(&mtd->oobinfo, argp, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETOOBSEL:
	{
		if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;
		SET_MTD_MODE(file, 0);
		switch (mode) {
		case MTD_OTP_FACTORY:
			if (!mtd->read_fact_prot_reg)
				ret = -EOPNOTSUPP;
			else
				SET_MTD_MODE(file, MTD_MODE_OTP_FACT);
			break;
		case MTD_OTP_USER:
			if (!mtd->read_user_prot_reg)
				ret = -EOPNOTSUPP;
			else
				SET_MTD_MODE(file, MTD_MODE_OTP_USER);
			break;
		default:
			ret = -EINVAL;
		case MTD_OTP_OFF:
			break;
		}
		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (MTD_MODE(file) != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtd_ioctl */

static struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtd_class = class_create(THIS_MODULE, "mtd");

	if (IS_ERR(mtd_class)) {
		printk(KERN_ERR "Error creating mtd class.\n");
		unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
		return PTR_ERR(mtd_class);
	}

	register_mtd_user(&notifier);
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	unregister_mtd_user(&notifier);
	class_destroy(mtd_class);
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");