Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
97894cda | 2 | * $Id: mtd_blkdevs.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $ |
1da177e4 LT |
3 | * |
4 | * (C) 2003 David Woodhouse <dwmw2@infradead.org> | |
5 | * | |
6 | * Interface to Linux 2.5 block layer for MTD 'translation layers'. | |
7 | * | |
8 | */ | |
9 | ||
10 | #include <linux/kernel.h> | |
11 | #include <linux/slab.h> | |
12 | #include <linux/module.h> | |
13 | #include <linux/list.h> | |
14 | #include <linux/fs.h> | |
15 | #include <linux/mtd/blktrans.h> | |
16 | #include <linux/mtd/mtd.h> | |
17 | #include <linux/blkdev.h> | |
18 | #include <linux/blkpg.h> | |
19 | #include <linux/spinlock.h> | |
20 | #include <linux/hdreg.h> | |
21 | #include <linux/init.h> | |
48b19268 | 22 | #include <linux/mutex.h> |
1da177e4 | 23 | #include <asm/uaccess.h> |
1da177e4 LT |
24 | |
25 | static LIST_HEAD(blktrans_majors); | |
26 | ||
48b19268 | 27 | extern struct mutex mtd_table_mutex; |
1da177e4 LT |
28 | extern struct mtd_info *mtd_table[]; |
29 | ||
/* Per-translation-layer state owned by this common block core:
 * the helper thread handshake, its wait queue, and the request queue
 * shared by all devices of that translation layer. */
struct mtd_blkcore_priv {
	struct completion thread_dead;	/* signalled by the thread as it exits */
	int exiting;			/* set to ask the thread to stop */
	wait_queue_head_t thread_wq;	/* thread sleeps here while queue is empty */
	struct request_queue *rq;	/* block request queue for this major */
	spinlock_t queue_lock;		/* lock handed to blk_init_queue() for rq */
};
37 | ||
38 | static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |
39 | struct mtd_blktrans_dev *dev, | |
40 | struct request *req) | |
41 | { | |
42 | unsigned long block, nsect; | |
43 | char *buf; | |
44 | ||
19187672 RP |
45 | block = req->sector << 9 >> tr->blkshift; |
46 | nsect = req->current_nr_sectors << 9 >> tr->blkshift; | |
47 | ||
1da177e4 LT |
48 | buf = req->buffer; |
49 | ||
4aff5e23 | 50 | if (!blk_fs_request(req)) |
1da177e4 LT |
51 | return 0; |
52 | ||
19187672 | 53 | if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) |
1da177e4 LT |
54 | return 0; |
55 | ||
56 | switch(rq_data_dir(req)) { | |
57 | case READ: | |
19187672 | 58 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
1da177e4 LT |
59 | if (tr->readsect(dev, block, buf)) |
60 | return 0; | |
61 | return 1; | |
62 | ||
63 | case WRITE: | |
64 | if (!tr->writesect) | |
65 | return 0; | |
66 | ||
19187672 | 67 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
1da177e4 LT |
68 | if (tr->writesect(dev, block, buf)) |
69 | return 0; | |
70 | return 1; | |
71 | ||
72 | default: | |
9a292308 | 73 | printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); |
1da177e4 LT |
74 | return 0; |
75 | } | |
76 | } | |
77 | ||
/*
 * Per-translation-layer helper thread: pulls requests off the block
 * queue and services them via do_blktrans_request(), sleeping on
 * blkcore_priv->thread_wq while the queue is empty.  Runs until
 * deregister_mtd_blktrans() sets blkcore_priv->exiting.
 */
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC | PF_NOFREEZE;

	daemonize("%sd", tr->name);

	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call
	   exit_sighand() since that'll cause an oops when we finally
	   do exit. */
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* queue_lock protects elv_next_request()/end_request() below */
	spin_lock_irq(rq->queue_lock);

	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		req = elv_next_request(rq);

		if (!req) {
			/* Queue empty.  Register on the waitqueue and set
			   the task state BEFORE dropping the lock, so a
			   wake_up from mtd_blktrans_request() between
			   unlock and schedule() is not lost. */
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_unlock_irq(rq->queue_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			spin_lock_irq(rq->queue_lock);

			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		/* Drop the queue lock across the (potentially sleeping)
		   MTD I/O; dev->lock serializes access to the device. */
		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		/* res: 1 = success, 0 = failure, as end_request() expects */
		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}
138 | ||
139 | static void mtd_blktrans_request(struct request_queue *rq) | |
140 | { | |
141 | struct mtd_blktrans_ops *tr = rq->queuedata; | |
142 | wake_up(&tr->blkcore_priv->thread_wq); | |
143 | } | |
144 | ||
145 | ||
/*
 * open() for a translation-layer block device.  Pins both the
 * underlying MTD driver module and the translation-layer module, bumps
 * the MTD use count, then calls the layer's own open hook if any.
 * Returns 0, -ENODEV if a module reference could not be taken, or the
 * error from tr->open().
 */
static int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	/* If tr->open() fails, unwind everything taken above.  Note the
	   out_tr label deliberately sits INSIDE this block: the goto
	   above enters it having taken only the MTD module reference. */
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}
176 | ||
177 | static int blktrans_release(struct inode *i, struct file *f) | |
178 | { | |
179 | struct mtd_blktrans_dev *dev; | |
180 | struct mtd_blktrans_ops *tr; | |
181 | int ret = 0; | |
182 | ||
183 | dev = i->i_bdev->bd_disk->private_data; | |
184 | tr = dev->tr; | |
185 | ||
186 | if (tr->release) | |
187 | ret = tr->release(dev); | |
188 | ||
189 | if (!ret) { | |
190 | dev->mtd->usecount--; | |
191 | module_put(dev->mtd->owner); | |
192 | module_put(tr->owner); | |
193 | } | |
194 | ||
195 | return ret; | |
196 | } | |
197 | ||
a885c8c4 CH |
198 | static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
199 | { | |
200 | struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data; | |
201 | ||
202 | if (dev->tr->getgeo) | |
203 | return dev->tr->getgeo(dev, geo); | |
204 | return -ENOTTY; | |
205 | } | |
1da177e4 | 206 | |
97894cda | 207 | static int blktrans_ioctl(struct inode *inode, struct file *file, |
1da177e4 LT |
208 | unsigned int cmd, unsigned long arg) |
209 | { | |
210 | struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data; | |
211 | struct mtd_blktrans_ops *tr = dev->tr; | |
212 | ||
213 | switch (cmd) { | |
214 | case BLKFLSBUF: | |
215 | if (tr->flush) | |
216 | return tr->flush(dev); | |
217 | /* The core code did the work, we had nothing to do. */ | |
218 | return 0; | |
1da177e4 LT |
219 | default: |
220 | return -ENOTTY; | |
221 | } | |
222 | } | |
223 | ||
/* block_device_operations shared by every translation-layer gendisk
 * (installed as gd->fops in add_mtd_blktrans_dev()). */
struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};
231 | ||
232 | int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |
233 | { | |
234 | struct mtd_blktrans_ops *tr = new->tr; | |
235 | struct list_head *this; | |
236 | int last_devnum = -1; | |
237 | struct gendisk *gd; | |
238 | ||
48b19268 IM |
239 | if (!!mutex_trylock(&mtd_table_mutex)) { |
240 | mutex_unlock(&mtd_table_mutex); | |
1da177e4 LT |
241 | BUG(); |
242 | } | |
243 | ||
244 | list_for_each(this, &tr->devs) { | |
245 | struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list); | |
246 | if (new->devnum == -1) { | |
247 | /* Use first free number */ | |
248 | if (d->devnum != last_devnum+1) { | |
249 | /* Found a free devnum. Plug it in here */ | |
250 | new->devnum = last_devnum+1; | |
251 | list_add_tail(&new->list, &d->list); | |
252 | goto added; | |
253 | } | |
254 | } else if (d->devnum == new->devnum) { | |
255 | /* Required number taken */ | |
256 | return -EBUSY; | |
257 | } else if (d->devnum > new->devnum) { | |
258 | /* Required number was free */ | |
259 | list_add_tail(&new->list, &d->list); | |
260 | goto added; | |
97894cda | 261 | } |
1da177e4 LT |
262 | last_devnum = d->devnum; |
263 | } | |
264 | if (new->devnum == -1) | |
265 | new->devnum = last_devnum+1; | |
266 | ||
267 | if ((new->devnum << tr->part_bits) > 256) { | |
268 | return -EBUSY; | |
269 | } | |
270 | ||
48b19268 | 271 | mutex_init(&new->lock); |
1da177e4 LT |
272 | list_add_tail(&new->list, &tr->devs); |
273 | added: | |
274 | if (!tr->writesect) | |
275 | new->readonly = 1; | |
276 | ||
277 | gd = alloc_disk(1 << tr->part_bits); | |
278 | if (!gd) { | |
279 | list_del(&new->list); | |
280 | return -ENOMEM; | |
281 | } | |
282 | gd->major = tr->major; | |
283 | gd->first_minor = (new->devnum) << tr->part_bits; | |
284 | gd->fops = &mtd_blktrans_ops; | |
97894cda | 285 | |
65a8de36 TP |
286 | if (tr->part_bits) |
287 | if (new->devnum < 26) | |
288 | snprintf(gd->disk_name, sizeof(gd->disk_name), | |
289 | "%s%c", tr->name, 'a' + new->devnum); | |
290 | else | |
291 | snprintf(gd->disk_name, sizeof(gd->disk_name), | |
292 | "%s%c%c", tr->name, | |
293 | 'a' - 1 + new->devnum / 26, | |
294 | 'a' + new->devnum % 26); | |
295 | else | |
296 | snprintf(gd->disk_name, sizeof(gd->disk_name), | |
297 | "%s%d", tr->name, new->devnum); | |
1da177e4 LT |
298 | |
299 | /* 2.5 has capacity in units of 512 bytes while still | |
300 | having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */ | |
19187672 | 301 | set_capacity(gd, (new->size * tr->blksize) >> 9); |
1da177e4 LT |
302 | |
303 | gd->private_data = new; | |
304 | new->blkcore_priv = gd; | |
305 | gd->queue = tr->blkcore_priv->rq; | |
306 | ||
307 | if (new->readonly) | |
308 | set_disk_ro(gd, 1); | |
309 | ||
310 | add_disk(gd); | |
97894cda | 311 | |
1da177e4 LT |
312 | return 0; |
313 | } | |
314 | ||
/*
 * Unregister one translation-layer block device.  Caller must hold
 * mtd_table_mutex (asserted below).  Always returns 0.
 */
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	/* A successful trylock proves the caller did NOT hold the
	   mutex, which is a usage bug. */
	if (!!mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	/* For a device, blkcore_priv holds the gendisk created in
	   add_mtd_blktrans_dev() */
	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}
329 | ||
330 | static void blktrans_notify_remove(struct mtd_info *mtd) | |
331 | { | |
332 | struct list_head *this, *this2, *next; | |
333 | ||
334 | list_for_each(this, &blktrans_majors) { | |
335 | struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list); | |
336 | ||
337 | list_for_each_safe(this2, next, &tr->devs) { | |
338 | struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list); | |
339 | ||
340 | if (dev->mtd == mtd) | |
341 | tr->remove_dev(dev); | |
342 | } | |
343 | } | |
344 | } | |
345 | ||
346 | static void blktrans_notify_add(struct mtd_info *mtd) | |
347 | { | |
348 | struct list_head *this; | |
349 | ||
350 | if (mtd->type == MTD_ABSENT) | |
351 | return; | |
352 | ||
353 | list_for_each(this, &blktrans_majors) { | |
354 | struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list); | |
355 | ||
356 | tr->add_mtd(tr, mtd); | |
357 | } | |
358 | ||
359 | } | |
360 | ||
/* Hooks the MTD core invokes whenever an MTD device comes or goes;
 * registered lazily by the first register_mtd_blktrans() call. */
static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
97894cda | 365 | |
1da177e4 LT |
366 | int register_mtd_blktrans(struct mtd_blktrans_ops *tr) |
367 | { | |
368 | int ret, i; | |
369 | ||
97894cda | 370 | /* Register the notifier if/when the first device type is |
1da177e4 LT |
371 | registered, to prevent the link/init ordering from fucking |
372 | us over. */ | |
373 | if (!blktrans_notifier.list.next) | |
374 | register_mtd_user(&blktrans_notifier); | |
375 | ||
376 | tr->blkcore_priv = kmalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL); | |
377 | if (!tr->blkcore_priv) | |
378 | return -ENOMEM; | |
379 | ||
380 | memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv)); | |
381 | ||
48b19268 | 382 | mutex_lock(&mtd_table_mutex); |
1da177e4 LT |
383 | |
384 | ret = register_blkdev(tr->major, tr->name); | |
385 | if (ret) { | |
386 | printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n", | |
387 | tr->name, tr->major, ret); | |
388 | kfree(tr->blkcore_priv); | |
48b19268 | 389 | mutex_unlock(&mtd_table_mutex); |
1da177e4 LT |
390 | return ret; |
391 | } | |
392 | spin_lock_init(&tr->blkcore_priv->queue_lock); | |
393 | init_completion(&tr->blkcore_priv->thread_dead); | |
394 | init_waitqueue_head(&tr->blkcore_priv->thread_wq); | |
395 | ||
396 | tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock); | |
397 | if (!tr->blkcore_priv->rq) { | |
398 | unregister_blkdev(tr->major, tr->name); | |
399 | kfree(tr->blkcore_priv); | |
48b19268 | 400 | mutex_unlock(&mtd_table_mutex); |
1da177e4 LT |
401 | return -ENOMEM; |
402 | } | |
403 | ||
404 | tr->blkcore_priv->rq->queuedata = tr; | |
19187672 RP |
405 | blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize); |
406 | tr->blkshift = ffs(tr->blksize) - 1; | |
1da177e4 LT |
407 | |
408 | ret = kernel_thread(mtd_blktrans_thread, tr, CLONE_KERNEL); | |
409 | if (ret < 0) { | |
410 | blk_cleanup_queue(tr->blkcore_priv->rq); | |
411 | unregister_blkdev(tr->major, tr->name); | |
412 | kfree(tr->blkcore_priv); | |
48b19268 | 413 | mutex_unlock(&mtd_table_mutex); |
1da177e4 | 414 | return ret; |
97894cda | 415 | } |
1da177e4 | 416 | |
1da177e4 LT |
417 | INIT_LIST_HEAD(&tr->devs); |
418 | list_add(&tr->list, &blktrans_majors); | |
419 | ||
420 | for (i=0; i<MAX_MTD_DEVICES; i++) { | |
421 | if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT) | |
422 | tr->add_mtd(tr, mtd_table[i]); | |
423 | } | |
424 | ||
48b19268 | 425 | mutex_unlock(&mtd_table_mutex); |
1da177e4 LT |
426 | |
427 | return 0; | |
428 | } | |
429 | ||
430 | int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr) | |
431 | { | |
432 | struct list_head *this, *next; | |
433 | ||
48b19268 | 434 | mutex_lock(&mtd_table_mutex); |
1da177e4 LT |
435 | |
436 | /* Clean up the kernel thread */ | |
437 | tr->blkcore_priv->exiting = 1; | |
438 | wake_up(&tr->blkcore_priv->thread_wq); | |
439 | wait_for_completion(&tr->blkcore_priv->thread_dead); | |
440 | ||
441 | /* Remove it from the list of active majors */ | |
442 | list_del(&tr->list); | |
443 | ||
444 | list_for_each_safe(this, next, &tr->devs) { | |
445 | struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list); | |
446 | tr->remove_dev(dev); | |
447 | } | |
448 | ||
1da177e4 LT |
449 | blk_cleanup_queue(tr->blkcore_priv->rq); |
450 | unregister_blkdev(tr->major, tr->name); | |
451 | ||
48b19268 | 452 | mutex_unlock(&mtd_table_mutex); |
1da177e4 LT |
453 | |
454 | kfree(tr->blkcore_priv); | |
455 | ||
373ebfbf | 456 | BUG_ON(!list_empty(&tr->devs)); |
1da177e4 LT |
457 | return 0; |
458 | } | |
459 | ||
460 | static void __exit mtd_blktrans_exit(void) | |
461 | { | |
462 | /* No race here -- if someone's currently in register_mtd_blktrans | |
463 | we're screwed anyway. */ | |
464 | if (blktrans_notifier.list.next) | |
465 | unregister_mtd_user(&blktrans_notifier); | |
466 | } | |
467 | ||
module_exit(mtd_blktrans_exit);

/* Public API consumed by the MTD translation-layer drivers
 * (mtdblock, ftl, nftl, ...). */
EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");