/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
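/*
 * Illustrative userspace sketch (not part of this file): bsg nodes
 * created by bsg_register_queue() below speak struct sg_io_v4 via
 * read()/write() and the synchronous SG_IO ioctl in bsg_ioctl().
 * The device path here is an assumption for the example; error
 * handling is elided.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *	#include <linux/bsg.h>
 *
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	// INQUIRY
 *	unsigned char buf[96], sense[32];
 *	struct sg_io_v4 hdr;
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';		// checked in bsg_validate_sgv4_hdr()
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.request = (unsigned long) cdb;
 *	hdr.din_xfer_len = sizeof(buf);
 *	hdr.din_xferp = (unsigned long) buf;
 *	hdr.max_response_len = sizeof(sense);
 *	hdr.response = (unsigned long) sense;
 *	hdr.timeout = 30000;		// milliseconds
 *	ioctl(fd, SG_IO, &hdr);
 */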
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/smp_lock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
	struct blk_scsi_cmd_filter *cmd_filter;
	mode_t *f_mode;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

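/*
 * return a command to the cache and wake up anyone waiting for a free
 * slot in the queue
 */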
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

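/*
 * allocate a new command slot, failing with -EINVAL if the per-device
 * queue depth (bd->max_queue) has been reached
 */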
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

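/*
 * hash a minor number into one of the BSG_LIST_ARRAY_SIZE device list heads
 */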
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

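/*
 * sleep until at least one previously queued command has completed,
 * returning -ENODATA when nothing is in flight and -EAGAIN for
 * non-blocking opens
 */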
static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

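/*
 * copy the CDB from userspace, verify it is permitted (via the command
 * filter, or CAP_SYS_RAWIO for transport commands) and fill in the
 * request fields: command length, type and timeout
 */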
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_cmd_filter_verify_command(bd->cmd_filter, rq->cmd,
						  bd->f_mode))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void*)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret)
			goto out;
	}
	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

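/*
 * pop the next completed command off the done list, or NULL if nothing
 * has finished yet
 */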
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

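/*
 * copy status, sense data and residual counts back into the sg_io_v4
 * header, then unmap and release the request(s)
 */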
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
				rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->data_len;
		hdr->din_resid = rq->next_rq->data_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->data_len;
	else
		hdr->dout_resid = rq->data_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}

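/*
 * wait for all in-flight commands to finish, then reap and discard
 * everything left on the done list; used on device teardown
 */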
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it. The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

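/*
 * core of bsg_read(): reap completed commands one sg_io_v4 header at a
 * time and copy them back to userspace
 */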
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

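/*
 * mirror the file's O_NONBLOCK state into the device's BSG_F_BLOCK flag
 */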
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

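/*
 * attach the gendisk's command filter and the file's mode to the
 * device, for the permission checks in blk_fill_sgv4_hdr_rq()
 */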
static void bsg_set_cmd_filter(struct bsg_device *bd,
			       struct file *file)
{
	struct inode *inode;
	struct gendisk *disk;

	if (!file)
		return;

	inode = file->f_dentry->d_inode;
	if (!inode)
		return;

	disk = inode->i_bdev->bd_disk;

	bd->cmd_filter = &disk->cmd_filter;
	bd->f_mode = &file->f_mode;
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_cmd_filter(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

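/*
 * core of bsg_write(): parse one sg_io_v4 header per iteration, map it
 * to a request and queue it asynchronously
 */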
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_cmd_filter(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written);
	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}

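/*
 * allocate and initialize a bsg_device with the default queue depth
 */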
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

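/*
 * drop a reference to the device; the last put reaps outstanding
 * commands and frees the bsg_device
 */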
static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

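/*
 * set up a new bsg_device for this open: take a queue reference and
 * add the device to the minor hash
 */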
static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);
	bsg_set_cmd_filter(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	lock_kernel();
	bd = bsg_get_device(inode, file);
	unlock_kernel();

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

static const struct file_operations bsg_fops = {
	.read		= bsg_read,
	.write		= bsg_write,
	.poll		= bsg_poll,
	.open		= bsg_open,
	.release	= bsg_release,
	.unlocked_ioctl	= bsg_ioctl,
	.owner		= THIS_MODULE,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

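/*
 * register a request queue with the bsg driver: allocate a minor,
 * create the class device and link it from the queue's sysfs directory
 */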
int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = parent->bus_id;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);