/*
 * bsg.c - block layer implementation of the sg v3 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
/*
 * TODO
 *	- Should this get merged, block/scsi_ioctl.c will be migrated into
 *	  this file. To keep maintenance down, it's easier to have them
 *	  separated right now.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
static char bsg_version[] = "block layer sg (bsg) 0.4";
struct bsg_device {
	struct gendisk *disk;
	request_queue_t *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int minor;
	int queued_cmds;
	int done_cmds;
	unsigned long *cmd_bitmap;
	struct bsg_command *cmd_map;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BDEVNAME_SIZE];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK		= 1,
	BSG_F_WRITE_PERM	= 2,
};
/*
 * command allocation bitmap defines
 */
#define BSG_CMDS_PAGE_ORDER	(1)
#define BSG_CMDS_PER_LONG	(sizeof(unsigned long) * 8)
#define BSG_CMDS_MASK		(BSG_CMDS_PER_LONG - 1)
#define BSG_CMDS_BYTES		(PAGE_SIZE * (1 << BSG_CMDS_PAGE_ORDER))
#define BSG_CMDS		(BSG_CMDS_BYTES / sizeof(struct bsg_command))
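/*
 * Worked example, assuming 4k pages: with BSG_CMDS_PAGE_ORDER of 1,
 * BSG_CMDS_BYTES = 4096 * 2 = 8192 bytes of command pool per device, so
 * BSG_CMDS = 8192 / sizeof(struct bsg_command) slots -- a few dozen
 * commands, since the embedded sg_io_hdr and sense buffer dominate
 * sizeof(struct bsg_command).
 */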
#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif
#define list_entry_bc(entry)	list_entry((entry), struct bsg_command, list)

#define BSG_MAJOR	(240)
static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr;

#define BSG_LIST_SIZE	(8)
#define bsg_list_idx(minor)	((minor) & (BSG_LIST_SIZE - 1))
static struct hlist_head bsg_device_list[BSG_LIST_SIZE];

static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);
/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	int err;
	struct sg_io_hdr hdr;
	struct sg_io_hdr __user *uhdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};
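/*
 * Freeing a command just clears its bit in cmd_bitmap; anything blocked
 * in bsg_get_command() waiting for a free slot is then woken via wq_free.
 */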
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long bitnr = bc - bd->cmd_map;
	unsigned long flags;

	dprintk("%s: command bit offset %lu\n", bd->name, bitnr);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	__clear_bit(bitnr, bd->cmd_bitmap);
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}
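/*
 * Allocation is a first-fit scan of cmd_bitmap: whole words equal to ~0UL
 * are skipped, then ffz() picks the first zero bit of the first word with
 * a free slot. The scan and the queue depth check both run under bd->lock.
 */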
static struct bsg_command *__bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;
	unsigned long *map;
	int free_nr;

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	for (free_nr = 0, map = bd->cmd_bitmap; *map == ~0UL; map++)
		free_nr += BSG_CMDS_PER_LONG;

	BUG_ON(*map == ~0UL);

	bd->queued_cmds++;
	free_nr += ffz(*map);
	__set_bit(free_nr, bd->cmd_bitmap);
	spin_unlock_irq(&bd->lock);

	bc = bd->cmd_map + free_nr;
	memset(bc, 0, sizeof(*bc));
	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p (bit %d)\n", bd->name, bc, free_nr);
	return bc;
out:
	dprintk("%s: failed (depth %d)\n", bd->name, bd->queued_cmds);
	spin_unlock_irq(&bd->lock);
	return bc;
}
static inline void
bsg_del_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds--;
	list_del(&bc->list);
}

static inline void
bsg_add_done_cmd(struct bsg_device *bd, struct bsg_command *bc)
{
	bd->done_cmds++;
	list_add_tail(&bc->list, &bd->done_list);
	wake_up(&bd->wq_done);
}
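/*
 * Common sleep path for readers and writers: returns -ENODATA when there
 * is no outstanding work at all, -EAGAIN for non-blocking opens, and
 * -ERESTARTSYS when an interruptible sleep is broken by a signal.
 */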
static inline int bsg_io_schedule(struct bsg_device *bd, int state)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, state);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
		ret = -ERESTARTSYS;

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}
/*
 * get a new free command, blocking if needed and specified
 */
static struct bsg_command *bsg_get_command(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = __bsg_alloc_command(bd);
		if (bc)
			break;

		ret = bsg_io_schedule(bd, TASK_INTERRUPTIBLE);
		if (ret) {
			bc = ERR_PTR(ret);
			break;
		}
	} while (1);

	return bc;
}
/*
 * Check if sg_io_hdr from user is allowed and valid
 */
static int
bsg_validate_sghdr(request_queue_t *q, struct sg_io_hdr *hdr, int *rw)
{
	if (hdr->interface_id != 'S')
		return -EINVAL;
	if (hdr->cmd_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dxfer_len > (q->max_sectors << 9))
		return -EIO;

	/*
	 * looks sane, if no data then it should be fine from our POV
	 */
	if (!hdr->dxfer_len)
		return 0;

	switch (hdr->dxfer_direction) {
	case SG_DXFER_TO_FROM_DEV:
	case SG_DXFER_FROM_DEV:
		*rw = READ;
		break;
	case SG_DXFER_TO_DEV:
		*rw = WRITE;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * map sg_io_hdr to a request. for scatter-gather sg_io_hdr, we map
 * each segment to a bio and string multiple bio's to the request
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, int rw, struct sg_io_hdr *hdr)
{
	request_queue_t *q = bd->queue;
	struct sg_iovec iov;
	struct sg_iovec __user *u_iov;
	struct request *rq;
	int ret, i = 0;

	dprintk("map hdr %p/%d/%d\n", hdr->dxferp, hdr->dxfer_len,
		hdr->iovec_count);

	ret = bsg_validate_sghdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	ret = blk_fill_sghdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
						     &bd->flags));
	if (ret) {
		blk_put_request(rq);
		return ERR_PTR(ret);
	}

	if (!hdr->iovec_count) {
		ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
	} else {
		u_iov = hdr->dxferp;
		for (ret = 0, i = 0; i < hdr->iovec_count; i++, u_iov++) {
			if (copy_from_user(&iov, u_iov, sizeof(iov))) {
				ret = -EFAULT;
				break;
			}

			if (!iov.iov_len || !iov.iov_base) {
				ret = -EINVAL;
				break;
			}

			ret = blk_rq_map_user(q, rq, iov.iov_base, iov.iov_len);
			if (ret)
				break;
		}
	}

	/*
	 * bugger, cleanup rq
	 */
	if (ret) {
		dprintk("failed map at %d: %d\n", i, ret);
		blk_unmap_sghdr_rq(rq, hdr);
		rq = ERR_PTR(ret);
	}

	return rq;
}
/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bio %p, bc %p offset %ld stat %d\n",
		bd->name, rq, bc->bio, bc, bc - bd->cmd_map, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_del(&bc->list);
	bsg_add_done_cmd(bd, bc);
	spin_unlock_irqrestore(&bd->lock, flags);
}
/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, bd->disk, rq, 1, bsg_rq_end_io);
}
static inline struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_entry_bc(bd->done_list.next);
		bsg_del_done_cmd(bd, bc);
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}
/*
 * Get a finished command from the done list
 */
static struct bsg_command *__bsg_get_done_cmd(struct bsg_device *bd, int state)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		ret = bsg_io_schedule(bd, state);
		if (ret) {
			bc = ERR_PTR(ret);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}
static struct bsg_command *
bsg_get_done_cmd(struct bsg_device *bd, const struct iovec *iov)
{
	return __bsg_get_done_cmd(bd, TASK_INTERRUPTIBLE);
}

static struct bsg_command *
bsg_get_done_cmd_nosignals(struct bsg_device *bd)
{
	return __bsg_get_done_cmd(bd, TASK_UNINTERRUPTIBLE);
}
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * wait for all commands to complete
	 */
	do {
		ret = bsg_io_schedule(bd, TASK_UNINTERRUPTIBLE);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it. The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		bc = bsg_get_done_cmd_nosignals(bd);

		/*
		 * we _must_ complete before restarting, because
		 * bsg_release can't handle this failing.
		 */
		if (PTR_ERR(bc) == -ERESTARTSYS)
			continue;
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sghdr_rq(bc->rq, &bc->hdr, bc->bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}
typedef struct bsg_command *(*bsg_command_callback)(struct bsg_device *bd,
						    const struct iovec *iov);
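/*
 * Reap up to count / sizeof(sg_io_hdr) completed commands: each one is
 * finished via blk_complete_sghdr_rq() (which copies data and sense back)
 * and its updated sg_io_hdr is then copied out to the caller's buffer.
 */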
static ssize_t
__bsg_read(char __user *buf, size_t count, bsg_command_callback get_bc,
	   struct bsg_device *bd, const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_hdr))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_hdr);
	while (nr_commands) {
		bc = get_bc(bd, iov);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sghdr_rq(bc->rq, &bc->hdr, bc->bio);

		if (copy_to_user(buf, (char *) &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_hdr);
		*bytes_read += sizeof(struct sg_io_hdr);
		nr_commands--;
	}

	return ret;
}
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}
static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %lu bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bytes_read = 0;
	ret = __bsg_read(buf, count, bsg_get_done_cmd,
			 bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}
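/*
 * Queue up to count / sizeof(sg_io_hdr) new commands: each sg_io_hdr is
 * copied in, mapped to a request via bsg_map_hdr() and fired off with
 * bsg_add_command(). On a mid-batch error, the partially set up command
 * is unwound before returning.
 */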
static ssize_t __bsg_write(struct bsg_device *bd, const char __user *buf,
			   size_t count, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_hdr))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_hdr);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		request_queue_t *q = bd->queue;
		int rw = READ;

		bc = bsg_get_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		bc->uhdr = (struct sg_io_hdr __user *) buf;
		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, rw, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_hdr);
		*bytes_read += sizeof(struct sg_io_hdr);
	}

	if (rq)
		blk_unmap_sghdr_rq(rq, &bc->hdr);
	if (bc)
		bsg_free_command(bc);

	return ret;
}
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_read;
	int ret;

	dprintk("%s: write %lu bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);

	bytes_read = 0;
	ret = __bsg_write(bd, buf, count, &bytes_read);
	*ppos = bytes_read;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	dprintk("%s: returning %lu\n", bd->name, bytes_read);
	return bytes_read;
}
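/*
 * Illustrative userspace usage (a sketch, not part of the driver): fill in
 * a struct sg_io_hdr just as for the sg v3 SG_IO ioctl, write() it to the
 * bsg node to queue the command, then read() a sg_io_hdr back to reap the
 * completion. 'fd' and 'cdb' below are hypothetical:
 *
 *	struct sg_io_hdr hdr = { .interface_id = 'S' };
 *	hdr.cmd_len = 6;
 *	hdr.cmdp = cdb;				// e.g. TEST UNIT READY
 *	hdr.dxfer_direction = SG_DXFER_NONE;
 *	write(fd, &hdr, sizeof(hdr));		// queue it
 *	read(fd, &hdr, sizeof(hdr));		// reap the completion
 *
 * Several headers can be submitted or reaped per call, and poll() works
 * on a non-blocking fd.
 */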
static void bsg_free_device(struct bsg_device *bd)
{
	if (bd->cmd_map)
		free_pages((unsigned long) bd->cmd_map, BSG_CMDS_PAGE_ORDER);

	kfree(bd->cmd_bitmap);
	kfree(bd);
}
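/*
 * Device setup: the bsg_device, its command bitmap and the page-backed
 * command pool are allocated separately, so each failure path only has to
 * unwind what already succeeded.
 */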
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_command *cmd_map;
	unsigned long *cmd_bitmap;
	struct bsg_device *bd;
	int bits;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_CMDS;

	bits = (BSG_CMDS / BSG_CMDS_PER_LONG) + 1;
	cmd_bitmap = kzalloc(bits * sizeof(unsigned long), GFP_KERNEL);
	if (!cmd_bitmap)
		goto out_free_bd;
	bd->cmd_bitmap = cmd_bitmap;

	cmd_map = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    BSG_CMDS_PAGE_ORDER);
	if (!cmd_map)
		goto out_free_bitmap;
	bd->cmd_map = cmd_map;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;

out_free_bitmap:
	kfree(cmd_bitmap);
out_free_bd:
	kfree(bd);
	return NULL;
}
static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0;

	mutex_lock(&bsg_mutex);

	if (!atomic_dec_and_test(&bd->ref_count))
		goto out;

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	blk_put_queue(bd->queue);
	hlist_del(&bd->dev_list);
	bsg_free_device(bd);
out:
	mutex_unlock(&bsg_mutex);
	return ret;
}
static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct gendisk *disk,
					 struct file *file)
{
	struct bsg_device *bd = NULL;
	unsigned char buf[32];

	bd = bsg_alloc_device();
	if (!bd)
		return ERR_PTR(-ENOMEM);

	bd->disk = disk;
	bd->queue = disk->queue;
	kobject_get(&disk->queue->kobj);
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	bd->minor = iminor(inode);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list,
		       &bsg_device_list[bsg_list_idx(bd->minor)]);

	strncpy(bd->name, disk->disk_name, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}
static struct bsg_device *__bsg_get_device(int minor)
{
	struct hlist_head *list = &bsg_device_list[bsg_list_idx(minor)];
	struct bsg_device *bd = NULL;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each(entry, list) {
		bd = hlist_entry(entry, struct bsg_device, dev_list);
		if (bd->minor == minor) {
			atomic_inc(&bd->ref_count);
			break;
		}

		bd = NULL;
	}

	mutex_unlock(&bsg_mutex);
	return bd;
}
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = __bsg_get_device(iminor(inode));
	struct bsg_class_device *bcd, *__bcd;

	if (bd)
		return bd;

	/*
	 * find the class device
	 */
	bcd = NULL;
	mutex_lock(&bsg_mutex);
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == iminor(inode)) {
			bcd = __bcd;
			break;
		}
	}
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	return bsg_add_device(inode, bcd->disk, file);
}
static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}
static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}
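/*
 * poll() semantics: POLLIN/POLLRDNORM when completed commands are waiting
 * to be reaped; the POLLOUT bit follows the queued_cmds vs max_queue test
 * below.
 */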
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
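/*
 * ioctl dispatch: SG_GET/SET_COMMAND_Q adjust the per-device queue depth,
 * the familiar sg/SCSI ioctls are passed through to scsi_cmd_ioctl(), and
 * everything else falls out to the default case.
 */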
static int
bsg_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
	  unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;

	if (!bd)
		return -ENXIO;

	switch (cmd) {
	/*
	 * our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue > BSG_CMDS || queue < 1)
			return -EINVAL;

		bd->max_queue = queue;
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->disk, cmd, uarg);
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
static struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.ioctl		=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};
void bsg_unregister_disk(struct gendisk *disk)
{
	struct bsg_class_device *bcd = &disk->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	sysfs_remove_link(&bcd->disk->queue->kobj, "bsg");
	class_device_destroy(bsg_class, MKDEV(BSG_MAJOR, bcd->minor));
	bcd->class_dev = NULL;
	list_del_init(&bcd->list);
	mutex_unlock(&bsg_mutex);
}
int bsg_register_disk(struct gendisk *disk)
{
	request_queue_t *q = disk->queue;
	struct bsg_class_device *bcd;
	dev_t dev;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &disk->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));
	INIT_LIST_HEAD(&bcd->list);

	mutex_lock(&bsg_mutex);
	dev = MKDEV(BSG_MAJOR, bsg_device_nr);
	bcd->minor = bsg_device_nr;
	bsg_device_nr++;
	bcd->disk = disk;
	bcd->class_dev = class_device_create(bsg_class, NULL, dev, bcd->dev,
					     "%s", disk->disk_name);
	list_add_tail(&bcd->list, &bsg_class_list);
	sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
	mutex_unlock(&bsg_mutex);
	return 0;
}
static int __init bsg_init(void)
{
	int ret, i;

	for (i = 0; i < BSG_LIST_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class))
		return PTR_ERR(bsg_class);

	ret = register_chrdev(BSG_MAJOR, "bsg", &bsg_fops);
	if (ret) {
		class_destroy(bsg_class);
		return ret;
	}

	printk(KERN_INFO "%s loaded\n", bsg_version);
	return 0;
}
MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION("Block layer SCSI generic (sg) driver");
MODULE_LICENSE("GPL");

subsys_initcall(bsg_init);