/*
 * Network block device - make block devices work over TCP
 *
 * Note that you cannot swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you cannot swap over TCP in general.
 *
 * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/mutex.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>
#include <linux/types.h>

#include <asm/uaccess.h>
#include <asm/types.h>

#include <linux/nbd.h>
struct nbd_device {
        int flags;
        int harderror;          /* Code of hard error */
        struct socket *sock;    /* If == NULL, device is not ready, yet */
        int magic;

        spinlock_t queue_lock;
        struct list_head queue_head;    /* Requests waiting result */
        struct request *active_req;
        wait_queue_head_t active_wq;
        struct list_head waiting_queue; /* Requests to be sent */
        wait_queue_head_t waiting_wq;

        struct mutex tx_lock;
        struct gendisk *disk;
        int blksize;
        u64 bytesize;
        pid_t pid; /* pid of nbd-client, if attached */
        int xmit_timeout;
        int disconnect; /* a disconnect has been requested by user */
};
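/*
 * Locking summary, as used below: tx_lock serializes all transmission
 * on nbd->sock and guards the sock pointer itself (nbd_send_req must
 * be called with tx_lock held); queue_lock protects queue_head and
 * waiting_queue; active_wq pairs with active_req so that readers of
 * the reply stream can wait out an in-flight send, and waiting_wq
 * wakes the writer thread when new requests are queued.
 */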
#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
        return disk_to_dev(nbd->disk);
}
static const char *nbdcmd_to_ascii(int cmd)
{
        switch (cmd) {
        case NBD_CMD_READ: return "read";
        case NBD_CMD_WRITE: return "write";
        case NBD_CMD_DISC: return "disconnect";
        case NBD_CMD_FLUSH: return "flush";
        case NBD_CMD_TRIM: return "trim/discard";
        }
        return "invalid";
}
static void nbd_end_request(struct nbd_device *nbd, struct request *req)
{
        int error = req->errors ? -EIO : 0;
        struct request_queue *q = req->q;
        unsigned long flags;

        dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req,
                error ? "failed" : "done");

        spin_lock_irqsave(q->queue_lock, flags);
        __blk_end_request_all(req, error);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
static void sock_shutdown(struct nbd_device *nbd, int lock)
{
        /* Forcibly shutdown the socket causing all listeners
         * to error
         *
         * FIXME: This code is duplicated from sys_shutdown, but
         * there should be a more generic interface rather than
         * calling socket ops directly here */
        if (lock)
                mutex_lock(&nbd->tx_lock);
        if (nbd->sock) {
                dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
                kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
                nbd->sock = NULL;
        }
        if (lock)
                mutex_unlock(&nbd->tx_lock);
}
static void nbd_xmit_timeout(unsigned long arg)
{
        struct task_struct *task = (struct task_struct *)arg;

        printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
                task->comm, task->pid);
        force_sig(SIGKILL, task);
}
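/*
 * The timer above is the only way out of a wedged transmission:
 * sock_xmit() below blocks every signal except SIGKILL while sending,
 * so nbd_xmit_timeout() aborts a hung xmit by SIGKILLing the sending
 * task after nbd->xmit_timeout jiffies.
 */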
/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
                int msg_flags)
{
        struct socket *sock = nbd->sock;
        int result;
        struct msghdr msg;
        struct kvec iov;
        sigset_t blocked, oldset;
        unsigned long pflags = current->flags;

        if (unlikely(!sock)) {
                dev_err(disk_to_dev(nbd->disk),
                        "Attempted %s on closed socket in sock_xmit\n",
                        (send ? "send" : "recv"));
                return -EINVAL;
        }

        /* Allow interception of SIGKILL only
         * Don't allow other signals to interrupt the transmission */
        siginitsetinv(&blocked, sigmask(SIGKILL));
        sigprocmask(SIG_SETMASK, &blocked, &oldset);

        current->flags |= PF_MEMALLOC;
        do {
                sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
                iov.iov_base = buf;
                iov.iov_len = size;
                msg.msg_name = NULL;
                msg.msg_namelen = 0;
                msg.msg_control = NULL;
                msg.msg_controllen = 0;
                msg.msg_flags = msg_flags | MSG_NOSIGNAL;

                if (send) {
                        struct timer_list ti;

                        if (nbd->xmit_timeout) {
                                init_timer(&ti);
                                ti.function = nbd_xmit_timeout;
                                ti.data = (unsigned long)current;
                                ti.expires = jiffies + nbd->xmit_timeout;
                                add_timer(&ti);
                        }
                        result = kernel_sendmsg(sock, &msg, &iov, 1, size);
                        if (nbd->xmit_timeout)
                                del_timer_sync(&ti);
                } else
                        result = kernel_recvmsg(sock, &msg, &iov, 1, size,
                                                msg.msg_flags);

                if (signal_pending(current)) {
                        siginfo_t info;
                        printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
                                task_pid_nr(current), current->comm,
                                dequeue_signal_lock(current, &current->blocked, &info));
                        result = -EINTR;
                        sock_shutdown(nbd, !send);
                        break;
                }

                if (result <= 0) {
                        if (result == 0)
                                result = -EPIPE; /* short read */
                        break;
                }
                size -= result;
                buf += result;
        } while (size > 0);

        sigprocmask(SIG_SETMASK, &oldset, NULL);
        tsk_restore_flags(current, pflags, PF_MEMALLOC);

        return result;
}
static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
                int flags)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
                           bvec->bv_len, flags);
        kunmap(bvec->bv_page);
        return result;
}
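/*
 * On-the-wire request layout, as declared in <linux/nbd.h> (all
 * multi-byte fields big-endian; nbd_init() pins the size at 28 bytes
 * with a BUILD_BUG_ON):
 *
 *      __be32 magic;     NBD_REQUEST_MAGIC
 *      __be32 type;      NBD_CMD_READ/WRITE/DISC/FLUSH/TRIM
 *      char   handle[8]; opaque cookie, echoed back in the reply
 *      __be64 from;      byte offset into the export
 *      __be32 len;       payload length in bytes
 *
 * nbd_send_req() below stores the struct request pointer in 'handle'
 * so nbd_read_stat() can match each reply to its request.
 */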
/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *nbd, struct request *req)
{
        int result, flags;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);

        memset(&request, 0, sizeof(request));
        request.magic = htonl(NBD_REQUEST_MAGIC);
        request.type = htonl(nbd_cmd(req));

        if (nbd_cmd(req) != NBD_CMD_FLUSH && nbd_cmd(req) != NBD_CMD_DISC) {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
        }
        memcpy(request.handle, &req, sizeof(req));

        dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
                req, nbdcmd_to_ascii(nbd_cmd(req)),
                (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
        result = sock_xmit(nbd, 1, &request, sizeof(request),
                        (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
        if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                        "Send control failed (result %d)\n", result);
                return -EIO;
        }

        if (nbd_cmd(req) == NBD_CMD_WRITE) {
                struct req_iterator iter;
                struct bio_vec bvec;
                /*
                 * we are really probing at internals to determine
                 * whether to set MSG_MORE or not...
                 */
                rq_for_each_segment(bvec, req, iter) {
                        flags = 0;
                        if (!rq_iter_last(bvec, iter))
                                flags = MSG_MORE;
                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                req, bvec.bv_len);
                        result = sock_send_bvec(nbd, &bvec, flags);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
                                        result);
                                return -EIO;
                        }
                }
        }
        return 0;
}
static struct request *nbd_find_request(struct nbd_device *nbd,
                                        struct request *xreq)
{
        struct request *req, *tmp;
        int err;

        err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
        if (unlikely(err))
                return ERR_PTR(err);

        spin_lock(&nbd->queue_lock);
        list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
                if (req != xreq)
                        continue;
                list_del_init(&req->queuelist);
                spin_unlock(&nbd->queue_lock);
                return req;
        }
        spin_unlock(&nbd->queue_lock);

        return ERR_PTR(-ENOENT);
}
static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
{
        int result;
        void *kaddr = kmap(bvec->bv_page);
        result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
                        MSG_WAITALL);
        kunmap(bvec->bv_page);
        return result;
}
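/*
 * The reply counterpart (struct nbd_reply in <linux/nbd.h>) is 16
 * bytes: __be32 magic (NBD_REPLY_MAGIC), __be32 error (0 on success)
 * and the echoed 8-byte handle that nbd_read_stat() uses below to
 * find the originating request.
 */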
/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *nbd)
{
        int result;
        struct nbd_reply reply;
        struct request *req;

        reply.magic = 0;
        result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
        if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                        "Receive control failed (result %d)\n", result);
                goto harderror;
        }

        if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
                dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
                                (unsigned long)ntohl(reply.magic));
                result = -EPROTO;
                goto harderror;
        }

        req = nbd_find_request(nbd, *(struct request **)reply.handle);
        if (IS_ERR(req)) {
                result = PTR_ERR(req);
                if (result != -ENOENT)
                        goto harderror;

                dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
                        reply.handle);
                result = -EBADR;
                goto harderror;
        }

        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
                req->errors++;
                return req;
        }

        dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
        if (nbd_cmd(req) == NBD_CMD_READ) {
                struct req_iterator iter;
                struct bio_vec bvec;

                rq_for_each_segment(bvec, req, iter) {
                        result = sock_recv_bvec(nbd, &bvec);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
                                        result);
                                req->errors++;
                                return req;
                        }
                        dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
                                req, bvec.bv_len);
                }
        }
        return req;
harderror:
        nbd->harderror = result;
        return NULL;
}
static ssize_t pid_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);

        return sprintf(buf, "%ld\n",
                (long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
        .attr = { .name = "pid", .mode = S_IRUGO},
        .show = pid_show,
};
static int nbd_do_it(struct nbd_device *nbd)
{
        struct request *req;
        int ret;

        BUG_ON(nbd->magic != NBD_MAGIC);

        sk_set_memalloc(nbd->sock->sk);
        nbd->pid = task_pid_nr(current);
        ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
        if (ret) {
                dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
                nbd->pid = 0;
                return ret;
        }

        while ((req = nbd_read_stat(nbd)) != NULL)
                nbd_end_request(nbd, req);

        device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
        nbd->pid = 0;
        return 0;
}
static void nbd_clear_que(struct nbd_device *nbd)
{
        struct request *req;

        BUG_ON(nbd->magic != NBD_MAGIC);

        /*
         * Because we have set nbd->sock to NULL under the tx_lock, all
         * modifications to the list must have completed by now. For
         * the same reason, the active_req must be NULL.
         *
         * As a consequence, we don't need to take the spin lock while
         * purging the list here.
         */
        BUG_ON(nbd->sock);
        BUG_ON(nbd->active_req);

        while (!list_empty(&nbd->queue_head)) {
                req = list_entry(nbd->queue_head.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                req->errors++;
                nbd_end_request(nbd, req);
        }

        while (!list_empty(&nbd->waiting_queue)) {
                req = list_entry(nbd->waiting_queue.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                req->errors++;
                nbd_end_request(nbd, req);
        }
}
static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
{
        if (req->cmd_type != REQ_TYPE_FS)
                goto error_out;

        nbd_cmd(req) = NBD_CMD_READ;
        if (rq_data_dir(req) == WRITE) {
                if ((req->cmd_flags & REQ_DISCARD)) {
                        WARN_ON(!(nbd->flags & NBD_FLAG_SEND_TRIM));
                        nbd_cmd(req) = NBD_CMD_TRIM;
                } else
                        nbd_cmd(req) = NBD_CMD_WRITE;
                if (nbd->flags & NBD_FLAG_READ_ONLY) {
                        dev_err(disk_to_dev(nbd->disk),
                                "Write on read-only\n");
                        goto error_out;
                }
        }

        if (req->cmd_flags & REQ_FLUSH) {
                BUG_ON(unlikely(blk_rq_sectors(req)));
                nbd_cmd(req) = NBD_CMD_FLUSH;
        }

        req->errors = 0;

        mutex_lock(&nbd->tx_lock);
        if (unlikely(!nbd->sock)) {
                mutex_unlock(&nbd->tx_lock);
                dev_err(disk_to_dev(nbd->disk),
                        "Attempted send on closed socket\n");
                goto error_out;
        }

        nbd->active_req = req;

        if (nbd_send_req(nbd, req) != 0) {
                dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
                req->errors++;
                nbd_end_request(nbd, req);
        } else {
                spin_lock(&nbd->queue_lock);
                list_add_tail(&req->queuelist, &nbd->queue_head);
                spin_unlock(&nbd->queue_lock);
        }

        nbd->active_req = NULL;
        mutex_unlock(&nbd->tx_lock);
        wake_up_all(&nbd->active_wq);

        return;

error_out:
        req->errors++;
        nbd_end_request(nbd, req);
}
static int nbd_thread(void *data)
{
        struct nbd_device *nbd = data;
        struct request *req;

        set_user_nice(current, MIN_NICE);
        while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
                /* wait for something to do */
                wait_event_interruptible(nbd->waiting_wq,
                                         kthread_should_stop() ||
                                         !list_empty(&nbd->waiting_queue));

                /* extract request */
                if (list_empty(&nbd->waiting_queue))
                        continue;

                spin_lock_irq(&nbd->queue_lock);
                req = list_entry(nbd->waiting_queue.next, struct request,
                                 queuelist);
                list_del_init(&req->queuelist);
                spin_unlock_irq(&nbd->queue_lock);

                /* handle request */
                nbd_handle_req(nbd, req);
        }
        return 0;
}
/*
 * We always wait for result of write, for now. It would be nice to make it optional
 * in future
 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */
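/*
 * Request flow, for orientation: do_nbd_request() runs under the block
 * layer's queue_lock, moves each fetched request onto waiting_queue and
 * wakes nbd_thread(). That thread transmits it (nbd_handle_req ->
 * nbd_send_req) and parks it on queue_head until nbd_do_it(), running
 * in the nbd-client process, receives the server's reply via
 * nbd_read_stat() and completes the request.
 */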
static void do_nbd_request(struct request_queue *q)
                __releases(q->queue_lock) __acquires(q->queue_lock)
{
        struct request *req;

        while ((req = blk_fetch_request(q)) != NULL) {
                struct nbd_device *nbd;

                spin_unlock_irq(q->queue_lock);

                nbd = req->rq_disk->private_data;

                BUG_ON(nbd->magic != NBD_MAGIC);

                dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n",
                        req, req->cmd_type);

                if (unlikely(!nbd->sock)) {
                        dev_err(disk_to_dev(nbd->disk),
                                "Attempted send on closed socket\n");
                        req->errors++;
                        nbd_end_request(nbd, req);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }

                spin_lock_irq(&nbd->queue_lock);
                list_add_tail(&req->queuelist, &nbd->waiting_queue);
                spin_unlock_irq(&nbd->queue_lock);

                wake_up(&nbd->waiting_wq);

                spin_lock_irq(q->queue_lock);
        }
}
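/*
 * A typical device setup, as driven by userspace nbd-client (a rough
 * sketch; the exact order and extra NBD_SET_FLAGS/NBD_SET_TIMEOUT
 * calls depend on the client version):
 *
 *      fd = open("/dev/nbd0", O_RDWR);
 *      ioctl(fd, NBD_SET_SOCK, sockfd);        connected TCP socket
 *      ioctl(fd, NBD_SET_BLKSIZE, 4096);
 *      ioctl(fd, NBD_SET_SIZE_BLOCKS, blocks); export size in blocks
 *      ioctl(fd, NBD_DO_IT);                   blocks until disconnect
 */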
/* Must be called with tx_lock held */

static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                       unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case NBD_DISCONNECT: {
                struct request sreq;

                dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
                if (!nbd->sock)
                        return -EINVAL;

                mutex_unlock(&nbd->tx_lock);
                fsync_bdev(bdev);
                mutex_lock(&nbd->tx_lock);
                blk_rq_init(NULL, &sreq);
                sreq.cmd_type = REQ_TYPE_SPECIAL;
                nbd_cmd(&sreq) = NBD_CMD_DISC;

                /* Check again after getting mutex back. */
                if (!nbd->sock)
                        return -EINVAL;

                nbd->disconnect = 1;

                nbd_send_req(nbd, &sreq);
                return 0;
        }

        case NBD_CLEAR_SOCK: {
                struct socket *sock = nbd->sock;
                nbd->sock = NULL;
                nbd_clear_que(nbd);
                BUG_ON(!list_empty(&nbd->queue_head));
                BUG_ON(!list_empty(&nbd->waiting_queue));
                kill_bdev(bdev);
                if (sock)
                        sockfd_put(sock);
                return 0;
        }

        case NBD_SET_SOCK: {
                struct socket *sock;
                int err;
                if (nbd->sock)
                        return -EBUSY;
                sock = sockfd_lookup(arg, &err);
                if (sock) {
                        nbd->sock = sock;
                        if (max_part > 0)
                                bdev->bd_invalidated = 1;
                        nbd->disconnect = 0; /* we're connected now */
                        return 0;
                }
                return -EINVAL;
        }

        case NBD_SET_BLKSIZE:
                nbd->blksize = arg;
                nbd->bytesize &= ~(nbd->blksize-1);
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_SET_SIZE:
                nbd->bytesize = arg & ~(nbd->blksize-1);
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_SET_TIMEOUT:
                nbd->xmit_timeout = arg * HZ;
                return 0;

        case NBD_SET_FLAGS:
                nbd->flags = arg;
                return 0;

        case NBD_SET_SIZE_BLOCKS:
                nbd->bytesize = ((u64) arg) * nbd->blksize;
                bdev->bd_inode->i_size = nbd->bytesize;
                set_blocksize(bdev, nbd->blksize);
                set_capacity(nbd->disk, nbd->bytesize >> 9);
                return 0;

        case NBD_DO_IT: {
                struct task_struct *thread;
                struct socket *sock;
                int error;

                if (nbd->pid)
                        return -EBUSY;
                if (!nbd->sock)
                        return -EINVAL;

                mutex_unlock(&nbd->tx_lock);

                if (nbd->flags & NBD_FLAG_READ_ONLY)
                        set_device_ro(bdev, true);
                if (nbd->flags & NBD_FLAG_SEND_TRIM)
                        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
                                nbd->disk->queue);
                if (nbd->flags & NBD_FLAG_SEND_FLUSH)
                        blk_queue_flush(nbd->disk->queue, REQ_FLUSH);
                else
                        blk_queue_flush(nbd->disk->queue, 0);

                thread = kthread_run(nbd_thread, nbd, "%s",
                                     nbd->disk->disk_name);
                if (IS_ERR(thread)) {
                        mutex_lock(&nbd->tx_lock);
                        return PTR_ERR(thread);
                }

                error = nbd_do_it(nbd);
                kthread_stop(thread);

                mutex_lock(&nbd->tx_lock);
                if (error)
                        return error;
                sock_shutdown(nbd, 0);
                sock = nbd->sock;
                nbd->sock = NULL;
                nbd_clear_que(nbd);
                dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
                kill_bdev(bdev);
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
                set_device_ro(bdev, false);
                if (sock)
                        sockfd_put(sock);
                nbd->flags = 0;
                nbd->bytesize = 0;
                bdev->bd_inode->i_size = 0;
                set_capacity(nbd->disk, 0);
                if (max_part > 0)
                        ioctl_by_bdev(bdev, BLKRRPART, 0);
                if (nbd->disconnect) /* user requested, ignore socket errors */
                        return 0;
                return nbd->harderror;
        }

        case NBD_CLEAR_QUE:
                /*
                 * This is for compatibility only. The queue is always cleared
                 * by NBD_DO_IT or NBD_CLEAR_SOCK.
                 */
                return 0;

        case NBD_PRINT_DEBUG:
                dev_info(disk_to_dev(nbd->disk),
                        "next = %p, prev = %p, head = %p\n",
                        nbd->queue_head.next, nbd->queue_head.prev,
                        &nbd->queue_head);
                return 0;
        }
        return -ENOTTY;
}
static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct nbd_device *nbd = bdev->bd_disk->private_data;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        BUG_ON(nbd->magic != NBD_MAGIC);

        mutex_lock(&nbd->tx_lock);
        error = __nbd_ioctl(bdev, nbd, cmd, arg);
        mutex_unlock(&nbd->tx_lock);

        return error;
}
static const struct block_device_operations nbd_fops =
{
        .owner =        THIS_MODULE,
        .ioctl =        nbd_ioctl,
};
/*
 * And here should be modules and kernel interface
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
        int err = -ENOMEM;
        int i;
        int part_shift;

        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

        if (max_part < 0) {
                printk(KERN_ERR "nbd: max_part must be >= 0\n");
                return -EINVAL;
        }

        part_shift = 0;
        if (max_part > 0) {
                part_shift = fls(max_part);

                /*
                 * Adjust max_part according to part_shift as it is exported
                 * to user space so that user can know the max number of
                 * partition kernel should be able to manage.
                 *
                 * Note that -1 is required because partition 0 is reserved
                 * for the whole disk.
                 */
                max_part = (1UL << part_shift) - 1;
        }

        if ((1UL << part_shift) > DISK_MAX_PARTS)
                return -EINVAL;

        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;

        nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
        if (!nbd_dev)
                return -ENOMEM;

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
                        goto out;
                nbd_dev[i].disk = disk;
                /*
                 * The new linux 2.5 block layer implementation requires
                 * every gendisk to have its very own request_queue struct.
                 * These structs are big so we dynamically allocate them.
                 */
                disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
                if (!disk->queue) {
                        put_disk(disk);
                        goto out;
                }
                /*
                 * Tell the block layer that we are not a rotational device
                 */
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
                disk->queue->limits.discard_granularity = 512;
                disk->queue->limits.max_discard_sectors = UINT_MAX;
                disk->queue->limits.discard_zeroes_data = 0;
                blk_queue_max_hw_sectors(disk->queue, 65536);
                disk->queue->limits.max_sectors = 256;
        }

        if (register_blkdev(NBD_MAJOR, "nbd")) {
                err = -EIO;
                goto out;
        }

        printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);

        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].magic = NBD_MAGIC;
                INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                mutex_init(&nbd_dev[i].tx_lock);
                init_waitqueue_head(&nbd_dev[i].active_wq);
                init_waitqueue_head(&nbd_dev[i].waiting_wq);
                nbd_dev[i].blksize = 1024;
                nbd_dev[i].bytesize = 0;
                disk->major = NBD_MAJOR;
                disk->first_minor = i << part_shift;
                disk->fops = &nbd_fops;
                disk->private_data = &nbd_dev[i];
                sprintf(disk->disk_name, "nbd%d", i);
                set_capacity(disk, 0);
                add_disk(disk);
        }

        return 0;
out:
        while (i--) {
                blk_cleanup_queue(nbd_dev[i].disk->queue);
                put_disk(nbd_dev[i].disk);
        }
        kfree(nbd_dev);
        return err;
}
static void __exit nbd_cleanup(void)
{
        int i;
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
                nbd_dev[i].magic = 0;
                if (disk) {
                        del_gendisk(disk);
                        blk_cleanup_queue(disk->queue);
                        put_disk(disk);
                }
        }
        unregister_blkdev(NBD_MAJOR, "nbd");
        kfree(nbd_dev);
        printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}
module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");