1 /* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
/*
 * block device routines
 */
#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include "aoe.h"
/* Serializes open against device state changes. */
static DEFINE_MUTEX(aoeblk_mutex);
/* Slab cache backing the per-device buf mempools. */
static struct kmem_cache *buf_pool_cache;
24 static ssize_t
aoedisk_show_state(struct device
*dev
,
25 struct device_attribute
*attr
, char *page
)
27 struct gendisk
*disk
= dev_to_disk(dev
);
28 struct aoedev
*d
= disk
->private_data
;
30 return snprintf(page
, PAGE_SIZE
,
32 (d
->flags
& DEVFL_UP
) ? "up" : "down",
33 (d
->flags
& DEVFL_KICKME
) ? ",kickme" :
34 (d
->nopen
&& !(d
->flags
& DEVFL_UP
)) ? ",closewait" : "");
35 /* I'd rather see nopen exported so we can ditch closewait */
37 static ssize_t
aoedisk_show_mac(struct device
*dev
,
38 struct device_attribute
*attr
, char *page
)
40 struct gendisk
*disk
= dev_to_disk(dev
);
41 struct aoedev
*d
= disk
->private_data
;
42 struct aoetgt
*t
= d
->targets
[0];
45 return snprintf(page
, PAGE_SIZE
, "none\n");
46 return snprintf(page
, PAGE_SIZE
, "%pm\n", t
->addr
);
48 static ssize_t
aoedisk_show_netif(struct device
*dev
,
49 struct device_attribute
*attr
, char *page
)
51 struct gendisk
*disk
= dev_to_disk(dev
);
52 struct aoedev
*d
= disk
->private_data
;
53 struct net_device
*nds
[8], **nd
, **nnd
, **ne
;
54 struct aoetgt
**t
, **te
;
55 struct aoeif
*ifp
, *e
;
58 memset(nds
, 0, sizeof nds
);
60 ne
= nd
+ ARRAY_SIZE(nds
);
63 for (; t
< te
&& *t
; t
++) {
66 for (; ifp
< e
&& ifp
->nd
; ifp
++) {
67 for (nnd
= nds
; nnd
< nd
; nnd
++)
70 if (nnd
== nd
&& nd
!= ne
)
78 return snprintf(page
, PAGE_SIZE
, "none\n");
79 for (p
= page
; nd
< ne
; nd
++)
80 p
+= snprintf(p
, PAGE_SIZE
- (p
-page
), "%s%s",
81 p
== page
? "" : ",", (*nd
)->name
);
82 p
+= snprintf(p
, PAGE_SIZE
- (p
-page
), "\n");
85 /* firmware version */
86 static ssize_t
aoedisk_show_fwver(struct device
*dev
,
87 struct device_attribute
*attr
, char *page
)
89 struct gendisk
*disk
= dev_to_disk(dev
);
90 struct aoedev
*d
= disk
->private_data
;
92 return snprintf(page
, PAGE_SIZE
, "0x%04x\n", (unsigned int) d
->fw_ver
);
95 static DEVICE_ATTR(state
, S_IRUGO
, aoedisk_show_state
, NULL
);
96 static DEVICE_ATTR(mac
, S_IRUGO
, aoedisk_show_mac
, NULL
);
97 static DEVICE_ATTR(netif
, S_IRUGO
, aoedisk_show_netif
, NULL
);
98 static struct device_attribute dev_attr_firmware_version
= {
99 .attr
= { .name
= "firmware-version", .mode
= S_IRUGO
},
100 .show
= aoedisk_show_fwver
,
103 static struct attribute
*aoe_attrs
[] = {
104 &dev_attr_state
.attr
,
106 &dev_attr_netif
.attr
,
107 &dev_attr_firmware_version
.attr
,
111 static const struct attribute_group attr_group
= {
116 aoedisk_add_sysfs(struct aoedev
*d
)
118 return sysfs_create_group(&disk_to_dev(d
->gd
)->kobj
, &attr_group
);
121 aoedisk_rm_sysfs(struct aoedev
*d
)
123 sysfs_remove_group(&disk_to_dev(d
->gd
)->kobj
, &attr_group
);
127 aoeblk_open(struct block_device
*bdev
, fmode_t mode
)
129 struct aoedev
*d
= bdev
->bd_disk
->private_data
;
132 mutex_lock(&aoeblk_mutex
);
133 spin_lock_irqsave(&d
->lock
, flags
);
134 if (d
->flags
& DEVFL_UP
) {
136 spin_unlock_irqrestore(&d
->lock
, flags
);
137 mutex_unlock(&aoeblk_mutex
);
140 spin_unlock_irqrestore(&d
->lock
, flags
);
141 mutex_unlock(&aoeblk_mutex
);
146 aoeblk_release(struct gendisk
*disk
, fmode_t mode
)
148 struct aoedev
*d
= disk
->private_data
;
151 spin_lock_irqsave(&d
->lock
, flags
);
153 if (--d
->nopen
== 0) {
154 spin_unlock_irqrestore(&d
->lock
, flags
);
155 aoecmd_cfg(d
->aoemajor
, d
->aoeminor
);
158 spin_unlock_irqrestore(&d
->lock
, flags
);
164 aoeblk_request(struct request_queue
*q
)
170 if ((d
->flags
& DEVFL_UP
) == 0) {
171 pr_info_ratelimited("aoe: device %ld.%d is not up\n",
172 d
->aoemajor
, d
->aoeminor
);
173 while ((rq
= blk_peek_request(q
))) {
174 blk_start_request(rq
);
175 aoe_end_request(d
, rq
, 1);
183 aoeblk_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
185 struct aoedev
*d
= bdev
->bd_disk
->private_data
;
187 if ((d
->flags
& DEVFL_UP
) == 0) {
188 printk(KERN_ERR
"aoe: disk not up\n");
192 geo
->cylinders
= d
->geo
.cylinders
;
193 geo
->heads
= d
->geo
.heads
;
194 geo
->sectors
= d
->geo
.sectors
;
198 static const struct block_device_operations aoe_bdops
= {
200 .release
= aoeblk_release
,
201 .getgeo
= aoeblk_getgeo
,
202 .owner
= THIS_MODULE
,
205 /* alloc_disk and add_disk can sleep */
207 aoeblk_gdalloc(void *vp
)
209 struct aoedev
*d
= vp
;
212 struct request_queue
*q
;
213 enum { KB
= 1024, MB
= KB
* KB
, READ_AHEAD
= 2 * MB
, };
216 gd
= alloc_disk(AOE_PARTITIONS
);
218 pr_err("aoe: cannot allocate disk structure for %ld.%d\n",
219 d
->aoemajor
, d
->aoeminor
);
223 mp
= mempool_create(MIN_BUFS
, mempool_alloc_slab
, mempool_free_slab
,
226 printk(KERN_ERR
"aoe: cannot allocate bufpool for %ld.%d\n",
227 d
->aoemajor
, d
->aoeminor
);
230 q
= blk_init_queue(aoeblk_request
, &d
->lock
);
232 pr_err("aoe: cannot allocate block queue for %ld.%d\n",
233 d
->aoemajor
, d
->aoeminor
);
238 d
->blkq
= blk_alloc_queue(GFP_KERNEL
);
241 d
->blkq
->backing_dev_info
.name
= "aoe";
242 if (bdi_init(&d
->blkq
->backing_dev_info
))
244 spin_lock_irqsave(&d
->lock
, flags
);
245 blk_queue_max_hw_sectors(d
->blkq
, BLK_DEF_MAX_SECTORS
);
246 q
->backing_dev_info
.ra_pages
= READ_AHEAD
/ PAGE_CACHE_SIZE
;
248 d
->blkq
= gd
->queue
= q
;
251 gd
->major
= AOE_MAJOR
;
252 gd
->first_minor
= d
->sysminor
* AOE_PARTITIONS
;
253 gd
->fops
= &aoe_bdops
;
254 gd
->private_data
= d
;
255 set_capacity(gd
, d
->ssize
);
256 snprintf(gd
->disk_name
, sizeof gd
->disk_name
, "etherd/e%ld.%d",
257 d
->aoemajor
, d
->aoeminor
);
259 d
->flags
&= ~DEVFL_GDALLOC
;
260 d
->flags
|= DEVFL_UP
;
262 spin_unlock_irqrestore(&d
->lock
, flags
);
265 aoedisk_add_sysfs(d
);
269 blk_cleanup_queue(d
->blkq
);
272 mempool_destroy(d
->bufpool
);
276 spin_lock_irqsave(&d
->lock
, flags
);
277 d
->flags
&= ~DEVFL_GDALLOC
;
278 spin_unlock_irqrestore(&d
->lock
, flags
);
284 kmem_cache_destroy(buf_pool_cache
);
290 buf_pool_cache
= kmem_cache_create("aoe_bufs",
293 if (buf_pool_cache
== NULL
)