aoe: become I/O request queue handler for increased user control
[deliverable/linux.git] / drivers / block / aoe / aoechr.c
1 /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
2 /*
3 * aoechr.c
4 * AoE character device driver
5 */
6
7 #include <linux/hdreg.h>
8 #include <linux/blkdev.h>
9 #include <linux/completion.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/mutex.h>
13 #include <linux/skbuff.h>
14 #include <linux/export.h>
15 #include "aoe.h"
16
/* Minor numbers for the AoE control character devices; each MINOR_*
 * constant selects one control file (names listed in chardevs[] below).
 */
enum {
	//MINOR_STAT = 1, (moved to sysfs)
	MINOR_ERR = 2,
	MINOR_DISCOVER,
	MINOR_INTERFACES,
	MINOR_REVALIDATE,
	MINOR_FLUSH,
	MSGSZ = 2048,	/* message size bound; not referenced in this file */
	NMSG = 100, /* message backlog to retain */
};

/* Association of a minor number with the device-node name created for it */
struct aoe_chardev {
	ulong minor;
	char name[32];
};

/* ErrMsg.flags bit: the slot holds a message not yet consumed by a reader */
enum { EMFL_VALID = 1 };

/* One slot of the error-message ring served by the "err" device */
struct ErrMsg {
	short flags;	/* EMFL_VALID while msg points at live data */
	short len;	/* length of msg in bytes (msg is NOT NUL-terminated) */
	char *msg;	/* kmalloc'd copy of the message text */
};

static DEFINE_MUTEX(aoechr_mutex);

/* Ring buffer of NMSG error messages: aoechr_error() appends at
 * emsgs_tail_idx, aoechr_read() consumes from emsgs_head_idx.
 * emsgs_lock protects the ring and nblocked_emsgs_readers; emsgs_comp
 * wakes blocked readers when a new message is queued.
 */
static struct ErrMsg emsgs[NMSG];
static int emsgs_head_idx, emsgs_tail_idx;
static struct completion emsgs_comp;
static spinlock_t emsgs_lock;
static int nblocked_emsgs_readers;

static struct class *aoe_class;	/* device class for the control files */
static struct aoe_chardev chardevs[] = {
	{ MINOR_ERR, "err" },
	{ MINOR_DISCOVER, "discover" },
	{ MINOR_INTERFACES, "interfaces" },
	{ MINOR_REVALIDATE, "revalidate" },
	{ MINOR_FLUSH, "flush" },
};
55
/* Handle a write to the "discover" device: send an AoE config query
 * for every shelf/slot (0xffff, 0xff appear to be the wildcard
 * addresses — confirm against aoecmd_cfg).  Always reports success.
 */
static int
discover(void)
{
	aoecmd_cfg(0xffff, 0xff);
	return 0;
}
62
63 static int
64 interfaces(const char __user *str, size_t size)
65 {
66 if (set_aoe_iflist(str, size)) {
67 printk(KERN_ERR
68 "aoe: could not set interface list: too many interfaces\n");
69 return -EINVAL;
70 }
71 return 0;
72 }
73
74 static int
75 revalidate(const char __user *str, size_t size)
76 {
77 int major, minor, n;
78 ulong flags;
79 struct aoedev *d;
80 struct sk_buff *skb;
81 char buf[16];
82
83 if (size >= sizeof buf)
84 return -EINVAL;
85 buf[sizeof buf - 1] = '\0';
86 if (copy_from_user(buf, str, size))
87 return -EFAULT;
88
89 n = sscanf(buf, "e%d.%d", &major, &minor);
90 if (n != 2) {
91 pr_err("aoe: invalid device specification %s\n", buf);
92 return -EINVAL;
93 }
94 d = aoedev_by_aoeaddr(major, minor);
95 if (!d)
96 return -EINVAL;
97 spin_lock_irqsave(&d->lock, flags);
98 aoecmd_cleanslate(d);
99 loop:
100 skb = aoecmd_ata_id(d);
101 spin_unlock_irqrestore(&d->lock, flags);
102 /* try again if we are able to sleep a bit,
103 * otherwise give up this revalidation
104 */
105 if (!skb && !msleep_interruptible(200)) {
106 spin_lock_irqsave(&d->lock, flags);
107 goto loop;
108 }
109 aoedev_put(d);
110 if (skb) {
111 struct sk_buff_head queue;
112 __skb_queue_head_init(&queue);
113 __skb_queue_tail(&queue, skb);
114 aoenet_xmit(&queue);
115 }
116 aoecmd_cfg(major, minor);
117 return 0;
118 }
119
120 void
121 aoechr_error(char *msg)
122 {
123 struct ErrMsg *em;
124 char *mp;
125 ulong flags, n;
126
127 n = strlen(msg);
128
129 spin_lock_irqsave(&emsgs_lock, flags);
130
131 em = emsgs + emsgs_tail_idx;
132 if ((em->flags & EMFL_VALID)) {
133 bail: spin_unlock_irqrestore(&emsgs_lock, flags);
134 return;
135 }
136
137 mp = kmalloc(n, GFP_ATOMIC);
138 if (mp == NULL) {
139 printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n);
140 goto bail;
141 }
142
143 memcpy(mp, msg, n);
144 em->msg = mp;
145 em->flags |= EMFL_VALID;
146 em->len = n;
147
148 emsgs_tail_idx++;
149 emsgs_tail_idx %= ARRAY_SIZE(emsgs);
150
151 spin_unlock_irqrestore(&emsgs_lock, flags);
152
153 if (nblocked_emsgs_readers)
154 complete(&emsgs_comp);
155 }
156
157 static ssize_t
158 aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp)
159 {
160 int ret = -EINVAL;
161
162 switch ((unsigned long) filp->private_data) {
163 default:
164 printk(KERN_INFO "aoe: can't write to that file.\n");
165 break;
166 case MINOR_DISCOVER:
167 ret = discover();
168 break;
169 case MINOR_INTERFACES:
170 ret = interfaces(buf, cnt);
171 break;
172 case MINOR_REVALIDATE:
173 ret = revalidate(buf, cnt);
174 break;
175 case MINOR_FLUSH:
176 ret = aoedev_flush(buf, cnt);
177 }
178 if (ret == 0)
179 ret = cnt;
180 return ret;
181 }
182
183 static int
184 aoechr_open(struct inode *inode, struct file *filp)
185 {
186 int n, i;
187
188 mutex_lock(&aoechr_mutex);
189 n = iminor(inode);
190 filp->private_data = (void *) (unsigned long) n;
191
192 for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
193 if (chardevs[i].minor == n) {
194 mutex_unlock(&aoechr_mutex);
195 return 0;
196 }
197 mutex_unlock(&aoechr_mutex);
198 return -EINVAL;
199 }
200
/* Release (close) hook: the control devices hold no per-open state. */
static int
aoechr_rel(struct inode *inode, struct file *filp)
{
	return 0;
}
206
/*
 * Read one queued error message from the "err" device.  Blocks until a
 * message is available unless the file was opened with O_NDELAY.  Each
 * successful read consumes exactly one whole message; if the caller's
 * buffer is smaller than the next message, -EAGAIN is returned and the
 * message stays queued.
 *
 * Reads on any minor other than MINOR_ERR fail with -EFAULT.
 */
static ssize_t
aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
{
	unsigned long n;
	char *mp;
	struct ErrMsg *em;
	ssize_t len;
	ulong flags;

	n = (unsigned long) filp->private_data;
	if (n != MINOR_ERR)
		return -EFAULT;

	spin_lock_irqsave(&emsgs_lock, flags);

	for (;;) {
		em = emsgs + emsgs_head_idx;
		if ((em->flags & EMFL_VALID) != 0)
			break;
		if (filp->f_flags & O_NDELAY) {
			spin_unlock_irqrestore(&emsgs_lock, flags);
			return -EAGAIN;
		}
		/* register as a blocked reader BEFORE dropping the lock so
		 * aoechr_error() knows to complete emsgs_comp */
		nblocked_emsgs_readers++;

		spin_unlock_irqrestore(&emsgs_lock, flags);

		/* sleep without the spinlock held */
		n = wait_for_completion_interruptible(&emsgs_comp);

		spin_lock_irqsave(&emsgs_lock, flags);

		nblocked_emsgs_readers--;

		if (n) {
			/* interrupted by a signal */
			spin_unlock_irqrestore(&emsgs_lock, flags);
			return -ERESTARTSYS;
		}
	}
	if (em->len > cnt) {
		/* caller's buffer too small: leave the message queued */
		spin_unlock_irqrestore(&emsgs_lock, flags);
		return -EAGAIN;
	}
	/* detach the message from the ring while still under the lock */
	mp = em->msg;
	len = em->len;
	em->msg = NULL;
	em->flags &= ~EMFL_VALID;

	emsgs_head_idx++;
	emsgs_head_idx %= ARRAY_SIZE(emsgs);

	spin_unlock_irqrestore(&emsgs_lock, flags);

	/* copy to user space and free outside the spinlock */
	n = copy_to_user(buf, mp, len);
	kfree(mp);
	return n == 0 ? len : -EFAULT;
}
263
/* File operations shared by all AoE control character devices */
static const struct file_operations aoe_fops = {
	.write = aoechr_write,
	.read = aoechr_read,
	.open = aoechr_open,
	.release = aoechr_rel,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};
272
/* Name the device nodes "etherd/<name>" so they land under /dev/etherd/ */
static char *aoe_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
}
277
278 int __init
279 aoechr_init(void)
280 {
281 int n, i;
282
283 n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
284 if (n < 0) {
285 printk(KERN_ERR "aoe: can't register char device\n");
286 return n;
287 }
288 init_completion(&emsgs_comp);
289 spin_lock_init(&emsgs_lock);
290 aoe_class = class_create(THIS_MODULE, "aoe");
291 if (IS_ERR(aoe_class)) {
292 unregister_chrdev(AOE_MAJOR, "aoechr");
293 return PTR_ERR(aoe_class);
294 }
295 aoe_class->devnode = aoe_devnode;
296
297 for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
298 device_create(aoe_class, NULL,
299 MKDEV(AOE_MAJOR, chardevs[i].minor), NULL,
300 chardevs[i].name);
301
302 return 0;
303 }
304
305 void
306 aoechr_exit(void)
307 {
308 int i;
309
310 for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
311 device_destroy(aoe_class, MKDEV(AOE_MAJOR, chardevs[i].minor));
312 class_destroy(aoe_class);
313 unregister_chrdev(AOE_MAJOR, "aoechr");
314 }
315
This page took 0.054209 seconds and 5 git commands to generate.