aoe: manipulate aoedev network stats under lock
[deliverable/linux.git] drivers/block/aoe/aoecmd.c
fea05a26 1/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
2/*
3 * aoecmd.c
4 * Filesystem request handling methods
5 */
6
04b3ab52 7#include <linux/ata.h>
5a0e3ad6 8#include <linux/slab.h>
9#include <linux/hdreg.h>
10#include <linux/blkdev.h>
11#include <linux/skbuff.h>
12#include <linux/netdevice.h>
3ae1c24e 13#include <linux/genhd.h>
68e0d42f 14#include <linux/moduleparam.h>
15#include <linux/workqueue.h>
16#include <linux/kthread.h>
881d966b 17#include <net/net_namespace.h>
475172fb 18#include <asm/unaligned.h>
896831f5 19#include <linux/uio.h>
20#include "aoe.h"
21
22#define MAXIOC (8192) /* default meant to avoid most soft lockups */
23
24static void ktcomplete(struct frame *, struct sk_buff *);
25
26static struct buf *nextbuf(struct aoedev *);
27
28static int aoe_deadsecs = 60 * 3;
29module_param(aoe_deadsecs, int, 0644);
30MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");
1da177e4 31
32static int aoe_maxout = 16;
33module_param(aoe_maxout, int, 0644);
34MODULE_PARM_DESC(aoe_maxout,
35 "Only aoe_maxout outstanding packets for every MAC on eX.Y.");
36
37static wait_queue_head_t ktiowq;
38static struct ktstate kts;
39
40/* io completion queue */
41static struct {
42 struct list_head head;
43 spinlock_t lock;
44} iocq;
45
68e0d42f 46static struct sk_buff *
e407a7f6 47new_skb(ulong len)
48{
49 struct sk_buff *skb;
50
51 skb = alloc_skb(len, GFP_ATOMIC);
52 if (skb) {
459a98ed 53 skb_reset_mac_header(skb);
c1d2bbe1 54 skb_reset_network_header(skb);
1da177e4 55 skb->protocol = __constant_htons(ETH_P_AOE);
8babe8cc 56 skb_checksum_none_assert(skb);
57 }
58 return skb;
59}
60
61static struct frame *
62getframe_deferred(struct aoedev *d, u32 tag)
63{
64 struct list_head *head, *pos, *nx;
65 struct frame *f;
66
67 head = &d->rexmitq;
68 list_for_each_safe(pos, nx, head) {
69 f = list_entry(pos, struct frame, head);
70 if (f->tag == tag) {
71 list_del(pos);
72 return f;
73 }
74 }
75 return NULL;
76}
77
1da177e4 78static struct frame *
64a80f5a 79getframe(struct aoedev *d, u32 tag)
1da177e4 80{
81 struct frame *f;
82 struct list_head *head, *pos, *nx;
83 u32 n;
1da177e4 84
896831f5 85 n = tag % NFACTIVE;
64a80f5a 86 head = &d->factive[n];
87 list_for_each_safe(pos, nx, head) {
88 f = list_entry(pos, struct frame, head);
89 if (f->tag == tag) {
90 list_del(pos);
1da177e4 91 return f;
92 }
93 }
94 return NULL;
95}
96
97/*
98 * Leave the top bit clear so we have tagspace for userland.
99 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
100 * This driver reserves tag -1 to mean "unused frame."
101 */
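/* Illustration: a tag of 0x00420c35 carries sequence number 0x0042 from
 * d->lasttag in its upper half and xmit tick 0x0c35 (jiffies & 0xffff) in
 * its lower half; tsince() recovers a frame's age by subtracting those
 * low 16 bits from the current jiffies value.
 */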
102static int
64a80f5a 103newtag(struct aoedev *d)
104{
105 register ulong n;
106
107 n = jiffies & 0xffff;
64a80f5a 108 return n |= (++d->lasttag & 0x7fff) << 16;
109}
110
896831f5 111static u32
68e0d42f 112aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
1da177e4 113{
64a80f5a 114 u32 host_tag = newtag(d);
1da177e4 115
116 memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
117 memcpy(h->dst, t->addr, sizeof h->dst);
63e9cc5d 118 h->type = __constant_cpu_to_be16(ETH_P_AOE);
1da177e4 119 h->verfl = AOE_HVER;
63e9cc5d 120 h->major = cpu_to_be16(d->aoemajor);
121 h->minor = d->aoeminor;
122 h->cmd = AOECMD_ATA;
63e9cc5d 123 h->tag = cpu_to_be32(host_tag);
124
125 return host_tag;
126}
127
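/* Scatter a (possibly 48-bit) LBA into the six one-byte lba fields of the
 * ATA header, least significant byte first.
 */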
128static inline void
129put_lba(struct aoe_atahdr *ah, sector_t lba)
130{
131 ah->lba0 = lba;
132 ah->lba1 = lba >>= 8;
133 ah->lba2 = lba >>= 8;
134 ah->lba3 = lba >>= 8;
135 ah->lba4 = lba >>= 8;
136 ah->lba5 = lba >>= 8;
137}
138
3f0f0133 139static struct aoeif *
140ifrotate(struct aoetgt *t)
141{
142 struct aoeif *ifp;
143
144 ifp = t->ifp;
145 ifp++;
146 if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
147 ifp = t->ifs;
148 if (ifp->nd == NULL)
149 return NULL;
150 return t->ifp = ifp;
151}
152
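/* Per-device pool of transmit skbs.  A frame's skb can still be referenced
 * by the network layer after transmission, so skb_pool_get() only hands
 * back a pooled skb whose dataref has dropped to one, and otherwise
 * allocates a fresh one while the pool is below NSKBPOOLMAX.
 */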
153static void
154skb_pool_put(struct aoedev *d, struct sk_buff *skb)
155{
e9bb8fb0 156 __skb_queue_tail(&d->skbpool, skb);
157}
158
159static struct sk_buff *
160skb_pool_get(struct aoedev *d)
161{
e9bb8fb0 162 struct sk_buff *skb = skb_peek(&d->skbpool);
9bb237b6 163
9bb237b6 164 if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
e9bb8fb0 165 __skb_unlink(skb, &d->skbpool);
166 return skb;
167 }
168 if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
169 (skb = new_skb(ETH_ZLEN)))
9bb237b6 170 return skb;
e9bb8fb0 171
172 return NULL;
173}
174
175void
176aoe_freetframe(struct frame *f)
177{
178 struct aoetgt *t;
179
180 t = f->t;
181 f->buf = NULL;
182 f->bv = NULL;
183 f->r_skb = NULL;
184 list_add(&f->head, &t->ffree);
185}
186
68e0d42f 187static struct frame *
896831f5 188newtframe(struct aoedev *d, struct aoetgt *t)
1da177e4 189{
896831f5 190 struct frame *f;
9bb237b6 191 struct sk_buff *skb;
192 struct list_head *pos;
193
194 if (list_empty(&t->ffree)) {
195 if (t->falloc >= NSKBPOOLMAX*2)
196 return NULL;
197 f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
198 if (f == NULL)
199 return NULL;
200 t->falloc++;
201 f->t = t;
202 } else {
203 pos = t->ffree.next;
204 list_del(pos);
205 f = list_entry(pos, struct frame, head);
206 }
207
208 skb = f->skb;
209 if (skb == NULL) {
210 f->skb = skb = new_skb(ETH_ZLEN);
211 if (!skb) {
212bail: aoe_freetframe(f);
213 return NULL;
214 }
215 }
216
217 if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
218 skb = skb_pool_get(d);
219 if (skb == NULL)
220 goto bail;
221 skb_pool_put(d, f->skb);
222 f->skb = skb;
223 }
224
225 skb->truesize -= skb->data_len;
226 skb_shinfo(skb)->nr_frags = skb->data_len = 0;
227 skb_trim(skb, 0);
228 return f;
229}
230
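/* Pick a frame from the next usable target, round-robin from the last one
 * used.  Targets with maxout frames already outstanding, the current
 * "help" target (d->htgt), and targets without a usable interface are
 * skipped; if nothing at all is in flight, DEVFL_KICKME requests that the
 * block queue be kicked later.
 */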
231static struct frame *
232newframe(struct aoedev *d)
233{
234 struct frame *f;
235 struct aoetgt *t, **tt;
236 int totout = 0;
237
238 if (d->targets[0] == NULL) { /* shouldn't happen, but I'm paranoid */
239 printk(KERN_ERR "aoe: NULL TARGETS!\n");
240 return NULL;
241 }
896831f5 242 tt = d->tgt; /* last used target */
9bb237b6 243 for (;;) {
244 tt++;
245 if (tt >= &d->targets[NTARGETS] || !*tt)
246 tt = d->targets;
247 t = *tt;
248 totout += t->nout;
249 if (t->nout < t->maxout
9bb237b6 250 && t != d->htgt
251 && t->ifp->nd) {
252 f = newtframe(d, t);
253 if (f) {
896831f5 254 ifrotate(t);
3f0f0133 255 d->tgt = tt;
256 return f;
257 }
68e0d42f 258 }
896831f5 259 if (tt == d->tgt) /* we've looped and found nada */
9bb237b6 260 break;
261 }
262 if (totout == 0) {
263 d->kicked++;
264 d->flags |= DEVFL_KICKME;
9bb237b6 265 }
266 return NULL;
267}
268
269static void
270skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
271{
272 int frag = 0;
273 ulong fcnt;
274loop:
275 fcnt = bv->bv_len - (off - bv->bv_offset);
276 if (fcnt > cnt)
277 fcnt = cnt;
278 skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
279 cnt -= fcnt;
280 if (cnt <= 0)
281 return;
282 bv++;
283 off = bv->bv_offset;
284 goto loop;
285}
286
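/* File an active frame in its device's factive bucket, hashed by tag, so
 * getframe() can find it quickly when the response arrives.
 */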
287static void
288fhash(struct frame *f)
289{
64a80f5a 290 struct aoedev *d = f->t->d;
291 u32 n;
292
293 n = f->tag % NFACTIVE;
64a80f5a 294 list_add_tail(&f->head, &d->factive[n]);
295}
296
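/* Build and send one ATA read/write frame for the next buf, covering up to
 * maxbcnt bytes and advancing the buf's bio_vec cursor.  The frame keeps
 * its own skb for possible retransmission; a clone is what actually goes
 * out on the wire.
 */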
297static int
298aoecmd_ata_rw(struct aoedev *d)
299{
300 struct frame *f;
301 struct aoe_hdr *h;
302 struct aoe_atahdr *ah;
303 struct buf *buf;
68e0d42f 304 struct aoetgt *t;
1da177e4 305 struct sk_buff *skb;
69cf2d85 306 struct sk_buff_head queue;
3d5b0605 307 ulong bcnt, fbcnt;
308 char writebit, extbit;
309
310 writebit = 0x10;
311 extbit = 0x4;
312
313 buf = nextbuf(d);
314 if (buf == NULL)
315 return 0;
896831f5 316 f = newframe(d);
317 if (f == NULL)
318 return 0;
319 t = *d->tgt;
3f0f0133 320 bcnt = d->maxbcnt;
321 if (bcnt == 0)
322 bcnt = DEFAULTBCNT;
323 if (bcnt > buf->resid)
324 bcnt = buf->resid;
325 fbcnt = bcnt;
326 f->bv = buf->bv;
327 f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
328 do {
329 if (fbcnt < buf->bv_resid) {
330 buf->bv_resid -= fbcnt;
331 buf->resid -= fbcnt;
332 break;
333 }
334 fbcnt -= buf->bv_resid;
335 buf->resid -= buf->bv_resid;
336 if (buf->resid == 0) {
69cf2d85 337 d->ip.buf = NULL;
338 break;
339 }
340 buf->bv++;
341 buf->bv_resid = buf->bv->bv_len;
342 WARN_ON(buf->bv_resid == 0);
343 } while (fbcnt);
344
1da177e4 345 /* initialize the headers & frame */
e407a7f6 346 skb = f->skb;
abdbf94d 347 h = (struct aoe_hdr *) skb_mac_header(skb);
1da177e4 348 ah = (struct aoe_atahdr *) (h+1);
349 skb_put(skb, sizeof *h + sizeof *ah);
350 memset(h, 0, skb->len);
68e0d42f 351 f->tag = aoehdr_atainit(d, t, h);
896831f5 352 fhash(f);
68e0d42f 353 t->nout++;
354 f->waited = 0;
355 f->buf = buf;
19bf2635 356 f->bcnt = bcnt;
68e0d42f 357 f->lba = buf->sector;
358
359 /* set up ata header */
360 ah->scnt = bcnt >> 9;
68e0d42f 361 put_lba(ah, buf->sector);
362 if (d->flags & DEVFL_EXT) {
363 ah->aflags |= AOEAFL_EXT;
364 } else {
365 extbit = 0;
366 ah->lba3 &= 0x0f;
367 ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
368 }
1da177e4 369 if (bio_data_dir(buf->bio) == WRITE) {
3d5b0605 370 skb_fillup(skb, f->bv, f->bv_off, bcnt);
1da177e4 371 ah->aflags |= AOEAFL_WRITE;
372 skb->len += bcnt;
373 skb->data_len = bcnt;
3d5b0605 374 skb->truesize += bcnt;
68e0d42f 375 t->wpkts++;
1da177e4 376 } else {
68e0d42f 377 t->rpkts++;
1da177e4 378 writebit = 0;
379 }
380
04b3ab52 381 ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
382
383 /* mark all tracking fields and load out */
384 buf->nframesout += 1;
1da177e4 385 buf->sector += bcnt >> 9;
1da177e4 386
68e0d42f 387 skb->dev = t->ifp->nd;
4f51dc5e 388 skb = skb_clone(skb, GFP_ATOMIC);
389 if (skb) {
390 __skb_queue_head_init(&queue);
391 __skb_queue_tail(&queue, skb);
392 aoenet_xmit(&queue);
393 }
68e0d42f 394 return 1;
395}
396
397/* some callers cannot sleep, and they can call this function,
398 * transmitting the packets later, when interrupts are on
399 */
400static void
401aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
402{
403 struct aoe_hdr *h;
404 struct aoe_cfghdr *ch;
e9bb8fb0 405 struct sk_buff *skb;
406 struct net_device *ifp;
407
408 rcu_read_lock();
409 for_each_netdev_rcu(&init_net, ifp) {
410 dev_hold(ifp);
411 if (!is_aoe_netif(ifp))
7562f876 412 goto cont;
3ae1c24e 413
e407a7f6 414 skb = new_skb(sizeof *h + sizeof *ch);
3ae1c24e 415 if (skb == NULL) {
a12c93f0 416 printk(KERN_INFO "aoe: skb alloc failure\n");
7562f876 417 goto cont;
3ae1c24e 418 }
19900cde 419 skb_put(skb, sizeof *h + sizeof *ch);
e407a7f6 420 skb->dev = ifp;
e9bb8fb0 421 __skb_queue_tail(queue, skb);
abdbf94d 422 h = (struct aoe_hdr *) skb_mac_header(skb);
423 memset(h, 0, sizeof *h + sizeof *ch);
424
425 memset(h->dst, 0xff, sizeof h->dst);
426 memcpy(h->src, ifp->dev_addr, sizeof h->src);
427 h->type = __constant_cpu_to_be16(ETH_P_AOE);
428 h->verfl = AOE_HVER;
429 h->major = cpu_to_be16(aoemajor);
430 h->minor = aoeminor;
431 h->cmd = AOECMD_CFG;
432
433cont:
434 dev_put(ifp);
3ae1c24e 435 }
840a185d 436 rcu_read_unlock();
437}
438
1da177e4 439static void
896831f5 440resend(struct aoedev *d, struct frame *f)
441{
442 struct sk_buff *skb;
69cf2d85 443 struct sk_buff_head queue;
1da177e4 444 struct aoe_hdr *h;
19bf2635 445 struct aoe_atahdr *ah;
896831f5 446 struct aoetgt *t;
1da177e4
LT
447 char buf[128];
448 u32 n;
1da177e4 449
896831f5 450 t = f->t;
64a80f5a 451 n = newtag(d);
68e0d42f 452 skb = f->skb;
453 if (ifrotate(t) == NULL) {
454 /* probably can't happen, but set it up to fail anyway */
455 pr_info("aoe: resend: no interfaces to rotate to.\n");
456 ktcomplete(f, NULL);
457 return;
458 }
459 h = (struct aoe_hdr *) skb_mac_header(skb);
460 ah = (struct aoe_atahdr *) (h+1);
461
462 snprintf(buf, sizeof buf,
411c41ee 463 "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
68e0d42f 464 "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
411c41ee 465 h->src, h->dst, t->nout);
466 aoechr_error(buf);
467
1da177e4 468 f->tag = n;
896831f5 469 fhash(f);
63e9cc5d 470 h->tag = cpu_to_be32(n);
471 memcpy(h->dst, t->addr, sizeof h->dst);
472 memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
473
68e0d42f 474 skb->dev = t->ifp->nd;
475 skb = skb_clone(skb, GFP_ATOMIC);
476 if (skb == NULL)
477 return;
478 __skb_queue_head_init(&queue);
479 __skb_queue_tail(&queue, skb);
480 aoenet_xmit(&queue);
481}
482
483static int
896831f5 484tsince(u32 tag)
485{
486 int n;
487
488 n = jiffies & 0xffff;
489 n -= tag & 0xffff;
490 if (n < 0)
491 n += 1<<16;
492 return n;
493}
494
495static struct aoeif *
496getif(struct aoetgt *t, struct net_device *nd)
497{
498 struct aoeif *p, *e;
499
500 p = t->ifs;
501 e = p + NAOEIFS;
502 for (; p < e; p++)
503 if (p->nd == nd)
504 return p;
505 return NULL;
506}
507
508static void
509ejectif(struct aoetgt *t, struct aoeif *ifp)
510{
511 struct aoeif *e;
1b86fda9 512 struct net_device *nd;
513 ulong n;
514
1b86fda9 515 nd = ifp->nd;
516 e = t->ifs + NAOEIFS - 1;
517 n = (e - ifp) * sizeof *ifp;
518 memmove(ifp, ifp+1, n);
519 e->nd = NULL;
1b86fda9 520 dev_put(nd);
521}
522
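/* Reissue every frame still outstanding on the struggling "help" target
 * (d->htgt) through some other target, then clear its interfaces so
 * newframe() stops choosing it.
 */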
523static int
524sthtith(struct aoedev *d)
525{
526 struct frame *f, *nf;
527 struct list_head *nx, *pos, *head;
68e0d42f 528 struct sk_buff *skb;
529 struct aoetgt *ht = d->htgt;
530 int i;
531
532 for (i = 0; i < NFACTIVE; i++) {
64a80f5a 533 head = &d->factive[i];
534 list_for_each_safe(pos, nx, head) {
535 f = list_entry(pos, struct frame, head);
536 if (f->t != ht)
537 continue;
538
539 nf = newframe(d);
540 if (!nf)
541 return 0;
542
543 /* remove frame from active list */
544 list_del(pos);
545
546 /* reassign all pertinent bits to new outbound frame */
547 skb = nf->skb;
548 nf->skb = f->skb;
549 nf->buf = f->buf;
550 nf->bcnt = f->bcnt;
551 nf->lba = f->lba;
552 nf->bv = f->bv;
553 nf->bv_off = f->bv_off;
554 nf->waited = 0;
555 f->skb = skb;
556 aoe_freetframe(f);
557 ht->nout--;
558 nf->t->nout++;
559 resend(d, nf);
560 }
68e0d42f 561 }
562 /* We've cleaned up the outstanding so take away his
563 * interfaces so he won't be used. We should remove him from
564 * the target array here, but cleaning up a target is
565 * involved. PUNT!
566 */
567 memset(ht->ifs, 0, sizeof ht->ifs);
568 d->htgt = NULL;
569 return 1;
570}
571
572static void
573rexmit_deferred(struct aoedev *d)
574{
575 struct aoetgt *t;
576 struct frame *f;
577 struct list_head *pos, *nx, *head;
578
579 head = &d->rexmitq;
580 list_for_each_safe(pos, nx, head) {
581 f = list_entry(pos, struct frame, head);
582 t = f->t;
583 if (t->nout >= t->maxout)
584 continue;
585 list_del(pos);
586 t->nout++;
587 resend(d, f);
588 }
589}
590
591static void
592rexmit_timer(ulong vp)
593{
594 struct aoedev *d;
3a0c40d2 595 struct aoetgt *t;
68e0d42f 596 struct aoeif *ifp;
597 struct frame *f;
598 struct list_head *head, *pos, *nx;
599 LIST_HEAD(flist);
600 register long timeout;
601 ulong flags, n;
896831f5 602 int i;
1da177e4
LT
603
604 d = (struct aoedev *) vp;
1da177e4 605
606 spin_lock_irqsave(&d->lock, flags);
607
608 /* timeout based on observed timings and variations */
609 timeout = 2 * d->rttavg >> RTTSCALE;
610 timeout += 8 * d->rttdev >> RTTDSCALE;
611 if (timeout == 0)
612 timeout = 1;
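	/* That is, roughly twice the smoothed RTT plus eight mean deviations,
	 * in the spirit of the Jacobson/Karels retransmit timer; rttavg and
	 * rttdev are kept as scaled fixed-point values by calc_rttavg().
	 */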
1da177e4 613
1da177e4 614 if (d->flags & DEVFL_TKILL) {
1c6f3fca 615 spin_unlock_irqrestore(&d->lock, flags);
616 return;
617 }
618
619 /* collect all frames to rexmit into flist */
620 for (i = 0; i < NFACTIVE; i++) {
621 head = &d->factive[i];
622 list_for_each_safe(pos, nx, head) {
623 f = list_entry(pos, struct frame, head);
624 if (tsince(f->tag) < timeout)
625 break; /* end of expired frames */
626 /* move to flist for later processing */
627 list_move_tail(pos, &flist);
68e0d42f 628 }
64a80f5a 629 }
69cf2d85 630
631 /* process expired frames */
632 while (!list_empty(&flist)) {
633 pos = flist.next;
634 f = list_entry(pos, struct frame, head);
3a0c40d2 635 n = f->waited += tsince(f->tag);
636 n /= HZ;
637 if (n > aoe_deadsecs) {
638 /* Waited too long. Device failure.
639 * Hang all frames on first hash bucket for downdev
640 * to clean up.
641 */
64a80f5a 642 list_splice(&flist, &d->factive[0]);
896831f5 643 aoedev_downdev(d);
3a0c40d2 644 goto out;
896831f5 645 }
646
647 t = f->t;
648 if (n > aoe_deadsecs/2)
649 d->htgt = t; /* see if another target can help */
650
651 if (t->maxout != 1) {
652 t->ssthresh = t->maxout / 2;
653 t->maxout = 1;
654 }
655
656 ifp = getif(t, f->skb->dev);
657 if (ifp && ++ifp->lost > (t->nframes << 1)
658 && (ifp != t->ifs || t->ifs[1].nd)) {
659 ejectif(t, ifp);
660 ifp = NULL;
661 }
662 list_move_tail(pos, &d->rexmitq);
663 t->nout--;
896831f5 664 }
3a0c40d2 665 rexmit_deferred(d);
896831f5 666
3a0c40d2 667out:
69cf2d85 668 if ((d->flags & DEVFL_KICKME || d->htgt) && d->blkq) {
4f51dc5e 669 d->flags &= ~DEVFL_KICKME;
69cf2d85 670 d->blkq->request_fn(d->blkq);
4f51dc5e 671 }
1da177e4 672
673 d->timer.expires = jiffies + TIMERTICK;
674 add_timer(&d->timer);
675
676 spin_unlock_irqrestore(&d->lock, flags);
69cf2d85 677}
1da177e4 678
679static unsigned long
680rqbiocnt(struct request *r)
681{
682 struct bio *bio;
683 unsigned long n = 0;
684
685 __rq_for_each_bio(bio, r)
686 n++;
687 return n;
688}
689
690/* This can be removed if we are certain that no users of the block
691 * layer will ever use zero-count pages in bios. Otherwise we have to
692 * protect against the put_page sometimes done by the network layer.
693 *
694 * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
695 * discussion.
696 *
697 * We cannot use get_page in the workaround, because it insists on a
698 * positive page count as a precondition. So we use _count directly.
699 */
700static void
701bio_pageinc(struct bio *bio)
702{
703 struct bio_vec *bv;
704 struct page *page;
705 int i;
706
707 bio_for_each_segment(bv, bio, i) {
708 page = bv->bv_page;
709 /* Non-zero page count for non-head members of
710 * compound pages is no longer allowed by the kernel,
711 * but this has never been seen here.
712 */
713 if (unlikely(PageCompound(page)))
714 if (compound_trans_head(page) != page) {
715 pr_crit("page tail used for block I/O\n");
716 BUG();
717 }
718 atomic_inc(&page->_count);
719 }
720}
721
722static void
723bio_pagedec(struct bio *bio)
724{
725 struct bio_vec *bv;
726 int i;
727
728 bio_for_each_segment(bv, bio, i)
729 atomic_dec(&bv->bv_page->_count);
730}
731
732static void
733bufinit(struct buf *buf, struct request *rq, struct bio *bio)
734{
735 struct bio_vec *bv;
736
737 memset(buf, 0, sizeof(*buf));
738 buf->rq = rq;
739 buf->bio = bio;
740 buf->resid = bio->bi_size;
741 buf->sector = bio->bi_sector;
742 bio_pageinc(bio);
743 buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
744 buf->bv_resid = bv->bv_len;
745 WARN_ON(buf->bv_resid == 0);
746}
747
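/* Return the buf currently being carved up, or start the next one: pull
 * the next request off the block queue, remember its bio count in
 * rq->special for completion accounting, and wrap its next bio in a
 * freshly allocated buf.
 */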
748static struct buf *
749nextbuf(struct aoedev *d)
750{
751 struct request *rq;
752 struct request_queue *q;
753 struct buf *buf;
754 struct bio *bio;
755
756 q = d->blkq;
757 if (q == NULL)
758 return NULL; /* initializing */
759 if (d->ip.buf)
760 return d->ip.buf;
761 rq = d->ip.rq;
762 if (rq == NULL) {
763 rq = blk_peek_request(q);
764 if (rq == NULL)
765 return NULL;
766 blk_start_request(rq);
767 d->ip.rq = rq;
768 d->ip.nxbio = rq->bio;
769 rq->special = (void *) rqbiocnt(rq);
770 }
771 buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
772 if (buf == NULL) {
773 pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
774 return NULL;
775 }
776 bio = d->ip.nxbio;
777 bufinit(buf, rq, bio);
778 bio = bio->bi_next;
779 d->ip.nxbio = bio;
780 if (bio == NULL)
781 d->ip.rq = NULL;
782 return d->ip.buf = buf;
783}
784
785/* enters with d->lock held */
786void
787aoecmd_work(struct aoedev *d)
788{
789 if (d->htgt && !sthtith(d))
790 return;
3a0c40d2 791 rexmit_deferred(d);
792 while (aoecmd_ata_rw(d))
793 ;
794}
795
796/* this function performs work that has been deferred until sleeping is OK
797 */
798void
c4028958 799aoecmd_sleepwork(struct work_struct *work)
3ae1c24e 800{
c4028958 801 struct aoedev *d = container_of(work, struct aoedev, work);
802 struct block_device *bd;
803 u64 ssize;
804
805 if (d->flags & DEVFL_GDALLOC)
806 aoeblk_gdalloc(d);
807
808 if (d->flags & DEVFL_NEWSIZE) {
80795aef 809 ssize = get_capacity(d->gd);
3ae1c24e 810 bd = bdget_disk(d->gd, 0);
811 if (bd) {
812 mutex_lock(&bd->bd_inode->i_mutex);
813 i_size_write(bd->bd_inode, (loff_t)ssize<<9);
814 mutex_unlock(&bd->bd_inode->i_mutex);
815 bdput(bd);
816 }
b21faa25 817 spin_lock_irq(&d->lock);
818 d->flags |= DEVFL_UP;
819 d->flags &= ~DEVFL_NEWSIZE;
b21faa25 820 spin_unlock_irq(&d->lock);
821 }
822}
823
824static void
825ata_ident_fixstring(u16 *id, int ns)
826{
827 u16 s;
828
829 while (ns-- > 0) {
830 s = *id;
831 *id++ = s >> 8 | s << 8;
832 }
833}
834
1da177e4 835static void
68e0d42f 836ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
837{
838 u64 ssize;
839 u16 n;
840
841 /* word 83: command set supported */
f885f8d1 842 n = get_unaligned_le16(&id[83 << 1]);
843
844 /* word 86: command set/feature enabled */
f885f8d1 845 n |= get_unaligned_le16(&id[86 << 1]);
846
847 if (n & (1<<10)) { /* bit 10: LBA 48 */
848 d->flags |= DEVFL_EXT;
849
850 /* word 100: number lba48 sectors */
f885f8d1 851 ssize = get_unaligned_le64(&id[100 << 1]);
852
853 /* set as in ide-disk.c:init_idedisk_capacity */
854 d->geo.cylinders = ssize;
855 d->geo.cylinders /= (255 * 63);
856 d->geo.heads = 255;
857 d->geo.sectors = 63;
858 } else {
859 d->flags &= ~DEVFL_EXT;
860
861 /* number lba28 sectors */
f885f8d1 862 ssize = get_unaligned_le32(&id[60 << 1]);
863
864 /* NOTE: obsolete in ATA 6 */
865 d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
866 d->geo.heads = get_unaligned_le16(&id[55 << 1]);
867 d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
1da177e4 868 }
3ae1c24e 869
870 ata_ident_fixstring((u16 *) &id[10<<1], 10); /* serial */
871 ata_ident_fixstring((u16 *) &id[23<<1], 4); /* firmware */
872 ata_ident_fixstring((u16 *) &id[27<<1], 20); /* model */
873 memcpy(d->ident, id, sizeof(d->ident));
874
3ae1c24e 875 if (d->ssize != ssize)
1d75981a 876 printk(KERN_INFO
877 "aoe: %pm e%ld.%d v%04x has %llu sectors\n",
878 t->addr,
879 d->aoemajor, d->aoeminor,
880 d->fw_ver, (long long)ssize);
881 d->ssize = ssize;
882 d->geo.start = 0;
883 if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
884 return;
1da177e4 885 if (d->gd != NULL) {
80795aef 886 set_capacity(d->gd, ssize);
3ae1c24e 887 d->flags |= DEVFL_NEWSIZE;
68e0d42f 888 } else
3ae1c24e 889 d->flags |= DEVFL_GDALLOC;
1da177e4 890 schedule_work(&d->work);
891}
892
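/* Round-trip bookkeeping in the style of Jacobson & Karels: rttavg and
 * rttdev hold the smoothed RTT and its mean deviation as fixed-point
 * values kept left-shifted by RTTSCALE and RTTDSCALE bits.  The same path
 * opens the target's congestion window, growing maxout quickly up to
 * ssthresh and then by about one frame per window, never beyond nframes.
 */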
893static void
3a0c40d2 894calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
895{
896 register long n;
897
898 n = rtt;
899
900 /* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
901 n -= d->rttavg >> RTTSCALE;
902 d->rttavg += n;
903 if (n < 0)
904 n = -n;
905 n -= d->rttdev >> RTTDSCALE;
906 d->rttdev += n;
907
908 if (!t || t->maxout >= t->nframes)
909 return;
910 if (t->maxout < t->ssthresh)
911 t->maxout += 1;
912 else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
913 t->maxout += 1;
914 t->next_cwnd = t->maxout;
915 }
916}
917
918static struct aoetgt *
919gettgt(struct aoedev *d, char *addr)
920{
921 struct aoetgt **t, **e;
922
923 t = d->targets;
924 e = t + NTARGETS;
925 for (; t < e && *t; t++)
926 if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
927 return *t;
928 return NULL;
929}
930
3d5b0605 931static void
896831f5 932bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
933{
934 ulong fcnt;
935 char *p;
936 int soff = 0;
937loop:
938 fcnt = bv->bv_len - (off - bv->bv_offset);
939 if (fcnt > cnt)
940 fcnt = cnt;
941 p = page_address(bv->bv_page) + off;
942 skb_copy_bits(skb, soff, p, fcnt);
943 soff += fcnt;
944 cnt -= fcnt;
945 if (cnt <= 0)
946 return;
947 bv++;
948 off = bv->bv_offset;
949 goto loop;
950}
951
952void
953aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
954{
955 struct bio *bio;
956 int bok;
957 struct request_queue *q;
958
959 q = d->blkq;
960 if (rq == d->ip.rq)
961 d->ip.rq = NULL;
962 do {
963 bio = rq->bio;
964 bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
965 } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
966
967 /* cf. http://lkml.org/lkml/2006/10/31/28 */
968 if (!fastfail)
11cfb6ff 969 __blk_run_queue(q);
970}
971
972static void
973aoe_end_buf(struct aoedev *d, struct buf *buf)
974{
975 struct request *rq;
976 unsigned long n;
977
978 if (buf == d->ip.buf)
979 d->ip.buf = NULL;
980 rq = buf->rq;
981 bio_pagedec(buf->bio);
982 mempool_free(buf, d->bufpool);
983 n = (unsigned long) rq->special;
984 rq->special = (void *) --n;
985 if (n == 0)
986 aoe_end_request(d, rq, 0);
987}
988
3d5b0605 989static void
896831f5 990ktiocomplete(struct frame *f)
3d5b0605 991{
992 struct aoe_hdr *hin, *hout;
993 struct aoe_atahdr *ahin, *ahout;
994 struct buf *buf;
995 struct sk_buff *skb;
996 struct aoetgt *t;
997 struct aoeif *ifp;
998 struct aoedev *d;
999 long n;
3d5b0605 1000
896831f5 1001 if (f == NULL)
3d5b0605 1002 return;
1003
1004 t = f->t;
1005 d = t->d;
1006
1007 hout = (struct aoe_hdr *) skb_mac_header(f->skb);
1008 ahout = (struct aoe_atahdr *) (hout+1);
1009 buf = f->buf;
1010 skb = f->r_skb;
1011 if (skb == NULL)
1012 goto noskb; /* just fail the buf. */
1013
1014 hin = (struct aoe_hdr *) skb->data;
1015 skb_pull(skb, sizeof(*hin));
1016 ahin = (struct aoe_atahdr *) skb->data;
1017 skb_pull(skb, sizeof(*ahin));
1018 if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */
1019 pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
1020 ahout->cmdstat, ahin->cmdstat,
1021 d->aoemajor, d->aoeminor);
a04b41cd 1022noskb: if (buf)
69cf2d85 1023 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
896831f5 1024 goto badrsp;
3d5b0605 1025 }
1026
1027 n = ahout->scnt << 9;
1028 switch (ahout->cmdstat) {
1029 case ATA_CMD_PIO_READ:
1030 case ATA_CMD_PIO_READ_EXT:
1031 if (skb->len < n) {
1032 pr_err("aoe: runt data size in read. skb->len=%d need=%ld\n",
1033 skb->len, n);
69cf2d85 1034 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
1035 break;
1036 }
1037 bvcpy(f->bv, f->bv_off, skb, n);
1038 case ATA_CMD_PIO_WRITE:
1039 case ATA_CMD_PIO_WRITE_EXT:
1040 spin_lock_irq(&d->lock);
1041 ifp = getif(t, skb->dev);
3f0f0133 1042 if (ifp)
896831f5 1043 ifp->lost = 0;
1044 if (d->htgt == t) /* I'll help myself, thank you. */
1045 d->htgt = NULL;
1046 spin_unlock_irq(&d->lock);
1047 break;
1048 case ATA_CMD_ID_ATA:
1049 if (skb->len < 512) {
1050 pr_info("aoe: runt data size in ataid. skb->len=%d\n",
1051 skb->len);
1052 break;
1053 }
1054 if (skb_linearize(skb))
1055 break;
1056 spin_lock_irq(&d->lock);
1057 ataid_complete(d, t, skb->data);
1058 spin_unlock_irq(&d->lock);
1059 break;
1060 default:
1061 pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
1062 ahout->cmdstat,
1063 be16_to_cpu(get_unaligned(&hin->major)),
1064 hin->minor);
1065 }
1066badrsp:
1067 spin_lock_irq(&d->lock);
1068
1069 aoe_freetframe(f);
1070
1071 if (buf && --buf->nframesout == 0 && buf->resid == 0)
1072 aoe_end_buf(d, buf);
896831f5 1073
1074 aoecmd_work(d);
1075
1076 spin_unlock_irq(&d->lock);
1077 aoedev_put(d);
896831f5 1078 dev_kfree_skb(skb);
1079}
1080
1081/* Enters with iocq.lock held.
1082 * Returns true iff responses needing processing remain.
1083 */
1084static int
1085ktio(void)
1086{
1087 struct frame *f;
1088 struct list_head *pos;
1089 int i;
1090
1091 for (i = 0; ; ++i) {
1092 if (i == MAXIOC)
1093 return 1;
1094 if (list_empty(&iocq.head))
1095 return 0;
1096 pos = iocq.head.next;
1097 list_del(pos);
1098 spin_unlock_irq(&iocq.lock);
1099 f = list_entry(pos, struct frame, head);
1100 ktiocomplete(f);
1101 spin_lock_irq(&iocq.lock);
1102 }
1103}
1104
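/* Worker loop behind struct ktstate: call k->fn() with k->lock held for as
 * long as it reports more work (ktio() caps a single pass at MAXIOC
 * completions), otherwise sleep on k->waitq until ktcomplete() queues
 * another response and wakes the thread.
 */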
1105static int
1106kthread(void *vp)
1107{
1108 struct ktstate *k;
1109 DECLARE_WAITQUEUE(wait, current);
1110 int more;
1111
1112 k = vp;
1113 current->flags |= PF_NOFREEZE;
1114 set_user_nice(current, -10);
1115 complete(&k->rendez); /* tell spawner we're running */
1116 do {
1117 spin_lock_irq(k->lock);
1118 more = k->fn();
1119 if (!more) {
1120 add_wait_queue(k->waitq, &wait);
1121 __set_current_state(TASK_INTERRUPTIBLE);
1122 }
1123 spin_unlock_irq(k->lock);
1124 if (!more) {
1125 schedule();
1126 remove_wait_queue(k->waitq, &wait);
1127 } else
1128 cond_resched();
1129 } while (!kthread_should_stop());
1130 complete(&k->rendez); /* tell spawner we're stopping */
1131 return 0;
1132}
1133
eb086ec5 1134void
1135aoe_ktstop(struct ktstate *k)
1136{
1137 kthread_stop(k->task);
1138 wait_for_completion(&k->rendez);
1139}
1140
eb086ec5 1141int
1142aoe_ktstart(struct ktstate *k)
1143{
1144 struct task_struct *task;
1145
1146 init_completion(&k->rendez);
1147 task = kthread_run(kthread, k, k->name);
1148 if (task == NULL || IS_ERR(task))
1149 return -ENOMEM;
1150 k->task = task;
1151 wait_for_completion(&k->rendez); /* allow kthread to start */
1152 init_completion(&k->rendez); /* for waiting for exit later */
1153 return 0;
1154}
1155
1156/* pass it off to kthreads for processing */
1157static void
1158ktcomplete(struct frame *f, struct sk_buff *skb)
1159{
1160 ulong flags;
1161
1162 f->r_skb = skb;
1163 spin_lock_irqsave(&iocq.lock, flags);
1164 list_add_tail(&f->head, &iocq.head);
1165 spin_unlock_irqrestore(&iocq.lock, flags);
1166 wake_up(&ktiowq);
1167}
1168
1169struct sk_buff *
1170aoecmd_ata_rsp(struct sk_buff *skb)
1171{
1172 struct aoedev *d;
896831f5 1173 struct aoe_hdr *h;
1da177e4 1174 struct frame *f;
896831f5 1175 u32 n;
1176 ulong flags;
1177 char ebuf[128];
32465c65 1178 u16 aoemajor;
1179
1180 h = (struct aoe_hdr *) skb->data;
1181 aoemajor = be16_to_cpu(get_unaligned(&h->major));
0c966214 1182 d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
1183 if (d == NULL) {
1184 snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
1185 "for unknown device %d.%d\n",
896831f5 1186 aoemajor, h->minor);
1da177e4 1187 aoechr_error(ebuf);
896831f5 1188 return skb;
1189 }
1190
1191 spin_lock_irqsave(&d->lock, flags);
1192
896831f5 1193 n = be32_to_cpu(get_unaligned(&h->tag));
64a80f5a 1194 f = getframe(d, n);
1195 if (f) {
1196 calc_rttavg(d, f->t, tsince(n));
1197 f->t->nout--;
1198 } else {
1199 f = getframe_deferred(d, n);
1200 if (f) {
1201 calc_rttavg(d, NULL, tsince(n));
1202 } else {
1203 calc_rttavg(d, NULL, tsince(n));
1204 spin_unlock_irqrestore(&d->lock, flags);
1205 aoedev_put(d);
1206 snprintf(ebuf, sizeof(ebuf),
2292a7e1 1207 "%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
1208 "unexpected rsp",
1209 get_unaligned_be16(&h->major),
1210 h->minor,
1211 get_unaligned_be32(&h->tag),
1212 jiffies,
1213 h->src,
1214 h->dst);
1215 aoechr_error(ebuf);
1216 return skb;
1217 }
1da177e4 1218 }
1da177e4 1219 aoecmd_work(d);
1220
1221 spin_unlock_irqrestore(&d->lock, flags);
1222
1223 ktcomplete(f, skb);
1224
1225 /*
1226 * Note here that we do not perform an aoedev_put, as we are
1227 * leaving this reference for the ktio to release.
1228 */
1229 return NULL;
1230}
1231
1232void
1233aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
1234{
e9bb8fb0 1235 struct sk_buff_head queue;
1da177e4 1236
1237 __skb_queue_head_init(&queue);
1238 aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
1239 aoenet_xmit(&queue);
1da177e4 1240}
a04b41cd 1241
68e0d42f 1242struct sk_buff *
1243aoecmd_ata_id(struct aoedev *d)
1244{
1245 struct aoe_hdr *h;
1246 struct aoe_atahdr *ah;
1247 struct frame *f;
1248 struct sk_buff *skb;
68e0d42f 1249 struct aoetgt *t;
1da177e4 1250
896831f5 1251 f = newframe(d);
68e0d42f 1252 if (f == NULL)
1da177e4 1253 return NULL;
1254
1255 t = *d->tgt;
1256
1257 /* initialize the headers & frame */
e407a7f6 1258 skb = f->skb;
abdbf94d 1259 h = (struct aoe_hdr *) skb_mac_header(skb);
1da177e4 1260 ah = (struct aoe_atahdr *) (h+1);
1261 skb_put(skb, sizeof *h + sizeof *ah);
1262 memset(h, 0, skb->len);
68e0d42f 1263 f->tag = aoehdr_atainit(d, t, h);
896831f5 1264 fhash(f);
68e0d42f 1265 t->nout++;
1da177e4 1266 f->waited = 0;
1da177e4 1267
1268 /* set up ata header */
1269 ah->scnt = 1;
04b3ab52 1270 ah->cmdstat = ATA_CMD_ID_ATA;
1271 ah->lba3 = 0xa0;
1272
68e0d42f 1273 skb->dev = t->ifp->nd;
1da177e4 1274
1275 d->rttavg = RTTAVG_INIT;
1276 d->rttdev = RTTDEV_INIT;
1da177e4 1277 d->timer.function = rexmit_timer;
1da177e4 1278
4f51dc5e 1279 return skb_clone(skb, GFP_ATOMIC);
1da177e4 1280}
a04b41cd 1281
1282static struct aoetgt *
1283addtgt(struct aoedev *d, char *addr, ulong nframes)
1284{
1285 struct aoetgt *t, **tt, **te;
1286
1287 tt = d->targets;
1288 te = tt + NTARGETS;
1289 for (; tt < te && *tt; tt++)
1290 ;
1291
1292 if (tt == te) {
1293 printk(KERN_INFO
1294 "aoe: device addtgt failure; too many targets\n");
68e0d42f 1295 return NULL;
578c4aa0 1296 }
1297 t = kzalloc(sizeof(*t), GFP_ATOMIC);
1298 if (!t) {
578c4aa0 1299 printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
1300 return NULL;
1301 }
1302
896831f5 1303 d->ntargets++;
68e0d42f 1304 t->nframes = nframes;
896831f5 1305 t->d = d;
1306 memcpy(t->addr, addr, sizeof t->addr);
1307 t->ifp = t->ifs;
3a0c40d2 1308 aoecmd_wreset(t);
896831f5 1309 INIT_LIST_HEAD(&t->ffree);
68e0d42f 1310 return *tt = t;
1311}
1312
1313static void
1314setdbcnt(struct aoedev *d)
1315{
1316 struct aoetgt **t, **e;
1317 int bcnt = 0;
1318
1319 t = d->targets;
1320 e = t + NTARGETS;
1321 for (; t < e && *t; t++)
1322 if (bcnt == 0 || bcnt > (*t)->minbcnt)
1323 bcnt = (*t)->minbcnt;
1324 if (bcnt != d->maxbcnt) {
1325 d->maxbcnt = bcnt;
1326 pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
1327 d->aoemajor, d->aoeminor, bcnt);
1328 }
1329}
1330
1331static void
1332setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
1333{
1334 struct aoedev *d;
1335 struct aoeif *p, *e;
1336 int minbcnt;
1337
1338 d = t->d;
1339 minbcnt = bcnt;
1340 p = t->ifs;
1341 e = p + NAOEIFS;
1342 for (; p < e; p++) {
1343 if (p->nd == NULL)
1344 break; /* end of the valid interfaces */
1345 if (p->nd == nd) {
1346 p->bcnt = bcnt; /* we're updating */
1347 nd = NULL;
1348 } else if (minbcnt > p->bcnt)
1349 minbcnt = p->bcnt; /* find the min interface */
1350 }
1351 if (nd) {
1352 if (p == e) {
1353 pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
1354 return;
1355 }
1b86fda9 1356 dev_hold(nd);
1357 p->nd = nd;
1358 p->bcnt = bcnt;
1359 }
1360 t->minbcnt = minbcnt;
1361 setdbcnt(d);
1362}
1363
1364void
1365aoecmd_cfg_rsp(struct sk_buff *skb)
1366{
1367 struct aoedev *d;
1368 struct aoe_hdr *h;
1369 struct aoe_cfghdr *ch;
68e0d42f 1370 struct aoetgt *t;
0c966214 1371 ulong flags, aoemajor;
1da177e4 1372 struct sk_buff *sl;
69cf2d85 1373 struct sk_buff_head queue;
19bf2635 1374 u16 n;
1da177e4 1375
69cf2d85 1376 sl = NULL;
abdbf94d 1377 h = (struct aoe_hdr *) skb_mac_header(skb);
1378 ch = (struct aoe_cfghdr *) (h+1);
1379
1380 /*
1381 * Enough people have their dip switches set backwards to
1382 * warrant a loud message for this special case.
1383 */
823ed72e 1384 aoemajor = get_unaligned_be16(&h->major);
1da177e4 1385 if (aoemajor == 0xfff) {
a12c93f0 1386 printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
6bb6285f 1387 "Check shelf dip switches.\n");
1388 return;
1389 }
1390 if (aoemajor == 0xffff) {
1391 pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
0c966214 1392 aoemajor, (int) h->minor);
1393 return;
1394 }
1395 if (h->minor == 0xff) {
1396 pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
1397 aoemajor, (int) h->minor);
1398 return;
1399 }
1400
19bf2635 1401 n = be16_to_cpu(ch->bufcnt);
1402 if (n > aoe_maxout) /* keep it reasonable */
1403 n = aoe_maxout;
1da177e4 1404
1405 d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
1406 if (d == NULL) {
1407 pr_info("aoe: device allocation failure\n");
1408 return;
1409 }
1410
1411 spin_lock_irqsave(&d->lock, flags);
1412
68e0d42f 1413 t = gettgt(d, h->src);
1414 if (t) {
1415 t->nframes = n;
1416 if (n < t->maxout)
3a0c40d2 1417 aoecmd_wreset(t);
1b8a1636 1418 } else {
68e0d42f 1419 t = addtgt(d, h->src, n);
1420 if (!t)
1421 goto bail;
68e0d42f 1422 }
1423 n = skb->dev->mtu;
1424 n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
1425 n /= 512;
1426 if (n > ch->scnt)
1427 n = ch->scnt;
1428 n = n ? n * 512 : DEFAULTBCNT;
1429 setifbcnt(t, skb->dev, n);
1430
1431 /* don't change users' perspective */
1432 if (d->nopen == 0) {
1433 d->fw_ver = be16_to_cpu(ch->fwver);
1434 sl = aoecmd_ata_id(d);
1da177e4 1435 }
69cf2d85 1436bail:
1da177e4 1437 spin_unlock_irqrestore(&d->lock, flags);
69cf2d85 1438 aoedev_put(d);
e9bb8fb0 1439 if (sl) {
1440 __skb_queue_head_init(&queue);
1441 __skb_queue_tail(&queue, sl);
1442 aoenet_xmit(&queue);
1443 }
1444}
1445
1446void
1447aoecmd_wreset(struct aoetgt *t)
1448{
1449 t->maxout = 1;
1450 t->ssthresh = t->nframes / 2;
1451 t->next_cwnd = t->nframes;
1452}
1453
1454void
1455aoecmd_cleanslate(struct aoedev *d)
1456{
1457 struct aoetgt **t, **te;
68e0d42f 1458
1459 d->rttavg = RTTAVG_INIT;
1460 d->rttdev = RTTDEV_INIT;
3f0f0133 1461 d->maxbcnt = 0;
1462
1463 t = d->targets;
1464 te = t + NTARGETS;
3f0f0133 1465 for (; t < te && *t; t++)
3a0c40d2 1466 aoecmd_wreset(*t);
68e0d42f 1467}
896831f5 1468
1469void
1470aoe_failbuf(struct aoedev *d, struct buf *buf)
1471{
1472 if (buf == NULL)
1473 return;
1474 buf->resid = 0;
1475 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
1476 if (buf->nframesout == 0)
1477 aoe_end_buf(d, buf);
1478}
1479
1480void
1481aoe_flush_iocq(void)
1482{
1483 struct frame *f;
1484 struct aoedev *d;
1485 LIST_HEAD(flist);
1486 struct list_head *pos;
1487 struct sk_buff *skb;
1488 ulong flags;
1489
1490 spin_lock_irqsave(&iocq.lock, flags);
1491 list_splice_init(&iocq.head, &flist);
1492 spin_unlock_irqrestore(&iocq.lock, flags);
1493 while (!list_empty(&flist)) {
1494 pos = flist.next;
1495 list_del(pos);
1496 f = list_entry(pos, struct frame, head);
1497 d = f->t->d;
1498 skb = f->r_skb;
1499 spin_lock_irqsave(&d->lock, flags);
1500 if (f->buf) {
1501 f->buf->nframesout--;
1502 aoe_failbuf(d, f->buf);
1503 }
1504 aoe_freetframe(f);
1505 spin_unlock_irqrestore(&d->lock, flags);
1506 dev_kfree_skb(skb);
69cf2d85 1507 aoedev_put(d);
1508 }
1509}
1510
1511int __init
1512aoecmd_init(void)
1513{
1514 INIT_LIST_HEAD(&iocq.head);
1515 spin_lock_init(&iocq.lock);
1516 init_waitqueue_head(&ktiowq);
1517 kts.name = "aoe_ktio";
1518 kts.fn = ktio;
1519 kts.waitq = &ktiowq;
1520 kts.lock = &iocq.lock;
1521 return aoe_ktstart(&kts);
1522}
1523
1524void
1525aoecmd_exit(void)
1526{
1527 aoe_ktstop(&kts);
69cf2d85 1528 aoe_flush_iocq();
896831f5 1529}