/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"

#define MAXIOC (8192)	/* default meant to avoid most soft lockups */

static void ktcomplete(struct frame *, struct sk_buff *);

static struct buf *nextbuf(struct aoedev *);

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 16;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");

static wait_queue_head_t ktiowq;
static struct ktstate kts;

/* io completion queue */
static struct {
	struct list_head head;
	spinlock_t lock;
} iocq;

static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb_checksum_none_assert(skb);
	}
	return skb;
}

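/* Find the outstanding frame carrying the given tag in the device's
 * active-frame hash, unlink it from its bucket, and return it.
 * Returns NULL when no frame with that tag is outstanding.
 */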
static struct frame *
getframe(struct aoedev *d, u32 tag)
{
	struct frame *f;
	struct list_head *head, *pos, *nx;
	u32 n;

	n = tag % NFACTIVE;
	head = &d->factive[n];
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}

static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}

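/* Advance the target's round-robin interface pointer to the next
 * configured interface, wrapping to the first slot.  Returns NULL
 * only when the target has no usable interfaces at all.
 */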
static struct aoeif *
ifrotate(struct aoetgt *t)
{
	struct aoeif *ifp;

	ifp = t->ifp;
	ifp++;
	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
		ifp = t->ifs;
	if (ifp->nd == NULL)
		return NULL;
	return t->ifp = ifp;
}

static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}

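/* Take an skb from the device's pool if the network layer has
 * finished with it (dataref == 1); otherwise fall back to a fresh
 * allocation while the pool is still below NSKBPOOLMAX.
 */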
static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}

void
aoe_freetframe(struct frame *f)
{
	struct aoetgt *t;

	t = f->t;
	f->buf = NULL;
	f->bv = NULL;
	f->r_skb = NULL;
	list_add(&f->head, &t->ffree);
}

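/* Get a free frame for this target, allocating a new one (up to
 * NSKBPOOLMAX*2 per target) when the free list is empty, and make
 * sure the frame's skb is not still held by the network layer,
 * swapping in one from the device's skb pool if necessary.
 */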
static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct sk_buff *skb;
	struct list_head *pos;

	if (list_empty(&t->ffree)) {
		if (t->falloc >= NSKBPOOLMAX*2)
			return NULL;
		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
		if (f == NULL)
			return NULL;
		t->falloc++;
		f->t = t;
	} else {
		pos = t->ffree.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
	}

	skb = f->skb;
	if (skb == NULL) {
		f->skb = skb = new_skb(ETH_ZLEN);
		if (!skb) {
bail:			aoe_freetframe(f);
			return NULL;
		}
	}

	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
		skb = skb_pool_get(d);
		if (skb == NULL)
			goto bail;
		skb_pool_put(d, f->skb);
		f->skb = skb;
	}

	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	return f;
}

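/* Pick the next target, round-robin from the last one used, that has
 * window available and a usable interface, and get a frame for it.
 * If no frame can be had and nothing is outstanding on any target,
 * flag the device with DEVFL_KICKME so the request queue is kicked
 * again later.
 */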
static struct frame *
newframe(struct aoedev *d)
{
	struct frame *f;
	struct aoetgt *t, **tt;
	int totout = 0;

	if (d->targets[0] == NULL) {	/* shouldn't happen, but I'm paranoid */
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	tt = d->tgt;	/* last used target */
	for (;;) {
		tt++;
		if (tt >= &d->targets[NTARGETS] || !*tt)
			tt = d->targets;
		t = *tt;
		totout += t->nout;
		if (t->nout < t->maxout
		&& t != d->htgt
		&& t->ifp->nd) {
			f = newtframe(d, t);
			if (f) {
				ifrotate(t);
				d->tgt = tt;
				return f;
			}
		}
		if (tt == d->tgt)	/* we've looped and found nada */
			break;
	}
	if (totout == 0) {
		d->kicked++;
		d->flags |= DEVFL_KICKME;
	}
	return NULL;
}

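/* Map cnt bytes of the bio_vec array, starting at offset off within
 * the first element, into the skb as page fragments.
 */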
static void
skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
{
	int frag = 0;
	ulong fcnt;
loop:
	fcnt = bv->bv_len - (off - bv->bv_offset);
	if (fcnt > cnt)
		fcnt = cnt;
	skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
	cnt -= fcnt;
	if (cnt <= 0)
		return;
	bv++;
	off = bv->bv_offset;
	goto loop;
}

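/* Hash an outstanding frame into the device's active-frame buckets,
 * keyed by its tag, so the response handler can find it quickly.
 */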
static void
fhash(struct frame *f)
{
	struct aoedev *d = f->t->d;
	u32 n;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
}

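/* Build one AoE ATA read/write frame covering the next chunk (at most
 * maxbcnt bytes) of the current buf, advance the buf's position, and
 * transmit a clone of the frame.  Returns 1 when work was started on
 * a frame, 0 when there is nothing to do or no frame is available.
 */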
static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct bio_vec *bv;
	struct aoetgt *t;
	struct sk_buff *skb;
	struct sk_buff_head queue;
	ulong bcnt, fbcnt;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	buf = nextbuf(d);
	if (buf == NULL)
		return 0;
	f = newframe(d);
	if (f == NULL)
		return 0;
	t = *d->tgt;
	bv = buf->bv;
	bcnt = d->maxbcnt;
	if (bcnt == 0)
		bcnt = DEFAULTBCNT;
	if (bcnt > buf->resid)
		bcnt = buf->resid;
	fbcnt = bcnt;
	f->bv = buf->bv;
	f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
	do {
		if (fbcnt < buf->bv_resid) {
			buf->bv_resid -= fbcnt;
			buf->resid -= fbcnt;
			break;
		}
		fbcnt -= buf->bv_resid;
		buf->resid -= buf->bv_resid;
		if (buf->resid == 0) {
			d->ip.buf = NULL;
			break;
		}
		buf->bv++;
		buf->bv_resid = buf->bv->bv_len;
		WARN_ON(buf->bv_resid == 0);
	} while (fbcnt);

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->buf = buf;
	f->bcnt = bcnt;
	f->lba = buf->sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, buf->sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fillup(skb, f->bv, f->bv_off, bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
		skb->truesize += bcnt;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->sector += bcnt >> 9;

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb) {
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
	return 1;
}

/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

cont:
		dev_put(ifp);
	}
	rcu_read_unlock();
}

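/* Give an outstanding frame a fresh tag, rotate to the target's next
 * interface, and retransmit it, logging the retransmit through the
 * aoe character device.
 */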
static void
resend(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct sk_buff_head queue;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct aoetgt *t;
	char buf[128];
	u32 n;

	t = f->t;
	n = newtag(d);
	skb = f->skb;
	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
		ktcomplete(f, NULL);
		return;
	}
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
		"retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
		h->src, h->dst, t->nout);
	aoechr_error(buf);

	f->tag = n;
	fhash(f);
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
	aoenet_xmit(&queue);
}

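/* Return how many jiffies ticks have passed since the frame carrying
 * this tag was transmitted; the low 16 bits of the tag hold the
 * transmit time.
 */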
static int
tsince(u32 tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}

static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p, *e;

	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++)
		if (p->nd == nd)
			return p;
	return NULL;
}

static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
	struct aoeif *e;
	ulong n;

	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
	e->nd = NULL;
}

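/* Reissue every frame still outstanding on the struggling "help"
 * target (d->htgt) through other targets, then take away its
 * interfaces so it is not used again.  Returns 0 if we run out of
 * frames before finishing.
 */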
static int
sthtith(struct aoedev *d)
{
	struct frame *f, *nf;
	struct list_head *nx, *pos, *head;
	struct sk_buff *skb;
	struct aoetgt *ht = d->htgt;
	int i;

	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (f->t != ht)
				continue;

			nf = newframe(d);
			if (!nf)
				return 0;

			/* remove frame from active list */
			list_del(pos);

			/* reassign all pertinent bits to new outbound frame */
			skb = nf->skb;
			nf->skb = f->skb;
			nf->buf = f->buf;
			nf->bcnt = f->bcnt;
			nf->lba = f->lba;
			nf->bv = f->bv;
			nf->bv_off = f->bv_off;
			nf->waited = 0;
			f->skb = skb;
			aoe_freetframe(f);
			ht->nout--;
			nf->t->nout++;
			resend(d, nf);
		}
	}
	/* We've cleaned up the outstanding so take away his
	 * interfaces so he won't be used.  We should remove him from
	 * the target array here, but cleaning up a target is
	 * involved.  PUNT!
	 */
	memset(ht->ifs, 0, sizeof ht->ifs);
	d->htgt = NULL;
	return 1;
}

static inline unsigned char
ata_scnt(unsigned char *packet) {
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;

	h = (struct aoe_hdr *) packet;
	ah = (struct aoe_atahdr *) (h+1);
	return ah->scnt;
}

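/* Per-device timer.  Collect frames that have been outstanding longer
 * than ~150% of the average round-trip time, retransmit them (failing
 * the whole device after aoe_deadsecs), adjust each target's window,
 * and kick the request queue when needed.
 */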
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct aoetgt *t, **tt, **te;
	struct aoeif *ifp;
	struct frame *f;
	struct list_head *head, *pos, *nx;
	LIST_HEAD(flist);
	register long timeout;
	ulong flags, n;
	int i;

	d = (struct aoedev *) vp;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	/* collect all frames to rexmit into flist */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (tsince(f->tag) < timeout)
				break;	/* end of expired frames */
			/* move to flist for later processing */
			list_move_tail(pos, &flist);
		}
	}
	/* window check */
	tt = d->targets;
	te = tt + d->ntargets;
	for (; tt < te && (t = *tt); tt++) {
		if (t->nout == t->maxout
		&& t->maxout < t->nframes
		&& (jiffies - t->lastwadj)/HZ > 10) {
			t->maxout++;
			t->lastwadj = jiffies;
		}
	}

	if (!list_empty(&flist)) {	/* retransmissions necessary */
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	/* process expired frames */
	while (!list_empty(&flist)) {
		pos = flist.next;
		f = list_entry(pos, struct frame, head);
		n = f->waited += timeout;
		n /= HZ;
		if (n > aoe_deadsecs) {
			/* Waited too long.  Device failure.
			 * Hang all frames on first hash bucket for downdev
			 * to clean up.
			 */
			list_splice(&flist, &d->factive[0]);
			aoedev_downdev(d);
			break;
		}
		list_del(pos);

		t = f->t;
		if (n > aoe_deadsecs/2)
			d->htgt = t; /* see if another target can help */

		if (t->nout == t->maxout) {
			if (t->maxout > 1)
				t->maxout--;
			t->lastwadj = jiffies;
		}

		ifp = getif(t, f->skb->dev);
		if (ifp && ++ifp->lost > (t->nframes << 1)
		&& (ifp != t->ifs || t->ifs[1].nd)) {
			ejectif(t, ifp);
			ifp = NULL;
		}
		resend(d, f);
	}

	if ((d->flags & DEVFL_KICKME || d->htgt) && d->blkq) {
		d->flags &= ~DEVFL_KICKME;
		d->blkq->request_fn(d->blkq);
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);
}

static unsigned long
rqbiocnt(struct request *r)
{
	struct bio *bio;
	unsigned long n = 0;

	__rq_for_each_bio(bio, r)
		n++;
	return n;
}

/* This can be removed if we are certain that no users of the block
 * layer will ever use zero-count pages in bios.  Otherwise we have to
 * protect against the put_page sometimes done by the network layer.
 *
 * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
 * discussion.
 *
 * We cannot use get_page in the workaround, because it insists on a
 * positive page count as a precondition.  So we use _count directly.
 */
static void
bio_pageinc(struct bio *bio)
{
	struct bio_vec *bv;
	struct page *page;
	int i;

	bio_for_each_segment(bv, bio, i) {
		page = bv->bv_page;
		/* Non-zero page count for non-head members of
		 * compound pages is no longer allowed by the kernel,
		 * but this has never been seen here.
		 */
		if (unlikely(PageCompound(page)))
			if (compound_trans_head(page) != page) {
				pr_crit("page tail used for block I/O\n");
				BUG();
			}
		atomic_inc(&page->_count);
	}
}

static void
bio_pagedec(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i)
		atomic_dec(&bv->bv_page->_count);
}

static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
	struct bio_vec *bv;

	memset(buf, 0, sizeof(*buf));
	buf->rq = rq;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	bio_pageinc(bio);
	buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
}

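/* Return the buf currently being filled, or carve the next buf (one
 * per bio) off the request at the head of the block queue.
 */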
static struct buf *
nextbuf(struct aoedev *d)
{
	struct request *rq;
	struct request_queue *q;
	struct buf *buf;
	struct bio *bio;

	q = d->blkq;
	if (q == NULL)
		return NULL;	/* initializing */
	if (d->ip.buf)
		return d->ip.buf;
	rq = d->ip.rq;
	if (rq == NULL) {
		rq = blk_peek_request(q);
		if (rq == NULL)
			return NULL;
		blk_start_request(rq);
		d->ip.rq = rq;
		d->ip.nxbio = rq->bio;
		rq->special = (void *) rqbiocnt(rq);
	}
	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
	if (buf == NULL) {
		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
		return NULL;
	}
	bio = d->ip.nxbio;
	bufinit(buf, rq, bio);
	bio = bio->bi_next;
	d->ip.nxbio = bio;
	if (bio == NULL)
		d->ip.rq = NULL;
	return d->ip.buf = buf;
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	if (d->htgt && !sthtith(d))
		return;
	while (aoecmd_ata_rw(d))
		;
}

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}

static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
}

static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}

static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}

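/* Copy cnt bytes of skb payload into the pages described by the
 * bio_vec array, starting at offset off within the first element.
 */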
static void
bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
{
	ulong fcnt;
	char *p;
	int soff = 0;
loop:
	fcnt = bv->bv_len - (off - bv->bv_offset);
	if (fcnt > cnt)
		fcnt = cnt;
	p = page_address(bv->bv_page) + off;
	skb_copy_bits(skb, soff, p, fcnt);
	soff += fcnt;
	cnt -= fcnt;
	if (cnt <= 0)
		return;
	bv++;
	off = bv->bv_offset;
	goto loop;
}

void
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
{
	struct bio *bio;
	int bok;
	struct request_queue *q;

	q = d->blkq;
	if (rq == d->ip.rq)
		d->ip.rq = NULL;
	do {
		bio = rq->bio;
		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));

	/* cf. http://lkml.org/lkml/2006/10/31/28 */
	if (!fastfail)
		q->request_fn(q);
}

static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{
	struct request *rq;
	unsigned long n;

	if (buf == d->ip.buf)
		d->ip.buf = NULL;
	rq = buf->rq;
	bio_pagedec(buf->bio);
	mempool_free(buf, d->bufpool);
	n = (unsigned long) rq->special;
	rq->special = (void *) --n;
	if (n == 0)
		aoe_end_request(d, rq, 0);
}

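/* Complete one frame in ktio thread context: check the ATA response,
 * copy read data or record identify data, then free the frame and
 * finish the buf once all of its frames have come back.
 */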
static void
ktiocomplete(struct frame *f)
{
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct buf *buf;
	struct sk_buff *skb;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct aoedev *d;
	long n;

	if (f == NULL)
		return;

	t = f->t;
	d = t->d;

	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;
	skb = f->r_skb;
	if (skb == NULL)
		goto noskb;	/* just fail the buf. */

	hin = (struct aoe_hdr *) skb->data;
	skb_pull(skb, sizeof(*hin));
	ahin = (struct aoe_atahdr *) skb->data;
	skb_pull(skb, sizeof(*ahin));
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
noskb:		if (buf)
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
		goto badrsp;
	}

	n = ahout->scnt << 9;
	switch (ahout->cmdstat) {
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
		if (skb->len < n) {
			pr_err("aoe: runt data size in read. skb->len=%d need=%ld\n",
				skb->len, n);
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
			break;
		}
		bvcpy(f->bv, f->bv_off, skb, n);
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		spin_lock_irq(&d->lock);
		ifp = getif(t, skb->dev);
		if (ifp)
			ifp->lost = 0;
		if (d->htgt == t)	/* I'll help myself, thank you. */
			d->htgt = NULL;
		spin_unlock_irq(&d->lock);
		break;
	case ATA_CMD_ID_ATA:
		if (skb->len < 512) {
			pr_info("aoe: runt data size in ataid. skb->len=%d\n",
				skb->len);
			break;
		}
		if (skb_linearize(skb))
			break;
		spin_lock_irq(&d->lock);
		ataid_complete(d, t, skb->data);
		spin_unlock_irq(&d->lock);
		break;
	default:
		pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
			ahout->cmdstat,
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor);
	}
badrsp:
	spin_lock_irq(&d->lock);

	aoe_freetframe(f);

	if (buf && --buf->nframesout == 0 && buf->resid == 0)
		aoe_end_buf(d, buf);

	aoecmd_work(d);

	spin_unlock_irq(&d->lock);
	aoedev_put(d);
	dev_kfree_skb(skb);
}

/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
static int
ktio(void)
{
	struct frame *f;
	struct list_head *pos;
	int i;

	for (i = 0; ; ++i) {
		if (i == MAXIOC)
			return 1;
		if (list_empty(&iocq.head))
			return 0;
		pos = iocq.head.next;
		list_del(pos);
		spin_unlock_irq(&iocq.lock);
		f = list_entry(pos, struct frame, head);
		ktiocomplete(f);
		spin_lock_irq(&iocq.lock);
	}
}

static int
kthread(void *vp)
{
	struct ktstate *k;
	DECLARE_WAITQUEUE(wait, current);
	int more;

	k = vp;
	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -10);
	complete(&k->rendez);	/* tell spawner we're running */
	do {
		spin_lock_irq(k->lock);
		more = k->fn();
		if (!more) {
			add_wait_queue(k->waitq, &wait);
			__set_current_state(TASK_INTERRUPTIBLE);
		}
		spin_unlock_irq(k->lock);
		if (!more) {
			schedule();
			remove_wait_queue(k->waitq, &wait);
		} else
			cond_resched();
	} while (!kthread_should_stop());
	complete(&k->rendez);	/* tell spawner we're stopping */
	return 0;
}

void
aoe_ktstop(struct ktstate *k)
{
	kthread_stop(k->task);
	wait_for_completion(&k->rendez);
}

int
aoe_ktstart(struct ktstate *k)
{
	struct task_struct *task;

	init_completion(&k->rendez);
	task = kthread_run(kthread, k, k->name);
	if (task == NULL || IS_ERR(task))
		return -ENOMEM;
	k->task = task;
	wait_for_completion(&k->rendez);	/* allow kthread to start */
	init_completion(&k->rendez);	/* for waiting for exit later */
	return 0;
}

/* pass it off to kthreads for processing */
static void
ktcomplete(struct frame *f, struct sk_buff *skb)
{
	ulong flags;

	f->r_skb = skb;
	spin_lock_irqsave(&iocq.lock, flags);
	list_add_tail(&f->head, &iocq.head);
	spin_unlock_irqrestore(&iocq.lock, flags);
	wake_up(&ktiowq);
}

struct sk_buff *
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct frame *f;
	struct aoetgt *t;
	u32 n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	h = (struct aoe_hdr *) skb->data;
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	d = aoedev_by_aoeaddr(aoemajor, h->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, h->minor);
		aoechr_error(ebuf);
		return skb;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&h->tag));
	f = getframe(d, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		aoedev_put(d);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d tag=%08x@%08lx\n",
			"unexpected rsp",
			get_unaligned_be16(&h->major),
			h->minor,
			get_unaligned_be32(&h->tag),
			jiffies);
		aoechr_error(ebuf);
		return skb;
	}
	t = f->t;
	calc_rttavg(d, tsince(f->tag));
	t->nout--;
	aoecmd_work(d);

	spin_unlock_irqrestore(&d->lock, flags);

	ktcomplete(f, skb);

	/*
	 * Note here that we do not perform an aoedev_put, as we are
	 * leaving this reference for the ktio to release.
	 */
	return NULL;
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}

struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = newframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = ATA_CMD_ID_ATA;
	ah->lba3 = 0xa0;

	skb->dev = t->ifp->nd;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}

static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
	struct aoetgt *t, **tt, **te;

	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && *tt; tt++)
		;

	if (tt == te) {
		printk(KERN_INFO
			"aoe: device addtgt failure; too many targets\n");
		return NULL;
	}
	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t) {
		printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
		return NULL;
	}

	d->ntargets++;
	t->nframes = nframes;
	t->d = d;
	memcpy(t->addr, addr, sizeof t->addr);
	t->ifp = t->ifs;
	t->maxout = t->nframes;
	INIT_LIST_HEAD(&t->ffree);
	return *tt = t;
}

static void
setdbcnt(struct aoedev *d)
{
	struct aoetgt **t, **e;
	int bcnt = 0;

	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		if (bcnt == 0 || bcnt > (*t)->minbcnt)
			bcnt = (*t)->minbcnt;
	if (bcnt != d->maxbcnt) {
		d->maxbcnt = bcnt;
		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
			d->aoemajor, d->aoeminor, bcnt);
	}
}

static void
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
{
	struct aoedev *d;
	struct aoeif *p, *e;
	int minbcnt;

	d = t->d;
	minbcnt = bcnt;
	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++) {
		if (p->nd == NULL)
			break;	/* end of the valid interfaces */
		if (p->nd == nd) {
			p->bcnt = bcnt;	/* we're updating */
			nd = NULL;
		} else if (minbcnt > p->bcnt)
			minbcnt = p->bcnt; /* find the min interface */
	}
	if (nd) {
		if (p == e) {
			pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
			return;
		}
		p->nd = nd;
		p->bcnt = bcnt;
	}
	t->minbcnt = minbcnt;
	setdbcnt(d);
}

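/* Handle an AoE config/query response: find or create the device and
 * target, record the usable data-frame size for this interface based
 * on the MTU, and, for a device nobody has open, send an ATA identify.
 */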
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	struct sk_buff_head queue;
	u16 n;

	sl = NULL;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
			"Check shelf dip switches.\n");
		return;
	}
	if (h->minor >= NPERSHELF) {
		pr_err("aoe: e%ld.%d %s, %d\n",
			aoemajor, h->minor,
			"slot number larger than the maximum",
			NPERSHELF-1);
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_sysminor_m(sysminor);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (!t) {
		t = addtgt(d, h->src, n);
		if (!t)
			goto bail;
	}
	n = skb->dev->mtu;
	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
	n /= 512;
	if (n > ch->scnt)
		n = ch->scnt;
	n = n ? n * 512 : DEFAULTBCNT;
	setifbcnt(t, skb->dev, n);

	/* don't change users' perspective */
	if (d->nopen == 0) {
		d->fw_ver = be16_to_cpu(ch->fwver);
		sl = aoecmd_ata_id(d);
	}
bail:
	spin_unlock_irqrestore(&d->lock, flags);
	aoedev_put(d);
	if (sl) {
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}

void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;

	d->mintimer = MINTIMER;
	d->maxbcnt = 0;

	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++)
		(*t)->maxout = (*t)->nframes;
}

void
aoe_failbuf(struct aoedev *d, struct buf *buf)
{
	if (buf == NULL)
		return;
	buf->resid = 0;
	clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
	if (buf->nframesout == 0)
		aoe_end_buf(d, buf);
}

void
aoe_flush_iocq(void)
{
	struct frame *f;
	struct aoedev *d;
	LIST_HEAD(flist);
	struct list_head *pos;
	struct sk_buff *skb;
	ulong flags;

	spin_lock_irqsave(&iocq.lock, flags);
	list_splice_init(&iocq.head, &flist);
	spin_unlock_irqrestore(&iocq.lock, flags);
	while (!list_empty(&flist)) {
		pos = flist.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		d = f->t->d;
		skb = f->r_skb;
		spin_lock_irqsave(&d->lock, flags);
		if (f->buf) {
			f->buf->nframesout--;
			aoe_failbuf(d, f->buf);
		}
		aoe_freetframe(f);
		spin_unlock_irqrestore(&d->lock, flags);
		dev_kfree_skb(skb);
		aoedev_put(d);
	}
}

int __init
aoecmd_init(void)
{
	INIT_LIST_HEAD(&iocq.head);
	spin_lock_init(&iocq.lock);
	init_waitqueue_head(&ktiowq);
	kts.name = "aoe_ktio";
	kts.fn = ktio;
	kts.waitq = &ktiowq;
	kts.lock = &iocq.lock;
	return aoe_ktstart(&kts);
}

void
aoecmd_exit(void)
{
	aoe_ktstop(&kts);
	aoe_flush_iocq();
}