/* Copyright (c) 2007 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include "aoe.h"

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 16;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");

static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}

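/* Find the frame on target t whose latest transmission used the given tag. */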
static struct frame *
getframe(struct aoetgt *t, int tag)
{
	struct frame *f, *e;

	f = t->frames;
	e = f + t->nframes;
	for (; f < e; f++)
		if (f->tag == tag)
			return f;
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoetgt *t)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n | ((++t->lasttag & 0x7fff) << 16);
}

static int
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(t);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}

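/* Store the LBA into the six lba byte fields, least significant byte first. */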
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}

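/* Advance the target's round-robin interface pointer, wrapping past the last configured slot. */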
static void
ifrotate(struct aoetgt *t)
{
	t->ifp++;
	if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL)
		t->ifp = t->ifs;
	if (t->ifp->nd == NULL) {
		printk(KERN_INFO "aoe: no interface to rotate to\n");
		BUG();
	}
}

static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}

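/* A pooled skb is reusable only once the network layer has dropped its reference (dataref == 1). */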
static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;

	return NULL;
}

/* freeframe is where we do our load balancing so it's a little hairy. */
static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e, *rf;
	struct aoetgt **t;
	struct sk_buff *skb;

	if (d->targets[0] == NULL) {	/* shouldn't happen, but I'm paranoid */
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	t = d->tgt;
	t++;
	if (t >= &d->targets[NTARGETS] || !*t)
		t = d->targets;
	for (;;) {
		if ((*t)->nout < (*t)->maxout
		&& t != d->htgt
		&& (*t)->ifp->nd) {
			rf = NULL;
			f = (*t)->frames;
			e = f + (*t)->nframes;
			for (; f < e; f++) {
				if (f->tag != FREETAG)
					continue;
				skb = f->skb;
				if (!skb
				&& !(f->skb = skb = new_skb(ETH_ZLEN)))
					continue;
				if (atomic_read(&skb_shinfo(skb)->dataref)
				!= 1) {
					if (!rf)
						rf = f;
					continue;
				}
gotone:
				skb_shinfo(skb)->nr_frags = skb->data_len = 0;
				skb_trim(skb, 0);
				d->tgt = t;
				ifrotate(*t);
				return f;
			}
			/* Work can be done, but the network layer is
			   holding our precious packets.  Try to grab
			   one from the pool. */
			f = rf;
			if (f == NULL) {	/* more paranoia */
				printk(KERN_ERR
					"aoe: freeframe: %s.\n",
					"unexpected null rf");
				d->flags |= DEVFL_KICKME;
				return NULL;
			}
			skb = skb_pool_get(d);
			if (skb) {
				skb_pool_put(d, f->skb);
				f->skb = skb;
				goto gotone;
			}
			(*t)->dataref++;
			if ((*t)->nout == 0)
				d->flags |= DEVFL_KICKME;
		}
		if (t == d->tgt)	/* we've looped and found nada */
			break;
		t++;
		if (t >= &d->targets[NTARGETS] || !*t)
			t = d->targets;
	}
	return NULL;
}

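/* Issue one ATA read or write for d->inprocess.  Returns nonzero after consuming a frame, zero when no frame is free. */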
static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct bio_vec *bv;
	struct aoetgt *t;
	struct sk_buff *skb;
	ulong bcnt;
	char writebit, extbit;

	writebit = 0x10;
	extbit = 0x4;

	f = freeframe(d);
	if (f == NULL)
		return 0;
	t = *d->tgt;
	buf = d->inprocess;
	bv = buf->bv;
	bcnt = t->ifp->maxbcnt;
	if (bcnt == 0)
		bcnt = DEFAULTBCNT;
	if (bcnt > buf->bv_resid)
		bcnt = buf->bv_resid;
	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	t->nout++;
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = page_address(bv->bv_page) + buf->bv_off;
	f->bcnt = bcnt;
	f->lba = buf->sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, buf->sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (bio_data_dir(buf->bio) == WRITE) {
		skb_fill_page_desc(skb, 0, bv->bv_page, buf->bv_off, bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bv_off += bcnt;
	buf->bv_resid -= bcnt;
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		buf->bv = ++bv;
		buf->bv_resid = bv->bv_len;
		WARN_ON(buf->bv_resid == 0);
		buf->bv_off = bv->bv_offset;
	}

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb)
		__skb_queue_tail(&d->sendq, skb);
	return 1;
}

/* Some callers cannot sleep.  They can call this function
 * and transmit the packets later, when interrupts are on.
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

cont:
		dev_put(ifp);
	}
	read_unlock(&dev_base_lock);
}

static void
resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	ifrotate(t);
	n = newtag(t);
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
		"retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
		h->src, h->dst, t->nout);
	aoechr_error(buf);

	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	switch (ah->cmdstat) {
	default:
		break;
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		put_lba(ah, f->lba);

		n = f->bcnt;
		if (n > DEFAULTBCNT)
			n = DEFAULTBCNT;
		ah->scnt = n >> 9;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), n);
			skb->len = sizeof *h + sizeof *ah + n;
			skb->data_len = n;
		}
	}
	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	__skb_queue_tail(&d->sendq, skb);
}

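/* Number of timer ticks since the frame with this tag was transmitted; the low 16 bits of the tag are the xmit tick. */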
static int
tsince(int tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return n;
}

static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p, *e;

	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++)
		if (p->nd == nd)
			return p;
	return NULL;
}

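/* Claim a free interface slot for nd; a free slot is one whose nd is NULL, so getif(t, NULL) finds it. */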
static struct aoeif *
addif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p;

	p = getif(t, NULL);
	if (!p)
		return NULL;
	p->nd = nd;
	p->maxbcnt = DEFAULTBCNT;
	p->lost = 0;
	p->lostjumbo = 0;
	return p;
}

static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
	struct aoeif *e;
	ulong n;

	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
	e->nd = NULL;
}

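/* Reassign the helped target's outstanding frames to the currently selected target and retransmit them. */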
static int
sthtith(struct aoedev *d)
{
	struct frame *f, *e, *nf;
	struct sk_buff *skb;
	struct aoetgt *ht = *d->htgt;

	f = ht->frames;
	e = f + ht->nframes;
	for (; f < e; f++) {
		if (f->tag == FREETAG)
			continue;
		nf = freeframe(d);
		if (!nf)
			return 0;
		skb = nf->skb;
		*nf = *f;
		f->skb = skb;
		f->tag = FREETAG;
		nf->waited = 0;
		ht->nout--;
		(*d->tgt)->nout++;
		resend(d, *d->tgt, nf);
	}
	/* he's clean, he's useless.  take away his interfaces */
	memset(ht->ifs, 0, sizeof ht->ifs);
	d->htgt = NULL;
	return 1;
}

static inline unsigned char
ata_scnt(unsigned char *packet)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;

	h = (struct aoe_hdr *) packet;
	ah = (struct aoe_atahdr *) (h+1);
	return ah->scnt;
}

static void
rexmit_timer(ulong vp)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct aoetgt *t, **tt, **te;
	struct aoeif *ifp;
	struct frame *f, *e;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && *tt; tt++) {
		t = *tt;
		f = t->frames;
		e = f + t->nframes;
		for (; f < e; f++) {
			if (f->tag == FREETAG
			|| tsince(f->tag) < timeout)
				continue;
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) {
				/* waited too long.  device failure. */
				aoedev_downdev(d);
				break;
			}

			if (n > HELPWAIT /* see if another target can help */
			&& (tt != d->targets || d->targets[1]))
				d->htgt = tt;

			if (t->nout == t->maxout) {
				if (t->maxout > 1)
					t->maxout--;
				t->lastwadj = jiffies;
			}

			ifp = getif(t, f->skb->dev);
			if (ifp && ++ifp->lost > (t->nframes << 1)
			&& (ifp != t->ifs || t->ifs[1].nd)) {
				ejectif(t, ifp);
				ifp = NULL;
			}

			if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512
			&& ifp && ++ifp->lostjumbo > (t->nframes << 1)
			&& ifp->maxbcnt != DEFAULTBCNT) {
				printk(KERN_INFO
					"aoe: e%ld.%d: too many lost jumbo on %s:%pm - falling back to %d frames.\n",
					d->aoemajor, d->aoeminor,
					ifp->nd->name, t->addr,
					DEFAULTBCNT);
				ifp->maxbcnt = 0;
			}
			resend(d, t, f);
		}

		/* window check */
		if (t->nout == t->maxout
		&& t->maxout < t->nframes
		&& (jiffies - t->lastwadj) / HZ > 10) {
			t->maxout++;
			t->lastwadj = jiffies;
		}
	}

	if (!skb_queue_empty(&d->sendq)) {
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	if (d->flags & DEVFL_KICKME || d->htgt) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(&queue);
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	struct buf *buf;
loop:
	if (d->htgt && !sthtith(d))
		return;
	if (d->inprocess == NULL) {
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
		d->inprocess = buf;
	}
	if (aoecmd_ata_rw(d))
		goto loop;
}

/* This function performs work that has been deferred until sleeping is OK. */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}

static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
}

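/* A negative rtt marks a response that matched no outstanding frame; its magnitude still feeds the average and tunes the minimum timer. */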
static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}

static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}

static inline void
diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector)
{
	unsigned long n_sect = bio->bi_size >> 9;
	const int rw = bio_data_dir(bio);
	struct hd_struct *part;
	int cpu;

	cpu = part_stat_lock();
	part = disk_map_sector_rcu(disk, sector);

	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, ticks[rw], duration);
	part_stat_add(cpu, part, sectors[rw], n_sect);
	part_stat_add(cpu, part, io_ticks, duration);

	part_stat_unlock();
}

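/* Handle an ATA response: match it to its frame, update the RTT average, copy read data or advance a multi-frame write, and complete the bio once all of its frames are in. */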
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct sk_buff_head queue;
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct aoetgt *t;
	struct aoeif *ifp;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = (struct aoe_hdr *) skb_mac_header(skb);
	aoemajor = get_unaligned_be16(&hin->major);
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = get_unaligned_be32(&hin->tag);
	t = gettgt(d, hin->src);
	if (t == NULL) {
		printk(KERN_INFO "aoe: can't find target e%ld.%d:%pm\n",
			d->aoemajor, d->aoeminor, hin->src);
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = getframe(t, n);
	if (f == NULL) {
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d tag=%08x@%08lx\n",
			"unexpected rsp",
			get_unaligned_be16(&hin->major),
			hin->minor,
			get_unaligned_be32(&hin->tag),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */
			d->htgt = NULL;
		n = ahout->scnt << 9;
		switch (ahout->cmdstat) {
		case ATA_CMD_PIO_READ:
		case ATA_CMD_PIO_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: %s.  skb->len=%d need=%ld\n",
					"runt data size in read", skb->len, n);
				/* fail frame f?  just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
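			/* fall through: the write path's interface accounting and residual handling apply to reads too */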
		case ATA_CMD_PIO_WRITE:
		case ATA_CMD_PIO_WRITE_EXT:
			ifp = getif(t, skb->dev);
			if (ifp) {
				ifp->lost = 0;
				if (n > DEFAULTBCNT)
					ifp->lostjumbo = 0;
			}
			if (f->bcnt -= n) {
				f->lba += n >> 9;
				f->bufaddr += n;
				resend(d, t, f);
				goto xmit;
			}
			break;
		case ATA_CMD_ID_ATA:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid.  skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, t, (char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				get_unaligned_be16(&hin->major),
				hin->minor);
		}
	}

	if (buf && --buf->nframesout == 0 && buf->resid == 0) {
		diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector);
		n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
		bio_endio(buf->bio, n);
		mempool_free(buf, d->bufpool);
	}

	f->buf = NULL;
	f->tag = FREETAG;
	t->nout--;

	aoecmd_work(d);
xmit:
	__skb_queue_head_init(&queue);
	skb_queue_splice_init(&d->sendq, &queue);

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}

struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = freeframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	t->nout++;
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = ATA_CMD_ID_ATA;
	ah->lba3 = 0xa0;

	skb->dev = t->ifp->nd;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}

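/* Allocate and register a new target with nframes frames, all initially free. */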
static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
	struct aoetgt *t, **tt, **te;
	struct frame *f, *e;

	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && *tt; tt++)
		;

	if (tt == te) {
		printk(KERN_INFO
			"aoe: device addtgt failure; too many targets\n");
		return NULL;
	}
	t = kzalloc(sizeof *t, GFP_ATOMIC);
	f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
	if (!t || !f) {
		kfree(f);
		kfree(t);
		printk(KERN_INFO "aoe: cannot allocate memory to add target\n");
		return NULL;
	}

	t->nframes = nframes;
	t->frames = f;
	e = f + nframes;
	for (; f < e; f++)
		f->tag = FREETAG;
	memcpy(t->addr, addr, sizeof t->addr);
	t->ifp = t->ifs;
	t->maxout = t->nframes;
	return *tt = t;
}

void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	struct aoeif *ifp;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	u16 n;

	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_sysminor_m(sysminor);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (!t) {
		t = addtgt(d, h->src, n);
		if (!t) {
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}
	}
	ifp = getif(t, skb->dev);
	if (!ifp) {
		ifp = addif(t, skb->dev);
		if (!ifp) {
			printk(KERN_INFO
				"aoe: device addif failure; "
				"too many interfaces?\n");
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}
	}
	if (ifp->maxbcnt) {
		n = ifp->nd->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != ifp->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%d: setting %d byte data frames on %s:%pm\n",
				d->aoemajor, d->aoeminor, n,
				ifp->nd->name, t->addr);
			ifp->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->fw_ver = be16_to_cpu(ch->fwver);

	sl = aoecmd_ata_id(d);

	spin_unlock_irqrestore(&d->lock, flags);

	if (sl) {
		struct sk_buff_head queue;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}

void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;
	struct aoeif *p, *e;

	d->mintimer = MINTIMER;

	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++) {
		(*t)->maxout = (*t)->nframes;
		p = (*t)->ifs;
		e = p + NAOEIFS;
		for (; p < e; p++) {
			p->lostjumbo = 0;
			p->lost = 0;
			p->maxbcnt = DEFAULTBCNT;
		}
	}
}