[AOE]: Introduce aoe_hdr()
[deliverable/linux.git] / drivers / block / aoe / aoecmd.c
CommitLineData
2611464d 1/* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */
1da177e4
LT
2/*
3 * aoecmd.c
4 * Filesystem request handling methods
5 */
6
7#include <linux/hdreg.h>
8#include <linux/blkdev.h>
9#include <linux/skbuff.h>
10#include <linux/netdevice.h>
3ae1c24e 11#include <linux/genhd.h>
475172fb 12#include <asm/unaligned.h>
1da177e4
LT
13#include "aoe.h"
14
15#define TIMERTICK (HZ / 10)
16#define MINTIMER (2 * TIMERTICK)
17#define MAXTIMER (HZ << 1)
b751e8b6
EC
18
19static int aoe_deadsecs = 60 * 3;
20module_param(aoe_deadsecs, int, 0644);
21MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");
1da177e4 22
e407a7f6
EC
23struct sk_buff *
24new_skb(ulong len)
1da177e4
LT
25{
26 struct sk_buff *skb;
27
28 skb = alloc_skb(len, GFP_ATOMIC);
29 if (skb) {
30 skb->nh.raw = skb->mac.raw = skb->data;
1da177e4
LT
31 skb->protocol = __constant_htons(ETH_P_AOE);
32 skb->priority = 0;
1da177e4
LT
33 skb->next = skb->prev = NULL;
34
35 /* tell the network layer not to perform IP checksums
36 * or to get the NIC to do it
37 */
38 skb->ip_summed = CHECKSUM_NONE;
39 }
40 return skb;
41}
42
1da177e4
LT
43static struct frame *
44getframe(struct aoedev *d, int tag)
45{
46 struct frame *f, *e;
47
48 f = d->frames;
49 e = f + d->nframes;
50 for (; f<e; f++)
51 if (f->tag == tag)
52 return f;
53 return NULL;
54}
55
56/*
57 * Leave the top bit clear so we have tagspace for userland.
58 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
59 * This driver reserves tag -1 to mean "unused frame."
60 */
61static int
62newtag(struct aoedev *d)
63{
64 register ulong n;
65
66 n = jiffies & 0xffff;
67 return n |= (++d->lasttag & 0x7fff) << 16;
68}
69
/* Fill in the AoE header @h for an ATA command addressed to device @d,
 * allocating a fresh tag for the exchange.
 * Returns the new host-side tag so the caller can record it in the frame.
 * Caller holds d->lock (d->ifp and d->addr are read here).
 */
static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
	memcpy(h->dst, d->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
86
19bf2635
EC
87static inline void
88put_lba(struct aoe_atahdr *ah, sector_t lba)
89{
90 ah->lba0 = lba;
91 ah->lba1 = lba >>= 8;
92 ah->lba2 = lba >>= 8;
93 ah->lba3 = lba >>= 8;
94 ah->lba4 = lba >>= 8;
95 ah->lba5 = lba >>= 8;
96}
97
1da177e4
LT
/* Build an ATA read/write command for the bio currently in d->inprocess
 * and queue the packet on the device sendq.  Consumes up to d->maxbcnt
 * bytes of the current bio_vec, advances all buf bookkeeping, and moves
 * d->inprocess to the next bio_vec (or clears it when the bio is done).
 * Caller holds d->lock; @f is a free frame obtained from freeframe().
 */
static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;	/* ATA write command bit */
	extbit = 0x4;		/* LBA48 (EXT) command bit */

	buf = d->inprocess;	/* caller guarantees non-NULL */

	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > d->maxbcnt)
		bcnt = d->maxbcnt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;
	f->bcnt = bcnt;
	f->lba = sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;	/* byte count -> 512-byte sectors */
	put_lba(ah, sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		/* LBA28: only 28 bits of lba3 are address bits */
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		/* attach the data page as a fragment; no copy */
		skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
			offset_in_page(f->bufaddr), bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
	} else {
		writebit = 0;
	}

	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
/* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		/* whole bio issued; pick a new one next time */
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		/* advance to the next segment of the bio */
		buf->bv++;
		WARN_ON(buf->bv->bv_len == 0);
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb->dev = d->ifp;
	/* queue a clone so the frame keeps its skb for retransmit */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
181
3ae1c24e
EC
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
/* Build one broadcast AoE config-query packet per usable interface.
 * Returns the head of an skb list chained through skb->next; if @tail
 * is non-NULL it receives the last element (the first one allocated).
 * Returns NULL when no packet could be built.
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	/* dev_hold() at loop top is balanced by dev_put() in the
	 * for-increment, so the reference is dropped on every path,
	 * including the continue cases.
	 */
	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			continue;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			continue;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		if (sl_tail == NULL)
			sl_tail = skb;
		h = aoe_hdr(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);	/* Ethernet broadcast */
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

		/* push onto the singly-linked result list */
		skb->next = sl;
		sl = skb;
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}
230
4f51dc5e
EC
/* Find a frame marked FREETAG whose skb is no longer referenced by the
 * network layer (dataref == 1), reset its skb to an empty linear buffer,
 * and return it.  If every free frame's skb is still held by the network
 * layer, set DEVFL_KICKME so rexmit_timer() retries aoecmd_work() later.
 * Returns NULL when no frame is usable.  Caller holds d->lock.
 */
static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e;
	int n = 0;

	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG)
			continue;
		if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) {
			/* drop any page fragments from a previous write */
			skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0;
			skb_trim(f->skb, 0);
			return f;
		}
		n++;	/* free, but skb still in flight */
	}
	if (n == d->nframes)	/* wait for network layer */
		d->flags |= DEVFL_KICKME;

	return NULL;
}
254
1da177e4
LT
/* enters with d->lock held */
/* Issue as many ATA commands as there are free frames and queued work.
 * While the device is paused (discovery in progress), instead queue a
 * config query once all outstanding frames have completed.
 */
void
aoecmd_work(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;

	if (d->flags & DEVFL_PAUSE) {
		if (!aoedev_isbusy(d))
			d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
					d->aoeminor, &d->sendq_tl);
		return;
	}

loop:
	f = freeframe(d);
	if (f == NULL)
		return;
	if (d->inprocess == NULL) {
		/* nothing mid-flight: pull the next buf off the queue */
		if (list_empty(&d->bufq))
			return;
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
/*printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */
		d->inprocess = buf;
	}
	aoecmd_ata_rw(d, f);
	goto loop;
}
284
/* Retransmit frame @f with a fresh tag, queuing a clone on the sendq.
 * Oversized (jumbo) frames are shrunk to DEFAULTBCNT on retransmit; if
 * too many jumbos are lost, fall back to 1KB frames permanently for this
 * device.  Caller holds d->lock.
 */
static void
rexmit(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	n = newtag(d);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
		"retransmit",
		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
	aoechr_error(buf);

	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	f->tag = n;
	h->tag = cpu_to_be32(n);
	/* refresh addresses: the device may have migrated interfaces */
	memcpy(h->dst, d->addr, sizeof h->dst);
	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);

	n = DEFAULTBCNT / 512;
	if (ah->scnt > n) {
		/* shrink a lost jumbo frame to the default payload size */
		ah->scnt = n;
		if (ah->aflags & AOEAFL_WRITE) {
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), DEFAULTBCNT);
			skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT;
			skb->data_len = DEFAULTBCNT;
		}
		if (++d->lostjumbo > (d->nframes << 1))
			if (d->maxbcnt != DEFAULTBCNT) {
				printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n",
					d->aoemajor, d->aoeminor, d->ifp->name);
				d->maxbcnt = DEFAULTBCNT;
				d->flags |= DEVFL_MAXBCNT;
			}
	}

	skb->dev = d->ifp;
	/* queue a clone; the frame keeps its skb for further retransmits */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
338
339static int
340tsince(int tag)
341{
342 int n;
343
344 n = jiffies & 0xffff;
345 n -= tag & 0xffff;
346 if (n < 0)
347 n += 1<<16;
348 return n;
349}
350
/* Per-device timer callback: retransmit frames that have waited longer
 * than ~150% of the average round-trip time, fail the device after
 * aoe_deadsecs of silence, flush the sendq, and rearm the timer.
 * @vp is the struct aoedev pointer cast to ulong.
 */
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		/* device is being torn down; do not rearm */
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) { /* waited too long for response */
				aoedev_downdev(d);
				break;
			}
			rexmit(d, f);
		}
	}
	if (d->flags & DEVFL_KICKME) {
		/* freeframe() asked for a retry once skbs were released */
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		/* retransmissions pending: back off the rtt average */
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	/* transmit outside the lock */
	aoenet_xmit(sl);
}
406
3ae1c24e
EC
/* this function performs work that has been deferred until sleeping is OK
 */
/* Workqueue handler: allocate the gendisk (DEVFL_GDALLOC) and/or push a
 * new device size into the block device inode (DEVFL_NEWSIZE).  Both
 * flags are set under d->lock by ataid_complete().
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			/* i_size is in bytes; capacity is in 512B sectors */
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}
437
1da177e4
LT
438static void
439ataid_complete(struct aoedev *d, unsigned char *id)
440{
441 u64 ssize;
442 u16 n;
443
444 /* word 83: command set supported */
475172fb 445 n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));
1da177e4
LT
446
447 /* word 86: command set/feature enabled */
475172fb 448 n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));
1da177e4
LT
449
450 if (n & (1<<10)) { /* bit 10: LBA 48 */
451 d->flags |= DEVFL_EXT;
452
453 /* word 100: number lba48 sectors */
475172fb 454 ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));
1da177e4
LT
455
456 /* set as in ide-disk.c:init_idedisk_capacity */
457 d->geo.cylinders = ssize;
458 d->geo.cylinders /= (255 * 63);
459 d->geo.heads = 255;
460 d->geo.sectors = 63;
461 } else {
462 d->flags &= ~DEVFL_EXT;
463
464 /* number lba28 sectors */
475172fb 465 ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));
1da177e4
LT
466
467 /* NOTE: obsolete in ATA 6 */
475172fb
EC
468 d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
469 d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
470 d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
1da177e4 471 }
3ae1c24e
EC
472
473 if (d->ssize != ssize)
a12c93f0 474 printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n",
6bb6285f 475 (unsigned long long)mac_addr(d->addr),
3ae1c24e
EC
476 d->aoemajor, d->aoeminor,
477 d->fw_ver, (long long)ssize);
1da177e4
LT
478 d->ssize = ssize;
479 d->geo.start = 0;
480 if (d->gd != NULL) {
481 d->gd->capacity = ssize;
3ae1c24e
EC
482 d->flags |= DEVFL_NEWSIZE;
483 } else {
484 if (d->flags & DEVFL_GDALLOC) {
a12c93f0 485 printk(KERN_ERR "aoe: can't schedule work for e%lu.%lu, %s\n",
3ae1c24e 486 d->aoemajor, d->aoeminor,
6bb6285f 487 "it's already on! This shouldn't happen.\n");
3ae1c24e
EC
488 return;
489 }
490 d->flags |= DEVFL_GDALLOC;
1da177e4 491 }
1da177e4 492 schedule_work(&d->work);
1da177e4
LT
493}
494
495static void
496calc_rttavg(struct aoedev *d, int rtt)
497{
498 register long n;
499
500 n = rtt;
dced3a05
EC
501 if (n < 0) {
502 n = -rtt;
503 if (n < MINTIMER)
504 n = MINTIMER;
505 else if (n > MAXTIMER)
506 n = MAXTIMER;
507 d->mintimer += (n - d->mintimer) >> 1;
508 } else if (n < d->mintimer)
509 n = d->mintimer;
1da177e4
LT
510 else if (n > MAXTIMER)
511 n = MAXTIMER;
512
513 /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
514 n -= d->rttavg;
515 d->rttavg += n >> 2;
516}
517
/* Handle an incoming AoE ATA response @skb: match it to its outstanding
 * frame by tag, update the rtt average, copy read data or issue the next
 * chunk of a multi-chunk transfer, complete the bio when all frames are
 * in, free the frame, and kick more work out of the queue.
 */
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct sk_buff *sl;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = aoe_hdr(skb);
	aoemajor = be16_to_cpu(get_unaligned(&hin->major));
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&hin->tag));
	f = getframe(d, n);
	if (f == NULL) {
		/* negative rtt flags "stale tag" to calc_rttavg */
		calc_rttavg(d, -tsince(n));
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d tag=%08x@%08lx\n",
			"unexpected rsp",
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor,
			be32_to_cpu(get_unaligned(&hin->tag)),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	hout = aoe_hdr(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);
	buf = f->buf;

	if (ahout->cmdstat == WIN_IDENTIFY)
		d->flags &= ~DEVFL_PAUSE;	/* discovery finished */
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		printk(KERN_ERR
			"aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		n = ahout->scnt << 9;	/* expected payload bytes */
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_ERR
					"aoe: runt data size in read. skb->len=%d\n",
					skb->len);
				/* fail frame f? just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
			/* fall through: reads and writes share the
			 * next-chunk logic below
			 */
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			if (f->bcnt -= n) {
				/* frame partially done: reuse it to send
				 * the next DEFAULTBCNT-sized chunk
				 */
				skb = f->skb;
				f->bufaddr += n;
				put_lba(ahout, f->lba += ahout->scnt);
				n = f->bcnt;
				if (n > DEFAULTBCNT)
					n = DEFAULTBCNT;
				ahout->scnt = n >> 9;
				if (ahout->aflags & AOEAFL_WRITE) {
					skb_fill_page_desc(skb, 0,
						virt_to_page(f->bufaddr),
						offset_in_page(f->bufaddr), n);
					skb->len = sizeof *hout + sizeof *ahout + n;
					skb->data_len = n;
				}
				f->tag = newtag(d);
				hout->tag = cpu_to_be32(f->tag);
				skb->dev = d->ifp;
				skb = skb_clone(skb, GFP_ATOMIC);
				spin_unlock_irqrestore(&d->lock, flags);
				if (skb)
					aoenet_xmit(skb);
				return;
			}
			if (n > DEFAULTBCNT)
				d->lostjumbo = 0;	/* a jumbo made it */
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO
					"aoe: runt data size in ataid. skb->len=%d\n",
					skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, (char *) (ahin+1));
			break;
		default:
			printk(KERN_INFO
				"aoe: unrecognized ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				be16_to_cpu(get_unaligned(&hin->major)),
				hin->minor);
		}
	}

	if (buf) {
		buf->nframesout -= 1;
		if (buf->nframesout == 0 && buf->resid == 0) {
			/* last frame of the bio: account and complete */
			unsigned long duration = jiffies - buf->start_time;
			unsigned long n_sect = buf->bio->bi_size >> 9;
			struct gendisk *disk = d->gd;
			const int rw = bio_data_dir(buf->bio);

			disk_stat_inc(disk, ios[rw]);
			disk_stat_add(disk, ticks[rw], duration);
			disk_stat_add(disk, sectors[rw], n_sect);
			disk_stat_add(disk, io_ticks, duration);
			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
			bio_endio(buf->bio, buf->bio->bi_size, n);
			mempool_free(buf, d->bufpool);
		}
	}

	f->buf = NULL;
	f->tag = FREETAG;

	aoecmd_work(d);
	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);
	/* transmit outside the lock */
	aoenet_xmit(sl);
}
667
668void
669aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
670{
3ae1c24e 671 struct sk_buff *sl;
1da177e4 672
3ae1c24e 673 sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
1da177e4
LT
674
675 aoenet_xmit(sl);
676}
677
/*
 * Since we only call this in one place (and it only prepares one frame)
 * we just return the skb. Usually we'd chain it up to the aoedev sendq.
 */
/* Build an ATA IDENTIFY DEVICE command in a free frame and return a
 * clone of its skb for transmission (NULL on failure).  Also resets the
 * rtt average and (re)points the retransmit timer.  Caller holds d->lock.
 */
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;

	f = freeframe(d);
	if (f == NULL) {
		printk(KERN_ERR "aoe: can't get a frame. This shouldn't happen.\n");
		return NULL;
	}

	/* initialize the headers & frame */
	skb = f->skb;
	h = aoe_hdr(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;	/* obsolete device-select bits */

	skb->dev = d->ifp;

	/* start from a conservative timeout until real rtt samples arrive */
	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}
717
/* Handle an incoming AoE config response @skb: validate the shelf
 * address, find or create the matching aoedev, let the device migrate
 * MAC/interface, negotiate the data-frame payload size from the MTU and
 * the target's sector count, and kick off an IDENTIFY if the device is
 * not in active use.
 */
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	enum { MAXFRAMES = 16 };	/* cap on outstanding frames per device */
	u16 n;

	h = aoe_hdr(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);	/* target's advertised buffer count */
	if (n > MAXFRAMES)	/* keep it reasonable */
		n = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor, n);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* permit device to migrate mac and network interface */
	d->ifp = skb->dev;
	memcpy(d->addr, h->src, sizeof d->addr);
	if (!(d->flags & DEVFL_MAXBCNT)) {
		/* size data frames to the smaller of the interface MTU
		 * and the target's per-command sector limit
		 */
		n = d->ifp->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != d->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%ld: setting %d byte data frames on %s\n",
				d->aoemajor, d->aoeminor, n, d->ifp->name);
			d->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->flags |= DEVFL_PAUSE;	/* force pause */
	d->mintimer = MINTIMER;
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* check for already outstanding ataid */
	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
796
This page took 0.485988 seconds and 5 git commands to generate.