/*
 * linux/drivers/s390/net/qeth_eddp.c
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
				    struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
		    QETH_QDIO_BUF_EMPTY)
			return -EBUSY;

		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}
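
/*
 * A worked example of the arithmetic above (illustrative figures, not
 * taken from this file): with QETH_MAX_BUFFER_ELEMENTS() == 16,
 * elements_per_skb == 3 and all buffers empty, each buffer holds
 * 16 / 3 == 5 whole segments, i.e. 15 elements.  A context with
 * num_elements == 33 (11 segments) then consumes 15 + 15 + 3 elements,
 * so the function returns 3 -- or -EBUSY as soon as one of those
 * buffers is found not to be EMPTY.
 */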

static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	kfree(ctx->elements);
	kfree(ctx);
}

static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}
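
/*
 * Reference-counting sketch (assumed usage, pieced together from this
 * file): qeth_eddp_create_context_tcp() starts the context off with
 * refcnt == 1 on behalf of the caller, qeth_eddp_buf_ref_context()
 * takes one extra reference per output buffer the context is spread
 * over, and each qeth_eddp_put_context() -- from the buffer-release
 * path or from the original caller -- drops one.  The last put frees
 * the pages, the element array and the context itself:
 *
 *	ctx = qeth_eddp_create_context(card, skb, qhdr);	refcnt == 1
 *	qeth_eddp_fill_buffer(queue, ctx, index);		refcnt == 1 + n
 *	qeth_eddp_put_context(ctx);				caller's ref gone
 *	... n buffers complete, n more puts ...			refcnt == 0 -> free
 */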

void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)) {
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}

static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
			  struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}
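
/*
 * Why a reference list rather than a single pointer: one EDDP context
 * can span several output buffers, and in packing mode one partially
 * filled buffer can go on to carry segments from a later context.  The
 * resulting many-to-many relationship is modelled by small heap nodes
 * hanging off each buffer's ctx_list, e.g.:
 *
 *	buf->ctx_list: [ref -> ctxA] -> [ref -> ctxB]
 *
 * so that qeth_eddp_buf_release_contexts() can drop exactly the
 * references this buffer holds when the buffer completes.
 */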

int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
		      struct qeth_eddp_context *ctx,
		      int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen, since we checked
			 * for available elements in
			 * qeth_eddp_check_buffers_for_context
			 */
			if (element == 0)
				return -EBUSY;
			else {
				PRINT_WARN("could only partially fill eddp "
					   "buffer!\n");
				goto out;
			}
		}
		/* check if the whole next skb fits into the current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
		     buf->next_element_to_fill)
		    < ctx->elements_per_skb) {
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to the buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				PRINT_WARN("no memory to create eddp context "
					   "reference\n");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			buffer->element[buf->next_element_to_fill].addr =
				ctx->elements[element].addr;
			buffer->element[buf->next_element_to_fill].length =
				ctx->elements[element].length;
			buffer->element[buf->next_element_to_fill].flags =
				ctx->elements[element].flags;
			buf->next_element_to_fill++;
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
#ifdef CONFIG_QETH_PERF_STATS
		queue->card->perf_stats.skbs_sent_pack++;
#endif
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
		    QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}
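
/*
 * A minimal sketch of the expected caller sequence (assumed, based on
 * how qeth's transmit path would drive this file; "flush" stands for
 * whatever routine hands PRIMED buffers to qdio):
 *
 *	buffers_needed = qeth_eddp_check_buffers_for_context(queue, ctx);
 *	if (buffers_needed < 0)
 *		... queue busy, back off ...
 *	flush_cnt = qeth_eddp_fill_buffer(queue, ctx,
 *					  queue->next_buf_to_fill);
 *	if (flush_cnt > 0)
 *		... flush that many PRIMED buffers to the device ...
 *
 * Note that the return value is a count of buffers newly set to PRIMED,
 * or -EBUSY if not even one element could be placed.
 */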

static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
			      struct qeth_eddp_data *eddp, int data_len)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int pkt_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	pkt_len = eddp->nhl + eddp->thl + data_len;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		pkt_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		pkt_len += VLAN_HLEN;
	/* does the complete packet fit into the current page? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add mac header (layer2 only) */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}
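
/*
 * Resulting layout of one segment's header element (bracketed parts
 * present only in layer-2 / VLAN mode):
 *
 *	| qeth_hdr | [eth hdr] | [vlan tag] | network hdr | tcp hdr |
 *
 * element->addr points at the qeth_hdr and element->length covers the
 * whole run.  If the run would cross a page boundary, the code above
 * first skips to the start of the next context page, so every header
 * element stays physically contiguous.
 */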

static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
			u32 *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
	} else {
		while (len > 0) {
			if (eddp->frag < 0) {
				/* we're in skb->data */
				left_in_frag = (eddp->skb->len -
						eddp->skb->data_len) -
					       eddp->skb_offset;
				src = eddp->skb->data + eddp->skb_offset;
			} else {
				frag = &skb_shinfo(eddp->skb)->
						frags[eddp->frag];
				left_in_frag = frag->size - eddp->frag_offset;
				/* page_to_pfn() << PAGE_SHIFT yields a
				 * physical address; treating it as a pointer
				 * relies on the kernel's identity mapping of
				 * memory */
				src = (u8 *)(
					(page_to_pfn(frag->page) << PAGE_SHIFT) +
					frag->page_offset + eddp->frag_offset);
			}
			if (left_in_frag <= 0) {
				eddp->frag++;
				eddp->frag_offset = 0;
				continue;
			}
			copy_len = min(left_in_frag, len);
			memcpy(dst, src, copy_len);
			*hcsum = csum_partial(src, copy_len, *hcsum);
			dst += copy_len;
			eddp->frag_offset += copy_len;
			eddp->skb_offset += copy_len;
			len -= copy_len;
		}
	}
}
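
/*
 * csum_partial() can be chained: passing the running 32-bit sum of one
 * call as the third argument of the next yields the checksum of the
 * concatenated data, which is what lets the routine above accumulate
 * *hcsum across non-contiguous fragments.  A minimal sketch:
 *
 *	u32 sum = 0;
 *	sum = csum_partial(part1, len1, sum);
 *	sum = csum_partial(part2, len2, sum);
 *	u16 check = csum_fold(sum);	// fold to 16 bits at the end
 *
 * (Sketch only; strictly, csum_partial's intermediate value after an
 * odd-length chunk is byte-position dependent, a subtlety any such
 * chaining scheme inherits.)
 */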

static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
				  struct qeth_eddp_data *eddp, int data_len,
				  u32 hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	/* the first lap continues the element started by
	 * qeth_eddp_create_segment_hdrs(); its addr is already set */
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	/* the payload checksum is now complete; patch it into the TCP
	 * header previously copied into the context */
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
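
/*
 * How the SBAL fragment flags end up across a segment's elements when
 * the payload spills over n page boundaries:
 *
 *	element 0 (hdrs + data)	: SBAL_FLAGS_FIRST_FRAG
 *	elements 1 .. n-1	: SBAL_FLAGS_MIDDLE_FRAG
 *	element n (tail)	: SBAL_FLAGS_LAST_FRAG
 *
 * A segment that fits entirely into one element keeps flags == 0,
 * i.e. it is not marked as fragmented at all.
 */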

static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
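
/*
 * For reference, the IPv4 pseudo header summed by csum_tcpudp_nofold()
 * above (RFC 793; it is not part of the datagram itself):
 *
 *	saddr (4 bytes) + daddr (4 bytes) + zero (1) + protocol (1)
 *	+ TCP length (2) == thl + data_len
 *
 * The TCP header is folded in immediately (with check zeroed first);
 * the payload bytes are added chunk by chunk in
 * qeth_eddp_copy_data_tcp() and only then folded down to 16 bits.
 */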

static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 proto;
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum; note that, unlike the v4 variant
	 * above, neither the upper-layer length nor the TCP header itself
	 * is folded in here, and data_len goes unused */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	return phcsum;
}

static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(trace, 5, "eddpcrda");
	eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp) {
		memset(eddp, 0, sizeof(struct qeth_eddp_data));
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}

static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			     struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	u32 hcsum;

	QETH_DBF_TEXT(trace, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		eddp->skb_offset += sizeof(struct ethhdr);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
			eddp->skb_offset += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
	}
	tcph = eddp->skb->h.th;
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl -
						     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr; skb->protocol is network byte order, so
		 * the bare comparison with ETH_P_IP relies on s390 being
		 * big-endian (htons is a no-op here) */
		if (eddp->skb->protocol == ETH_P_IP) {
			eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
						 eddp->thl;
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
					     eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> take over the original header's
			 * FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == ETH_P_IP)
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == ETH_P_IP)
			eddp->nh.ip4.h.id++;
		eddp->th.tcp.h.seq += data_len;
	}
}
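
/*
 * A worked example of the loop above (illustrative numbers): with
 * tso_size == 1460 and 4380 payload bytes left in the skb, three
 * segments of data_len 1460 are produced.  For each one the IPv4
 * tot_len/check, the qdio header length and the TCP checksum are
 * recomputed; afterwards ip.id is bumped by one and tcp.seq by 1460,
 * so the wire sees three self-consistent packets:
 *
 *	seg 0: id = n,     seq = s
 *	seg 1: id = n + 1, seq = s + 1460
 *	seg 2: id = n + 2, seq = s + 2920	(carries FIN/PSH if set)
 */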

static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			   struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(trace, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == ETH_P_IP)
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
						  skb->nh.iph->ihl * 4,
						  (u8 *)skb->h.th,
						  skb->h.th->doff * 4);
	else
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
						  sizeof(struct ipv6hdr),
						  (u8 *)skb->h.th,
						  skb->h.th->doff * 4);

	if (eddp == NULL) {
		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		skb->mac.raw = skb->data + sizeof(struct qeth_hdr);
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = __constant_htons(skb->protocol);
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
#endif /* CONFIG_QETH_VLAN */
	}
	/* the FIN and PSH flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}

static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
			 int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(trace, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
					 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->tso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->tso_segs + 1);
}
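
/*
 * Two illustrative cases for the sizing above (PAGE_SIZE == 4096):
 *
 *  - tso_size 1400, hdr_len 60:  4096 / 1460 == 2 skbs per page, so one
 *    element per skb and roughly (tso_segs + 1) / 2 + 1 pages.
 *
 *  - tso_size 8192, hdr_len 60:  no whole skb fits in a page, so each
 *    skb needs (8252 + 4096) >> 12 == 3 elements, and 3 pages are
 *    reserved per segment.
 *
 * The "+ 1" on tso_segs errs on the side of over-allocation; the
 * estimate only has to be an upper bound for the allocations done in
 * qeth_eddp_create_context_generic().
 */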

static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
				 int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(trace, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
		return NULL;
	}
	memset(ctx, 0, sizeof(struct qeth_eddp_context));
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(trace, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)__get_free_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		memset(addr, 0, PAGE_SIZE);
		ctx->pages[i] = addr;
	}
	ctx->elements = kmalloc(ctx->num_elements *
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	memset(ctx->elements, 0,
	       ctx->num_elements * sizeof(struct qeth_eddp_element));
	/* reset num_elements; it is incremented again while the context is
	 * filled, to reflect the number of elements actually used */
	ctx->num_elements = 0;
	return ctx;
}

static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(trace, 5, "creddpct");
	if (skb->protocol == ETH_P_IP)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + skb->nh.iph->ihl * 4 +
			skb->h.th->doff * 4);
	else if (skb->protocol == ETH_P_IPV6)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			skb->h.th->doff * 4);
	else
		QETH_DBF_TEXT(trace, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(trace, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}

struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
			 struct qeth_hdr *qhdr)
{
	QETH_DBF_TEXT(trace, 5, "creddpc");
	switch (skb->sk->sk_protocol) {
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(trace, 2, "eddpinvp");
	}
	return NULL;
}
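
/*
 * Putting it all together -- a hedged sketch of how the qeth transmit
 * path is expected to drive this API for a TSO skb (anything outside
 * this file is an assumption for illustration):
 *
 *	struct qeth_eddp_context *ctx;
 *
 *	ctx = qeth_eddp_create_context(card, skb, qhdr);
 *	if (ctx == NULL)
 *		return -ENOMEM;			// fall back to non-EDDP send
 *	if (qeth_eddp_check_buffers_for_context(queue, ctx) < 0) {
 *		qeth_eddp_put_context(ctx);	// queue busy, try later
 *		return -EBUSY;
 *	}
 *	flush_cnt = qeth_eddp_fill_buffer(queue, ctx,
 *					  queue->next_buf_to_fill);
 *	qeth_eddp_put_context(ctx);		// drop the creator's ref
 *	// PRIMED buffers are flushed to qdio; on completion,
 *	// qeth_eddp_buf_release_contexts() drops the per-buffer refs.
 */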