/*
 * linux/drivers/s390/net/qeth_eddp.c
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 */
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"

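/*
 * Check whether the outbound queue has enough empty buffers, starting at
 * next_buf_to_fill, to hold all elements of the given EDDP context.
 * Returns the number of buffers needed, or -EBUSY if a buffer in the way
 * is not empty.
 */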
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
				    struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
		    QETH_QDIO_BUF_EMPTY)
			return -EBUSY;

		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}

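/*
 * Release all resources of an EDDP context: the data pages, the element
 * array and the context structure itself.  Called once the reference
 * count has dropped to zero.
 */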
static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	kfree(ctx->elements);
	kfree(ctx);
}

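/*
 * Reference counting for EDDP contexts: a context is shared by every
 * outbound buffer it has been filled into and is freed on the final put.
 */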
static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}

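/*
 * Drop all context references held by an outbound buffer; called when the
 * buffer is recycled.
 */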
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)) {
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}

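/*
 * Attach a context to a buffer's ctx_list and take a reference on it.
 * Returns 0 on success or -ENOMEM if the reference node cannot be
 * allocated.
 */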
static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
			  struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}

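/*
 * Copy the elements of a context into the outbound queue, starting at the
 * given buffer index and moving on to following buffers as they fill up.
 * Buffers that become full are set to PRIMED; the return value is the
 * number of buffers ready to be flushed, or -EBUSY if the first buffer is
 * not empty.
 */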
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
		      struct qeth_eddp_context *ctx,
		      int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen since we checked
			 * for available elements in
			 * qeth_eddp_check_buffers_for_context
			 */
			if (element == 0)
				return -EBUSY;
			else {
				PRINT_WARN("could only partially fill eddp "
					   "buffer!\n");
				goto out;
			}
		}
		/* check if the whole next skb fits into the current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
		     buf->next_element_to_fill)
		    < ctx->elements_per_skb) {
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to the buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				PRINT_WARN("no memory to create eddp context "
					   "reference\n");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into the buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			buffer->element[buf->next_element_to_fill].addr =
				ctx->elements[element].addr;
			buffer->element[buf->next_element_to_fill].length =
				ctx->elements[element].length;
			buffer->element[buf->next_element_to_fill].flags =
				ctx->elements[element].flags;
			buf->next_element_to_fill++;
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
#ifdef CONFIG_QETH_PERF_STATS
		queue->card->perf_stats.skbs_sent_pack++;
#endif
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
		    QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}

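/*
 * Write the headers of one segment (qeth header, optional MAC header and
 * VLAN tag, network header, transport header) into the context pages and
 * start a new buffer element describing them.  If the complete packet
 * would not fit into the current page, the headers are placed at the
 * start of the next page instead.
 */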
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
			      struct qeth_eddp_data *eddp, int data_len)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int pkt_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	pkt_len = eddp->nhl + eddp->thl + data_len;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		pkt_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		pkt_len += VLAN_HLEN;
	/* does the complete packet fit into the current page? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add MAC header in layer-2 mode */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}

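/*
 * Copy len bytes of TCP payload from the original skb (linear data and,
 * if present, page fragments) to dst, updating the running checksum in
 * *hcsum as the data is copied.  eddp->frag == -1 means we are still
 * consuming skb->data; afterwards eddp->frag indexes the current fragment.
 */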
static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
			u32 *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
	} else {
		while (len > 0) {
			if (eddp->frag < 0) {
				/* we're in skb->data */
				left_in_frag = (eddp->skb->len -
						eddp->skb->data_len) -
						eddp->skb_offset;
				src = eddp->skb->data + eddp->skb_offset;
			} else {
				frag = &skb_shinfo(eddp->skb)->
					frags[eddp->frag];
				left_in_frag = frag->size - eddp->frag_offset;
				/* note: using the physical frame address as
				 * a pointer relies on the kernel's 1:1
				 * mapping of physical memory on s390 */
				src = (u8 *)(
					(page_to_pfn(frag->page) << PAGE_SHIFT) +
					frag->page_offset + eddp->frag_offset);
			}
			if (left_in_frag <= 0) {
				eddp->frag++;
				eddp->frag_offset = 0;
				continue;
			}
			copy_len = min(left_in_frag, len);
			memcpy(dst, src, copy_len);
			*hcsum = csum_partial(src, copy_len, *hcsum);
			dst += copy_len;
			eddp->frag_offset += copy_len;
			eddp->skb_offset += copy_len;
			len -= copy_len;
		}
	}
}

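/*
 * Append data_len bytes of TCP payload to the current buffer element,
 * spilling over into new elements whenever a page boundary is crossed,
 * and set the SBAL fragment flags accordingly.  Finally the accumulated
 * checksum is folded into the TCP header that was copied into the
 * context by qeth_eddp_create_segment_hdrs().
 */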
static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
				  struct qeth_eddp_data *eddp, int data_len,
				  u32 hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}

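/*
 * Start the TCP checksum for an IPv4 segment: compute the pseudo header
 * checksum and fold in the TCP header; the payload is added later by
 * qeth_eddp_copy_data_tcp().
 */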
static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

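/*
 * Start the TCP checksum for an IPv6 segment from the address and
 * protocol fields of the pseudo header.  Note that, unlike the IPv4
 * variant, neither the payload length nor the TCP header itself is
 * folded in here; data_len is unused.
 */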
static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 proto;
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	return phcsum;
}

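/*
 * Allocate a qeth_eddp_data descriptor and snapshot the qeth, network
 * and transport headers of the original packet into it; these copies are
 * patched per segment during segmentation.  Returns NULL on allocation
 * failure.
 */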
static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(trace, 5, "eddpcrda");
	eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp) {
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}

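/*
 * Core segmentation loop: cut the TCP payload into gso_size sized chunks
 * and, for each segment, fix up the qeth, IP and TCP header copies
 * (packet length, IP checksum/ID, FIN/PSH on the last segment, sequence
 * number) before writing headers and data into the context.
 */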
static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			     struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	u32 hcsum;

	QETH_DBF_TEXT(trace, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		eddp->skb_offset += sizeof(struct ethhdr);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
			eddp->skb_offset += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
	}
	tcph = eddp->skb->h.th;
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl -
						     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr */
		if (eddp->skb->protocol == ETH_P_IP) {
			eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
						 eddp->thl;
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
					     eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == ETH_P_IP)
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == ETH_P_IP)
			eddp->nh.ip4.h.id++;
		eddp->th.tcp.h.seq += data_len;
	}
}

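/*
 * Entry point for TCP segmentation: build the eddp data descriptor from
 * the skb's actual headers (plus MAC header and VLAN tag in layer-2
 * mode), clear FIN/PSH so they are only set on the last segment, then
 * run the segmentation loop.  Returns 0 on success or -ENOMEM.
 */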
static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			   struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(trace, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == ETH_P_IP)
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
						  skb->nh.iph->ihl*4,
						  (u8 *)skb->h.th,
						  skb->h.th->doff*4);
	else
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
						  sizeof(struct ipv6hdr),
						  (u8 *)skb->h.th,
						  skb->h.th->doff*4);

	if (eddp == NULL) {
		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr);
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = __constant_htons(skb->protocol);
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
#endif /* CONFIG_QETH_VLAN */
	}
	/* the next flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}

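/*
 * Estimate how many pages and buffer elements the context will need for
 * all segments of the skb: if several segments fit into one page, each
 * segment uses a single element; otherwise each segment spans several
 * elements/pages.  The estimates err on the large side.
 */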
static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
			 int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(trace, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
					 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->gso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->gso_segs + 1);
}

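/*
 * Allocate an EDDP context for the skb: size it via
 * qeth_eddp_calc_num_pages(), then allocate the page array, the zeroed
 * data pages and the element array.  Returns NULL on any failure,
 * undoing the allocations made so far.
 */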
static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
				 int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(trace, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
		return NULL;
	}
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(trace, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)__get_free_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		memset(addr, 0, PAGE_SIZE);
		ctx->pages[i] = addr;
	}
	ctx->elements = kcalloc(ctx->num_elements,
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	/* reset num_elements; will be incremented again in fill_buffer to
	 * reflect the number of actually used elements */
	ctx->num_elements = 0;
	return ctx;
}

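/*
 * Create and fill an EDDP context for a TCP skb, computing the
 * per-segment header length from the IPv4 or IPv6 header.  Returns the
 * context with an initial reference, or NULL on failure.
 */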
static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(trace, 5, "creddpct");
	if (skb->protocol == ETH_P_IP)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
			skb->h.th->doff*4);
	else if (skb->protocol == ETH_P_IPV6)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			skb->h.th->doff*4);
	else
		QETH_DBF_TEXT(trace, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(trace, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}

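/*
 * Public entry point: create an EDDP context for the skb.  Only TCP
 * sockets are supported; anything else yields NULL.
 */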
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
			 struct qeth_hdr *qhdr)
{
	QETH_DBF_TEXT(trace, 5, "creddpc");
	switch (skb->sk->sk_protocol) {
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(trace, 2, "eddpinvp");
	}
	return NULL;
}