/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_req_wait",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

int peer2peer = 0;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is spec compliant. (default=1)");

static int markers_enabled = 0;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 32 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");

static unsigned int nocong = 0;
module_param(nocong, uint, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static unsigned int cong_flavor = 1;
module_param(cong_flavor, uint, 0644);
MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);
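/*
 * Endpoint timer handling.  The timer bounds MPA negotiation and connection
 * teardown; if it fires, ep_timeout() aborts the connection.  Starting the
 * timer takes a reference on the ep, which the timer path releases.
 */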
static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}
static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when it's not running!  ep %p state %u\n",
			__func__, ep, ep->com.state);
		WARN_ON(1);
		return;
	}
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}
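/*
 * Send wrappers that free the skb instead of posting it when the RNIC has
 * taken a fatal error, so callers don't leak buffers on a dead adapter.
 */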
static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
{
	int	error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = l2t_send(tdev, skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error;
}
int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
{
	int	error = 0;
	struct cxio_rdev *rdev;

	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		kfree_skb(skb);
		return -EIO;
	}
	error = cxgb3_ofld_send(tdev, skb);
	if (error < 0)
		kfree_skb(skb);
	return error;
}
static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_cxgb3_ofld_send(tdev, skb);
	return;
}
int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
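/*
 * Derive the effective MSS from the negotiated TCP options: the MTU table
 * entry for the connection, less 40 bytes of IP/TCP headers, and 12 more
 * if timestamps were negotiated.
 */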
static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __func__, ep, opt);
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}
static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}
static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}
static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}
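/*
 * Endpoint allocation and kref-based release.  An ep is freed via
 * put_ep() -> __free_ep() once the last reference is dropped; hardware
 * resources are returned only if RELEASE_RESOURCES was set.
 */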
static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}
void __free_ep(struct kref *kref)
{
	struct iwch_ep *ep;
	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
			  struct iwch_ep, com);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	}
	kfree(ep);
}
static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	put_ep(&ep->com);
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}
static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	return rt;
}
static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}
static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __func__, dev);
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __func__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(dev, skb);
}
static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
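/*
 * Build and send a CPL_ACT_OPEN_REQ to initiate the TCP half of an active
 * connection, encoding window scale, MSS index, and L2T/TX channel into
 * the opt0/opt2 words.
 */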
static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
	       V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
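/*
 * The three MPA messages (request, reply, reject) are sent in TCP
 * streaming mode via a TX_DATA work request, before the connection
 * transitions to FPDU mode.
 */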
static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __func__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}
static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __func__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return iwch_l2t_send(ep->com.tdev, skb, ep->l2t);
}
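/*
 * Active open established: the hardware TID is now valid, so switch the
 * connection from its atid to the hwtid and kick off MPA negotiation.
 */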
static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __func__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}
static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __func__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}
static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __func__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}
static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	iwch_cxgb3_ofld_send(ep->com.tdev, skb);
	return credits;
}
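/*
 * Accumulate and validate the peer's MPA start reply.  On success the QP
 * is moved to RTS; any validation failure aborts the connection and the
 * result is reported upward via connect_reply_upcall().
 */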
static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.initiator = 1;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
		iwch_post_zb_read(ep);
	}

	goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}
static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}
static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __func__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);
	unsigned long flags;
	int post_zb = 0;

	PDBG("%s ep %p credits %u\n", __func__, ep, credits);

	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p state %u\n",
		     __func__, ep, state_read(&ep->com));
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	BUG_ON(credits != 1);
	dst_confirm(ep->dst);
	if (!ep->mpa_skb) {
		PDBG("%s rdma_init wr_ack ep %p state %u\n",
		     __func__, ep, ep->com.state);
		if (ep->mpa_attr.initiator) {
			PDBG("%s initiator ep %p state %u\n",
			     __func__, ep, ep->com.state);
			if (peer2peer && ep->com.state == FPDU_MODE)
				post_zb = 1;
		} else {
			PDBG("%s responder ep %p state %u\n",
			     __func__, ep, ep->com.state);
			if (ep->com.state == MPA_REQ_RCVD) {
				ep->com.rpl_done = 1;
				wake_up(&ep->com.waitq);
			}
		}
	} else {
		PDBG("%s lsm ack ep %p state %u freeing skb\n",
		     __func__, ep, ep->com.state);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (post_zb)
		iwch_post_zb_read(ep);
	return CPL_RET_BUF_DONE;
}
static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/*
	 * We get 2 abort replies from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case ABORTING:
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		     __func__, ep, ep->com.state);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}
static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __func__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}
static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}
static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	return iwch_cxgb3_ofld_send(ep->com.tdev, skb);
}
static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}
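/*
 * Accept or reject an incoming connection request (CR) by replying to
 * the CPL_PASS_ACCEPT_REQ with the appropriate opt0l status.
 */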
static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) |
	       V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	iwch_l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}
static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __func__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type != T3A)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		iwch_cxgb3_ofld_send(tdev, skb);
	}
}
static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR "%s bad dst mac %pM\n",
			__func__, req->dst_mac);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}
static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __func__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}
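/*
 * Peer sent a FIN.  What happens next depends on where the connection
 * state machine currently sits; see the switch below.
 */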
static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
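/*
 * Peer or hardware abort.  Negative advice is ignored; otherwise the QP
 * (if bound) is moved to ERROR and a CPL_ABORT_RPL is returned to the HW.
 */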
static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int release = 0;
	unsigned long flags;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	/*
	 * We get 2 peer aborts from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		spin_unlock_irqrestore(&ep->com.lock, flags);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int release = 0;

	PDBG("%s ep %p\n", __func__, ep);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp,
					     ep->com.qp,
					     IWCH_QP_ATTR_NEXT_STATE,
					     &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}
/*
 * T3A does 3 things when a TERM is received:
 * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet
 * 2) generate an async event on the QP with the TERMINATE opcode
 * 3) post a TERMINATE opcode cqe into the associated CQ.
 *
 * For (1), we save the message in the qp for later consumer consumption.
 * For (2), we move the QP into TERMINATE, post a QP event and disconnect.
 * For (3), we toss the CQE in cxio_poll_cq().
 *
 * terminate() handles case (1)...
 */
static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	if (state_read(&ep->com) != FPDU_MODE)
		return CPL_RET_BUF_DONE;

	PDBG("%s ep %p\n", __func__, ep);
	skb_pull(skb, sizeof(struct cpl_rdma_terminate));
	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
				  skb->len);
	ep->com.qp->attr.terminate_msg_len = skb->len;
	ep->com.qp->attr.is_terminate_local = 0;
	return CPL_RET_BUF_DONE;
}
static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_rdma_ec_status *rep = cplhdr(skb);
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid,
	     rep->status);
	if (rep->status) {
		struct iwch_qp_attributes attrs;

		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
		       __func__, ep->hwtid);
		stop_ep_timer(ep);
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(ep->com.qp->rhp,
			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		abort_connection(ep, NULL, GFP_KERNEL);
	}
	return CPL_RET_BUF_DONE;
}
static void ep_timeout(unsigned long arg)
{
	struct iwch_ep *ep = (struct iwch_ep *)arg;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int abort = 1;

	spin_lock_irqsave(&ep->com.lock, flags);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		WARN_ON(1);
		printk(KERN_ERR "%s unexpected state ep %p state %u\n",
			__func__, ep, ep->com.state);
		abort = 0;
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (abort)
		abort_connection(ep, NULL, GFP_ATOMIC);
	put_ep(&ep->com);
}
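/*
 * Entry points called by the iWARP core (iw_cm) on behalf of the ULP.
 */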
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct iwch_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	put_ep(&ep->com);
	return 0;
}
int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	struct iwch_ep *ep = to_ep(cm_id);
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = IWCH_QP_ATTR_NEXT_STATE |
			     IWCH_QP_ATTR_LLP_STREAM_HANDLE |
			     IWCH_QP_ATTR_MPA_ATTR |
			     IWCH_QP_ATTR_MAX_IRD |
			     IWCH_QP_ATTR_MAX_ORD;

	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;

	/* if needed, wait for wr_ack */
	if (iwch_rqes_posted(qp)) {
		wait_event(ep->com.waitq, ep->com.rpl_done);
		err = ep->com.rpl_err;
		if (err)
			goto err1;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	put_ep(&ep->com);
	return err;
}
static int is_loopback_dst(struct iw_cm_id *cm_id)
{
	struct net_device *dev;

	dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
	if (!dev)
		return 0;
	dev_put(dev);
	return 1;
}
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_ep *ep;
	struct rtable *rt;
	int err = 0;

	if (is_loopback_dst(cm_id)) {
		err = -ENOSYS;
		goto out;
	}

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	ep->com.tdev = h->rdev.t3cdev_p;

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(h, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(h->rdev.t3cdev_p,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	/* get a l2t entry */
	ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour,
			     ep->dst->neighbour->dev);
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	state_set(&ep->com, CONNECTING);
	ep->tos = IPTOS_LOWDELAY;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb3_free_atid(ep->com.tdev, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
out:
	return err;
}
int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct iwch_dev *h = to_iwch_dev(cm_id->device);
	struct iwch_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	ep->com.tdev = h->rdev.t3cdev_p;
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	err = listen_start(ep);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	wait_event(ep->com.waitq, ep->com.rpl_done);
	err = ep->com.rpl_err;
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb3_free_stid(ep->com.tdev, ep->stid);
fail2:
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
fail1:
out:
	return err;
}
int iwch_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct iwch_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	ep->com.rpl_done = 0;
	ep->com.rpl_err = 0;
	err = listen_stop(ep);
	if (err)
		goto done;
	wait_event(ep->com.waitq, ep->com.rpl_done);
	cxgb3_free_stid(ep->com.tdev, ep->stid);
done:
	err = ep->com.rpl_err;
	cm_id->rem_ref(cm_id);
	put_ep(&ep->com);
	return err;
}
int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	unsigned long flags;
	int close = 0;
	int fatal = 0;
	struct t3cdev *tdev;
	struct cxio_rdev *rdev;

	spin_lock_irqsave(&ep->com.lock, flags);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	tdev = (struct t3cdev *)ep->com.tdev;
	rdev = (struct cxio_rdev *)tdev->ulp;
	if (cxio_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (close) {
		if (abrupt)
			ret = send_abort(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t)
{
	struct iwch_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	     l2t);
	dst_hold(new);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 * These are the real handlers that are called from the work queue.
 */
static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH]	= act_establish,
	[CPL_ACT_OPEN_RPL]	= act_open_rpl,
	[CPL_RX_DATA]		= rx_data,
	[CPL_TX_DMA_ACK]	= tx_ack,
	[CPL_ABORT_RPL_RSS]	= abort_rpl,
	[CPL_ABORT_RPL]		= abort_rpl,
	[CPL_PASS_OPEN_RPL]	= pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL]	= close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]	= pass_accept_req,
	[CPL_PASS_ESTABLISH]	= pass_establish,
	[CPL_PEER_CLOSE]	= peer_close,
	[CPL_ABORT_REQ_RSS]	= peer_abort,
	[CPL_CLOSE_CON_RPL]	= close_con_rpl,
	[CPL_RDMA_TERMINATE]	= terminate,
	[CPL_RDMA_EC_STATUS]	= ec_status,
};
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}
static DECLARE_WORK(skb_work, process_work);
static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep_common *epc = ctx;

	get_ep(epc);

	/*
	 * Save ctx and tdev in the skb->cb area.
	 */
	*((void **) skb->cb) = ctx;
	*((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
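/*
 * SET_TCB replies only need an error check; they carry no CM state.
 */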
static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	return CPL_RET_BUF_DONE;
}
/*
 * All upcalls from the T3 Core go to sched() to schedule the
 * processing on a work queue.
 */
cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH]	= sched,
	[CPL_ACT_OPEN_RPL]	= sched,
	[CPL_RX_DATA]		= sched,
	[CPL_TX_DMA_ACK]	= sched,
	[CPL_ABORT_RPL_RSS]	= sched,
	[CPL_ABORT_RPL]		= sched,
	[CPL_PASS_OPEN_RPL]	= sched,
	[CPL_CLOSE_LISTSRV_RPL]	= sched,
	[CPL_PASS_ACCEPT_REQ]	= sched,
	[CPL_PASS_ESTABLISH]	= sched,
	[CPL_PEER_CLOSE]	= sched,
	[CPL_CLOSE_CON_RPL]	= sched,
	[CPL_ABORT_REQ_RSS]	= sched,
	[CPL_RDMA_TERMINATE]	= sched,
	[CPL_RDMA_EC_STATUS]	= sched,
	[CPL_SET_TCB_RPL]	= set_tcb_rpl,
};
int __init iwch_cm_init(void)
{
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb3");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void __exit iwch_cm_term(void)
{
	flush_workqueue(workq);
	destroy_workqueue(workq);
}