/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "iw_cxgb4.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}
static void stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		WARN(1, "%s timer stopped when it's not running! "
		     "ep %p state %u\n", __func__, ep, ep->com.state);
		return;
	}
	del_timer_sync(&ep->timer);
	c4iw_put_ep(&ep->com);
}
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}
static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}
static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	return rt;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}
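/*
 * send_flowc() posts the FW_FLOWC_WR that hands the firmware the
 * per-connection parameters (PF/VF, channel, egress/ingress queues,
 * initial sequence numbers, send buffer and MSS) for this offloaded
 * TCP stream.  It is issued once per connection, before any MPA
 * streaming-mode data is sent.
 */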
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
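/*
 * The widths below describe how the hardware packs the compressed
 * filter tuple ("ntuple") used to match offloaded connections: port
 * plus FCoE in the low bits, then (depending on the filter mode)
 * either the VLAN tag or the VF/PF identity taken from the VIID.
 */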
#define VLAN_NONE 0xfff
#define FILTER_SEL_VLAN_NONE 0xffff
#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
#define FILTER_SEL_WIDTH_VIN_P_FC \
	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
#define FILTER_SEL_WIDTH_TAG_P_FC \
	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)

static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
				  struct l2t_entry *l2t)
{
	unsigned int ntuple = 0;
	u32 viid;

	switch (dev->rdev.lldi.filt_mode) {

	/* default filter mode */
	case HW_TPL_FR_MT_PR_IV_P_FC:
		if (l2t->vlan == VLAN_NONE)
			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
		else {
			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
		}
		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
		break;
	case HW_TPL_FR_MT_PR_OV_P_FC: {
		viid = cxgb4_port_viid(l2t->neigh->dev);

		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
		break;
	}
	default:
		break;
	}
	return ntuple;
}
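/*
 * Initiate an active open by posting a CPL_ACT_OPEN_REQ for this
 * endpoint.  opt0/opt2 encode the TCP behaviour (keepalive, window
 * scale, MSS index, timestamps, SACK, ECN, receive buffer size)
 * selected by the module parameters above.
 */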
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       CCTRL_ECN(enable_ecn) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(
		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0 = cpu_to_be64(opt0);
	req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
	req->opt2 = cpu_to_be32(opt2);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
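/*
 * Build and send the MPA start request as immediate data inside a
 * FW_OFLD_TX_DATA_WR.  For mpa_rev_to_use == 2, an enhanced-RDMA-
 * connection header carrying IRD/ORD and the RTR type is placed in
 * front of any ULP private data.
 */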
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	return;
}
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb. This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb4_free_atid(t, atid);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);

	return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	return send_abort(ep, skb, gfp);
}
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
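/*
 * process_mpa_reply() handles streaming-mode data received while in
 * MPA_REQ_SENT.  The reply may arrive split across several CPL_RX_DATA
 * messages, so it is accumulated in ep->mpa_pkt and only parsed once
 * the full header and private data are present; on success the QP is
 * moved to RTS.
 */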
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer. If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
					MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
			(ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
				__func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}
	goto out;
err:
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}
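/*
 * process_mpa_request() mirrors process_mpa_reply() for the passive
 * side: accumulate the peer's MPA start request in ep->mpa_pkt,
 * validate it, record the negotiated attributes, and hand the
 * connection request up to the ULP via connect_request_upcall().
 */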
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	stop_ep_timer(ep);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			if (ntohs(mpa_v2_params->ird) &
					MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %u\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}
	return 0;
}
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}
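/*
 * Retry path used when an active open fails with CPL_ERR_TCAM_FULL:
 * a FW_OFLD_CONNECTION_WR asks the firmware to set up the offloaded
 * connection itself, presumably avoiding the need for a new TCAM
 * entry.  See the CPL_ERR_TCAM_FULL case in act_open_rpl() below.
 */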
static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
				     ep->l2t));
	req->le.lport = ep->com.local_addr.sin_port;
	req->le.pport = ep->com.remote_addr.sin_port;
	req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
	req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
			V_FW_OFLD_CONNECTION_WR_ASTID(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
	req->tcb.tx_max = jiffies;
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	req->tcb.opt0 = TCAM_BYPASS(1) |
		(nocong ? NO_CONG(1) : 0) |
		KEEP_ALIVE(1) |
		DELACK(1) |
		WND_SCALE(wscale) |
		MSS_IDX(mtu_idx) |
		L2T_IDX(ep->l2t->idx) |
		TX_CHAN(ep->tx_chan) |
		SMAC_SEL(ep->smac_idx) |
		DSCP(ep->tos) |
		ULP_MODE(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ(rcv_win >> 10);
	req->tcb.opt2 = PACE(1) |
		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL(0) |
		CCTRL_ECN(enable_ecn) |
		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		req->tcb.opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= WND_SCALE_EN(1);
	req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (status == CPL_ERR_RTX_NEG_ADVICE) {
		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
		       atid);
		return 0;
	}

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		send_fw_act_open_req(ep,
			GET_TID_TID(GET_AOPEN_ATID(ntohl(rpl->atid_status))));
		return 0;
	default:
		printk(KERN_INFO MOD "Active open failure - "
		       "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
		       atid, status, status2errno(status),
		       &ep->com.local_addr.sin_addr.s_addr,
		       ntohs(ep->com.local_addr.sin_port),
		       &ep->com.remote_addr.sin_addr.s_addr,
		       ntohs(ep->com.remote_addr.sin_port));
		break;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

out:
	return 0;
}
static int listen_stop(struct c4iw_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listsvr_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
						    ep->stid));
	req->reply_ctrl = cpu_to_be16(
			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}
static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos >> 2) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
			G_IP_HDR_LEN(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN(1);
	}

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);
	release_tid(&dev->rdev, hwtid, skb);
	return;
}
static void get_4tuple(struct cpl_pass_accept_req *req,
		       __be32 *local_ip, __be32 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
	     ntohs(tcp->dest));

	*peer_ip = ip->saddr;
	*local_ip = ip->daddr;
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}
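/*
 * import_ep() derives the L2T entry, MTU, and TX/RX queue selections
 * for an endpoint from its destination route.  Loopback routes are
 * mapped back to the port device that owns the peer IP via
 * ip_dev_find().
 */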
static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
		     struct c4iw_dev *cdev, bool clear_mpa_v1)
{
	struct neighbour *n;
	int err, step;

	n = dst_neigh_lookup(dst, &peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		struct net_device *pdev;

		pdev = ip_dev_find(&init_net, peer_ip);
		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, n->dev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(n->dev);
		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(n->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(n->dev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(n->dev) * step];
	}
	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}
	err = 0;
out:
	rcu_read_unlock();
	neigh_release(n);

	return err;
}
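/*
 * pass_accept_req() handles a CPL_PASS_ACCEPT_REQ on a listening
 * endpoint: resolve a route back to the peer, allocate and import a
 * child endpoint, clamp its MTU to the peer's advertised MSS, and
 * send the accept reply (or release the tid on any failure).
 */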
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct rtable *rt;
	__be32 local_ip, peer_ip = 0;
	__be16 local_port, peer_port;
	int err;
	u16 peer_mss = ntohs(req->tcpopt.mss);

	parent_ep = lookup_stid(t, stid);
	if (!parent_ep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}
	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

	PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d "
	     "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
	     ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
	     ntohs(peer_port), peer_mss);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/* Find output route */
	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
			GET_POPEN_TOS(ntohl(req->tos_stid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, peer_ip, dst, dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	if (peer_mss && child_ep->mtu > (peer_mss + 40))
		child_ep->mtu = peer_mss + 40;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = local_port;
	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	accept_cr(child_ep, peer_ip, skb, req);
	goto out;
reject:
	reject_cr(dev, hwtid, peer_ip, skb);
out:
	return 0;
}
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
	     ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);

	return 0;
}
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}
/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
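/*
 * Re-initiate an active open after the previous attempt has been torn
 * down, e.g. when falling back from MPA v2 to MPA v1.  The upper layer
 * is unaware of the retry and still sees the original connect attempt.
 */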
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct rtable *rt;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(ep->com.dev,
			ep->com.cm_id->local_addr.sin_addr.s_addr,
			ep->com.cm_id->remote_addr.sin_addr.s_addr,
			ep->com.cm_id->local_addr.sin_port,
			ep->com.cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
			ep->dst, ep->com.dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
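/*
 * peer_abort() handles an incoming CPL_ABORT_REQ_RSS.  Negative-advice
 * aborts are ignored; otherwise the QP is moved to ERROR as needed, an
 * ABORT_RPL (no RST) is returned to the hardware, and, if this endpoint
 * was probing MPA v2, the connection may be retried with MPA v1.
 */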
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT.
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		if (mpa_rev == 2 && ep->tried_with_mpa_v1)
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * We just don't send a notification upwards because
			 * we want to retry with mpa_v1 without the upper
			 * layers even knowing it.
			 *
			 * Do some housekeeping so as to re-initiate the
			 * connection.
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);

	/* retry with mpa-v1 */
	if (ep && ep->retry_with_mpa_v1) {
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

	return 0;
}

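/*
 * A note on the reply built in peer_abort(): CPL_ABORT_RPL with cmd
 * CPL_ABORT_NO_RST asks the hardware to quiesce the TID without
 * emitting a TCP RST, since the abort came from the peer and the
 * connection is already gone on the wire.
 */
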
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	return 0;
}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = lookup_tid(t, tid);
	BUG_ON(!ep);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
		       tid);

	return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}

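/*
 * A note on fw4_ack(): the driver has at most one streaming-mode send
 * outstanding at a time (the MPA request or reply tracked in
 * ep->mpa_skb), so any nonzero credit return implies that message has
 * left the wire and its skb can be freed; zero-credit updates are
 * logged and otherwise ignored.
 */
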
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct c4iw_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	c4iw_put_ep(&ep->com);
	return 0;
}

int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			ep->ird = conn_param->ird;
			ep->ord = conn_param->ord;
			send_mpa_reject(ep, conn_param->private_data,
					conn_param->private_data_len);
			abort_connection(ep, NULL, GFP_KERNEL);
			err = -ENOMEM;
			goto err;
		}
		if (conn_param->ird > ep->ord) {
			if (!ep->ord)
				conn_param->ird = 1;
			else {
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version != 2)
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	c4iw_put_ep(&ep->com);
	return err;
}

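/*
 * A note on the MPA v2 checks in c4iw_accept_cr(): for an enhanced
 * RDMA connection, ep->ird/ep->ord hold the limits taken from the
 * peer's MPA request, while conn_param carries what the local ULP
 * asked for. A local ORD exceeding the peer's offer is rejected
 * outright; a local IRD above the peer's ORD is only tolerated in the
 * zero-read corner case, where the IRD is forced to 1 (apparently
 * just enough for a peer2peer RTR read).
 */
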
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	struct rtable *rt;
	int err = 0;

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
	     ntohl(cm_id->local_addr.sin_addr.s_addr),
	     ntohs(cm_id->local_addr.sin_port),
	     ntohl(cm_id->remote_addr.sin_addr.s_addr),
	     ntohs(cm_id->remote_addr.sin_port));

	rt = find_route(dev,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
			ep->dst, ep->com.dev, true);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}

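/*
 * The active-open path above needs three resources before the SYN can
 * be offloaded: an active TID naming the connection, a route, and an
 * L2T (MAC/VLAN) entry. A compressed sketch of the happy path using
 * the same helpers (laddr/raddr/lport/rport abbreviate the cm_id
 * sockaddr fields):
 */
#if 0
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); /* hw handle */
	rt = find_route(dev, laddr, raddr, lport, rport, 0);  /* L3 path   */
	err = import_ep(ep, raddr, ep->dst, dev, true);       /* L2T entry */
	err = send_connect(ep);                               /* fire SYN  */
#endif
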
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	if (dev->rdev.lldi.enable_fw_ofld_conn)
		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
	else
		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);

	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	state_set(&ep->com, LISTEN);
	if (dev->rdev.lldi.enable_fw_ofld_conn) {
		do {
			err = cxgb4_create_server_filter(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				ep->com.local_addr.sin_addr.s_addr,
				ep->com.local_addr.sin_port,
				ep->com.dev->rdev.lldi.rxq_ids[0]);
			if (err == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(usecs_to_jiffies(100));
			}
		} while (err == -EBUSY);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
				ep->stid, ep->com.local_addr.sin_addr.s_addr,
				ep->com.local_addr.sin_port, 0,
				ep->com.dev->rdev.lldi.rxq_ids[0]);
		if (!err)
			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
						  &ep->com.wr_wait,
						  0, 0, __func__);
	}
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
	pr_err("%s cxgb4_create_server/filter failed err %d " \
	       "stid %d laddr %08x lport %d\n", \
	       __func__, err, ep->stid,
	       ntohl(ep->com.local_addr.sin_addr.s_addr),
	       ntohs(ep->com.local_addr.sin_port));
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}

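/*
 * A note on the listen setup above: the -EBUSY loop retries filter
 * creation with a ~100us backoff, since cxgb4_create_server_filter()
 * can fail transiently while the underlying mailbox is busy. Whether
 * the server TID comes from the sftid or stid pool depends on
 * enable_fw_ofld_conn, which also selects the filter-based passive
 * open path handled by rx_pkt() below.
 */
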
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
		err = cxgb4_remove_server_filter(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = listen_stop(ep);
		if (err)
			goto done;
		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
					  0, 0, __func__);
	}
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}

int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			close_complete_upcall(ep);
			ret = send_abort(ep, NULL, gfp);
		} else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}

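/*
 * Callers pick the teardown flavor via the abrupt argument: a graceful
 * close sends a half-close (FIN) and relies on the endpoint timer to
 * bound the wait for the peer, while an abrupt close sends an abort
 * (RST) and completes the close upcall immediately. A minimal sketch
 * of the two call sites' intent:
 */
#if 0
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);	/* graceful: half-close */
	c4iw_ep_disconnect(ep, 1, GFP_KERNEL);	/* abrupt: abort now */
#endif
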
static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct c4iw_ep *ep;

	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
	if (!ep)
		return;

	switch (req->retval) {
	case FW_ENOMEM:
	case FW_EADDRINUSE:
		PDBG("%s ofld conn wr ret %d\n", __func__, req->retval);
		break;
	default:
		pr_info("%s unexpected ofld conn wr retval %d\n",
			__func__, req->retval);
		break;
	}
	connect_reply_upcall(ep, status2errno(req->retval));
}

static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct sk_buff *rpl_skb;
	struct cpl_pass_accept_req *cpl;
	int ret;

	rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
	BUG_ON(!rpl_skb);
	if (req->retval) {
		PDBG("%s passive open failure %d\n", __func__, req->retval);
		kfree_skb(rpl_skb);
	} else {
		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
						      htonl(req->tid)));
		ret = pass_accept_req(dev, rpl_skb);
		if (!ret)
			kfree_skb(rpl_skb);
	}
	return;
}

static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

	switch (rpl->type) {
	case FW6_TYPE_CQE:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
		switch (req->t_state) {
		case TCP_SYN_SENT:
			active_ofld_conn_reply(dev, skb, req);
			break;
		case TCP_SYN_RECV:
			passive_ofld_conn_reply(dev, skb, req);
			break;
		default:
			pr_err("%s unexpected ofld conn wr state %d\n",
			       __func__, req->t_state);
			break;
		}
		break;
	}
	return 0;
}

static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	u32 l2info;
	u16 vlantag, len, hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;

	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = cpl->vlan;
	len = cpl->len;
	l2info = cpl->l2info;
	hdr_len = cpl->hdr_len;
	intf = cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate the cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
			 V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
			 F_SYN_XACT_MATCH);
	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
			   V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
			   V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
			   V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
	req->vlan = vlantag;
	req->len = len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
				    PASS_OPEN_TOS(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
	return;
}

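/*
 * A note on the synthesis above: tcp_parse_options() is the stack's
 * own SYN option parser, so the synthesized cpl_pass_accept_req
 * carries exactly the MSS, window scale, timestamp and SACK options
 * the peer advertised, as if the hardware had produced the message.
 * The TID in the opcode word is left at 0 here; it is patched in once
 * firmware assigns one (see passive_ofld_conn_reply() above).
 */
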
static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
	req->le.filter = filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
		      V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
		      V_FW_OFLD_CONNECTION_WR_ASTID(
		      GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2, which the firmware uses to send us
	 * the wr response.
	 */
	req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));

	/*
	 * We initialize the MSS index in the TCB to 0xF so that, when the
	 * driver later sends the cpl_pass_accept_rpl, the TCB picks up the
	 * correct value; if it were initialized to 0, TP would ignore any
	 * MSS index > 0.
	 */
	req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
	req->cookie = cpu_to_be64((u64)skb);

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
}

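/*
 * A note on the cookie set above: req->cookie carries the pointer to
 * the original SYN skb across the firmware round trip. When the
 * FW_OFLD_CONNECTION_WR reply comes back, passive_ofld_conn_reply()
 * recovers the skb from the cookie, patches the firmware-assigned TID
 * into the synthesized cpl_pass_accept_req, and replays it through
 * the regular pass_accept_req() path.
 */
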
/*
 * Handler for CPL_RX_PKT messages, which arrive when a filter is used
 * instead of a server TID to redirect a SYN packet. Packets that hit
 * the filter are redirected to the offload queue, and the driver then
 * tries to establish the connection using a firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct rtable *rt;
	struct c4iw_ep *lep;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
	 */
	stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
					+ dev->rdev.lldi.tids->nstids;

	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__,
		     stid);
		goto reject;
	}

	if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			iph->tos);
	if (!rt) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;
	neigh = dst_neigh_lookup_skb(dst, skb);

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  neigh->dev, 0);
		pi = (struct port_info *)netdev_priv(neigh->dev);
		tx_chan = cxgb4_port_chan(neigh->dev);
	}
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = htons(tcph->window);

	/* Calculate filter portion for LE region. */
	filter = cpu_to_be32(select_ntuple(dev, dst, e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	return 0;
}

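/*
 * Putting the pieces together, the filter-based passive open proceeds
 * as follows (a sketch of the message flow, not new code):
 *
 *   SYN hits filter -> CPL_RX_PKT -> rx_pkt()
 *     -> build_cpl_pass_accept_req()  (synthesize request, TID still 0)
 *     -> send_fw_pass_open_req()      (FW_OFLD_CONNECTION_WR, cookie = skb)
 *   firmware reply -> deferred_fw6_msg() -> passive_ofld_conn_reply()
 *     -> patch TID into cookie skb -> pass_accept_req() as usual
 */
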
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt
};

static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
			__func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
	}
	process_timedout_eps();
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;

	spin_lock(&timeout_lock);
	list_add_tail(&ep->entry, &timeout_list);
	spin_unlock(&timeout_lock);
	queue_work(workq, &skb_work);
}

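/*
 * ep_timeout() runs in timer (softirq) context, so it only queues the
 * endpoint on timeout_list under the spinlock and kicks the work
 * queue; the sleepable teardown in process_timeout() then runs from
 * process context. A minimal sketch of arming an endpoint timer
 * against this handler (the pattern the driver's start_ep_timer()
 * helper, defined earlier in this file, follows):
 */
#if 0
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
#endif
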
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}

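/*
 * A note on the cb usage above: sched() parks the c4iw_dev pointer one
 * pointer-slot into skb->cb, leaving the first slot untouched, and
 * process_work() reads it back from the same offset before dispatching
 * on the CPL opcode, so the two sides must stay in sync.
 */
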
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}

static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		kfree_skb(skb);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 */
	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	sched(dev, skb);
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};

int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void __exit c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}