/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "iw_cxgb4.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		 " compliant (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
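
/*
 * Arm the endpoint timer.  A reference is taken on the endpoint the
 * first time the timer is started so the ep cannot be freed while a
 * timeout is pending; restarting an already-pending timer reuses
 * that reference.
 */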
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}
static void stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		WARN(1, "%s timer stopped when it's not running! "
		     "ep %p state %u\n", __func__, ep, ep->com.state);
		return;
	}
	del_timer_sync(&ep->timer);
	c4iw_put_ep(&ep->com);
}
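
/*
 * Thin wrappers around the cxgb4 LLD transmit paths.  If the device
 * has hit a fatal error, the skb is dropped instead of being queued
 * to dead hardware.
 */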
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}
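
/*
 * Derive the effective MSS for the connection from the negotiated
 * TCP options: start from the firmware MTU table entry and subtract
 * the fixed IP/TCP header overhead (40 bytes).
 */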
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}
static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}
static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}
static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}
static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	return rt;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}
/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}
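
/*
 * Send a FW_FLOWC_WR work request to prime the firmware with the
 * per-flow state (channel, queues, sequence numbers, send buffer and
 * MSS) before any offloaded transmits on this connection.
 */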
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
#define VLAN_NONE 0xfff
#define FILTER_SEL_VLAN_NONE 0xffff
#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
#define FILTER_SEL_WIDTH_VIN_P_FC \
	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
#define FILTER_SEL_WIDTH_TAG_P_FC \
	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)

static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
				  struct l2t_entry *l2t)
{
	unsigned int ntuple = 0;
	u32 viid;

	switch (dev->rdev.lldi.filt_mode) {

	/* default filter mode */
	case HW_TPL_FR_MT_PR_IV_P_FC:
		if (l2t->vlan == VLAN_NONE)
			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
		else {
			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
		}
		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
		break;
	case HW_TPL_FR_MT_PR_OV_P_FC: {
		viid = cxgb4_port_viid(l2t->neigh->dev);

		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
		break;
	}
	default:
		break;
	}

	return ntuple;
}
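
/*
 * Build and send the CPL_ACT_OPEN_REQ that initiates an active-open
 * TCP connection, encoding the connection options in opt0/opt2.
 */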
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       CCTRL_ECN(enable_ecn) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(
		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0 = cpu_to_be64(opt0);
	req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
	req->opt2 = cpu_to_be32(opt2);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
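
/*
 * Send the MPA start request in streaming mode.  For MPA revision 2
 * the IRD/ORD and RTR parameters are carried in an mpa_v2_conn_params
 * block that precedes any private data.
 */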
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	return;
}
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
				  FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb4_free_atid(t, atid);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);

	return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	return send_abort(ep, skb, gfp);
}
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
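
/*
 * Return RX credits to the hardware for stream data consumed by the
 * host, using the configured delayed-ack mode.
 */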
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
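
/*
 * Handle the peer's MPA start reply.  Stream data is accumulated in
 * ep->mpa_pkt until the complete reply (header plus private data) has
 * arrived; the header is then validated and the QP moved to RTS.
 */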
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
				insuff_ird = 1;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}
	goto out;
err:
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}
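
/*
 * Passive-side counterpart of process_mpa_reply(): accumulate and
 * validate the peer's MPA start request, then deliver a connect
 * request upcall to the ULP.
 */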
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	stop_ep_timer(ep);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
				if (peer2peer) {
					if (ntohs(mpa_v2_params->ord) &
					    MPA_V2_RDMA_WRITE_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
					else if (ntohs(mpa_v2_params->ord) &
						 MPA_V2_RDMA_READ_RTR)
						ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %u\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}
	return 0;
}
static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}
static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
				     ep->l2t));
	req->le.lport = ep->com.local_addr.sin_port;
	req->le.pport = ep->com.remote_addr.sin_port;
	req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
	req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
	req->tcb.t_state_to_astid =
		htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
		      V_FW_OFLD_CONNECTION_WR_ASTID(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
		htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
	req->tcb.tx_max = jiffies;
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	req->tcb.opt0 = TCAM_BYPASS(1) |
		(nocong ? NO_CONG(1) : 0) |
		KEEP_ALIVE(1) |
		DELACK(1) |
		WND_SCALE(wscale) |
		MSS_IDX(mtu_idx) |
		L2T_IDX(ep->l2t->idx) |
		TX_CHAN(ep->tx_chan) |
		SMAC_SEL(ep->smac_idx) |
		DSCP(ep->tos) |
		ULP_MODE(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ(rcv_win >> 10);
	req->tcb.opt2 = PACE(1) |
		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL(0) |
		CCTRL_ECN(enable_ecn) |
		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		req->tcb.opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= WND_SCALE_EN(1);
	req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}
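
/*
 * Handle the reply to an active open.  On CPL_ERR_TCAM_FULL the open
 * is retried via a firmware offload-connection work request; other
 * failures are reported to the ULP.
 */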
static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
					ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (status == CPL_ERR_RTX_NEG_ADVICE) {
		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
		       atid);
		return 0;
	}

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		send_fw_act_open_req(ep,
			GET_TID_TID(GET_AOPEN_ATID(ntohl(rpl->atid_status))));
		return 0;
	default:
		printk(KERN_INFO MOD "Active open failure - "
		       "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
		       atid, status, status2errno(status),
		       &ep->com.local_addr.sin_addr.s_addr,
		       ntohs(ep->com.local_addr.sin_port),
		       &ep->com.remote_addr.sin_addr.s_addr,
		       ntohs(ep->com.remote_addr.sin_port));
		break;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
		return 0;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

	return 0;
}
static int listen_stop(struct c4iw_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listsvr_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
						    ep->stid));
	req->reply_ctrl = cpu_to_be16(
			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}
static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = (nocong ? NO_CONG(1) : 0) |
	       KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos >> 2) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
		       G_IP_HDR_LEN(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN(1);
	}

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);
	release_tid(&dev->rdev, hwtid, skb);
	return;
}
static void get_4tuple(struct cpl_pass_accept_req *req,
		       __be32 *local_ip, __be32 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
	     ntohs(tcp->dest));

	*peer_ip = ip->saddr;
	*local_ip = ip->daddr;
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}
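
/*
 * Resolve the L2 entry and transmit/receive queue assignments for an
 * endpoint from its destination.  Loopback destinations are mapped
 * through the port's net_device rather than the neighbour entry.
 */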
static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
		     struct c4iw_dev *cdev, bool clear_mpa_v1)
{
	struct neighbour *n;
	int err, step;

	n = dst_neigh_lookup(dst, &peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		struct net_device *pdev;

		pdev = ip_dev_find(&init_net, peer_ip);
		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, n->dev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(n->dev);
		ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(n->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(n->dev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(n->dev) * step];
	}
	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}
	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct rtable *rt;
	__be32 local_ip, peer_ip;
	__be16 local_port, peer_port;
	int err;

	parent_ep = lookup_stid(t, stid);
	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/* Find output route */
	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
			GET_POPEN_TOS(ntohl(req->tos_stid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, peer_ip, dst, dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = local_port;
	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	accept_cr(child_ep, peer_ip, skb, req);
	goto out;
reject:
	reject_cr(dev, hwtid, peer_ip, skb);
out:
	return 0;
}
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);

	return 0;
}
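
/*
 * Handle a peer close (FIN) for the connection, driving the endpoint
 * state machine and the associated QP towards CLOSING/IDLE.
 */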
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}
/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct rtable *rt;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	/* find a route */
	rt = find_route(ep->com.dev,
			ep->com.cm_id->local_addr.sin_addr.s_addr,
			ep->com.cm_id->remote_addr.sin_addr.s_addr,
			ep->com.cm_id->local_addr.sin_port,
			ep->com.cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
			ep->dst, ep->com.dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
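
/*
 * Handle a peer abort.  Negative advice is ignored; otherwise the QP
 * is moved to ERROR, an abort reply is sent, and a failed MPA v2
 * negotiation is retried with MPA v1 via c4iw_reconnect().
 */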
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		if (mpa_rev == 2 && ep->tried_with_mpa_v1)
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * we just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * do some housekeeping so as to re-initiate the
			 * connection
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		goto out;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);

	/* retry with mpa-v1 */
	if (ep && ep->retry_with_mpa_v1) {
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

	return 0;
}
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int release = 0;
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	return 0;
}
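
/*
 * Handle an incoming RDMA_TERMINATE CPL by moving the associated QP
 * into TERMINATE state; if no endpoint/QP is found, just log it.
 */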
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = lookup_tid(t, tid);
	BUG_ON(!ep);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
		       tid);

	return 0;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}
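
/*
 * iw_cm reject handler.  For MPA rev 0 the connection is simply
 * aborted; otherwise an MPA reject message carrying the private data
 * is sent, followed by an orderly disconnect.
 */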
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct c4iw_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	c4iw_put_ep(&ep->com);
	return 0;
}
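
/*
 * iw_cm accept handler: validate the requested IRD/ORD against the
 * adapter limit and the values negotiated in the MPA exchange, bind
 * the QP to the endpoint, move it to RTS, and send the MPA reply.
 */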
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			ep->ird = conn_param->ird;
			ep->ord = conn_param->ord;
			send_mpa_reject(ep, conn_param->private_data,
					conn_param->private_data_len);
			abort_connection(ep, NULL, GFP_KERNEL);
			err = -ENOMEM;
			goto err;
		}
		if (conn_param->ird > ep->ord) {
			if (!ep->ord)
				conn_param->ird = 1;
			else {
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version != 2)
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	c4iw_put_ep(&ep->com);
	return err;
}
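
/*
 * iw_cm active-open handler.  The sequence is: allocate an endpoint
 * and an active TID, resolve a route to the peer, import the L2T
 * entry, then send the connect request to the hardware.  Each failure
 * path unwinds the steps taken so far.
 */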
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	struct rtable *rt;
	int err = 0;

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
	     ntohl(cm_id->local_addr.sin_addr.s_addr),
	     ntohs(cm_id->local_addr.sin_port),
	     ntohl(cm_id->remote_addr.sin_addr.s_addr),
	     ntohs(cm_id->remote_addr.sin_port));

	/* find a route */
	rt = find_route(dev,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
			ep->dst, ep->com.dev, true);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
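
/*
 * iw_cm listen handler: allocate a listening endpoint and a server
 * TID, ask the hardware to create the server, and wait for the
 * PASS_OPEN_RPL before returning.
 */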
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
				  ep->com.local_addr.sin_addr.s_addr,
				  ep->com.local_addr.sin_port,
				  ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
				  __func__);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}
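
/*
 * iw_cm listen teardown: mark the endpoint DEAD, stop the hardware
 * listener, wait for the CLOSE_LISTSRV_RPL, and free the server TID.
 */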
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = listen_stop(ep);
	if (err)
		goto done;
	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
				  __func__);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}
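
/*
 * Initiate a close of the connection, either gracefully (half-close)
 * or abruptly (abort), depending on 'abrupt' and the current endpoint
 * state.  On a fatal device error the endpoint is simply marked DEAD
 * and its resources released.
 */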
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			close_complete_upcall(ep);
			ret = send_abort(ep, NULL, gfp);
		} else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);

	c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
	return 0;
}
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = async_event
};
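
/*
 * Handle an endpoint whose CM timer has fired: depending on where the
 * connection was in its life cycle, notify the upper layer and/or move
 * the QP to ERROR, then abort the connection.
 */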
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
			__func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
	}
	process_timedout_eps();
}
static DECLARE_WORK(skb_work, process_work);
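
/*
 * Timer callback for endpoint timeouts.  It runs in softirq context,
 * so the endpoint is only queued on timeout_list here; the actual
 * teardown happens in process_timedout_eps() on the work queue.
 */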
static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;

	spin_lock(&timeout_lock);
	list_add_tail(&ep->entry, &timeout_list);
	spin_unlock(&timeout_lock);
	queue_work(workq, &skb_work);
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}
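
/*
 * Dispatch FW6 firmware messages.  WR replies wake up the waiter that
 * issued the work request; OFLD_CONNECTION_WR replies that failed with
 * ENOMEM or EADDRINUSE while in SYN_SENT are used to retry the active
 * open.
 */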
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
	struct c4iw_ep *ep;
	u8 opcode;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long)
			   rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		opcode = *(const u8 *)rpl->data;
		if (opcode == FW_OFLD_CONNECTION_WR) {
			req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)
			      rpl->data;
			if (req->t_state == TCP_SYN_SENT
			    && (req->retval == FW_ENOMEM
				|| req->retval == FW_EADDRINUSE)) {
				/*
				 * Assumed: the atid is recovered from the
				 * tid_len field of the WR reply, as done
				 * in upstream trees.
				 */
				ep = (struct c4iw_ep *)
				     lookup_atid(dev->rdev.lldi.tids,
						 GET_TID_TID(GET_AOPEN_ATID(
						 ntohl(req->tid_len))));
				c4iw_l2t_send(&dev->rdev, skb, ep->l2t);
			}
		}
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n",
		       __func__, rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}
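
/*
 * Handle ABORT_REQ_RSS at interrupt level.  Unlike the other CPLs, the
 * abort must wake any thread blocked in rdma_init()/rdma_fini()
 * immediately, before the message is requeued to the work queue for
 * the full peer_abort() processing.
 */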
static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		kfree_skb(skb);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 */
	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	sched(dev, skb);
	return 0;
}
/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg
};
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void __exit c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}