/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		 "1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		 " compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

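/*
 * QP reference helpers: an endpoint bound to a QP holds a reference on
 * its ibqp and records that fact in QP_REFERENCED so that teardown can
 * drop the reference exactly once.
 */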
static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
}

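/*
 * Derive the effective MSS for this connection: start from the hardware
 * MTU table entry selected by the peer's MSS index, subtract the IP (v4
 * or v6) and TCP header sizes, and subtract the timestamp option length
 * when timestamps were negotiated.
 */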
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
		print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
		iwpm_remove_mapinfo(&ep->com.local_addr,
				    &ep->com.mapped_local_addr);
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
{
	int i;

	egress_dev = get_real_dev(egress_dev);
	for (i = 0; i < dev->rdev.lldi.nports; i++)
		if (dev->rdev.lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}

373 static struct dst_entry
*find_route6(struct c4iw_dev
*dev
, __u8
*local_ip
,
374 __u8
*peer_ip
, __be16 local_port
,
375 __be16 peer_port
, u8 tos
,
378 struct dst_entry
*dst
= NULL
;
380 if (IS_ENABLED(CONFIG_IPV6
)) {
383 memset(&fl6
, 0, sizeof(fl6
));
384 memcpy(&fl6
.daddr
, peer_ip
, 16);
385 memcpy(&fl6
.saddr
, local_ip
, 16);
386 if (ipv6_addr_type(&fl6
.daddr
) & IPV6_ADDR_LINKLOCAL
)
387 fl6
.flowi6_oif
= sin6_scope_id
;
388 dst
= ip6_route_output(&init_net
, NULL
, &fl6
);
391 if (!our_interface(dev
, ip6_dst_idev(dst
)->dev
) &&
392 !(ip6_dst_idev(dst
)->dev
->flags
& IFF_LOOPBACK
)) {
402 static struct dst_entry
*find_route(struct c4iw_dev
*dev
, __be32 local_ip
,
403 __be32 peer_ip
, __be16 local_port
,
404 __be16 peer_port
, u8 tos
)
410 rt
= ip_route_output_ports(&init_net
, &fl4
, NULL
, peer_ip
, local_ip
,
411 peer_port
, local_port
, IPPROTO_TCP
,
415 n
= dst_neigh_lookup(&rt
->dst
, &peer_ip
);
418 if (!our_interface(dev
, n
->dev
) &&
419 !(n
->dev
->flags
& IFF_LOOPBACK
)) {
421 dst_release(&rt
->dst
);
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
	connect_reply_upcall(ep, -EHOSTUNREACH);
	state_set(&ep->com, DEAD);
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}

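/*
 * Send a FLOWC work request to prime the firmware with per-flow state
 * (PF/VF, channel, port, ingress queue, initial sequence numbers,
 * window sizes and MSS) before any offloaded transmit on this TID.
 */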
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

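/*
 * Initiate a TCP half-close by sending a CPL_CLOSE_CON_REQ (FIN) on the
 * connection's TID.
 */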
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * c4iw_form_pm_msg - Form a port mapper message with mapping info
 */
static void c4iw_form_pm_msg(struct c4iw_ep *ep,
			     struct iwpm_sa_data *pm_msg)
{
	memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
}

/*
 * c4iw_form_reg_msg - Form a port mapper message with dev info
 */
static void c4iw_form_reg_msg(struct c4iw_dev *dev,
			      struct iwpm_dev_data *pm_msg)
{
	memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
	memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
	       IWPM_IFNAME_SIZE);
}

static void c4iw_record_pm_msg(struct c4iw_ep *ep,
			       struct iwpm_sa_data *pm_msg)
{
	memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
	       sizeof(ep->com.mapped_remote_addr));
}

static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep)
{
	int ret;

	print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep ");
	print_addr(&child_ep->com, __func__, "get_remote_addr child_ep ");

	ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr,
				   &child_ep->com.mapped_remote_addr,
				   &child_ep->com.remote_addr, RDMA_NL_C4IW);
	if (ret)
		PDBG("Unable to find remote peer addr info - err %d\n", ret);

	return ret;
}

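/*
 * Pick the best hardware MTU table index for this path: account for the
 * IP/TCP header overhead (and the timestamp option, if used) so that
 * the resulting payload size stays 8-byte aligned.
 */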
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ?
				   sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				  sizeof(struct tcphdr) +
				  (use_ts ?
				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}

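/*
 * Issue the active-open CPL for this endpoint.  T4 and T5 adapters use
 * different request formats (T5 carries the filter tuple and an initial
 * send sequence number), and IPv4 and IPv6 each have their own CPL.
 */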
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct cpl_t5_act_open_req *t5_req;
	struct cpl_act_open_req6 *req6;
	struct cpl_t5_act_open_req6 *t5_req6;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen;
	int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
		     sizeof(struct cpl_act_open_req) :
		     sizeof(struct cpl_t5_act_open_req);
	int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
		     sizeof(struct cpl_act_open_req6) :
		     sizeof(struct cpl_t5_act_open_req6);
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.mapped_local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.mapped_remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.mapped_remote_addr;
	int win;

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
		roundup(sizev4, 16) :
		roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
	}
	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
		if (ep->com.remote_addr.ss_family == AF_INET) {
			req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			OPCODE_TID(req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			req->local_port = la->sin_port;
			req->peer_port = ra->sin_port;
			req->local_ip = la->sin_addr.s_addr;
			req->peer_ip = ra->sin_addr.s_addr;
			req->opt0 = cpu_to_be64(opt0);
			req->params = cpu_to_be32(cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			req->opt2 = cpu_to_be32(opt2);
		} else {
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);

			INIT_TP_WR(req6, 0);
			OPCODE_TID(req6) = cpu_to_be32(
					   MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					   ((ep->rss_qid<<14)|ep->atid)));
			req6->local_port = la6->sin6_port;
			req6->peer_port = ra6->sin6_port;
			req6->local_ip_hi = *((__be64 *)
					      (la6->sin6_addr.s6_addr));
			req6->local_ip_lo = *((__be64 *)
					      (la6->sin6_addr.s6_addr + 8));
			req6->peer_ip_hi = *((__be64 *)
					     (ra6->sin6_addr.s6_addr));
			req6->peer_ip_lo = *((__be64 *)
					     (ra6->sin6_addr.s6_addr + 8));
			req6->opt0 = cpu_to_be64(opt0);
			req6->params = cpu_to_be32(cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		}
	} else {
		u32 isn = (prandom_u32() & ~7UL) - 1;

		if (peer2peer)
			isn += 4;

		if (ep->com.remote_addr.ss_family == AF_INET) {
			t5_req = (struct cpl_t5_act_open_req *)
				 skb_put(skb, wrlen);
			INIT_TP_WR(t5_req, 0);
			OPCODE_TID(t5_req) = cpu_to_be32(
					MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid << 14) | ep->atid)));
			t5_req->local_port = la->sin_port;
			t5_req->peer_port = ra->sin_port;
			t5_req->local_ip = la->sin_addr.s_addr;
			t5_req->peer_ip = ra->sin_addr.s_addr;
			t5_req->opt0 = cpu_to_be64(opt0);
			t5_req->params = cpu_to_be64(FILTER_TUPLE_V(
						     cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t)));
			t5_req->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__,
			     be32_to_cpu(t5_req->rsvd));
			t5_req->opt2 = cpu_to_be32(opt2);
		} else {
			t5_req6 = (struct cpl_t5_act_open_req6 *)
				  skb_put(skb, wrlen);
			INIT_TP_WR(t5_req6, 0);
			OPCODE_TID(t5_req6) = cpu_to_be32(
					      MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					      ((ep->rss_qid<<14)|ep->atid)));
			t5_req6->local_port = la6->sin6_port;
			t5_req6->peer_port = ra6->sin6_port;
			t5_req6->local_ip_hi = *((__be64 *)
						 (la6->sin6_addr.s6_addr));
			t5_req6->local_ip_lo = *((__be64 *)
						 (la6->sin6_addr.s6_addr + 8));
			t5_req6->peer_ip_hi = *((__be64 *)
						(ra6->sin6_addr.s6_addr));
			t5_req6->peer_ip_lo = *((__be64 *)
						(ra6->sin6_addr.s6_addr + 8));
			t5_req6->opt0 = cpu_to_be64(opt0);
			t5_req6->params = cpu_to_be64(FILTER_TUPLE_V(
						      cxgb4_select_ntuple(
					ep->com.dev->rdev.lldi.ports[0],
					ep->l2t)));
			t5_req6->rsvd = cpu_to_be32(isn);
			PDBG("%s snd_isn %u\n", __func__,
			     be32_to_cpu(t5_req6->rsvd));
			t5_req6->opt2 = cpu_to_be32(opt2);
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

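/*
 * Transmit the MPA start request as immediate data in a
 * FW_OFLD_TX_DATA_WR.  For MPA revision 2 an mpa_v2_conn_params block
 * (IRD/ORD and the RTR mode bits) is prepended to any private data.
 */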
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			 u8 mpa_rev_to_use)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0) |
		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
		     ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof (struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
				  FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	if (ep->retry_with_mpa_v1)
		send_mpa_req(ep, skb, 1);
	else
		send_mpa_req(ep, skb, mpa_rev);
	mutex_unlock(&ep->com.mutex);
	return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	__state_set(&ep->com, ABORTING);
	set_bit(ABORT_CONN, &ep->com.history);
	return send_abort(ep, skb, gfp);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
	}
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

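/*
 * Return RX credits to the hardware with a CPL_RX_DATA_ACK, re-opening
 * the TCP receive window as payload is consumed.
 */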
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
				       RX_DACK_CHANGE_F |
				       RX_DACK_MODE_V(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}

#define RELAXED_IRD_NEGOTIATION 1

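/*
 * Parse the peer's MPA start reply, accumulated across one or more RX
 * payloads, negotiate IRD/ORD and the RTR mode, and move the QP to RTS.
 * Returns non-zero if the caller should disconnect.
 */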
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}
			if (insuff_ird) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = "
	     "%d\n", __func__, ep->mpa_attr.crc_enabled,
	     ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		       __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err:
	__state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}

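/*
 * Parse an incoming MPA start request on a passively opened connection
 * and, once the whole message (including private data) has arrived,
 * deliver the connect request upcall to the listening cm_id.
 */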
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		(void)stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			ep->ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			ep->ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
			     ep->ord);
			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
					 MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	/*
	 * If the endpoint timer already expired, then we ignore
	 * the start request.  process_timeout() will abort
	 * the connection.
	 */
	if (!stop_ep_timer(ep)) {
		__state_set(&ep->com, MPA_REQ_RCVD);

		/* drive upcall */
		mutex_lock_nested(&ep->parent_ep->com.mutex,
				  SINGLE_DEPTH_NESTING);
		if (ep->parent_ep->com.state != DEAD) {
			if (connect_request_upcall(ep))
				abort_connection(ep, skb, GFP_KERNEL);
		} else {
			abort_connection(ep, skb, GFP_KERNEL);
		}
		mutex_unlock(&ep->parent_ep->com.mutex);
	}
}

static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;
	__u8 status = hdr->status;
	int disconnect = 0;

	ep = lookup_tid(t, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);
	mutex_lock(&ep->com.mutex);

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		ep->rcv_seq += dlen;
		disconnect = process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		ep->rcv_seq += dlen;
		process_mpa_request(ep, skb);
		break;
	case FPDU_MODE: {
		struct c4iw_qp_attributes attrs;
		BUG_ON(!ep->com.qp);
		if (status)
			pr_err("%s Unexpected streaming data." \
			       " qpid %u ep %p state %d tid %u status %d\n",
			       __func__, ep->com.qp->wq.sq.qid, ep,
			       ep->com.state, ep->hwtid, status);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		disconnect = 1;
		break;
	}
	default:
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}

static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
		return 0;
	}
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case ABORTING:
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	default:
		printk(KERN_ERR "%s ep %p state %d\n",
		       __func__, ep, ep->com.state);
		break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release)
		release_ep_resources(ep);
	return 0;
}

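/*
 * Retry a failed active open through the firmware using a
 * FW_OFLD_CONNECTION_WR, which lets the firmware supply the TID; used
 * when the normal active open hit a full TCAM.
 */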
static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
{
	struct sk_buff *skb;
	struct fw_ofld_connection_wr *req;
	unsigned int mtu_idx;
	int wscale;
	struct sockaddr_in *sin;
	int win;

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
				     ep->com.dev->rdev.lldi.ports[0],
				     ep->l2t));
	sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	req->le.lport = sin->sin_port;
	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
	sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	req->le.pport = sin->sin_port;
	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
	req->tcb.t_state_to_astid =
			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
			      FW_OFLD_CONNECTION_WR_ASTID_V(atid));
	req->tcb.cplrxdataack_cplpassacceptrpl =
			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
	req->tcb.tx_max = (__force __be32) jiffies;
	req->tcb.rcv_adv = htons(1);
	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
		(nocong ? NO_CONG_F : 0) |
		KEEP_ALIVE_F |
		DELACK_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(ep->l2t->idx) |
		TX_CHAN_V(ep->tx_chan) |
		SMAC_SEL_V(ep->smac_idx) |
		DSCP_V(ep->tos) |
		ULP_MODE_V(ULP_MODE_TCPDDP) |
		RCV_BUFSIZ_V(win));
	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
		RX_CHANNEL_V(0) |
		CCTRL_ECN_V(enable_ecn) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
	if (enable_tcp_timestamps)
		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
	if (enable_tcp_sack)
		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
	set_bit(ACT_OFLD_CONN, &ep->com.history);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

/* Returns whether a CPL status conveys negative advice.
 */
static int is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE ||
	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

static char *neg_adv_str(unsigned int status)
{
	switch (status) {
	case CPL_ERR_RTX_NEG_ADVICE:
		return "Retransmit timeout";
	case CPL_ERR_PERSIST_NEG_ADVICE:
		return "Persist timeout";
	case CPL_ERR_KEEPALV_NEG_ADVICE:
		return "Keepalive timeout";
	default:
		return "Unknown";
	}
}

static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
	ep->snd_win = snd_win;
	ep->rcv_win = rcv_win;
	PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
}

#define ACT_OPEN_RETRY_COUNT 2

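/*
 * Resolve the egress device for the destination and fill in the L2T
 * entry and per-port transmit parameters for this endpoint.  Loopback
 * destinations are matched against local interfaces; otherwise the
 * neighbour's real (non-VLAN) device is used.
 */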
static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
		     struct dst_entry *dst, struct c4iw_dev *cdev,
		     bool clear_mpa_v1)
{
	struct neighbour *n;
	int err, step;
	struct net_device *pdev;

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	err = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			for_each_netdev(&init_net, pdev) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  pdev, 1))
					break;
			}
		else
			pdev = NULL;

		if (!pdev) {
			err = -ENODEV;
			goto out;
		}
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
		       cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
		       cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = cdev->rdev.lldi.ntxq /
		       cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
		       cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
	}

	if (clear_mpa_v1) {
		ep->retry_with_mpa_v1 = 0;
		ep->tried_with_mpa_v1 = 0;
	}

	err = 0;
out:
	rcu_read_unlock();
	neigh_release(n);
	return err;
}

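/*
 * Retry an active open on the same cm_id: allocate a new atid,
 * re-resolve the route and L2T entry, and resend the connect request.
 * On failure the original connect is failed with -ECONNRESET.
 */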
static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->remote_addr;
	__u8 *ra;
	int iptype;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		pr_err("%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);

	/* find a route */
	if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
		ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, 0);
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;
	} else {
		ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;
	}
	if (!ep->dst) {
		pr_err("%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
	if (err) {
		pr_err("%s - cannot alloc l2e.\n", __func__);
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	/*
	 * remember to send notification to upper layer.
	 * We are in here so the upper layer is not aware that this is
	 * re-connect attempt and so, upper layer is still waiting for
	 * response of 1st connect request.
	 */
	connect_reply_upcall(ep, -ECONNRESET);
	c4iw_put_ep(&ep->com);
out:
	return err;
}

static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = TID_TID_G(AOPEN_ATID_G(
				      ntohl(rpl->atid_status)));
	struct tid_info *t = dev->rdev.lldi.tids;
	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct sockaddr_in *la;
	struct sockaddr_in *ra;
	struct sockaddr_in6 *la6;
	struct sockaddr_in6 *ra6;

	ep = lookup_atid(t, atid);
	la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
	ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;

	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
	     status, status2errno(status));

	if (is_neg_adv(status)) {
		PDBG("%s Connection problems for atid %u status %u (%s)\n",
		     __func__, atid, status, neg_adv_str(status));
		ep->stats.connect_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		return 0;
	}

	set_bit(ACT_OPEN_RPL, &ep->com.history);

	/*
	 * Log interesting failures.
	 */
	switch (status) {
	case CPL_ERR_CONN_RESET:
	case CPL_ERR_CONN_TIMEDOUT:
		break;
	case CPL_ERR_TCAM_FULL:
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.tcam_full++;
		mutex_unlock(&dev->rdev.stats.lock);
		if (ep->com.local_addr.ss_family == AF_INET &&
		    dev->rdev.lldi.enable_fw_ofld_conn) {
			send_fw_act_open_req(ep,
					     TID_TID_G(AOPEN_ATID_G(
					     ntohl(rpl->atid_status))));
			return 0;
		}
		break;
	case CPL_ERR_CONN_EXIST:
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			set_bit(ACT_RETRY_INUSE, &ep->com.history);
			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
				      atid);
			cxgb4_free_atid(t, atid);
			dst_release(ep->dst);
			cxgb4_l2t_release(ep->l2t);
			c4iw_reconnect(ep);
			return 0;
		}
		break;
	default:
		if (ep->com.local_addr.ss_family == AF_INET) {
			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
				atid, status, status2errno(status),
				&la->sin_addr.s_addr, ntohs(la->sin_port),
				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
		} else {
			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
				atid, status, status2errno(status),
				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
		}
		break;
	}

	connect_reply_upcall(ep, status2errno(status));
	state_set(&ep->com, DEAD);

	if (status && act_open_has_tid(status))
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));

	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);

	return 0;
}
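/*
 * Retry note for act_open_rpl(): on CPL_ERR_CONN_EXIST the old 4-tuple
 * may still be lingering in hardware, so rather than failing the connect
 * the driver tears down the per-attempt state (atid, route, L2T entry)
 * and re-dials via c4iw_reconnect(), up to ACT_OPEN_RETRY_COUNT times.
 */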
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		PDBG("%s stid %d lookup failure!\n", __func__, stid);
		goto out;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

out:
	return 0;
}
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
	return 0;
}
static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;
	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
	int win;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));

	skb_get(skb);
	rpl = cplhdr(skb);
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		skb_trim(skb, roundup(sizeof(*rpl5), 16));
		rpl5 = (void *)rpl;
		INIT_TP_WR(rpl5, ep->hwtid);
	} else {
		skb_trim(skb, sizeof(*rpl));
		INIT_TP_WR(rpl, ep->hwtid);
	}
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps && req->tcpopt.tstamp,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (enable_ecn) {
		const struct tcphdr *tcph;
		u32 hlen = ntohl(req->hdr_len);

		tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
		       IP_HDR_LEN_G(hlen);
		if (tcph->ece && tcph->cwr)
			opt2 |= CCTRL_ECN_V(1);
	}
	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
		u32 isn = (prandom_u32() & ~7UL) - 1;
		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		rpl5 = (void *)rpl;
		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
		if (peer2peer)
			isn += 4;
		rpl5->iss = cpu_to_be32(isn);
		PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
	}

	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
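/*
 * accept_cr() programs the receive window in opt0 in 1KB units
 * (win = ep->rcv_win >> 10), clamped to RCV_BUFSIZ_M, the widest value
 * the field can hold; per the comment inside the function, any remainder
 * beyond what fits in opt0 is advertised to the peer later through
 * rx_data_ack credits.
 */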
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	release_tid(&dev->rdev, hwtid, skb);
}
static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
		       __u8 *local_ip, __u8 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	int ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
		     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
		     ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
		     ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}
	*peer_port = tcp->source;
	*local_port = tcp->dest;
}
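/*
 * Layout assumed by get_4tuple(): the skb carries the original SYN
 * immediately after the cpl_pass_accept_req header, i.e.
 *
 *   (req + 1) --> [ ethernet hdr | ip/ipv6 hdr | tcp hdr ]
 *                  <- eth_len ->  <-- ip_len ->
 *
 * where eth_len and ip_len are decoded from req->hdr_len, so the same
 * pointer arithmetic covers both the VLAN and non-VLAN cases.
 */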
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep = NULL, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int err;
	u16 peer_mss = ntohs(req->tcpopt.mss);
	int iptype;
	unsigned short hdrs;

	parent_ep = lookup_stid(t, stid);
	if (!parent_ep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
				 local_port, peer_port,
				 PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
	} else {
		PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
		     , __func__, parent_ep, hwtid,
		     local_ip, peer_ip, ntohs(local_port),
		     ntohs(peer_port), peer_mss);
		dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
				  PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				  ((struct sockaddr_in6 *)
				  &parent_ep->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
	if (err) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(child_ep);
		goto reject;
	}

	hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
		child_ep->mtu = peer_mss + hdrs;

	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;

	/*
	 * The mapped_local and mapped_remote addresses get setup with
	 * the actual 4-tuple. The local address will be based on the
	 * actual local address of the connection, but on the port number
	 * of the parent listening endpoint. The remote address is
	 * setup based on a query to the IWPM since we don't know what it
	 * originally was before mapping. If no mapping was done, then
	 * mapped_remote == remote, and mapped_local == local.
	 */
	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.mapped_local_addr;

		sin->sin_family = PF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = ((struct sockaddr_in *)
				 &parent_ep->com.local_addr)->sin_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
		sin->sin_family = PF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
			&child_ep->com.mapped_local_addr;

		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}
	memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
	       sizeof(child_ep->com.remote_addr));
	get_remote_addr(parent_ep, child_ep);

	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
	accept_cr(child_ep, skb, req);
	set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
	goto out;
reject:
	reject_cr(dev, hwtid, skb);
out:
	return 0;
}
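/*
 * Port-mapper example for the address setup above (addresses are
 * illustrative, not from the source): if the listener is bound to
 * 10.0.0.1:5000 and the IWPM mapped it to 10.0.0.1:62000, a SYN arriving
 * at 10.0.0.1:62000 yields mapped_local = 10.0.0.1:62000 (the actual
 * wire 4-tuple) while local_addr keeps the parent listener's port,
 * 10.0.0.1:5000. With no mapper running, the two are identical.
 */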
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
	     ntohs(req->tcp_opt));

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);
	set_bit(PASS_ESTAB, &ep->com.history);

	return 0;
}
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);
	int ret;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	set_bit(PEER_CLOSE, &ep->com.history);
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				     C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -ECONNRESET) {
			peer_close_upcall(ep);
			disconnect = 1;
		}
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}
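/*
 * Rough sketch of the half-close progression driven by peer_close()
 * above and close_con_rpl() below: an established connection goes
 * FPDU_MODE -> CLOSING when the first FIN is seen, CLOSING -> MORIBUND
 * when the second close is in flight, and MORIBUND -> DEAD once both
 * sides have closed, at which point the QP is moved to IDLE and the ep's
 * resources are released.
 */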
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		ep->stats.abort_neg_adv++;
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.neg_adv++;
		mutex_unlock(&dev->rdev.stats.lock);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(PEER_ABORT, &ep->com.history);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, this is not needed if com state is just
	 * MPA_REQ_SENT
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * We just don't send notification upwards because we
			 * want to retry with mpa_v1 without upper layers even
			 * knowing it.
			 *
			 * Do some housekeeping so as to re-initiate the
			 * connection.
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

	return 0;
}
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	return 0;
}
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = lookup_tid(t, tid);
	BUG_ON(!ep);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);

	return 0;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err = 0;
	int disconnect = 0;
	struct c4iw_ep *ep = to_ep(cm_id);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		mutex_unlock(&ep->com.mutex);
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	set_bit(ULP_REJECT, &ep->com.history);
	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		disconnect = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (disconnect)
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return 0;
}
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	mutex_lock(&ep->com.mutex);
	if (ep->com.state == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(ep->com.state != MPA_REQ_RCVD);
	BUG_ON(!qp);

	set_bit(ULP_ACCEPT, &ep->com.history);
	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		if (conn_param->ord > ep->ird) {
			if (RELAXED_IRD_NEGOTIATION) {
				ep->ord = ep->ird;
			} else {
				ep->ird = conn_param->ird;
				ep->ord = conn_param->ord;
				send_mpa_reject(ep, conn_param->private_data,
						conn_param->private_data_len);
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}
		if (conn_param->ird < ep->ord) {
			if (RELAXED_IRD_NEGOTIATION &&
			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
				conn_param->ird = ep->ord;
			} else {
				abort_connection(ep, NULL, GFP_KERNEL);
				err = -ENOMEM;
				goto err;
			}
		}
	}
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (ep->mpa_attr.version == 1) {
		if (peer2peer && ep->ird == 0)
			ep->ird = 1;
	} else {
		if (peer2peer &&
		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
			ep->ird = 1;
	}

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;
	ref_qp(ep);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
	       C4IW_QP_ATTR_LLP_STREAM_HANDLE |
	       C4IW_QP_ATTR_MPA_ATTR |
	       C4IW_QP_ATTR_MAX_IRD |
	       C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	__state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	abort_connection(ep, NULL, GFP_KERNEL);
	cm_id->rem_ref(cm_id);
err:
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
	return err;
}
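/*
 * Worked example of the MPAv2 IRD/ORD check in c4iw_accept_cr() (numbers
 * are illustrative): if the peer advertised ird = 8 but the ULP accepts
 * with ord = 16, the connection cannot honor 16 outstanding reads. With
 * RELAXED_IRD_NEGOTIATION the local ord is quietly clamped down to the
 * peer's ird; otherwise the MPA reply is a reject and the connection is
 * aborted with -ENOMEM.
 */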
static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in_device *ind;
	int found = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;

	ind = in_dev_get(dev->rdev.lldi.ports[0]);
	if (!ind)
		return -EADDRNOTAVAIL;
	for_primary_ifa(ind) {
		laddr->sin_addr.s_addr = ifa->ifa_address;
		raddr->sin_addr.s_addr = ifa->ifa_address;
		found = 1;
		break;
	}
	endfor_ifa(ind);
	in_dev_put(ind);
	return found ? 0 : -EADDRNOTAVAIL;
}
static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
		      unsigned char banned_flags)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		list_for_each_entry(ifp, &idev->addr_list, if_list) {
			if (ifp->scope == IFA_LINK &&
			    !(ifp->flags & banned_flags)) {
				memcpy(addr, &ifp->addr, 16);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return err;
}
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
	struct in6_addr uninitialized_var(addr);
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;

	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
		return 0;
	}
	return -EADDRNOTAVAIL;
}
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	int err = 0;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	struct iwpm_dev_data pm_reg_msg;
	struct iwpm_sa_data pm_msg;
	__u8 *ra;
	int iptype;
	int iwpm_err = 0;

	if ((conn_param->ord > cur_max_read_depth(dev)) ||
	    (conn_param->ird > cur_max_read_depth(dev))) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	if (!ep->com.qp) {
		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
		err = -EINVAL;
		goto fail1;
	}
	ref_qp(ep);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	insert_handle(dev, &dev->atid_idr, ep, ep->atid);

	memcpy(&ep->com.local_addr, &cm_id->local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
	       sizeof(ep->com.remote_addr));

	/* No port mapper available, go with the specified peer information */
	memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
	       sizeof(ep->com.mapped_local_addr));
	memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
	       sizeof(ep->com.mapped_remote_addr));

	c4iw_form_reg_msg(dev, &pm_reg_msg);
	iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
	if (iwpm_err) {
		PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
		     __func__, iwpm_err);
	}
	if (iwpm_valid_pid() && !iwpm_err) {
		c4iw_form_pm_msg(ep, &pm_msg);
		iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
		if (iwpm_err)
			PDBG("%s: Port Mapper query fail (err = %d).\n",
			     __func__, iwpm_err);
		else
			c4iw_record_pm_msg(ep, &pm_msg);
	}
	if (iwpm_create_mapinfo(&ep->com.local_addr,
				&ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
		iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
		err = -ENOMEM;
		goto fail1;
	}
	print_addr(&ep->com, __func__, "add_query/create_mapinfo");
	set_bit(RELEASE_MAPINFO, &ep->com.flags);

	laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
	raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
	laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
	raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr;

	if (cm_id->remote_addr.ss_family == AF_INET) {
		iptype = 4;
		ra = (__u8 *)&raddr->sin_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
			err = pick_local_ipaddrs(dev, cm_id);
			if (err)
				goto fail1;
		}

		/* find a route */
		PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
		     __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
		     ra, ntohs(raddr->sin_port));
		ep->dst = find_route(dev, laddr->sin_addr.s_addr,
				     raddr->sin_addr.s_addr, laddr->sin_port,
				     raddr->sin_port, 0);
	} else {
		iptype = 6;
		ra = (__u8 *)&raddr6->sin6_addr;

		/*
		 * Handle loopback requests to INADDR_ANY.
		 */
		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
			err = pick_local_ip6addrs(dev, cm_id);
			if (err)
				goto fail1;
		}

		/* find a route */
		PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
		     __func__, laddr6->sin6_addr.s6_addr,
		     ntohs(laddr6->sin6_port),
		     raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
		ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
				      raddr6->sin6_addr.s6_addr,
				      laddr6->sin6_port, raddr6->sin6_port, 0,
				      raddr6->sin6_scope_id);
	}
	if (!ep->dst) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail2;
	}

	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
	if (err) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		goto fail3;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail3:
	dst_release(ep->dst);
fail2:
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail1:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
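/*
 * Loopback note for c4iw_connect(): a connect whose destination is the
 * wildcard address (0.0.0.0 or ::) is treated as a loopback request, so
 * pick_local_ipaddrs()/pick_local_ip6addrs() rewrite both the local and
 * remote addresses to an address owned by port 0 of the adapter before
 * the route lookup runs.
 */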
static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &ep->com.mapped_local_addr;

	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
				   ep->stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (!err)
		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
					  &ep->com.wr_wait,
					  0, 0, __func__);
	else if (err > 0)
		err = net_xmit_errno(err);
	if (err)
		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
		       err, ep->stid,
		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
	return err;
}
static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
{
	int err;
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &ep->com.mapped_local_addr;

	if (dev->rdev.lldi.enable_fw_ofld_conn) {
		do {
			err = cxgb4_create_server_filter(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				sin->sin_addr.s_addr, sin->sin_port, 0,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
			if (err == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(usecs_to_jiffies(100));
			}
		} while (err == -EBUSY);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
		if (!err)
			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
						  &ep->com.wr_wait,
						  0, 0, __func__);
		else if (err > 0)
			err = net_xmit_errno(err);
	}
	if (err)
		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
		       , err, ep->stid,
		       &sin->sin_addr, ntohs(sin->sin_port));
	return err;
}
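/*
 * The -EBUSY loop in create_server4() is a simple unbounded wait: the
 * firmware filter machinery may be momentarily busy, so the driver
 * sleeps ~100us (uninterruptibly) and retries until the filter write is
 * accepted. There is no retry cap here; it relies on the firmware
 * eventually draining its pending work.
 */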
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;
	struct iwpm_dev_data pm_reg_msg;
	struct iwpm_sa_data pm_msg;
	int iwpm_err = 0;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	memcpy(&ep->com.local_addr, &cm_id->local_addr,
	       sizeof(ep->com.local_addr));

	/*
	 * Allocate a server TID.
	 */
	if (dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET)
		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
					     cm_id->local_addr.ss_family, ep);
	else
		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
					    cm_id->local_addr.ss_family, ep);

	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}
	insert_handle(dev, &dev->stid_idr, ep, ep->stid);

	/* No port mapper available, go with the specified info */
	memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
	       sizeof(ep->com.mapped_local_addr));

	c4iw_form_reg_msg(dev, &pm_reg_msg);
	iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
	if (iwpm_err) {
		PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
		     __func__, iwpm_err);
	}
	if (iwpm_valid_pid() && !iwpm_err) {
		memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
		       sizeof(ep->com.local_addr));
		iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
		if (iwpm_err)
			PDBG("%s: Port Mapper query fail (err = %d).\n",
			     __func__, iwpm_err);
		else
			memcpy(&ep->com.mapped_local_addr,
			       &pm_msg.mapped_loc_addr,
			       sizeof(ep->com.mapped_local_addr));
	}
	if (iwpm_create_mapinfo(&ep->com.local_addr,
				&ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
		err = -ENOMEM;
		goto fail3;
	}
	print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");

	set_bit(RELEASE_MAPINFO, &ep->com.flags);
	state_set(&ep->com, LISTEN);
	if (ep->com.local_addr.ss_family == AF_INET)
		err = create_server4(dev, ep);
	else
		err = create_server6(dev, ep);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}

fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
	    ep->com.local_addr.ss_family == AF_INET) {
		err = cxgb4_remove_server_filter(
			ep->com.dev->rdev.lldi.ports[0], ep->stid,
			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
	} else {
		c4iw_init_wr_wait(&ep->com.wr_wait);
		err = cxgb4_remove_server(
				ep->com.dev->rdev.lldi.ports[0], ep->stid,
				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
		if (err)
			goto done;
		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
					  0, 0, __func__);
	}
	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
			ep->com.local_addr.ss_family);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep, -EIO);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				(void)stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	if (close) {
		if (abrupt) {
			set_bit(EP_DISC_ABORT, &ep->com.history);
			close_complete_upcall(ep, -ECONNRESET);
			ret = send_abort(ep, NULL, gfp);
		} else {
			set_bit(EP_DISC_CLOSE, &ep->com.history);
			ret = send_halfclose(ep, gfp);
		}
		if (ret)
			fatal = 1;
	}
	mutex_unlock(&ep->com.mutex);
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
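/*
 * c4iw_ep_disconnect() supports two teardown styles: abrupt != 0 moves
 * the ep to ABORTING and issues send_abort() (an RST-style teardown),
 * while abrupt == 0 moves it to CLOSING/MORIBUND and issues
 * send_halfclose() (a FIN-style teardown). Either way, a fatal adapter
 * error short-circuits straight to DEAD with an -EIO close_complete
 * upcall.
 */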
static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct c4iw_ep *ep;
	int atid = be32_to_cpu(req->tid);

	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
					   (__force u32) req->tid);
	if (!ep)
		return;

	switch (req->retval) {
	case FW_ENOMEM:
		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	case FW_EADDRINUSE:
		set_bit(ACT_RETRY_INUSE, &ep->com.history);
		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
			send_fw_act_open_req(ep, atid);
			return;
		}
		break;
	default:
		pr_info("%s unexpected ofld conn wr retval %d\n",
			__func__, req->retval);
		break;
	}
	pr_err("active ofld_connect_wr failure %d atid %d\n",
	       req->retval, atid);
	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.act_ofld_conn_fails++;
	mutex_unlock(&dev->rdev.stats.lock);
	connect_reply_upcall(ep, status2errno(req->retval));
	state_set(&ep->com, DEAD);
	remove_handle(dev, &dev->atid_idr, atid);
	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
	dst_release(ep->dst);
	cxgb4_l2t_release(ep->l2t);
	c4iw_put_ep(&ep->com);
}
static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
{
	struct sk_buff *rpl_skb;
	struct cpl_pass_accept_req *cpl;
	int ret;

	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
	BUG_ON(!rpl_skb);
	if (req->retval) {
		PDBG("%s passive open failure %d\n", __func__, req->retval);
		mutex_lock(&dev->rdev.stats.lock);
		dev->rdev.stats.pas_ofld_conn_fails++;
		mutex_unlock(&dev->rdev.stats.lock);
		kfree_skb(rpl_skb);
	} else {
		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
					(__force u32) htonl(
					(__force u32) req->tid)));
		ret = pass_accept_req(dev, rpl_skb);
		if (!ret)
			kfree_skb(rpl_skb);
	}
}
static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;

	switch (rpl->type) {
	case FW6_TYPE_CQE:
		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
		break;
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
		switch (req->t_state) {
		case TCP_SYN_SENT:
			active_ofld_conn_reply(dev, skb, req);
			break;
		case TCP_SYN_RECV:
			passive_ofld_conn_reply(dev, skb, req);
			break;
		default:
			pr_err("%s unexpected ofld conn wr state %d\n",
			       __func__, req->t_state);
			break;
		}
		break;
	}
	return 0;
}
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	u32 l2info;
	u16 vlantag, len, hdr_len, eth_hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;
	struct c4iw_dev *dev;

	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = (__force u16) cpl->vlan;
	len = (__force u16) cpl->len;
	l2info = (__force u32) cpl->l2info;
	hdr_len = (__force u16) cpl->hdr_len;
	intf = (__force u8) cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate the cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
			 SYN_MAC_IDX_V(RX_MACIDX_G(
			 (__force int) htonl(l2info))) |
			 SYN_XACT_MATCH_F);
	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
		      RX_ETHHDR_LEN_G((__force int)htonl(l2info)) :
		      RX_T5_ETHHDR_LEN_G((__force int)htonl(l2info));
	req->hdr_len = cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(
					(__force int) htonl(l2info))) |
				   TCP_HDR_LEN_V(RX_TCPHDR_LEN_G(
					(__force int) htons(hdr_len))) |
				   IP_HDR_LEN_V(RX_IPHDR_LEN_G(
					(__force int) htons(hdr_len))) |
				   ETH_HDR_LEN_V(RX_ETHHDR_LEN_G(eth_hdr_len)));
	req->vlan = (__force __be16) vlantag;
	req->len = (__force __be16) len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
				    PASS_OPEN_TOS_V(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
}
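/*
 * Why build_cpl_pass_accept_req() re-parses the SYN: in the filter-based
 * listen path the TP never generated a real CPL_PASS_ACCEPT_REQ, so the
 * driver recovers mss/wscale/timestamp/sack from the raw TCP options via
 * tcp_parse_options() and packs them into req->tcpopt, mimicking what
 * the hardware would have reported on a normal passive open.
 */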
static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
			FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
			FW_OFLD_CONNECTION_WR_ASTID_V(
			PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2 which will be used by the firmware
	 * to send us the wr response.
	 */
	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));

	/*
	 * We initialize the MSS index in TCB to 0xF.
	 * So that when driver sends cpl_pass_accept_rpl
	 * TCB picks up the correct value. If this was 0
	 * TP will ignore any value > 0 for MSS index.
	 */
	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
	req->cookie = (uintptr_t)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0) {
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
		kfree_skb(skb);
		kfree_skb(req_skb);
	}
}
/*
 * Handler for CPL_RX_PKT messages. These need to be handled when a
 * filter is being used instead of a server to redirect a SYN packet.
 * When packets hit the filter they are redirected to the offload queue
 * and the driver tries to establish the connection using a firmware
 * work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
		      RX_ETHHDR_LEN_G(htonl(cpl->l2info)) :
		      RX_T5_ETHHDR_LEN_G(htonl(cpl->l2info));
	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			 iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);

	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n",
		       __func__);
		goto free_dst;
	}

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate filter portion for LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	return 0;
}
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt
};
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	mutex_unlock(&ep->com.mutex);
	c4iw_put_ep(&ep->com);
}
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		tmp->prev = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}
static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}
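/*
 * Design note: ep_timeout() runs from the timer in a context where
 * sleeping is not allowed, so it never processes the timeout itself. It
 * only moves the ep onto timeout_list under timeout_lock and kicks the
 * single-threaded workqueue; the real work happens in process_timeout(),
 * called from process_work() in process context, where mutexes and
 * upcalls are safe.
 */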
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{
	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}
static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		ep->stats.abort_neg_adv++;
		dev->rdev.stats.neg_adv++;
		kfree_skb(skb);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 * However, if we are on MPAv2 and want to retry with MPAv1
	 * then, don't wake up yet.
	 */
	if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
		if (ep->com.state != MPA_REQ_SENT)
			c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	} else
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	sched(dev, skb);
	return 0;
}
/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};
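/*
 * Dispatch summary: c4iw_handlers[] runs in the LLD's receive path and
 * mostly just calls sched() to defer onto the workqueue, where
 * work_handlers[] does the real processing. The entries handled inline
 * are peer_abort_intr() (so sleepers are woken promptly), set_tcb_rpl()
 * and fw6_msg(), which complete quickly without sleeping.
 */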
int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}