/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "iw_cxgb4.h"
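/*
 * Human-readable names for the c4iw_ep_state values, indexed by state
 * and used only in debug output.
 */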
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_req_wait",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

int c4iw_max_read_depth = 8;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				  "in seconds (default=60)");

static int mpa_rev = 1;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
			  "1 is spec compliant. (default=1)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");
static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;
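/*
 * Arm the per-endpoint watchdog timer.  This takes a reference on the
 * endpoint unless the timer was merely restarted; the reference is
 * dropped in stop_ep_timer() or after the timeout has been processed.
 */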
static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
		del_timer_sync(&ep->timer);
	} else
		c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}
static void stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (!timer_pending(&ep->timer)) {
		printk(KERN_ERR "%s timer stopped when it's not running! "
		       "ep %p state %u\n", __func__, ep, ep->com.state);
		WARN_ON(1);
		return;
	}
	del_timer_sync(&ep->timer);
	c4iw_put_ep(&ep->com);
}
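/*
 * Thin wrappers around the LLD transmit paths: both drop the work
 * request (and free the skb) if the device is in a fatal error state,
 * and fold positive LLD return values to 0.
 */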
static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}
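/*
 * Build and post a CPL_TID_RELEASE work request to return a hardware
 * TID to the adapter.
 */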
static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}
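/*
 * Derive the effective MSS for this connection from the negotiated MTU
 * index: the MTU less 40 bytes of IP and TCP headers, less another 12
 * bytes when TCP timestamps were negotiated.
 */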
static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
	ep->mss = ep->emss;
	if (GET_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
	     ep->mss, ep->emss);
}
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}
static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}
void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
	}
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
}
static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}
/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}
static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;

	rt = ip_route_output_ports(&init_net, NULL, peer_ip, local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	return rt;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p\n", __func__, handle);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_rdev *rdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(rdev, skb);
}
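/*
 * Send the FW_FLOWC_WR that primes firmware with the per-flow state
 * (channel, port, ingress queue, initial sequence numbers, send buffer
 * and MSS) before any offloaded transmission on this TID.
 */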
static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
{
	unsigned int flowclen = 80;
	struct fw_flowc_wr *flowc;
	int i;

	skb = get_skb(skb, flowclen, GFP_KERNEL);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS(8));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
					  16)) | FW_WR_FLOWID(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	/* Pad WR to 16 byte boundary */
	flowc->mnemval[8].mnemonic = 0;
	flowc->mnemval[8].val = 0;
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
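/*
 * The two ways to tear a connection down: send_halfclose() starts a
 * graceful FIN via CPL_CLOSE_CON_REQ, while send_abort() forces a RST
 * via CPL_ABORT_REQ.
 */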
static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(NULL, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	skb = get_skb(skb, wrlen, gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
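/*
 * Build and send the CPL_ACT_OPEN_REQ that asks the adapter to open an
 * active TCP connection using the endpoint's 4-tuple and the TCP
 * options computed here.
 */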
static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);
	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(
		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0 = cpu_to_be64(opt0);
	req->params = 0;
	req->opt2 = cpu_to_be32(opt2);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
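/*
 * Transmit the MPA start request in streaming mode and move the
 * endpoint to MPA_REQ_SENT.  The skb is referenced so the payload
 * survives until the hardware acks it in fw4_ack().
 */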
static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	wrlen = roundup(mpalen + sizeof *req, 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	return;
}
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	wrlen = roundup(mpalen + sizeof *req, 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL(1) |
		FW_WR_IMMDLEN(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH(1) |
		FW_OFLD_TX_DATA_WR_SHOVE(1));

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_atid(t, atid);

	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb4_free_atid(t, atid);

	/* start MPA negotiation */
	send_flowc(ep, NULL);
	send_mpa_req(ep, skb);

	return 0;
}
static void close_complete_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	close_complete_upcall(ep);
	state_set(&ep->com, ABORTING);
	return send_abort(ep, skb, gfp);
}
static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
	     ep->hwtid, status);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}
static void connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD) {
		c4iw_get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	}
	c4iw_put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}
static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}
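/*
 * Return RX credits to the hardware with CPL_RX_DATA_ACK so the peer's
 * TCP window reopens after we have consumed streaming-mode data.
 */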
static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    ep->hwtid));
	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
				       F_RX_DACK_CHANGE |
				       V_RX_DACK_MODE(dack_mode));
	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
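/*
 * Accumulate and validate the peer's MPA start reply.  Once the whole
 * message (including private data) has arrived, move the QP to RTS and
 * deliver the connect reply upcall; any validation failure aborts the
 * connection.
 */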
static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
					    FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;
	goto out;
err:
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}
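/*
 * Passive-side counterpart of process_mpa_reply(): accumulate and
 * validate the peer's MPA start request, then deliver a connect
 * request upcall to the listening endpoint.
 */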
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		stop_ep_timer(ep);
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;

	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
	stop_ep_timer(ep);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
					    FW_RI_INIT_P2PTYPE_DISABLED;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	     ep->mpa_attr.p2p_type);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}
static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	/* update RX credits */
	update_rx_credits(ep, dlen);

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %u\n",
		       __func__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}
	return 0;
}
*dev
, struct sk_buff
*skb
)
1119 struct cpl_abort_rpl_rss
*rpl
= cplhdr(skb
);
1121 unsigned int tid
= GET_TID(rpl
);
1122 struct tid_info
*t
= dev
->rdev
.lldi
.tids
;
1124 ep
= lookup_tid(t
, tid
);
1125 PDBG("%s ep %p tid %u\n", __func__
, ep
, ep
->hwtid
);
1127 mutex_lock(&ep
->com
.mutex
);
1128 switch (ep
->com
.state
) {
1130 __state_set(&ep
->com
, DEAD
);
1134 printk(KERN_ERR
"%s ep %p state %d\n",
1135 __func__
, ep
, ep
->com
.state
);
1138 mutex_unlock(&ep
->com
.mutex
);
1141 release_ep_resources(ep
);
1146 * Return whether a failed active open has allocated a TID
1148 static inline int act_open_has_tid(int status
)
1150 return status
!= CPL_ERR_TCAM_FULL
&& status
!= CPL_ERR_CONN_EXIST
&&
1151 status
!= CPL_ERR_ARP_MISS
;
1154 static int act_open_rpl(struct c4iw_dev
*dev
, struct sk_buff
*skb
)
1157 struct cpl_act_open_rpl
*rpl
= cplhdr(skb
);
1158 unsigned int atid
= GET_TID_TID(GET_AOPEN_ATID(
1159 ntohl(rpl
->atid_status
)));
1160 struct tid_info
*t
= dev
->rdev
.lldi
.tids
;
1161 int status
= GET_AOPEN_STATUS(ntohl(rpl
->atid_status
));
1163 ep
= lookup_atid(t
, atid
);
1165 PDBG("%s ep %p atid %u status %u errno %d\n", __func__
, ep
, atid
,
1166 status
, status2errno(status
));
1168 if (status
== CPL_ERR_RTX_NEG_ADVICE
) {
1169 printk(KERN_WARNING MOD
"Connection problems for atid %u\n",
1174 connect_reply_upcall(ep
, status2errno(status
));
1175 state_set(&ep
->com
, DEAD
);
1177 if (status
&& act_open_has_tid(status
))
1178 cxgb4_remove_tid(ep
->com
.dev
->rdev
.lldi
.tids
, 0, GET_TID(rpl
));
1180 cxgb4_free_atid(t
, atid
);
1181 dst_release(ep
->dst
);
1182 cxgb4_l2t_release(ep
->l2t
);
1183 c4iw_put_ep(&ep
->com
);
static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	if (!ep) {
		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
		return 0;
	}
	PDBG("%s ep %p status %d error %d\n", __func__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.wr_wait.ret = status2errno(rpl->status);
	ep->com.wr_wait.done = 1;
	wake_up(&ep->com.wr_wait.wait);

	return 0;
}
static int listen_stop(struct c4iw_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listsvr_req *req;

	PDBG("%s ep %p\n", __func__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
						    ep->stid));
	req->reply_ctrl = cpu_to_be16(
			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

	PDBG("%s ep %p\n", __func__, ep);
	ep->com.wr_wait.ret = status2errno(rpl->status);
	ep->com.wr_wait.done = 1;
	wake_up(&ep->com.wr_wait.wait);
	return 0;
}
static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
		      struct cpl_pass_accept_req *req)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2;
	int wscale;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
	wscale = compute_wscale(rcv_win);
	opt0 = KEEP_ALIVE(1) |
	       DELACK(1) |
	       WND_SCALE(wscale) |
	       MSS_IDX(mtu_idx) |
	       L2T_IDX(ep->l2t->idx) |
	       TX_CHAN(ep->tx_chan) |
	       SMAC_SEL(ep->smac_idx) |
	       DSCP(ep->tos) |
	       ULP_MODE(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ(rcv_win>>10);
	opt2 = RX_CHANNEL(0) |
	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

	if (enable_tcp_timestamps && req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN(1);
	if (enable_tcp_sack && req->tcpopt.sack)
		opt2 |= SACK_EN(1);
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN(1);

	rpl = cplhdr(skb);
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						    ep->hwtid));
	rpl->opt0 = cpu_to_be64(opt0);
	rpl->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);

	return;
}
static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);
	release_tid(&dev->rdev, hwtid, skb);
	return;
}
static void get_4tuple(struct cpl_pass_accept_req *req,
		       __be32 *local_ip, __be32 *peer_ip,
		       __be16 *local_port, __be16 *peer_port)
{
	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			     ((u8 *)(req + 1) + eth_len + ip_len);

	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
	     ntohs(tcp->dest));

	*peer_ip = ip->saddr;
	*local_ip = ip->daddr;
	*peer_port = tcp->source;
	*local_port = tcp->dest;

	return;
}
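/*
 * Handle CPL_PASS_ACCEPT_REQ: a SYN arrived on one of our listening
 * server TIDs.  Resolve a route and L2T entry for the peer, allocate
 * and initialize a child endpoint, and either accept or reject the
 * connection request.
 */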
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *child_ep, *parent_ep;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	__be32 local_ip, peer_ip;
	__be16 local_port, peer_port;
	struct net_device *pdev;
	u32 tx_chan, smac_idx;
	u16 rss_qid;
	u32 mtu;
	int step;
	int txq_idx, ctrlq_idx;

	parent_ep = lookup_stid(t, stid);
	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);

	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
		       __func__);
		goto reject;
	}

	/* Find output route */
	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
			GET_POPEN_TOS(ntohl(req->tos_stid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	dst = &rt->dst;
	if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, peer_ip);
		BUG_ON(!pdev);
		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
				    pdev, 0);
		mtu = pdev->mtu;
		tx_chan = cxgb4_port_chan(pdev);
		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
		txq_idx = cxgb4_port_idx(pdev) * step;
		ctrlq_idx = cxgb4_port_idx(pdev);
		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
				    dst->neighbour->dev, 0);
		mtu = dst_mtu(dst);
		tx_chan = cxgb4_port_chan(dst->neighbour->dev);
		smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
		ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
		rss_qid = dev->rdev.lldi.rxq_ids[
			  cxgb4_port_idx(dst->neighbour->dev) * step];
	}
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		goto reject;
	}

	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __func__);
		cxgb4_l2t_release(l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.dev = dev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = local_port;
	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
	c4iw_get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	child_ep->tx_chan = tx_chan;
	child_ep->smac_idx = smac_idx;
	child_ep->rss_qid = rss_qid;
	child_ep->mtu = mtu;
	child_ep->txq_idx = txq_idx;
	child_ep->ctrlq_idx = ctrlq_idx;

	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
	     tx_chan, smac_idx, rss_qid);

	init_timer(&child_ep->timer);
	cxgb4_insert_tid(t, child_ep, hwtid);
	accept_cr(child_ep, peer_ip, skb, req);
	goto out;
reject:
	reject_cr(dev, hwtid, peer_ip, skb);
out:
	return 0;
}
static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);
	send_flowc(ep, skb);

	return 0;
}
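/*
 * Handle CPL_PEER_CLOSE: the peer sent a FIN.  What happens next
 * depends on where the endpoint state machine currently is; the QP is
 * moved towards CLOSING/IDLE and the appropriate upcalls are delivered.
 */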
static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_peer_close *hdr = cplhdr(skb);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;
	int closing = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(hdr);

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	dst_confirm(ep->dst);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR. Also wake up anyone waiting
		 * in rdma connection migration (see c4iw_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
		ep->com.wr_wait.done = 1;
		ep->com.wr_wait.ret = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		wake_up(&ep->com.wr_wait.wait);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.wr_wait.done = 1;
		ep->com.wr_wait.ret = -ECONNRESET;
		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
		wake_up(&ep->com.wr_wait.wait);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		closing = 1;
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	mutex_unlock(&ep->com.mutex);
	if (closing) {
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	}
	if (disconnect)
		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return 0;
}
/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}
static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct c4iw_qp_attributes attrs;
	int ret;
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(req);

	ep = lookup_tid(t, tid);
	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
		     ep->hwtid);
		return 0;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	/*
	 * Wake up any threads in rdma_init() or rdma_fini().
	 */
	ep->com.wr_wait.done = 1;
	ep->com.wr_wait.ret = -ECONNRESET;
	wake_up(&ep->com.wr_wait.wait);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		return 0;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __func__);
		release = 1;
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	return 0;
}
static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);

	ep = lookup_tid(t, tid);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	BUG_ON(!ep);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp,
				       C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	return 0;
}
static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	struct tid_info *t = dev->rdev.lldi.tids;
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = lookup_tid(t, tid);
	BUG_ON(!ep);

	if (ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);

	return 0;
}
/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);
	struct tid_info *t = dev->rdev.lldi.tids;

	ep = lookup_tid(t, tid);
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		return 0;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
		kfree_skb(ep->mpa_skb);
		ep->mpa_skb = NULL;
	}
	return 0;
}
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	int err;
	struct c4iw_ep *ep = to_ep(cm_id);
	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	if (state_read(&ep->com) == DEAD) {
		c4iw_put_ep(&ep->com);
		return -ECONNRESET;
	}
	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	if (mpa_rev == 0)
		abort_connection(ep, NULL, GFP_KERNEL);
	else {
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	}
	c4iw_put_ep(&ep->com);
	return 0;
}
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	struct c4iw_ep *ep = to_ep(cm_id);
	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (state_read(&ep->com) == DEAD) {
		err = -ECONNRESET;
		goto err;
	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		abort_connection(ep, NULL, GFP_KERNEL);
		err = -EINVAL;
		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ird == 0)
		ep->ird = 1;

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	/* bind QP and TID with INIT_WR */
	mask = C4IW_QP_ATTR_NEXT_STATE |
			     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
			     C4IW_QP_ATTR_MPA_ATTR |
			     C4IW_QP_ATTR_MAX_IRD |
			     C4IW_QP_ATTR_MAX_ORD;

	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err1;
	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
		goto err1;

	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	c4iw_put_ep(&ep->com);
	return 0;
err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
err:
	c4iw_put_ep(&ep->com);
	return err;
}
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_ep *ep;
	struct rtable *rt;
	struct net_device *pdev;
	int step;

	if ((conn_param->ord > c4iw_max_read_depth) ||
	    (conn_param->ird > c4iw_max_read_depth)) {
		err = -EINVAL;
		goto out;
	}
	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto out;
	}
	init_timer(&ep->timer);
	ep->plen = conn_param->private_data_len;
	if (ep->plen)
		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
		       conn_param->private_data, ep->plen);
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;

	if (peer2peer && ep->ord == 0)
		ep->ord = 1;

	cm_id->add_ref(cm_id);
	ep->com.dev = dev;
	ep->com.cm_id = cm_id;
	ep->com.qp = get_qhp(dev, conn_param->qpn);
	BUG_ON(!ep->com.qp);
	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
	     ep->com.qp, cm_id);

	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
	if (ep->atid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
	     ntohl(cm_id->local_addr.sin_addr.s_addr),
	     ntohs(cm_id->local_addr.sin_port),
	     ntohl(cm_id->remote_addr.sin_addr.s_addr),
	     ntohs(cm_id->remote_addr.sin_port));

	/* find a route */
	rt = find_route(dev,
			cm_id->local_addr.sin_addr.s_addr,
			cm_id->remote_addr.sin_addr.s_addr,
			cm_id->local_addr.sin_port,
			cm_id->remote_addr.sin_port, 0);
	if (!rt) {
		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail3;
	}
	ep->dst = &rt->dst;

	/* get a l2t entry */
	if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
		PDBG("%s LOOPBACK\n", __func__);
		pdev = ip_dev_find(&init_net,
				   cm_id->remote_addr.sin_addr.s_addr);
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					ep->dst->neighbour,
					pdev, 0);
		ep->mtu = pdev->mtu;
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
		step = ep->com.dev->rdev.lldi.ntxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		step = ep->com.dev->rdev.lldi.nrxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(pdev) * step];
		dev_put(pdev);
	} else {
		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
					ep->dst->neighbour,
					ep->dst->neighbour->dev, 0);
		ep->mtu = dst_mtu(ep->dst);
		ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
		ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
				0x7F) << 1;
		step = ep->com.dev->rdev.lldi.ntxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
		step = ep->com.dev->rdev.lldi.nrxq /
		       ep->com.dev->rdev.lldi.nchan;
		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
			      cxgb4_port_idx(ep->dst->neighbour->dev) * step];
	}
	if (!ep->l2t) {
		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
		err = -ENOMEM;
		goto fail4;
	}

	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
	     ep->l2t->idx);

	state_set(&ep->com, CONNECTING);
	ep->tos = 0;
	ep->com.local_addr = cm_id->local_addr;
	ep->com.remote_addr = cm_id->remote_addr;

	/* send connect request to rnic */
	err = send_connect(ep);
	if (!err)
		goto out;

	cxgb4_l2t_release(ep->l2t);
fail4:
	dst_release(ep->dst);
fail3:
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
out:
	return err;
}
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	int err = 0;
	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
	struct c4iw_listen_ep *ep;

	might_sleep();

	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
	if (!ep) {
		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
		err = -ENOMEM;
		goto fail1;
	}
	PDBG("%s ep %p\n", __func__, ep);
	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.dev = dev;
	ep->backlog = backlog;
	ep->com.local_addr = cm_id->local_addr;

	/*
	 * Allocate a server TID.
	 */
	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
	if (ep->stid == -1) {
		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
		err = -ENOMEM;
		goto fail2;
	}

	state_set(&ep->com, LISTEN);
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
				  ep->com.local_addr.sin_addr.s_addr,
				  ep->com.local_addr.sin_port,
				  ep->com.dev->rdev.lldi.rxq_ids[0]);
	if (err)
		goto fail3;

	/* wait for pass_open_rpl */
	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
				  __func__);
	if (!err) {
		cm_id->provider_data = ep;
		goto out;
	}
fail3:
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
fail2:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
fail1:
out:
	return err;
}
int c4iw_destroy_listen(struct iw_cm_id *cm_id)
{
	int err;
	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);

	PDBG("%s ep %p\n", __func__, ep);

	might_sleep();
	state_set(&ep->com, DEAD);
	c4iw_init_wr_wait(&ep->com.wr_wait);
	err = listen_stop(ep);
	if (err)
		goto done;
	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
				  __func__);
	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
done:
	cm_id->rem_ref(cm_id);
	c4iw_put_ep(&ep->com);
	return err;
}
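/*
 * Common disconnect path used by the CPL handlers and the ULP: close
 * the connection gracefully, or abort it when 'abrupt' is set or a
 * fatal device error is pending.
 */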
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
{
	int ret = 0;
	int close = 0;
	int fatal = 0;
	struct c4iw_rdev *rdev;

	mutex_lock(&ep->com.mutex);

	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
	     states[ep->com.state], abrupt);

	rdev = &ep->com.dev->rdev;
	if (c4iw_fatal_error(rdev)) {
		fatal = 1;
		close_complete_upcall(ep);
		ep->com.state = DEAD;
	}
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
	case MPA_REQ_SENT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		close = 1;
		if (abrupt)
			ep->com.state = ABORTING;
		else {
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
			close = 1;
			if (abrupt) {
				stop_ep_timer(ep);
				ep->com.state = ABORTING;
			} else
				ep->com.state = MORIBUND;
		}
		break;
	case MORIBUND:
	case ABORTING:
	case DEAD:
		PDBG("%s ignoring disconnect ep %p state %u\n",
		     __func__, ep, ep->com.state);
		break;
	default:
		BUG();
		break;
	}

	mutex_unlock(&ep->com.mutex);
	if (close) {
		if (abrupt)
			ret = abort_connection(ep, NULL, gfp);
		else
			ret = send_halfclose(ep, gfp);
		if (ret)
			fatal = 1;
	}
	if (fatal)
		release_ep_resources(ep);
	return ret;
}
static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
	return 0;
}
/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = async_event
};
static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		__state_set(&ep->com, ABORTING);
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
		__state_set(&ep->com, ABORTING);
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		__state_set(&ep->com, ABORTING);
		break;
	default:
		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
		       __func__, ep, ep->hwtid, ep->com.state);
		WARN_ON(1);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		abort_connection(ep, NULL, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}
static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}
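/*
 * Worker that drains the CPL receive queue, dispatching each message
 * to its handler in work_handlers[], then reaps any endpoints whose
 * timers have expired.
 */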
static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
	}
	process_timedout_eps();
}
static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;

	spin_lock(&timeout_lock);
	list_add_tail(&ep->entry, &timeout_list);
	spin_unlock(&timeout_lock);
	queue_work(workq, &skb_work);
}
/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}
static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case 1:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp) {
			if (ret)
				wr_waitp->ret = -ret;
			else
				wr_waitp->ret = 0;
			wr_waitp->done = 1;
			wake_up(&wr_waitp->wait);
		}
		kfree_skb(skb);
		break;
	case 2:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}
/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = sched,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg
};
c4iw_cm_init(void)
2346 spin_lock_init(&timeout_lock
);
2347 skb_queue_head_init(&rxq
);
2349 workq
= create_singlethread_workqueue("iw_cxgb4");
2356 void __exit
c4iw_cm_term(void)
2358 WARN_ON(!list_empty(&timeout_list
));
2359 flush_workqueue(workq
);
2360 destroy_workqueue(workq
);