Commit | Line | Data |
---|---|---|
cfdda9d7 SW |
1 | /* |
2 | * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | */ | |
32 | #include <linux/module.h> | |
33 | #include <linux/list.h> | |
34 | #include <linux/workqueue.h> | |
35 | #include <linux/skbuff.h> | |
36 | #include <linux/timer.h> | |
37 | #include <linux/notifier.h> | |
38 | #include <linux/inetdevice.h> | |
39 | #include <linux/ip.h> | |
40 | #include <linux/tcp.h> | |
1cab775c | 41 | #include <linux/if_vlan.h> |
cfdda9d7 SW |
42 | |
43 | #include <net/neighbour.h> | |
44 | #include <net/netevent.h> | |
45 | #include <net/route.h> | |
1cab775c | 46 | #include <net/tcp.h> |
cfdda9d7 SW |
47 | |
48 | #include "iw_cxgb4.h" | |
49 | ||
50 | static char *states[] = { | |
51 | "idle", | |
52 | "listen", | |
53 | "connecting", | |
54 | "mpa_wait_req", | |
55 | "mpa_req_sent", | |
56 | "mpa_req_rcvd", | |
57 | "mpa_rep_sent", | |
58 | "fpdu_mode", | |
59 | "aborting", | |
60 | "closing", | |
61 | "moribund", | |
62 | "dead", | |
63 | NULL, | |
64 | }; | |
65 | ||
5be78ee9 VP |
66 | static int nocong; |
67 | module_param(nocong, int, 0644); | |
68 | MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)"); | |
69 | ||
70 | static int enable_ecn; | |
71 | module_param(enable_ecn, int, 0644); | |
72 | MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)"); | |
73 | ||
b52fe09e | 74 | static int dack_mode = 1; |
ba6d3925 | 75 | module_param(dack_mode, int, 0644); |
b52fe09e | 76 | MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); |
ba6d3925 | 77 | |
be4c9bad RD |
78 | int c4iw_max_read_depth = 8; |
79 | module_param(c4iw_max_read_depth, int, 0644); | |
80 | MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); | |
81 | ||
cfdda9d7 SW |
82 | static int enable_tcp_timestamps; |
83 | module_param(enable_tcp_timestamps, int, 0644); | |
84 | MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)"); | |
85 | ||
86 | static int enable_tcp_sack; | |
87 | module_param(enable_tcp_sack, int, 0644); | |
88 | MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)"); | |
89 | ||
90 | static int enable_tcp_window_scaling = 1; | |
91 | module_param(enable_tcp_window_scaling, int, 0644); | |
92 | MODULE_PARM_DESC(enable_tcp_window_scaling, | |
93 | "Enable tcp window scaling (default=1)"); | |
94 | ||
95 | int c4iw_debug; | |
96 | module_param(c4iw_debug, int, 0644); | |
97 | MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)"); | |
98 | ||
99 | static int peer2peer; | |
100 | module_param(peer2peer, int, 0644); | |
101 | MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)"); | |
102 | ||
103 | static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; | |
104 | module_param(p2p_type, int, 0644); | |
105 | MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: " | |
106 | "1=RDMA_READ 0=RDMA_WRITE (default 1)"); | |
107 | ||
108 | static int ep_timeout_secs = 60; | |
109 | module_param(ep_timeout_secs, int, 0644); | |
110 | MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " | |
111 | "in seconds (default=60)"); | |
112 | ||
113 | static int mpa_rev = 1; | |
114 | module_param(mpa_rev, int, 0644); | |
115 | MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " | |
d2fe99e8 KS |
116 | "1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft" |
117 | " compliant (default=1)"); | |
cfdda9d7 SW |
118 | |
119 | static int markers_enabled; | |
120 | module_param(markers_enabled, int, 0644); | |
121 | MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); | |
122 | ||
123 | static int crc_enabled = 1; | |
124 | module_param(crc_enabled, int, 0644); | |
125 | MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); | |
126 | ||
127 | static int rcv_win = 256 * 1024; | |
128 | module_param(rcv_win, int, 0644); | |
129 | MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); | |
130 | ||
98ae68b7 | 131 | static int snd_win = 128 * 1024; |
cfdda9d7 | 132 | module_param(snd_win, int, 0644); |
98ae68b7 | 133 | MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); |
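/*
 * All of the knobs above are ordinary module parameters with 0644 permissions,
 * so (assuming the driver is built and loaded as iw_cxgb4) they can be set at
 * load time, e.g.:
 *
 *     modprobe iw_cxgb4 mpa_rev=2 peer2peer=1
 *
 * or adjusted afterwards through /sys/module/iw_cxgb4/parameters/<name>.
 */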
cfdda9d7 | 134 | |
cfdda9d7 | 135 | static struct workqueue_struct *workq; |
cfdda9d7 SW |
136 | |
137 | static struct sk_buff_head rxq; | |
cfdda9d7 SW |
138 | |
139 | static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp); | |
140 | static void ep_timeout(unsigned long arg); | |
141 | static void connect_reply_upcall(struct c4iw_ep *ep, int status); | |
142 | ||
be4c9bad RD |
143 | static LIST_HEAD(timeout_list); |
144 | static spinlock_t timeout_lock; | |
145 | ||
cfdda9d7 SW |
146 | static void start_ep_timer(struct c4iw_ep *ep) |
147 | { | |
148 | PDBG("%s ep %p\n", __func__, ep); | |
149 | if (timer_pending(&ep->timer)) { | |
150 | PDBG("%s stopped / restarted timer ep %p\n", __func__, ep); | |
151 | del_timer_sync(&ep->timer); | |
152 | } else | |
153 | c4iw_get_ep(&ep->com); | |
154 | ep->timer.expires = jiffies + ep_timeout_secs * HZ; | |
155 | ep->timer.data = (unsigned long)ep; | |
156 | ep->timer.function = ep_timeout; | |
157 | add_timer(&ep->timer); | |
158 | } | |
159 | ||
160 | static void stop_ep_timer(struct c4iw_ep *ep) | |
161 | { | |
162 | PDBG("%s ep %p\n", __func__, ep); | |
163 | if (!timer_pending(&ep->timer)) { | |
76f267b7 | 164 | WARN(1, "%s timer stopped when it's not running! "
cfdda9d7 | 165 | "ep %p state %u\n", __func__, ep, ep->com.state); |
cfdda9d7 SW |
166 | return; |
167 | } | |
168 | del_timer_sync(&ep->timer); | |
169 | c4iw_put_ep(&ep->com); | |
170 | } | |
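/*
 * The two helpers above are paired: start_ep_timer() takes an extra reference
 * on the endpoint (unless it is only re-arming an already-pending timer), and
 * stop_ep_timer() drops that reference after del_timer_sync(), so the ep
 * cannot be freed while ep_timeout() might still run against it.
 */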
171 | ||
172 | static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, | |
173 | struct l2t_entry *l2e) | |
174 | { | |
175 | int error = 0; | |
176 | ||
177 | if (c4iw_fatal_error(rdev)) { | |
178 | kfree_skb(skb); | |
179 | PDBG("%s - device in error state - dropping\n", __func__); | |
180 | return -EIO; | |
181 | } | |
182 | error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); | |
183 | if (error < 0) | |
184 | kfree_skb(skb); | |
74594861 | 185 | return error < 0 ? error : 0; |
cfdda9d7 SW |
186 | } |
187 | ||
188 | int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) | |
189 | { | |
190 | int error = 0; | |
191 | ||
192 | if (c4iw_fatal_error(rdev)) { | |
193 | kfree_skb(skb); | |
194 | PDBG("%s - device in error state - dropping\n", __func__); | |
195 | return -EIO; | |
196 | } | |
197 | error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); | |
198 | if (error < 0) | |
199 | kfree_skb(skb); | |
74594861 | 200 | return error < 0 ? error : 0; |
cfdda9d7 SW |
201 | } |
202 | ||
203 | static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) | |
204 | { | |
205 | struct cpl_tid_release *req; | |
206 | ||
207 | skb = get_skb(skb, sizeof *req, GFP_KERNEL); | |
208 | if (!skb) | |
209 | return; | |
210 | req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); | |
211 | INIT_TP_WR(req, hwtid); | |
212 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); | |
213 | set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); | |
214 | c4iw_ofld_send(rdev, skb); | |
215 | return; | |
216 | } | |
217 | ||
218 | static void set_emss(struct c4iw_ep *ep, u16 opt) | |
219 | { | |
220 | ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40; | |
221 | ep->mss = ep->emss; | |
222 | if (GET_TCPOPT_TSTAMP(opt)) | |
223 | ep->emss -= 12; | |
224 | if (ep->emss < 128) | |
225 | ep->emss = 128; | |
226 | PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt), | |
227 | ep->mss, ep->emss); | |
228 | } | |
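/*
 * The 40 bytes subtracted above are the IPv4 (20) plus TCP (20) headers, and
 * the extra 12 bytes cover the TCP timestamp option when it was negotiated,
 * so ep->emss is the usable payload per segment, floored at 128 bytes.
 */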
229 | ||
230 | static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) | |
231 | { | |
cfdda9d7 SW |
232 | enum c4iw_ep_state state; |
233 | ||
2f5b48c3 | 234 | mutex_lock(&epc->mutex); |
cfdda9d7 | 235 | state = epc->state; |
2f5b48c3 | 236 | mutex_unlock(&epc->mutex); |
cfdda9d7 SW |
237 | return state; |
238 | } | |
239 | ||
240 | static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) | |
241 | { | |
242 | epc->state = new; | |
243 | } | |
244 | ||
245 | static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) | |
246 | { | |
2f5b48c3 | 247 | mutex_lock(&epc->mutex); |
cfdda9d7 SW |
248 | PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); |
249 | __state_set(epc, new); | |
2f5b48c3 | 250 | mutex_unlock(&epc->mutex); |
cfdda9d7 SW |
251 | return; |
252 | } | |
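/*
 * Endpoint state changes are serialized on epc->mutex: state_read() and
 * state_set() take the lock themselves, while __state_set() is the variant
 * for callers that already hold ep->com.mutex.
 */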
253 | ||
254 | static void *alloc_ep(int size, gfp_t gfp) | |
255 | { | |
256 | struct c4iw_ep_common *epc; | |
257 | ||
258 | epc = kzalloc(size, gfp); | |
259 | if (epc) { | |
260 | kref_init(&epc->kref); | |
2f5b48c3 | 261 | mutex_init(&epc->mutex); |
aadc4df3 | 262 | c4iw_init_wr_wait(&epc->wr_wait); |
cfdda9d7 SW |
263 | } |
264 | PDBG("%s alloc ep %p\n", __func__, epc); | |
265 | return epc; | |
266 | } | |
267 | ||
268 | void _c4iw_free_ep(struct kref *kref) | |
269 | { | |
270 | struct c4iw_ep *ep; | |
271 | ||
272 | ep = container_of(kref, struct c4iw_ep, com.kref); | |
273 | PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); | |
274 | if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { | |
275 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); | |
276 | dst_release(ep->dst); | |
277 | cxgb4_l2t_release(ep->l2t); | |
278 | } | |
279 | kfree(ep); | |
280 | } | |
281 | ||
282 | static void release_ep_resources(struct c4iw_ep *ep) | |
283 | { | |
284 | set_bit(RELEASE_RESOURCES, &ep->com.flags); | |
285 | c4iw_put_ep(&ep->com); | |
286 | } | |
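/*
 * Teardown is deferred: release_ep_resources() only marks the endpoint and
 * drops a reference; the hardware tid, dst entry and l2t entry are actually
 * released in _c4iw_free_ep() once the final kref is put.
 */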
287 | ||
cfdda9d7 SW |
288 | static int status2errno(int status) |
289 | { | |
290 | switch (status) { | |
291 | case CPL_ERR_NONE: | |
292 | return 0; | |
293 | case CPL_ERR_CONN_RESET: | |
294 | return -ECONNRESET; | |
295 | case CPL_ERR_ARP_MISS: | |
296 | return -EHOSTUNREACH; | |
297 | case CPL_ERR_CONN_TIMEDOUT: | |
298 | return -ETIMEDOUT; | |
299 | case CPL_ERR_TCAM_FULL: | |
300 | return -ENOMEM; | |
301 | case CPL_ERR_CONN_EXIST: | |
302 | return -EADDRINUSE; | |
303 | default: | |
304 | return -EIO; | |
305 | } | |
306 | } | |
307 | ||
308 | /* | |
309 | * Try and reuse skbs already allocated... | |
310 | */ | |
311 | static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp) | |
312 | { | |
313 | if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) { | |
314 | skb_trim(skb, 0); | |
315 | skb_get(skb); | |
316 | skb_reset_transport_header(skb); | |
317 | } else { | |
318 | skb = alloc_skb(len, gfp); | |
319 | } | |
320 | return skb; | |
321 | } | |
322 | ||
323 | static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip, | |
324 | __be32 peer_ip, __be16 local_port, | |
325 | __be16 peer_port, u8 tos) | |
326 | { | |
327 | struct rtable *rt; | |
31e4543d | 328 | struct flowi4 fl4; |
78fbfd8a | 329 | |
31e4543d | 330 | rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, |
78fbfd8a DM |
331 | peer_port, local_port, IPPROTO_TCP, |
332 | tos, 0); | |
b23dd4fe | 333 | if (IS_ERR(rt)) |
cfdda9d7 SW |
334 | return NULL; |
335 | return rt; | |
336 | } | |
337 | ||
338 | static void arp_failure_discard(void *handle, struct sk_buff *skb) | |
339 | { | |
340 | PDBG("%s c4iw_dev %p\n", __func__, handle); | |
341 | kfree_skb(skb); | |
342 | } | |
343 | ||
344 | /* | |
345 | * Handle an ARP failure for an active open. | |
346 | */ | |
347 | static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) | |
348 | { | |
349 | printk(KERN_ERR MOD "ARP failure duing connect\n"); | |
350 | kfree_skb(skb); | |
351 | } | |
352 | ||
353 | /* | |
354 | * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant | |
355 | * and send it along. | |
356 | */ | |
357 | static void abort_arp_failure(void *handle, struct sk_buff *skb) | |
358 | { | |
359 | struct c4iw_rdev *rdev = handle; | |
360 | struct cpl_abort_req *req = cplhdr(skb); | |
361 | ||
362 | PDBG("%s rdev %p\n", __func__, rdev); | |
363 | req->cmd = CPL_ABORT_NO_RST; | |
364 | c4iw_ofld_send(rdev, skb); | |
365 | } | |
366 | ||
367 | static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) | |
368 | { | |
369 | unsigned int flowclen = 80; | |
370 | struct fw_flowc_wr *flowc; | |
371 | int i; | |
372 | ||
373 | skb = get_skb(skb, flowclen, GFP_KERNEL); | |
374 | flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); | |
375 | ||
376 | flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) | | |
377 | FW_FLOWC_WR_NPARAMS(8)); | |
378 | flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen, | |
379 | 16)) | FW_WR_FLOWID(ep->hwtid)); | |
380 | ||
381 | flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; | |
94788657 | 382 | flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8); |
cfdda9d7 SW |
383 | flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; |
384 | flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); | |
385 | flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; | |
386 | flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); | |
387 | flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; | |
388 | flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); | |
389 | flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; | |
390 | flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); | |
391 | flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; | |
392 | flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); | |
393 | flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; | |
394 | flowc->mnemval[6].val = cpu_to_be32(snd_win); | |
395 | flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; | |
396 | flowc->mnemval[7].val = cpu_to_be32(ep->emss); | |
397 | /* Pad WR to 16 byte boundary */ | |
398 | flowc->mnemval[8].mnemonic = 0; | |
399 | flowc->mnemval[8].val = 0; | |
400 | for (i = 0; i < 9; i++) { | |
401 | flowc->mnemval[i].r4[0] = 0; | |
402 | flowc->mnemval[i].r4[1] = 0; | |
403 | flowc->mnemval[i].r4[2] = 0; | |
404 | } | |
405 | ||
406 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
407 | c4iw_ofld_send(&ep->com.dev->rdev, skb); | |
408 | } | |
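/*
 * The FLOWC work request above carries eight real parameters plus a ninth
 * all-zero mnemonic solely to pad the WR out to a 16-byte boundary; the
 * 80-byte flowclen is the 8-byte WR header plus nine 8-byte mnemonic/value
 * pairs.
 */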
409 | ||
410 | static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp) | |
411 | { | |
412 | struct cpl_close_con_req *req; | |
413 | struct sk_buff *skb; | |
414 | int wrlen = roundup(sizeof *req, 16); | |
415 | ||
416 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
417 | skb = get_skb(NULL, wrlen, gfp); | |
418 | if (!skb) { | |
419 | printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); | |
420 | return -ENOMEM; | |
421 | } | |
422 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
423 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
424 | req = (struct cpl_close_con_req *) skb_put(skb, wrlen); | |
425 | memset(req, 0, wrlen); | |
426 | INIT_TP_WR(req, ep->hwtid); | |
427 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, | |
428 | ep->hwtid)); | |
429 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
430 | } | |
431 | ||
432 | static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | |
433 | { | |
434 | struct cpl_abort_req *req; | |
435 | int wrlen = roundup(sizeof *req, 16); | |
436 | ||
437 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
438 | skb = get_skb(skb, wrlen, gfp); | |
439 | if (!skb) { | |
440 | printk(KERN_ERR MOD "%s - failed to alloc skb.\n", | |
441 | __func__); | |
442 | return -ENOMEM; | |
443 | } | |
444 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
445 | t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure); | |
446 | req = (struct cpl_abort_req *) skb_put(skb, wrlen); | |
447 | memset(req, 0, wrlen); | |
448 | INIT_TP_WR(req, ep->hwtid); | |
449 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); | |
450 | req->cmd = CPL_ABORT_SEND_RST; | |
451 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
452 | } | |
453 | ||
5be78ee9 VP |
454 | #define VLAN_NONE 0xfff |
455 | #define FILTER_SEL_VLAN_NONE 0xffff | |
456 | #define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */ | |
457 | #define FILTER_SEL_WIDTH_VIN_P_FC \ | |
458 | (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/ | |
459 | #define FILTER_SEL_WIDTH_TAG_P_FC \ | |
460 | (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */ | |
461 | #define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC) | |
462 | ||
463 | static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst, | |
464 | struct l2t_entry *l2t) | |
465 | { | |
466 | unsigned int ntuple = 0; | |
467 | u32 viid; | |
468 | ||
469 | switch (dev->rdev.lldi.filt_mode) { | |
470 | ||
471 | /* default filter mode */ | |
472 | case HW_TPL_FR_MT_PR_IV_P_FC: | |
473 | if (l2t->vlan == VLAN_NONE) | |
474 | ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC; | |
475 | else { | |
476 | ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC; | |
477 | ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC; | |
478 | } | |
479 | ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << | |
480 | FILTER_SEL_WIDTH_VLD_TAG_P_FC; | |
481 | break; | |
482 | case HW_TPL_FR_MT_PR_OV_P_FC: { | |
483 | viid = cxgb4_port_viid(l2t->neigh->dev); | |
484 | ||
485 | ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC; | |
486 | ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC; | |
487 | ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC; | |
488 | ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << | |
489 | FILTER_SEL_WIDTH_VLD_TAG_P_FC; | |
490 | break; | |
491 | } | |
492 | default: | |
493 | break; | |
494 | } | |
495 | return ntuple; | |
496 | } | |
497 | ||
cfdda9d7 SW |
498 | static int send_connect(struct c4iw_ep *ep) |
499 | { | |
500 | struct cpl_act_open_req *req; | |
501 | struct sk_buff *skb; | |
502 | u64 opt0; | |
503 | u32 opt2; | |
504 | unsigned int mtu_idx; | |
505 | int wscale; | |
506 | int wrlen = roundup(sizeof *req, 16); | |
507 | ||
508 | PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); | |
509 | ||
510 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
511 | if (!skb) { | |
512 | printk(KERN_ERR MOD "%s - failed to alloc skb.\n", | |
513 | __func__); | |
514 | return -ENOMEM; | |
515 | } | |
d4f1a5c6 | 516 | set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); |
cfdda9d7 SW |
517 | |
518 | cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); | |
519 | wscale = compute_wscale(rcv_win); | |
5be78ee9 VP |
520 | opt0 = (nocong ? NO_CONG(1) : 0) | |
521 | KEEP_ALIVE(1) | | |
ba6d3925 | 522 | DELACK(1) | |
cfdda9d7 SW |
523 | WND_SCALE(wscale) | |
524 | MSS_IDX(mtu_idx) | | |
525 | L2T_IDX(ep->l2t->idx) | | |
526 | TX_CHAN(ep->tx_chan) | | |
527 | SMAC_SEL(ep->smac_idx) | | |
528 | DSCP(ep->tos) | | |
b48f3b9c | 529 | ULP_MODE(ULP_MODE_TCPDDP) | |
cfdda9d7 SW |
530 | RCV_BUFSIZ(rcv_win>>10); |
531 | opt2 = RX_CHANNEL(0) | | |
5be78ee9 | 532 | CCTRL_ECN(enable_ecn) | |
cfdda9d7 SW |
533 | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); |
534 | if (enable_tcp_timestamps) | |
535 | opt2 |= TSTAMPS_EN(1); | |
536 | if (enable_tcp_sack) | |
537 | opt2 |= SACK_EN(1); | |
538 | if (wscale && enable_tcp_window_scaling) | |
539 | opt2 |= WND_SCALE_EN(1); | |
540 | t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); | |
541 | ||
542 | req = (struct cpl_act_open_req *) skb_put(skb, wrlen); | |
543 | INIT_TP_WR(req, 0); | |
544 | OPCODE_TID(req) = cpu_to_be32( | |
545 | MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid))); | |
546 | req->local_port = ep->com.local_addr.sin_port; | |
547 | req->peer_port = ep->com.remote_addr.sin_port; | |
548 | req->local_ip = ep->com.local_addr.sin_addr.s_addr; | |
549 | req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; | |
550 | req->opt0 = cpu_to_be64(opt0); | |
5be78ee9 | 551 | req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t)); |
cfdda9d7 SW |
552 | req->opt2 = cpu_to_be32(opt2); |
553 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
554 | } | |
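/*
 * For an active open, opt0/opt2 pre-program the hardware TCB for this
 * connection: MSS index, window scale, L2T entry, TX channel, DSCP and
 * receive buffer size in opt0, and the RSS queue plus the optional
 * timestamp/SACK/window-scaling enables in opt2.  compute_wscale() (from
 * iw_cxgb4.h) derives the TCP window-scale shift from the configured rcv_win.
 */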
555 | ||
d2fe99e8 KS |
556 | static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, |
557 | u8 mpa_rev_to_use) | |
cfdda9d7 SW |
558 | { |
559 | int mpalen, wrlen; | |
560 | struct fw_ofld_tx_data_wr *req; | |
561 | struct mpa_message *mpa; | |
d2fe99e8 | 562 | struct mpa_v2_conn_params mpa_v2_params; |
cfdda9d7 SW |
563 | |
564 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | |
565 | ||
566 | BUG_ON(skb_cloned(skb)); | |
567 | ||
568 | mpalen = sizeof(*mpa) + ep->plen; | |
d2fe99e8 KS |
569 | if (mpa_rev_to_use == 2) |
570 | mpalen += sizeof(struct mpa_v2_conn_params); | |
cfdda9d7 SW |
571 | wrlen = roundup(mpalen + sizeof *req, 16); |
572 | skb = get_skb(skb, wrlen, GFP_KERNEL); | |
573 | if (!skb) { | |
574 | connect_reply_upcall(ep, -ENOMEM); | |
575 | return; | |
576 | } | |
577 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
578 | ||
579 | req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); | |
580 | memset(req, 0, wrlen); | |
581 | req->op_to_immdlen = cpu_to_be32( | |
582 | FW_WR_OP(FW_OFLD_TX_DATA_WR) | | |
583 | FW_WR_COMPL(1) | | |
584 | FW_WR_IMMDLEN(mpalen)); | |
585 | req->flowid_len16 = cpu_to_be32( | |
586 | FW_WR_FLOWID(ep->hwtid) | | |
587 | FW_WR_LEN16(wrlen >> 4)); | |
588 | req->plen = cpu_to_be32(mpalen); | |
589 | req->tunnel_to_proxy = cpu_to_be32( | |
590 | FW_OFLD_TX_DATA_WR_FLUSH(1) | | |
591 | FW_OFLD_TX_DATA_WR_SHOVE(1)); | |
592 | ||
593 | mpa = (struct mpa_message *)(req + 1); | |
594 | memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); | |
595 | mpa->flags = (crc_enabled ? MPA_CRC : 0) | | |
d2fe99e8 KS |
596 | (markers_enabled ? MPA_MARKERS : 0) | |
597 | (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); | |
cfdda9d7 | 598 | mpa->private_data_size = htons(ep->plen); |
d2fe99e8 | 599 | mpa->revision = mpa_rev_to_use; |
01b225e1 | 600 | if (mpa_rev_to_use == 1) { |
d2fe99e8 | 601 | ep->tried_with_mpa_v1 = 1; |
01b225e1 KS |
602 | ep->retry_with_mpa_v1 = 0; |
603 | } | |
d2fe99e8 KS |
604 | |
605 | if (mpa_rev_to_use == 2) { | |
f747c34a RD |
606 | mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
607 | sizeof (struct mpa_v2_conn_params)); | |
d2fe99e8 KS |
608 | mpa_v2_params.ird = htons((u16)ep->ird); |
609 | mpa_v2_params.ord = htons((u16)ep->ord); | |
610 | ||
611 | if (peer2peer) { | |
612 | mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); | |
613 | if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) | |
614 | mpa_v2_params.ord |= | |
615 | htons(MPA_V2_RDMA_WRITE_RTR); | |
616 | else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) | |
617 | mpa_v2_params.ord |= | |
618 | htons(MPA_V2_RDMA_READ_RTR); | |
619 | } | |
620 | memcpy(mpa->private_data, &mpa_v2_params, | |
621 | sizeof(struct mpa_v2_conn_params)); | |
cfdda9d7 | 622 | |
d2fe99e8 KS |
623 | if (ep->plen) |
624 | memcpy(mpa->private_data + | |
625 | sizeof(struct mpa_v2_conn_params), | |
626 | ep->mpa_pkt + sizeof(*mpa), ep->plen); | |
627 | } else | |
628 | if (ep->plen) | |
629 | memcpy(mpa->private_data, | |
630 | ep->mpa_pkt + sizeof(*mpa), ep->plen); | |
cfdda9d7 SW |
631 | |
632 | /* | |
633 | * Reference the mpa skb. This ensures the data area | |
634 | * will remain in memory until the hw acks the tx. | |
635 | * Function fw4_ack() will deref it. | |
636 | */ | |
637 | skb_get(skb); | |
638 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
639 | BUG_ON(ep->mpa_skb); | |
640 | ep->mpa_skb = skb; | |
641 | c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
642 | start_ep_timer(ep); | |
643 | state_set(&ep->com, MPA_REQ_SENT); | |
644 | ep->mpa_attr.initiator = 1; | |
645 | return; | |
646 | } | |
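/*
 * When MPA revision 2 is negotiated, the enhanced-connection parameters
 * (IRD/ORD and the peer-to-peer RTR bits) are carried in an
 * mpa_v2_conn_params block placed at the start of the private data, and
 * private_data_size is grown to cover it; the ULP's own private data follows
 * immediately after.
 */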
647 | ||
648 | static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) | |
649 | { | |
650 | int mpalen, wrlen; | |
651 | struct fw_ofld_tx_data_wr *req; | |
652 | struct mpa_message *mpa; | |
653 | struct sk_buff *skb; | |
d2fe99e8 | 654 | struct mpa_v2_conn_params mpa_v2_params; |
cfdda9d7 SW |
655 | |
656 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | |
657 | ||
658 | mpalen = sizeof(*mpa) + plen; | |
d2fe99e8 KS |
659 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) |
660 | mpalen += sizeof(struct mpa_v2_conn_params); | |
cfdda9d7 SW |
661 | wrlen = roundup(mpalen + sizeof *req, 16); |
662 | ||
663 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
664 | if (!skb) { | |
665 | printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); | |
666 | return -ENOMEM; | |
667 | } | |
668 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
669 | ||
670 | req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); | |
671 | memset(req, 0, wrlen); | |
672 | req->op_to_immdlen = cpu_to_be32( | |
673 | FW_WR_OP(FW_OFLD_TX_DATA_WR) | | |
674 | FW_WR_COMPL(1) | | |
675 | FW_WR_IMMDLEN(mpalen)); | |
676 | req->flowid_len16 = cpu_to_be32( | |
677 | FW_WR_FLOWID(ep->hwtid) | | |
678 | FW_WR_LEN16(wrlen >> 4)); | |
679 | req->plen = cpu_to_be32(mpalen); | |
680 | req->tunnel_to_proxy = cpu_to_be32( | |
681 | FW_OFLD_TX_DATA_WR_FLUSH(1) | | |
682 | FW_OFLD_TX_DATA_WR_SHOVE(1)); | |
683 | ||
684 | mpa = (struct mpa_message *)(req + 1); | |
685 | memset(mpa, 0, sizeof(*mpa)); | |
686 | memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); | |
687 | mpa->flags = MPA_REJECT; | |
688 | mpa->revision = mpa_rev; | |
689 | mpa->private_data_size = htons(plen); | |
d2fe99e8 KS |
690 | |
691 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { | |
692 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; | |
f747c34a RD |
693 | mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
694 | sizeof (struct mpa_v2_conn_params)); | |
d2fe99e8 KS |
695 | mpa_v2_params.ird = htons(((u16)ep->ird) | |
696 | (peer2peer ? MPA_V2_PEER2PEER_MODEL : | |
697 | 0)); | |
698 | mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? | |
699 | (p2p_type == | |
700 | FW_RI_INIT_P2PTYPE_RDMA_WRITE ? | |
701 | MPA_V2_RDMA_WRITE_RTR : p2p_type == | |
702 | FW_RI_INIT_P2PTYPE_READ_REQ ? | |
703 | MPA_V2_RDMA_READ_RTR : 0) : 0)); | |
704 | memcpy(mpa->private_data, &mpa_v2_params, | |
705 | sizeof(struct mpa_v2_conn_params)); | |
706 | ||
707 | if (ep->plen) | |
708 | memcpy(mpa->private_data + | |
709 | sizeof(struct mpa_v2_conn_params), pdata, plen); | |
710 | } else | |
711 | if (plen) | |
712 | memcpy(mpa->private_data, pdata, plen); | |
cfdda9d7 SW |
713 | |
714 | /* | |
715 | * Reference the mpa skb again. This ensures the data area | |
716 | * will remain in memory until the hw acks the tx. | |
717 | * Function fw4_ack() will deref it. | |
718 | */ | |
719 | skb_get(skb); | |
720 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
721 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
722 | BUG_ON(ep->mpa_skb); | |
723 | ep->mpa_skb = skb; | |
724 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
725 | } | |
726 | ||
727 | static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) | |
728 | { | |
729 | int mpalen, wrlen; | |
730 | struct fw_ofld_tx_data_wr *req; | |
731 | struct mpa_message *mpa; | |
732 | struct sk_buff *skb; | |
d2fe99e8 | 733 | struct mpa_v2_conn_params mpa_v2_params; |
cfdda9d7 SW |
734 | |
735 | PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); | |
736 | ||
737 | mpalen = sizeof(*mpa) + plen; | |
d2fe99e8 KS |
738 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) |
739 | mpalen += sizeof(struct mpa_v2_conn_params); | |
cfdda9d7 SW |
740 | wrlen = roundup(mpalen + sizeof *req, 16); |
741 | ||
742 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
743 | if (!skb) { | |
744 | printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); | |
745 | return -ENOMEM; | |
746 | } | |
747 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
748 | ||
749 | req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); | |
750 | memset(req, 0, wrlen); | |
751 | req->op_to_immdlen = cpu_to_be32( | |
752 | FW_WR_OP(FW_OFLD_TX_DATA_WR) | | |
753 | FW_WR_COMPL(1) | | |
754 | FW_WR_IMMDLEN(mpalen)); | |
755 | req->flowid_len16 = cpu_to_be32( | |
756 | FW_WR_FLOWID(ep->hwtid) | | |
757 | FW_WR_LEN16(wrlen >> 4)); | |
758 | req->plen = cpu_to_be32(mpalen); | |
759 | req->tunnel_to_proxy = cpu_to_be32( | |
760 | FW_OFLD_TX_DATA_WR_FLUSH(1) | | |
761 | FW_OFLD_TX_DATA_WR_SHOVE(1)); | |
762 | ||
763 | mpa = (struct mpa_message *)(req + 1); | |
764 | memset(mpa, 0, sizeof(*mpa)); | |
765 | memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); | |
766 | mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | | |
767 | (markers_enabled ? MPA_MARKERS : 0); | |
d2fe99e8 | 768 | mpa->revision = ep->mpa_attr.version; |
cfdda9d7 | 769 | mpa->private_data_size = htons(plen); |
d2fe99e8 KS |
770 | |
771 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { | |
772 | mpa->flags |= MPA_ENHANCED_RDMA_CONN; | |
f747c34a RD |
773 | mpa->private_data_size = htons(ntohs(mpa->private_data_size) + |
774 | sizeof (struct mpa_v2_conn_params)); | |
d2fe99e8 KS |
775 | mpa_v2_params.ird = htons((u16)ep->ird); |
776 | mpa_v2_params.ord = htons((u16)ep->ord); | |
777 | if (peer2peer && (ep->mpa_attr.p2p_type != | |
778 | FW_RI_INIT_P2PTYPE_DISABLED)) { | |
779 | mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); | |
780 | ||
781 | if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) | |
782 | mpa_v2_params.ord |= | |
783 | htons(MPA_V2_RDMA_WRITE_RTR); | |
784 | else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) | |
785 | mpa_v2_params.ord |= | |
786 | htons(MPA_V2_RDMA_READ_RTR); | |
787 | } | |
788 | ||
789 | memcpy(mpa->private_data, &mpa_v2_params, | |
790 | sizeof(struct mpa_v2_conn_params)); | |
791 | ||
792 | if (ep->plen) | |
793 | memcpy(mpa->private_data + | |
794 | sizeof(struct mpa_v2_conn_params), pdata, plen); | |
795 | } else | |
796 | if (plen) | |
797 | memcpy(mpa->private_data, pdata, plen); | |
cfdda9d7 SW |
798 | |
799 | /* | |
800 | * Reference the mpa skb. This ensures the data area | |
801 | * will remain in memory until the hw acks the tx. | |
802 | * Function fw4_ack() will deref it. | |
803 | */ | |
804 | skb_get(skb); | |
805 | t4_set_arp_err_handler(skb, NULL, arp_failure_discard); | |
806 | ep->mpa_skb = skb; | |
807 | state_set(&ep->com, MPA_REP_SENT); | |
808 | return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
809 | } | |
810 | ||
811 | static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) | |
812 | { | |
813 | struct c4iw_ep *ep; | |
814 | struct cpl_act_establish *req = cplhdr(skb); | |
815 | unsigned int tid = GET_TID(req); | |
816 | unsigned int atid = GET_TID_TID(ntohl(req->tos_atid)); | |
817 | struct tid_info *t = dev->rdev.lldi.tids; | |
818 | ||
819 | ep = lookup_atid(t, atid); | |
820 | ||
821 | PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, | |
822 | be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); | |
823 | ||
824 | dst_confirm(ep->dst); | |
825 | ||
826 | /* setup the hwtid for this connection */ | |
827 | ep->hwtid = tid; | |
828 | cxgb4_insert_tid(t, ep, tid); | |
829 | ||
830 | ep->snd_seq = be32_to_cpu(req->snd_isn); | |
831 | ep->rcv_seq = be32_to_cpu(req->rcv_isn); | |
832 | ||
833 | set_emss(ep, ntohs(req->tcp_opt)); | |
834 | ||
835 | /* dealloc the atid */ | |
836 | cxgb4_free_atid(t, atid); | |
837 | ||
838 | /* start MPA negotiation */ | |
839 | send_flowc(ep, NULL); | |
d2fe99e8 KS |
840 | if (ep->retry_with_mpa_v1) |
841 | send_mpa_req(ep, skb, 1); | |
842 | else | |
843 | send_mpa_req(ep, skb, mpa_rev); | |
cfdda9d7 SW |
844 | |
845 | return 0; | |
846 | } | |
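/*
 * CPL_ACT_ESTABLISH completes the active-open handshake: the atid is traded
 * for the hardware tid, the initial send/receive sequence numbers are
 * recorded, a FLOWC WR primes the firmware for this flow, and MPA negotiation
 * begins (retrying with MPA v1 if an earlier v2 attempt failed).
 */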
847 | ||
848 | static void close_complete_upcall(struct c4iw_ep *ep) | |
849 | { | |
850 | struct iw_cm_event event; | |
851 | ||
852 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
853 | memset(&event, 0, sizeof(event)); | |
854 | event.event = IW_CM_EVENT_CLOSE; | |
855 | if (ep->com.cm_id) { | |
856 | PDBG("close complete delivered ep %p cm_id %p tid %u\n", | |
857 | ep, ep->com.cm_id, ep->hwtid); | |
858 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
859 | ep->com.cm_id->rem_ref(ep->com.cm_id); | |
860 | ep->com.cm_id = NULL; | |
861 | ep->com.qp = NULL; | |
862 | } | |
863 | } | |
864 | ||
865 | static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) | |
866 | { | |
867 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
868 | close_complete_upcall(ep); | |
869 | state_set(&ep->com, ABORTING); | |
870 | return send_abort(ep, skb, gfp); | |
871 | } | |
872 | ||
873 | static void peer_close_upcall(struct c4iw_ep *ep) | |
874 | { | |
875 | struct iw_cm_event event; | |
876 | ||
877 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
878 | memset(&event, 0, sizeof(event)); | |
879 | event.event = IW_CM_EVENT_DISCONNECT; | |
880 | if (ep->com.cm_id) { | |
881 | PDBG("peer close delivered ep %p cm_id %p tid %u\n", | |
882 | ep, ep->com.cm_id, ep->hwtid); | |
883 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
884 | } | |
885 | } | |
886 | ||
887 | static void peer_abort_upcall(struct c4iw_ep *ep) | |
888 | { | |
889 | struct iw_cm_event event; | |
890 | ||
891 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
892 | memset(&event, 0, sizeof(event)); | |
893 | event.event = IW_CM_EVENT_CLOSE; | |
894 | event.status = -ECONNRESET; | |
895 | if (ep->com.cm_id) { | |
896 | PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, | |
897 | ep->com.cm_id, ep->hwtid); | |
898 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
899 | ep->com.cm_id->rem_ref(ep->com.cm_id); | |
900 | ep->com.cm_id = NULL; | |
901 | ep->com.qp = NULL; | |
902 | } | |
903 | } | |
904 | ||
905 | static void connect_reply_upcall(struct c4iw_ep *ep, int status) | |
906 | { | |
907 | struct iw_cm_event event; | |
908 | ||
909 | PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); | |
910 | memset(&event, 0, sizeof(event)); | |
911 | event.event = IW_CM_EVENT_CONNECT_REPLY; | |
912 | event.status = status; | |
913 | event.local_addr = ep->com.local_addr; | |
914 | event.remote_addr = ep->com.remote_addr; | |
915 | ||
916 | if ((status == 0) || (status == -ECONNREFUSED)) { | |
d2fe99e8 KS |
917 | if (!ep->tried_with_mpa_v1) { |
918 | /* this means MPA_v2 is used */ | |
919 | event.private_data_len = ep->plen - | |
920 | sizeof(struct mpa_v2_conn_params); | |
921 | event.private_data = ep->mpa_pkt + | |
922 | sizeof(struct mpa_message) + | |
923 | sizeof(struct mpa_v2_conn_params); | |
924 | } else { | |
925 | /* this means MPA_v1 is used */ | |
926 | event.private_data_len = ep->plen; | |
927 | event.private_data = ep->mpa_pkt + | |
928 | sizeof(struct mpa_message); | |
929 | } | |
cfdda9d7 | 930 | } |
85963e4c RD |
931 | |
932 | PDBG("%s ep %p tid %u status %d\n", __func__, ep, | |
933 | ep->hwtid, status); | |
934 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
935 | ||
cfdda9d7 SW |
936 | if (status < 0) { |
937 | ep->com.cm_id->rem_ref(ep->com.cm_id); | |
938 | ep->com.cm_id = NULL; | |
939 | ep->com.qp = NULL; | |
940 | } | |
941 | } | |
942 | ||
943 | static void connect_request_upcall(struct c4iw_ep *ep) | |
944 | { | |
945 | struct iw_cm_event event; | |
946 | ||
947 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
948 | memset(&event, 0, sizeof(event)); | |
949 | event.event = IW_CM_EVENT_CONNECT_REQUEST; | |
950 | event.local_addr = ep->com.local_addr; | |
951 | event.remote_addr = ep->com.remote_addr; | |
cfdda9d7 | 952 | event.provider_data = ep; |
d2fe99e8 KS |
953 | if (!ep->tried_with_mpa_v1) { |
954 | /* this means MPA_v2 is used */ | |
955 | event.ord = ep->ord; | |
956 | event.ird = ep->ird; | |
957 | event.private_data_len = ep->plen - | |
958 | sizeof(struct mpa_v2_conn_params); | |
959 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + | |
960 | sizeof(struct mpa_v2_conn_params); | |
961 | } else { | |
962 | /* this means MPA_v1 is used. Send max supported */ | |
963 | event.ord = c4iw_max_read_depth; | |
964 | event.ird = c4iw_max_read_depth; | |
965 | event.private_data_len = ep->plen; | |
966 | event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); | |
967 | } | |
cfdda9d7 SW |
968 | if (state_read(&ep->parent_ep->com) != DEAD) { |
969 | c4iw_get_ep(&ep->com); | |
970 | ep->parent_ep->com.cm_id->event_handler( | |
971 | ep->parent_ep->com.cm_id, | |
972 | &event); | |
973 | } | |
974 | c4iw_put_ep(&ep->parent_ep->com); | |
975 | ep->parent_ep = NULL; | |
976 | } | |
977 | ||
978 | static void established_upcall(struct c4iw_ep *ep) | |
979 | { | |
980 | struct iw_cm_event event; | |
981 | ||
982 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
983 | memset(&event, 0, sizeof(event)); | |
984 | event.event = IW_CM_EVENT_ESTABLISHED; | |
d2fe99e8 KS |
985 | event.ird = ep->ird; |
986 | event.ord = ep->ord; | |
cfdda9d7 SW |
987 | if (ep->com.cm_id) { |
988 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
989 | ep->com.cm_id->event_handler(ep->com.cm_id, &event); | |
990 | } | |
991 | } | |
992 | ||
993 | static int update_rx_credits(struct c4iw_ep *ep, u32 credits) | |
994 | { | |
995 | struct cpl_rx_data_ack *req; | |
996 | struct sk_buff *skb; | |
997 | int wrlen = roundup(sizeof *req, 16); | |
998 | ||
999 | PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); | |
1000 | skb = get_skb(NULL, wrlen, GFP_KERNEL); | |
1001 | if (!skb) { | |
1002 | printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); | |
1003 | return 0; | |
1004 | } | |
1005 | ||
1006 | req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); | |
1007 | memset(req, 0, wrlen); | |
1008 | INIT_TP_WR(req, ep->hwtid); | |
1009 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, | |
1010 | ep->hwtid)); | |
ba6d3925 SW |
1011 | req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) | |
1012 | F_RX_DACK_CHANGE | | |
1013 | V_RX_DACK_MODE(dack_mode)); | |
d4f1a5c6 | 1014 | set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); |
cfdda9d7 SW |
1015 | c4iw_ofld_send(&ep->com.dev->rdev, skb); |
1016 | return credits; | |
1017 | } | |
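/*
 * For an offloaded connection the receive window is only reopened when the
 * host returns credits: the CPL_RX_DATA_ACK above hands the consumed byte
 * count back to the hardware and selects the delayed-ack behaviour chosen by
 * the dack_mode parameter.
 */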
1018 | ||
1019 | static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) | |
1020 | { | |
1021 | struct mpa_message *mpa; | |
d2fe99e8 | 1022 | struct mpa_v2_conn_params *mpa_v2_params; |
cfdda9d7 | 1023 | u16 plen; |
d2fe99e8 KS |
1024 | u16 resp_ird, resp_ord; |
1025 | u8 rtr_mismatch = 0, insuff_ird = 0; | |
cfdda9d7 SW |
1026 | struct c4iw_qp_attributes attrs; |
1027 | enum c4iw_qp_attr_mask mask; | |
1028 | int err; | |
1029 | ||
1030 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1031 | ||
1032 | /* | |
1033 | * Stop mpa timer. If it expired, then the state has | |
1034 | * changed and we bail since ep_timeout already aborted | |
1035 | * the connection. | |
1036 | */ | |
1037 | stop_ep_timer(ep); | |
1038 | if (state_read(&ep->com) != MPA_REQ_SENT) | |
1039 | return; | |
1040 | ||
1041 | /* | |
1042 | * If we get more than the supported amount of private data | |
1043 | * then we must fail this connection. | |
1044 | */ | |
1045 | if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { | |
1046 | err = -EINVAL; | |
1047 | goto err; | |
1048 | } | |
1049 | ||
1050 | /* | |
1051 | * copy the new data into our accumulation buffer. | |
1052 | */ | |
1053 | skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), | |
1054 | skb->len); | |
1055 | ep->mpa_pkt_len += skb->len; | |
1056 | ||
1057 | /* | |
1058 | * if we don't even have the mpa message, then bail. | |
1059 | */ | |
1060 | if (ep->mpa_pkt_len < sizeof(*mpa)) | |
1061 | return; | |
1062 | mpa = (struct mpa_message *) ep->mpa_pkt; | |
1063 | ||
1064 | /* Validate MPA header. */ | |
d2fe99e8 KS |
1065 | if (mpa->revision > mpa_rev) { |
1066 | printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," | |
1067 | " Received = %d\n", __func__, mpa_rev, mpa->revision); | |
cfdda9d7 SW |
1068 | err = -EPROTO; |
1069 | goto err; | |
1070 | } | |
1071 | if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { | |
1072 | err = -EPROTO; | |
1073 | goto err; | |
1074 | } | |
1075 | ||
1076 | plen = ntohs(mpa->private_data_size); | |
1077 | ||
1078 | /* | |
1079 | * Fail if there's too much private data. | |
1080 | */ | |
1081 | if (plen > MPA_MAX_PRIVATE_DATA) { | |
1082 | err = -EPROTO; | |
1083 | goto err; | |
1084 | } | |
1085 | ||
1086 | /* | |
1087 | * Fail if plen does not account for the pkt size. | |
1088 | */ | |
1089 | if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { | |
1090 | err = -EPROTO; | |
1091 | goto err; | |
1092 | } | |
1093 | ||
1094 | ep->plen = (u8) plen; | |
1095 | ||
1096 | /* | |
1097 | * If we don't have all the pdata yet, then bail. | |
1098 | * We'll continue processing when more data arrives. | |
1099 | */ | |
1100 | if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) | |
1101 | return; | |
1102 | ||
1103 | if (mpa->flags & MPA_REJECT) { | |
1104 | err = -ECONNREFUSED; | |
1105 | goto err; | |
1106 | } | |
1107 | ||
1108 | /* | |
1109 | * If we get here we have accumulated the entire mpa | |
1110 | * start reply message including private data. And | |
1111 | * the MPA header is valid. | |
1112 | */ | |
1113 | state_set(&ep->com, FPDU_MODE); | |
1114 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | |
1115 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | |
1116 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | |
d2fe99e8 KS |
1117 | ep->mpa_attr.version = mpa->revision; |
1118 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; | |
1119 | ||
1120 | if (mpa->revision == 2) { | |
1121 | ep->mpa_attr.enhanced_rdma_conn = | |
1122 | mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; | |
1123 | if (ep->mpa_attr.enhanced_rdma_conn) { | |
1124 | mpa_v2_params = (struct mpa_v2_conn_params *) | |
1125 | (ep->mpa_pkt + sizeof(*mpa)); | |
1126 | resp_ird = ntohs(mpa_v2_params->ird) & | |
1127 | MPA_V2_IRD_ORD_MASK; | |
1128 | resp_ord = ntohs(mpa_v2_params->ord) & | |
1129 | MPA_V2_IRD_ORD_MASK; | |
1130 | ||
1131 | /* | |
1132 | * This is a double-check. Ideally, below checks are | |
1133 | * not required since ird/ord stuff has been taken | |
1134 | * care of in c4iw_accept_cr | |
1135 | */ | |
1136 | if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) { | |
1137 | err = -ENOMEM; | |
1138 | ep->ird = resp_ord; | |
1139 | ep->ord = resp_ird; | |
1140 | insuff_ird = 1; | |
1141 | } | |
1142 | ||
1143 | if (ntohs(mpa_v2_params->ird) & | |
1144 | MPA_V2_PEER2PEER_MODEL) { | |
1145 | if (ntohs(mpa_v2_params->ord) & | |
1146 | MPA_V2_RDMA_WRITE_RTR) | |
1147 | ep->mpa_attr.p2p_type = | |
1148 | FW_RI_INIT_P2PTYPE_RDMA_WRITE; | |
1149 | else if (ntohs(mpa_v2_params->ord) & | |
1150 | MPA_V2_RDMA_READ_RTR) | |
1151 | ep->mpa_attr.p2p_type = | |
1152 | FW_RI_INIT_P2PTYPE_READ_REQ; | |
1153 | } | |
1154 | } | |
1155 | } else if (mpa->revision == 1) | |
1156 | if (peer2peer) | |
1157 | ep->mpa_attr.p2p_type = p2p_type; | |
1158 | ||
cfdda9d7 | 1159 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " |
d2fe99e8 KS |
1160 | "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = " |
1161 | "%d\n", __func__, ep->mpa_attr.crc_enabled, | |
1162 | ep->mpa_attr.recv_marker_enabled, | |
1163 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, | |
1164 | ep->mpa_attr.p2p_type, p2p_type); | |
1165 | ||
1166 | /* | |
1167 | * If responder's RTR does not match with that of initiator, assign | |
1168 | * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not | |
1169 | * generated when moving QP to RTS state. | |
1170 | * A TERM message will be sent after QP has moved to RTS state | |
1171 | */ | |
91018f86 | 1172 | if ((ep->mpa_attr.version == 2) && peer2peer && |
d2fe99e8 KS |
1173 | (ep->mpa_attr.p2p_type != p2p_type)) { |
1174 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; | |
1175 | rtr_mismatch = 1; | |
1176 | } | |
cfdda9d7 SW |
1177 | |
1178 | attrs.mpa_attr = ep->mpa_attr; | |
1179 | attrs.max_ird = ep->ird; | |
1180 | attrs.max_ord = ep->ord; | |
1181 | attrs.llp_stream_handle = ep; | |
1182 | attrs.next_state = C4IW_QP_STATE_RTS; | |
1183 | ||
1184 | mask = C4IW_QP_ATTR_NEXT_STATE | | |
1185 | C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | | |
1186 | C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; | |
1187 | ||
1188 | /* bind QP and TID with INIT_WR */ | |
1189 | err = c4iw_modify_qp(ep->com.qp->rhp, | |
1190 | ep->com.qp, mask, &attrs, 1); | |
1191 | if (err) | |
1192 | goto err; | |
d2fe99e8 KS |
1193 | |
1194 | /* | |
1195 | * If responder's RTR requirement did not match with what initiator | |
1196 | * supports, generate TERM message | |
1197 | */ | |
1198 | if (rtr_mismatch) { | |
1199 | printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); | |
1200 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | |
1201 | attrs.ecode = MPA_NOMATCH_RTR; | |
1202 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | |
1203 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | |
1204 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | |
1205 | err = -ENOMEM; | |
1206 | goto out; | |
1207 | } | |
1208 | ||
1209 | /* | |
1210 | * Generate TERM if initiator IRD is not sufficient for responder | |
1211 | * provided ORD. Currently, we do the same behaviour even when | |
1212 | * responder provided IRD is also not sufficient as regards to | |
1213 | * initiator ORD. | |
1214 | */ | |
1215 | if (insuff_ird) { | |
1216 | printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", | |
1217 | __func__); | |
1218 | attrs.layer_etype = LAYER_MPA | DDP_LLP; | |
1219 | attrs.ecode = MPA_INSUFF_IRD; | |
1220 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | |
1221 | err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | |
1222 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); | |
1223 | err = -ENOMEM; | |
1224 | goto out; | |
1225 | } | |
cfdda9d7 SW |
1226 | goto out; |
1227 | err: | |
b21ef16a SW |
1228 | state_set(&ep->com, ABORTING); |
1229 | send_abort(ep, skb, GFP_KERNEL); | |
cfdda9d7 SW |
1230 | out: |
1231 | connect_reply_upcall(ep, err); | |
1232 | return; | |
1233 | } | |
1234 | ||
1235 | static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) | |
1236 | { | |
1237 | struct mpa_message *mpa; | |
d2fe99e8 | 1238 | struct mpa_v2_conn_params *mpa_v2_params; |
cfdda9d7 SW |
1239 | u16 plen; |
1240 | ||
1241 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1242 | ||
1243 | if (state_read(&ep->com) != MPA_REQ_WAIT) | |
1244 | return; | |
1245 | ||
1246 | /* | |
1247 | * If we get more than the supported amount of private data | |
1248 | * then we must fail this connection. | |
1249 | */ | |
1250 | if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { | |
1251 | stop_ep_timer(ep); | |
1252 | abort_connection(ep, skb, GFP_KERNEL); | |
1253 | return; | |
1254 | } | |
1255 | ||
1256 | PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); | |
1257 | ||
1258 | /* | |
1259 | * Copy the new data into our accumulation buffer. | |
1260 | */ | |
1261 | skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), | |
1262 | skb->len); | |
1263 | ep->mpa_pkt_len += skb->len; | |
1264 | ||
1265 | /* | |
1266 | * If we don't even have the mpa message, then bail. | |
1267 | * We'll continue processing when more data arrives. | |
1268 | */ | |
1269 | if (ep->mpa_pkt_len < sizeof(*mpa)) | |
1270 | return; | |
1271 | ||
1272 | PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); | |
1273 | stop_ep_timer(ep); | |
1274 | mpa = (struct mpa_message *) ep->mpa_pkt; | |
1275 | ||
1276 | /* | |
1277 | * Validate MPA Header. | |
1278 | */ | |
d2fe99e8 KS |
1279 | if (mpa->revision > mpa_rev) { |
1280 | printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," | |
1281 | " Received = %d\n", __func__, mpa_rev, mpa->revision); | |
cfdda9d7 SW |
1282 | abort_connection(ep, skb, GFP_KERNEL); |
1283 | return; | |
1284 | } | |
1285 | ||
1286 | if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) { | |
1287 | abort_connection(ep, skb, GFP_KERNEL); | |
1288 | return; | |
1289 | } | |
1290 | ||
1291 | plen = ntohs(mpa->private_data_size); | |
1292 | ||
1293 | /* | |
1294 | * Fail if there's too much private data. | |
1295 | */ | |
1296 | if (plen > MPA_MAX_PRIVATE_DATA) { | |
1297 | abort_connection(ep, skb, GFP_KERNEL); | |
1298 | return; | |
1299 | } | |
1300 | ||
1301 | /* | |
1302 | * Fail if plen does not account for the pkt size. | |
1303 | */ | |
1304 | if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { | |
1305 | abort_connection(ep, skb, GFP_KERNEL); | |
1306 | return; | |
1307 | } | |
1308 | ep->plen = (u8) plen; | |
1309 | ||
1310 | /* | |
1311 | * If we don't have all the pdata yet, then bail. | |
1312 | */ | |
1313 | if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) | |
1314 | return; | |
1315 | ||
1316 | /* | |
1317 | * If we get here we have accumulated the entire mpa | |
1318 | * start request message including private data. | |
1319 | */ | |
1320 | ep->mpa_attr.initiator = 0; | |
1321 | ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; | |
1322 | ep->mpa_attr.recv_marker_enabled = markers_enabled; | |
1323 | ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; | |
d2fe99e8 KS |
1324 | ep->mpa_attr.version = mpa->revision; |
1325 | if (mpa->revision == 1) | |
1326 | ep->tried_with_mpa_v1 = 1; | |
1327 | ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; | |
1328 | ||
1329 | if (mpa->revision == 2) { | |
1330 | ep->mpa_attr.enhanced_rdma_conn = | |
1331 | mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; | |
1332 | if (ep->mpa_attr.enhanced_rdma_conn) { | |
1333 | mpa_v2_params = (struct mpa_v2_conn_params *) | |
1334 | (ep->mpa_pkt + sizeof(*mpa)); | |
1335 | ep->ird = ntohs(mpa_v2_params->ird) & | |
1336 | MPA_V2_IRD_ORD_MASK; | |
1337 | ep->ord = ntohs(mpa_v2_params->ord) & | |
1338 | MPA_V2_IRD_ORD_MASK; | |
1339 | if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) | |
1340 | if (peer2peer) { | |
1341 | if (ntohs(mpa_v2_params->ord) & | |
1342 | MPA_V2_RDMA_WRITE_RTR) | |
1343 | ep->mpa_attr.p2p_type = | |
1344 | FW_RI_INIT_P2PTYPE_RDMA_WRITE; | |
1345 | else if (ntohs(mpa_v2_params->ord) & | |
1346 | MPA_V2_RDMA_READ_RTR) | |
1347 | ep->mpa_attr.p2p_type = | |
1348 | FW_RI_INIT_P2PTYPE_READ_REQ; | |
1349 | } | |
1350 | } | |
1351 | } else if (mpa->revision == 1) | |
1352 | if (peer2peer) | |
1353 | ep->mpa_attr.p2p_type = p2p_type; | |
1354 | ||
cfdda9d7 SW |
1355 | PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " |
1356 | "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, | |
1357 | ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, | |
1358 | ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, | |
1359 | ep->mpa_attr.p2p_type); | |
1360 | ||
1361 | state_set(&ep->com, MPA_REQ_RCVD); | |
1362 | ||
1363 | /* drive upcall */ | |
1364 | connect_request_upcall(ep); | |
1365 | return; | |
1366 | } | |
1367 | ||
1368 | static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) | |
1369 | { | |
1370 | struct c4iw_ep *ep; | |
1371 | struct cpl_rx_data *hdr = cplhdr(skb); | |
1372 | unsigned int dlen = ntohs(hdr->len); | |
1373 | unsigned int tid = GET_TID(hdr); | |
1374 | struct tid_info *t = dev->rdev.lldi.tids; | |
1375 | ||
1376 | ep = lookup_tid(t, tid); | |
1377 | PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); | |
1378 | skb_pull(skb, sizeof(*hdr)); | |
1379 | skb_trim(skb, dlen); | |
1380 | ||
1381 | ep->rcv_seq += dlen; | |
1382 | BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen)); | |
1383 | ||
1384 | /* update RX credits */ | |
1385 | update_rx_credits(ep, dlen); | |
1386 | ||
1387 | switch (state_read(&ep->com)) { | |
1388 | case MPA_REQ_SENT: | |
1389 | process_mpa_reply(ep, skb); | |
1390 | break; | |
1391 | case MPA_REQ_WAIT: | |
1392 | process_mpa_request(ep, skb); | |
1393 | break; | |
1394 | case MPA_REP_SENT: | |
1395 | break; | |
1396 | default: | |
1397 | printk(KERN_ERR MOD "%s Unexpected streaming data." | |
1398 | " ep %p state %d tid %u\n", | |
1399 | __func__, ep, state_read(&ep->com), ep->hwtid); | |
1400 | ||
1401 | /* | |
1402 | * The ep will timeout and inform the ULP of the failure. | |
1403 | * See ep_timeout(). | |
1404 | */ | |
1405 | break; | |
1406 | } | |
1407 | return 0; | |
1408 | } | |
1409 | ||
1410 | static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1411 | { | |
1412 | struct c4iw_ep *ep; | |
1413 | struct cpl_abort_rpl_rss *rpl = cplhdr(skb); | |
cfdda9d7 SW |
1414 | int release = 0; |
1415 | unsigned int tid = GET_TID(rpl); | |
1416 | struct tid_info *t = dev->rdev.lldi.tids; | |
1417 | ||
1418 | ep = lookup_tid(t, tid); | |
4984037b VP |
1419 | if (!ep) { |
1420 | printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n"); | |
1421 | return 0; | |
1422 | } | |
92dd6c3d | 1423 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
2f5b48c3 | 1424 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
1425 | switch (ep->com.state) { |
1426 | case ABORTING: | |
1427 | __state_set(&ep->com, DEAD); | |
1428 | release = 1; | |
1429 | break; | |
1430 | default: | |
1431 | printk(KERN_ERR "%s ep %p state %d\n", | |
1432 | __func__, ep, ep->com.state); | |
1433 | break; | |
1434 | } | |
2f5b48c3 | 1435 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
1436 | |
1437 | if (release) | |
1438 | release_ep_resources(ep); | |
1439 | return 0; | |
1440 | } | |
1441 | ||
5be78ee9 VP |
1442 | static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) |
1443 | { | |
1444 | struct sk_buff *skb; | |
1445 | struct fw_ofld_connection_wr *req; | |
1446 | unsigned int mtu_idx; | |
1447 | int wscale; | |
1448 | ||
1449 | skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); | |
1450 | req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); | |
1451 | memset(req, 0, sizeof(*req)); | |
1452 | req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); | |
1453 | req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); | |
1454 | req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, | |
1455 | ep->l2t)); | |
1456 | req->le.lport = ep->com.local_addr.sin_port; | |
1457 | req->le.pport = ep->com.remote_addr.sin_port; | |
1458 | req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr; | |
1459 | req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr; | |
1460 | req->tcb.t_state_to_astid = | |
1461 | htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) | | |
1462 | V_FW_OFLD_CONNECTION_WR_ASTID(atid)); | |
1463 | req->tcb.cplrxdataack_cplpassacceptrpl = | |
1464 | htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK); | |
1465 | req->tcb.tx_max = jiffies; | |
1466 | cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); | |
1467 | wscale = compute_wscale(rcv_win); | |
1468 | req->tcb.opt0 = TCAM_BYPASS(1) | | |
1469 | (nocong ? NO_CONG(1) : 0) | | |
1470 | KEEP_ALIVE(1) | | |
1471 | DELACK(1) | | |
1472 | WND_SCALE(wscale) | | |
1473 | MSS_IDX(mtu_idx) | | |
1474 | L2T_IDX(ep->l2t->idx) | | |
1475 | TX_CHAN(ep->tx_chan) | | |
1476 | SMAC_SEL(ep->smac_idx) | | |
1477 | DSCP(ep->tos) | | |
1478 | ULP_MODE(ULP_MODE_TCPDDP) | | |
1479 | RCV_BUFSIZ(rcv_win >> 10); | |
1480 | req->tcb.opt2 = PACE(1) | | |
1481 | TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | | |
1482 | RX_CHANNEL(0) | | |
1483 | CCTRL_ECN(enable_ecn) | | |
1484 | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); | |
1485 | if (enable_tcp_timestamps) | |
1486 | req->tcb.opt2 |= TSTAMPS_EN(1); | |
1487 | if (enable_tcp_sack) | |
1488 | req->tcb.opt2 |= SACK_EN(1); | |
1489 | if (wscale && enable_tcp_window_scaling) | |
1490 | req->tcb.opt2 |= WND_SCALE_EN(1); | |
1491 | req->tcb.opt0 = cpu_to_be64(req->tcb.opt0); | |
1492 | req->tcb.opt2 = cpu_to_be32(req->tcb.opt2); | |
1493 | set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); | |
1494 | c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); | |
1495 | } | |
1496 | ||
cfdda9d7 SW |
1497 | /* |
1498 | * Return whether a failed active open has allocated a TID | |
1499 | */ | |
1500 | static inline int act_open_has_tid(int status) | |
1501 | { | |
1502 | return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && | |
1503 | status != CPL_ERR_ARP_MISS; | |
1504 | } | |
1505 | ||
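| /* |
| * Handle CPL_ACT_OPEN_RPL, the reply to an active open request. |
| * Most failures are reported to the ULP and the endpoint is torn |
| * down; a CPL_ERR_TCAM_FULL status is instead retried through the |
| * firmware via send_fw_act_open_req(). |
| */ |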
1506 | static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1507 | { | |
1508 | struct c4iw_ep *ep; | |
1509 | struct cpl_act_open_rpl *rpl = cplhdr(skb); | |
1510 | unsigned int atid = GET_TID_TID(GET_AOPEN_ATID( | |
1511 | ntohl(rpl->atid_status))); | |
1512 | struct tid_info *t = dev->rdev.lldi.tids; | |
1513 | int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status)); | |
1514 | ||
1515 | ep = lookup_atid(t, atid); | |
1516 | ||
1517 | PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, | |
1518 | status, status2errno(status)); | |
1519 | ||
1520 | if (status == CPL_ERR_RTX_NEG_ADVICE) { | |
1521 | printk(KERN_WARNING MOD "Connection problems for atid %u\n", | |
1522 | atid); | |
1523 | return 0; | |
1524 | } | |
1525 | ||
d716a2a0 VP |
1526 | /* |
1527 | * Log interesting failures. | |
1528 | */ | |
1529 | switch (status) { | |
1530 | case CPL_ERR_CONN_RESET: | |
1531 | case CPL_ERR_CONN_TIMEDOUT: | |
1532 | break; | |
5be78ee9 VP |
1533 | case CPL_ERR_TCAM_FULL: |
1534 | mutex_lock(&dev->rdev.stats.lock); | |
1535 | dev->rdev.stats.tcam_full++; | |
1536 | mutex_unlock(&dev->rdev.stats.lock); | |
1537 | send_fw_act_open_req(ep, | |
1538 | GET_TID_TID(GET_AOPEN_ATID(ntohl(rpl->atid_status)))); | |
1539 | return 0; | |
d716a2a0 VP |
1541 | default: |
1542 | printk(KERN_INFO MOD "Active open failure - " | |
1543 | "atid %u status %u errno %d %pI4:%u->%pI4:%u\n", | |
1544 | atid, status, status2errno(status), | |
1545 | &ep->com.local_addr.sin_addr.s_addr, | |
1546 | ntohs(ep->com.local_addr.sin_port), | |
1547 | &ep->com.remote_addr.sin_addr.s_addr, | |
1548 | ntohs(ep->com.remote_addr.sin_port)); | |
1549 | break; | |
1550 | } | |
1551 | ||
cfdda9d7 SW |
1552 | connect_reply_upcall(ep, status2errno(status)); |
1553 | state_set(&ep->com, DEAD); | |
1554 | ||
1555 | if (status && act_open_has_tid(status)) | |
1556 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); | |
1557 | ||
1558 | cxgb4_free_atid(t, atid); | |
1559 | dst_release(ep->dst); | |
1560 | cxgb4_l2t_release(ep->l2t); | |
1561 | c4iw_put_ep(&ep->com); | |
1562 | ||
1563 | return 0; | |
1564 | } | |
1565 | ||
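| /* |
| * Handle CPL_PASS_OPEN_RPL, the reply to a listen (passive open) |
| * request: wake up the thread waiting in c4iw_create_listen(). |
| */ |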
1566 | static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1567 | { | |
1568 | struct cpl_pass_open_rpl *rpl = cplhdr(skb); | |
1569 | struct tid_info *t = dev->rdev.lldi.tids; | |
1570 | unsigned int stid = GET_TID(rpl); | |
1571 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); | |
1572 | ||
1573 | if (!ep) { | |
1cab775c VP |
1574 | PDBG("%s stid %d lookup failure!\n", __func__, stid); |
1575 | goto out; | |
cfdda9d7 SW |
1576 | } |
1577 | PDBG("%s ep %p status %d error %d\n", __func__, ep, | |
1578 | rpl->status, status2errno(rpl->status)); | |
d9594d99 | 1579 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
cfdda9d7 | 1580 | |
1cab775c | 1581 | out: |
cfdda9d7 SW |
1582 | return 0; |
1583 | } | |
1584 | ||
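| /* |
| * Send a CPL_CLOSE_LISTSRV_REQ to stop listening on the endpoint's |
| * server TID.  The reply is handled by close_listsrv_rpl(). |
| */ |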
1585 | static int listen_stop(struct c4iw_listen_ep *ep) | |
1586 | { | |
1587 | struct sk_buff *skb; | |
1588 | struct cpl_close_listsvr_req *req; | |
1589 | ||
1590 | PDBG("%s ep %p\n", __func__, ep); | |
1591 | skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); | |
1592 | if (!skb) { | |
1593 | printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__); | |
1594 | return -ENOMEM; | |
1595 | } | |
1596 | req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req)); | |
1597 | INIT_TP_WR(req, 0); | |
1598 | OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, | |
1599 | ep->stid)); | |
1600 | req->reply_ctrl = cpu_to_be16( | |
1601 | QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0])); | |
1602 | set_wr_txq(skb, CPL_PRIORITY_SETUP, 0); | |
1603 | return c4iw_ofld_send(&ep->com.dev->rdev, skb); | |
1604 | } | |
1605 | ||
1606 | static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
1607 | { | |
1608 | struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); | |
1609 | struct tid_info *t = dev->rdev.lldi.tids; | |
1610 | unsigned int stid = GET_TID(rpl); | |
1611 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); | |
1612 | ||
1613 | PDBG("%s ep %p\n", __func__, ep); | |
d9594d99 | 1614 | c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); |
cfdda9d7 SW |
1615 | return 0; |
1616 | } | |
1617 | ||
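| /* |
| * Accept an incoming connection request by sending a |
| * CPL_PASS_ACCEPT_RPL with the negotiated TCP options (MSS index, |
| * window scaling, timestamps, SACK, ECN) encoded in opt0/opt2. |
| */ |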
1618 | static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb, | |
1619 | struct cpl_pass_accept_req *req) | |
1620 | { | |
1621 | struct cpl_pass_accept_rpl *rpl; | |
1622 | unsigned int mtu_idx; | |
1623 | u64 opt0; | |
1624 | u32 opt2; | |
1625 | int wscale; | |
1626 | ||
1627 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1628 | BUG_ON(skb_cloned(skb)); | |
1629 | skb_trim(skb, sizeof(*rpl)); | |
1630 | skb_get(skb); | |
1631 | cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); | |
1632 | wscale = compute_wscale(rcv_win); | |
5be78ee9 VP |
1633 | opt0 = (nocong ? NO_CONG(1) : 0) | |
1634 | KEEP_ALIVE(1) | | |
ba6d3925 | 1635 | DELACK(1) | |
cfdda9d7 SW |
1636 | WND_SCALE(wscale) | |
1637 | MSS_IDX(mtu_idx) | | |
1638 | L2T_IDX(ep->l2t->idx) | | |
1639 | TX_CHAN(ep->tx_chan) | | |
1640 | SMAC_SEL(ep->smac_idx) | | |
5be78ee9 | 1641 | DSCP(ep->tos >> 2) | |
b48f3b9c | 1642 | ULP_MODE(ULP_MODE_TCPDDP) | |
cfdda9d7 SW |
1643 | RCV_BUFSIZ(rcv_win>>10); |
1644 | opt2 = RX_CHANNEL(0) | | |
1645 | RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); | |
1646 | ||
1647 | if (enable_tcp_timestamps && req->tcpopt.tstamp) | |
1648 | opt2 |= TSTAMPS_EN(1); | |
1649 | if (enable_tcp_sack && req->tcpopt.sack) | |
1650 | opt2 |= SACK_EN(1); | |
1651 | if (wscale && enable_tcp_window_scaling) | |
1652 | opt2 |= WND_SCALE_EN(1); | |
5be78ee9 VP |
1653 | if (enable_ecn) { |
1654 | const struct tcphdr *tcph; | |
1655 | u32 hlen = ntohl(req->hdr_len); | |
1656 | ||
1657 | tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) + | |
1658 | G_IP_HDR_LEN(hlen); | |
1659 | if (tcph->ece && tcph->cwr) | |
1660 | opt2 |= CCTRL_ECN(1); | |
1661 | } | |
cfdda9d7 SW |
1662 | |
1663 | rpl = cplhdr(skb); | |
1664 | INIT_TP_WR(rpl, ep->hwtid); | |
1665 | OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, | |
1666 | ep->hwtid)); | |
1667 | rpl->opt0 = cpu_to_be64(opt0); | |
1668 | rpl->opt2 = cpu_to_be32(opt2); | |
d4f1a5c6 | 1669 | set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); |
cfdda9d7 SW |
1670 | c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); |
1671 | ||
1672 | return; | |
1673 | } | |
1674 | ||
1675 | static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip, | |
1676 | struct sk_buff *skb) | |
1677 | { | |
1678 | PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid, | |
1679 | peer_ip); | |
1680 | BUG_ON(skb_cloned(skb)); | |
1681 | skb_trim(skb, sizeof(struct cpl_tid_release)); | |
1682 | skb_get(skb); | |
1683 | release_tid(&dev->rdev, hwtid, skb); | |
1684 | return; | |
1685 | } | |
1686 | ||
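| /* |
| * Extract the IP addresses and TCP ports from the SYN headers |
| * carried in a CPL_PASS_ACCEPT_REQ. |
| */ |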
1687 | static void get_4tuple(struct cpl_pass_accept_req *req, | |
1688 | __be32 *local_ip, __be32 *peer_ip, | |
1689 | __be16 *local_port, __be16 *peer_port) | |
1690 | { | |
1691 | int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len)); | |
1692 | int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len)); | |
1693 | struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); | |
1694 | struct tcphdr *tcp = (struct tcphdr *) | |
1695 | ((u8 *)(req + 1) + eth_len + ip_len); | |
1696 | ||
1697 | PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, | |
1698 | ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), | |
1699 | ntohs(tcp->dest)); | |
1700 | ||
1701 | *peer_ip = ip->saddr; | |
1702 | *local_ip = ip->daddr; | |
1703 | *peer_port = tcp->source; | |
1704 | *local_port = tcp->dest; | |
1705 | ||
1706 | return; | |
1707 | } | |
1708 | ||
3786cf18 DM |
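| /* |
| * Resolve the neighbour for the destination, grab an L2T entry and |
| * fill in the endpoint's MTU, tx channel, SMAC index and queue |
| * indices.  Loopback destinations are resolved to the underlying |
| * port's net_device via ip_dev_find(). |
| */ |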
1709 | static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst, |
1710 | struct c4iw_dev *cdev, bool clear_mpa_v1) | |
1711 | { | |
1712 | struct neighbour *n; | |
1713 | int err, step; | |
1714 | ||
64b7007e | 1715 | n = dst_neigh_lookup(dst, &peer_ip); |
3786cf18 | 1716 | if (!n) |
64b7007e DM |
1717 | return -ENODEV; |
1718 | ||
1719 | rcu_read_lock(); | |
3786cf18 DM |
1720 | err = -ENOMEM; |
1721 | if (n->dev->flags & IFF_LOOPBACK) { | |
1722 | struct net_device *pdev; | |
1723 | ||
1724 | pdev = ip_dev_find(&init_net, peer_ip); | |
71b43fd5 TLSC |
1725 | if (!pdev) { |
1726 | err = -ENODEV; | |
1727 | goto out; | |
1728 | } | |
3786cf18 DM |
1729 | ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, |
1730 | n, pdev, 0); | |
1731 | if (!ep->l2t) | |
1732 | goto out; | |
1733 | ep->mtu = pdev->mtu; | |
1734 | ep->tx_chan = cxgb4_port_chan(pdev); | |
1735 | ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; | |
1736 | step = cdev->rdev.lldi.ntxq / | |
1737 | cdev->rdev.lldi.nchan; | |
1738 | ep->txq_idx = cxgb4_port_idx(pdev) * step; | |
1739 | step = cdev->rdev.lldi.nrxq / | |
1740 | cdev->rdev.lldi.nchan; | |
1741 | ep->ctrlq_idx = cxgb4_port_idx(pdev); | |
1742 | ep->rss_qid = cdev->rdev.lldi.rxq_ids[ | |
1743 | cxgb4_port_idx(pdev) * step]; | |
1744 | dev_put(pdev); | |
1745 | } else { | |
1746 | ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, | |
1747 | n, n->dev, 0); | |
1748 | if (!ep->l2t) | |
1749 | goto out; | |
bd61baaf | 1750 | ep->mtu = dst_mtu(dst); |
3786cf18 DM |
1751 | ep->tx_chan = cxgb4_port_chan(n->dev); |
1752 | ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1; | |
1753 | step = cdev->rdev.lldi.ntxq / | |
1754 | cdev->rdev.lldi.nchan; | |
1755 | ep->txq_idx = cxgb4_port_idx(n->dev) * step; | |
1756 | ep->ctrlq_idx = cxgb4_port_idx(n->dev); | |
1757 | step = cdev->rdev.lldi.nrxq / | |
1758 | cdev->rdev.lldi.nchan; | |
1759 | ep->rss_qid = cdev->rdev.lldi.rxq_ids[ | |
1760 | cxgb4_port_idx(n->dev) * step]; | |
1761 | ||
1762 | if (clear_mpa_v1) { | |
1763 | ep->retry_with_mpa_v1 = 0; | |
1764 | ep->tried_with_mpa_v1 = 0; | |
1765 | } | |
1766 | } | |
1767 | err = 0; | |
1768 | out: | |
1769 | rcu_read_unlock(); | |
1770 | ||
64b7007e DM |
1771 | neigh_release(n); |
1772 | ||
3786cf18 DM |
1773 | return err; |
1774 | } | |
1775 | ||
cfdda9d7 SW |
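| /* |
| * Handle CPL_PASS_ACCEPT_REQ: an incoming SYN matched one of our |
| * listening endpoints.  Allocate a child endpoint, resolve the route |
| * and L2T entry, then accept the connection via accept_cr().  On any |
| * failure the connection is rejected by releasing the hardware TID. |
| */ |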
1776 | static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) |
1777 | { | |
1778 | struct c4iw_ep *child_ep, *parent_ep; | |
1779 | struct cpl_pass_accept_req *req = cplhdr(skb); | |
1780 | unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); | |
1781 | struct tid_info *t = dev->rdev.lldi.tids; | |
1782 | unsigned int hwtid = GET_TID(req); | |
1783 | struct dst_entry *dst; | |
cfdda9d7 | 1784 | struct rtable *rt; |
1cab775c | 1785 | __be32 local_ip, peer_ip = 0; |
cfdda9d7 | 1786 | __be16 local_port, peer_port; |
3786cf18 | 1787 | int err; |
1cab775c | 1788 | u16 peer_mss = ntohs(req->tcpopt.mss); |
cfdda9d7 SW |
1789 | |
1790 | parent_ep = lookup_stid(t, stid); | |
1cab775c VP |
1791 | if (!parent_ep) { |
1792 | PDBG("%s connect request on invalid stid %d\n", __func__, stid); | |
1793 | goto reject; | |
1794 | } | |
cfdda9d7 SW |
1795 | get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); |
1796 | ||
1cab775c VP |
1797 | PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \ |
1798 | "rport %d peer_mss %d\n", __func__, parent_ep, hwtid, | |
1799 | ntohl(local_ip), ntohl(peer_ip), ntohs(local_port), | |
1800 | ntohs(peer_port), peer_mss); | |
1801 | ||
cfdda9d7 SW |
1802 | if (state_read(&parent_ep->com) != LISTEN) { |
1803 | printk(KERN_ERR "%s - listening ep not in LISTEN\n", | |
1804 | __func__); | |
1805 | goto reject; | |
1806 | } | |
1807 | ||
1808 | /* Find output route */ | |
1809 | rt = find_route(dev, local_ip, peer_ip, local_port, peer_port, | |
1810 | GET_POPEN_TOS(ntohl(req->tos_stid))); | |
1811 | if (!rt) { | |
1812 | printk(KERN_ERR MOD "%s - failed to find dst entry!\n", | |
1813 | __func__); | |
1814 | goto reject; | |
1815 | } | |
d8d1f30b | 1816 | dst = &rt->dst; |
3786cf18 DM |
1817 | |
1818 | child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); | |
1819 | if (!child_ep) { | |
1820 | printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", | |
cfdda9d7 SW |
1821 | __func__); |
1822 | dst_release(dst); | |
1823 | goto reject; | |
1824 | } | |
1825 | ||
3786cf18 DM |
1826 | err = import_ep(child_ep, peer_ip, dst, dev, false); |
1827 | if (err) { | |
1828 | printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", | |
cfdda9d7 | 1829 | __func__); |
cfdda9d7 | 1830 | dst_release(dst); |
3786cf18 | 1831 | kfree(child_ep); |
cfdda9d7 SW |
1832 | goto reject; |
1833 | } | |
3786cf18 | 1834 | |
1cab775c VP |
1835 | if (peer_mss && child_ep->mtu > (peer_mss + 40)) |
1836 | child_ep->mtu = peer_mss + 40; | |
1837 | ||
cfdda9d7 SW |
1838 | state_set(&child_ep->com, CONNECTING); |
1839 | child_ep->com.dev = dev; | |
1840 | child_ep->com.cm_id = NULL; | |
1841 | child_ep->com.local_addr.sin_family = PF_INET; | |
1842 | child_ep->com.local_addr.sin_port = local_port; | |
1843 | child_ep->com.local_addr.sin_addr.s_addr = local_ip; | |
1844 | child_ep->com.remote_addr.sin_family = PF_INET; | |
1845 | child_ep->com.remote_addr.sin_port = peer_port; | |
1846 | child_ep->com.remote_addr.sin_addr.s_addr = peer_ip; | |
1847 | c4iw_get_ep(&parent_ep->com); | |
1848 | child_ep->parent_ep = parent_ep; | |
1849 | child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid)); | |
cfdda9d7 SW |
1850 | child_ep->dst = dst; |
1851 | child_ep->hwtid = hwtid; | |
cfdda9d7 SW |
1852 | |
1853 | PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, | |
3786cf18 | 1854 | child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); |
cfdda9d7 SW |
1855 | |
1856 | init_timer(&child_ep->timer); | |
1857 | cxgb4_insert_tid(t, child_ep, hwtid); | |
1858 | accept_cr(child_ep, peer_ip, skb, req); | |
1859 | goto out; | |
1860 | reject: | |
1861 | reject_cr(dev, hwtid, peer_ip, skb); | |
1862 | out: | |
1863 | return 0; | |
1864 | } | |
1865 | ||
1866 | static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) | |
1867 | { | |
1868 | struct c4iw_ep *ep; | |
1869 | struct cpl_pass_establish *req = cplhdr(skb); | |
1870 | struct tid_info *t = dev->rdev.lldi.tids; | |
1871 | unsigned int tid = GET_TID(req); | |
1872 | ||
1873 | ep = lookup_tid(t, tid); | |
1874 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1875 | ep->snd_seq = be32_to_cpu(req->snd_isn); | |
1876 | ep->rcv_seq = be32_to_cpu(req->rcv_isn); | |
1877 | ||
1cab775c VP |
1878 | PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid, |
1879 | ntohs(req->tcp_opt)); | |
1880 | ||
cfdda9d7 SW |
1881 | set_emss(ep, ntohs(req->tcp_opt)); |
1882 | ||
1883 | dst_confirm(ep->dst); | |
1884 | state_set(&ep->com, MPA_REQ_WAIT); | |
1885 | start_ep_timer(ep); | |
1886 | send_flowc(ep, skb); | |
1887 | ||
1888 | return 0; | |
1889 | } | |
1890 | ||
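| /* |
| * Handle CPL_PEER_CLOSE: the peer has sent a FIN.  Advance the |
| * endpoint state machine towards CLOSING/MORIBUND/DEAD and inform |
| * the ULP as appropriate for the current state. |
| */ |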
1891 | static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |
1892 | { | |
1893 | struct cpl_peer_close *hdr = cplhdr(skb); | |
1894 | struct c4iw_ep *ep; | |
1895 | struct c4iw_qp_attributes attrs; | |
cfdda9d7 SW |
1896 | int disconnect = 1; |
1897 | int release = 0; | |
cfdda9d7 SW |
1898 | struct tid_info *t = dev->rdev.lldi.tids; |
1899 | unsigned int tid = GET_TID(hdr); | |
8da7e7a5 | 1900 | int ret; |
cfdda9d7 SW |
1901 | |
1902 | ep = lookup_tid(t, tid); | |
1903 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
1904 | dst_confirm(ep->dst); | |
1905 | ||
2f5b48c3 | 1906 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
1907 | switch (ep->com.state) { |
1908 | case MPA_REQ_WAIT: | |
1909 | __state_set(&ep->com, CLOSING); | |
1910 | break; | |
1911 | case MPA_REQ_SENT: | |
1912 | __state_set(&ep->com, CLOSING); | |
1913 | connect_reply_upcall(ep, -ECONNRESET); | |
1914 | break; | |
1915 | case MPA_REQ_RCVD: | |
1916 | ||
1917 | /* | |
1918 | * We're going to mark this endpoint DEAD, but keep |
1919 | * the reference on it until the ULP accepts or | |
1920 | * rejects the CR. Also wake up anyone waiting | |
1921 | * in rdma connection migration (see c4iw_accept_cr()). | |
1922 | */ | |
1923 | __state_set(&ep->com, CLOSING); | |
cfdda9d7 | 1924 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
d9594d99 | 1925 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
cfdda9d7 SW |
1926 | break; |
1927 | case MPA_REP_SENT: | |
1928 | __state_set(&ep->com, CLOSING); | |
cfdda9d7 | 1929 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
d9594d99 | 1930 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
cfdda9d7 SW |
1931 | break; |
1932 | case FPDU_MODE: | |
ca5a2202 | 1933 | start_ep_timer(ep); |
cfdda9d7 | 1934 | __state_set(&ep->com, CLOSING); |
30c95c2d | 1935 | attrs.next_state = C4IW_QP_STATE_CLOSING; |
8da7e7a5 | 1936 | ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
30c95c2d | 1937 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
8da7e7a5 SW |
1938 | if (ret != -ECONNRESET) { |
1939 | peer_close_upcall(ep); | |
1940 | disconnect = 1; | |
1941 | } | |
cfdda9d7 SW |
1942 | break; |
1943 | case ABORTING: | |
1944 | disconnect = 0; | |
1945 | break; | |
1946 | case CLOSING: | |
1947 | __state_set(&ep->com, MORIBUND); | |
1948 | disconnect = 0; | |
1949 | break; | |
1950 | case MORIBUND: | |
ca5a2202 | 1951 | stop_ep_timer(ep); |
cfdda9d7 SW |
1952 | if (ep->com.cm_id && ep->com.qp) { |
1953 | attrs.next_state = C4IW_QP_STATE_IDLE; | |
1954 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | |
1955 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | |
1956 | } | |
1957 | close_complete_upcall(ep); | |
1958 | __state_set(&ep->com, DEAD); | |
1959 | release = 1; | |
1960 | disconnect = 0; | |
1961 | break; | |
1962 | case DEAD: | |
1963 | disconnect = 0; | |
1964 | break; | |
1965 | default: | |
1966 | BUG_ON(1); | |
1967 | } | |
2f5b48c3 | 1968 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
1969 | if (disconnect) |
1970 | c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | |
1971 | if (release) | |
1972 | release_ep_resources(ep); | |
1973 | return 0; | |
1974 | } | |
1975 | ||
1976 | /* | |
1977 | * Returns whether an ABORT_REQ_RSS message is negative advice. |
1978 | */ | |
1979 | static int is_neg_adv_abort(unsigned int status) | |
1980 | { | |
1981 | return status == CPL_ERR_RTX_NEG_ADVICE || | |
1982 | status == CPL_ERR_PERSIST_NEG_ADVICE; | |
1983 | } | |
1984 | ||
d2fe99e8 KS |
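| /* |
| * Re-issue an active open on an existing endpoint, reusing the |
| * cm_id's addresses.  Used to fall back to MPA v1 after an MPA v2 |
| * attempt is aborted by the peer. |
| */ |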
1985 | static int c4iw_reconnect(struct c4iw_ep *ep) |
1986 | { | |
d2fe99e8 | 1987 | struct rtable *rt; |
3786cf18 | 1988 | int err = 0; |
d2fe99e8 KS |
1989 | |
1990 | PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); | |
1991 | init_timer(&ep->timer); | |
1992 | ||
1993 | /* | |
1994 | * Allocate an active TID to initiate a TCP connection. | |
1995 | */ | |
1996 | ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); | |
1997 | if (ep->atid == -1) { | |
1998 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); | |
1999 | err = -ENOMEM; | |
2000 | goto fail2; | |
2001 | } | |
2002 | ||
2003 | /* find a route */ | |
2004 | rt = find_route(ep->com.dev, | |
2005 | ep->com.cm_id->local_addr.sin_addr.s_addr, | |
2006 | ep->com.cm_id->remote_addr.sin_addr.s_addr, | |
2007 | ep->com.cm_id->local_addr.sin_port, | |
2008 | ep->com.cm_id->remote_addr.sin_port, 0); | |
2009 | if (!rt) { | |
2010 | printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); | |
2011 | err = -EHOSTUNREACH; | |
2012 | goto fail3; | |
2013 | } | |
2014 | ep->dst = &rt->dst; | |
2015 | ||
3786cf18 DM |
2016 | err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr, |
2017 | ep->dst, ep->com.dev, false); | |
2018 | if (err) { | |
d2fe99e8 | 2019 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); |
d2fe99e8 KS |
2020 | goto fail4; |
2021 | } | |
2022 | ||
2023 | PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", | |
2024 | __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, | |
2025 | ep->l2t->idx); | |
2026 | ||
2027 | state_set(&ep->com, CONNECTING); | |
2028 | ep->tos = 0; | |
2029 | ||
2030 | /* send connect request to rnic */ | |
2031 | err = send_connect(ep); | |
2032 | if (!err) | |
2033 | goto out; | |
2034 | ||
2035 | cxgb4_l2t_release(ep->l2t); | |
2036 | fail4: | |
2037 | dst_release(ep->dst); | |
2038 | fail3: | |
2039 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); | |
2040 | fail2: | |
2041 | /* | |
2042 | * Remember to send a notification to the upper layer. |
2043 | * We are in here, so the upper layer is not aware that this is a |
2044 | * re-connect attempt; it is still waiting for the |
2045 | * response to the first connect request. |
2046 | */ | |
2047 | connect_reply_upcall(ep, -ECONNRESET); | |
2048 | c4iw_put_ep(&ep->com); | |
2049 | out: | |
2050 | return err; | |
2051 | } | |
2052 | ||
cfdda9d7 SW |
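| /* |
| * Handle CPL_ABORT_REQ_RSS: the connection has been aborted. |
| * Negative advice is ignored; otherwise move the QP to ERROR if |
| * needed, reply with CPL_ABORT_RPL, and either release the endpoint |
| * or retry the connection with MPA v1. |
| */ |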
2053 | static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) |
2054 | { | |
2055 | struct cpl_abort_req_rss *req = cplhdr(skb); | |
2056 | struct c4iw_ep *ep; | |
2057 | struct cpl_abort_rpl *rpl; | |
2058 | struct sk_buff *rpl_skb; | |
2059 | struct c4iw_qp_attributes attrs; | |
2060 | int ret; | |
2061 | int release = 0; | |
cfdda9d7 SW |
2062 | struct tid_info *t = dev->rdev.lldi.tids; |
2063 | unsigned int tid = GET_TID(req); | |
cfdda9d7 SW |
2064 | |
2065 | ep = lookup_tid(t, tid); | |
2066 | if (is_neg_adv_abort(req->status)) { | |
2067 | PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, | |
2068 | ep->hwtid); | |
2069 | return 0; | |
2070 | } | |
cfdda9d7 SW |
2071 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, |
2072 | ep->com.state); | |
2f5b48c3 SW |
2073 | |
2074 | /* | |
2075 | * Wake up any threads in rdma_init() or rdma_fini(). | |
d2fe99e8 KS |
2076 | * However, this is not needed if the com state is just |
2077 | * MPA_REQ_SENT. |
2f5b48c3 | 2078 | */ |
d2fe99e8 KS |
2079 | if (ep->com.state != MPA_REQ_SENT) |
2080 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); | |
2f5b48c3 SW |
2081 | |
2082 | mutex_lock(&ep->com.mutex); | |
cfdda9d7 SW |
2083 | switch (ep->com.state) { |
2084 | case CONNECTING: | |
2085 | break; | |
2086 | case MPA_REQ_WAIT: | |
ca5a2202 | 2087 | stop_ep_timer(ep); |
cfdda9d7 SW |
2088 | break; |
2089 | case MPA_REQ_SENT: | |
ca5a2202 | 2090 | stop_ep_timer(ep); |
d2fe99e8 KS |
2091 | if (mpa_rev == 2 && ep->tried_with_mpa_v1) |
2092 | connect_reply_upcall(ep, -ECONNRESET); | |
2093 | else { | |
2094 | /* | |
2095 | * We don't send a notification upwards because we |
2096 | * want to retry with MPA v1 without the upper layers even |
2097 | * knowing it. |
2098 | * |
2099 | * Do some housekeeping so as to re-initiate the |
2100 | * connection. |
2101 | */ | |
2102 | PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__, | |
2103 | mpa_rev); | |
2104 | ep->retry_with_mpa_v1 = 1; | |
2105 | } | |
cfdda9d7 SW |
2106 | break; |
2107 | case MPA_REP_SENT: | |
cfdda9d7 SW |
2108 | break; |
2109 | case MPA_REQ_RCVD: | |
cfdda9d7 SW |
2110 | break; |
2111 | case MORIBUND: | |
2112 | case CLOSING: | |
ca5a2202 | 2113 | stop_ep_timer(ep); |
cfdda9d7 SW |
2114 | /*FALLTHROUGH*/ |
2115 | case FPDU_MODE: | |
2116 | if (ep->com.cm_id && ep->com.qp) { | |
2117 | attrs.next_state = C4IW_QP_STATE_ERROR; | |
2118 | ret = c4iw_modify_qp(ep->com.qp->rhp, | |
2119 | ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, | |
2120 | &attrs, 1); | |
2121 | if (ret) | |
2122 | printk(KERN_ERR MOD | |
2123 | "%s - qp <- error failed!\n", | |
2124 | __func__); | |
2125 | } | |
2126 | peer_abort_upcall(ep); | |
2127 | break; | |
2128 | case ABORTING: | |
2129 | break; | |
2130 | case DEAD: | |
2131 | PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); | |
2f5b48c3 | 2132 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
2133 | return 0; |
2134 | default: | |
2135 | BUG_ON(1); | |
2136 | break; | |
2137 | } | |
2138 | dst_confirm(ep->dst); | |
2139 | if (ep->com.state != ABORTING) { | |
2140 | __state_set(&ep->com, DEAD); | |
d2fe99e8 KS |
2141 | /* we don't release if we want to retry with mpa_v1 */ |
2142 | if (!ep->retry_with_mpa_v1) | |
2143 | release = 1; | |
cfdda9d7 | 2144 | } |
2f5b48c3 | 2145 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
2146 | |
2147 | rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); | |
2148 | if (!rpl_skb) { | |
2149 | printk(KERN_ERR MOD "%s - cannot allocate skb!\n", | |
2150 | __func__); | |
2151 | release = 1; | |
2152 | goto out; | |
2153 | } | |
2154 | set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); | |
2155 | rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl)); | |
2156 | INIT_TP_WR(rpl, ep->hwtid); | |
2157 | OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); | |
2158 | rpl->cmd = CPL_ABORT_NO_RST; | |
2159 | c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb); | |
2160 | out: | |
cfdda9d7 SW |
2161 | if (release) |
2162 | release_ep_resources(ep); | |
d2fe99e8 KS |
2163 | |
2164 | /* retry with mpa-v1 */ | |
2165 | if (ep && ep->retry_with_mpa_v1) { | |
2166 | cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); | |
2167 | dst_release(ep->dst); | |
2168 | cxgb4_l2t_release(ep->l2t); | |
2169 | c4iw_reconnect(ep); | |
2170 | } | |
2171 | ||
cfdda9d7 SW |
2172 | return 0; |
2173 | } | |
2174 | ||
2175 | static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
2176 | { | |
2177 | struct c4iw_ep *ep; | |
2178 | struct c4iw_qp_attributes attrs; | |
2179 | struct cpl_close_con_rpl *rpl = cplhdr(skb); | |
cfdda9d7 SW |
2180 | int release = 0; |
2181 | struct tid_info *t = dev->rdev.lldi.tids; | |
2182 | unsigned int tid = GET_TID(rpl); | |
cfdda9d7 SW |
2183 | |
2184 | ep = lookup_tid(t, tid); | |
2185 | ||
2186 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
2187 | BUG_ON(!ep); | |
2188 | ||
2189 | /* The cm_id may be null if we failed to connect */ | |
2f5b48c3 | 2190 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
2191 | switch (ep->com.state) { |
2192 | case CLOSING: | |
2193 | __state_set(&ep->com, MORIBUND); | |
2194 | break; | |
2195 | case MORIBUND: | |
ca5a2202 | 2196 | stop_ep_timer(ep); |
cfdda9d7 SW |
2197 | if ((ep->com.cm_id) && (ep->com.qp)) { |
2198 | attrs.next_state = C4IW_QP_STATE_IDLE; | |
2199 | c4iw_modify_qp(ep->com.qp->rhp, | |
2200 | ep->com.qp, | |
2201 | C4IW_QP_ATTR_NEXT_STATE, | |
2202 | &attrs, 1); | |
2203 | } | |
2204 | close_complete_upcall(ep); | |
2205 | __state_set(&ep->com, DEAD); | |
2206 | release = 1; | |
2207 | break; | |
2208 | case ABORTING: | |
2209 | case DEAD: | |
2210 | break; | |
2211 | default: | |
2212 | BUG_ON(1); | |
2213 | break; | |
2214 | } | |
2f5b48c3 | 2215 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
2216 | if (release) |
2217 | release_ep_resources(ep); | |
2218 | return 0; | |
2219 | } | |
2220 | ||
2221 | static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) | |
2222 | { | |
0e42c1f4 | 2223 | struct cpl_rdma_terminate *rpl = cplhdr(skb); |
cfdda9d7 | 2224 | struct tid_info *t = dev->rdev.lldi.tids; |
0e42c1f4 SW |
2225 | unsigned int tid = GET_TID(rpl); |
2226 | struct c4iw_ep *ep; | |
2227 | struct c4iw_qp_attributes attrs; | |
cfdda9d7 SW |
2228 | |
2229 | ep = lookup_tid(t, tid); | |
0e42c1f4 | 2230 | BUG_ON(!ep); |
cfdda9d7 | 2231 | |
30c95c2d | 2232 | if (ep && ep->com.qp) { |
0e42c1f4 SW |
2233 | printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, |
2234 | ep->com.qp->wq.sq.qid); | |
2235 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | |
2236 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | |
2237 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | |
2238 | } else | |
30c95c2d | 2239 | printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); |
cfdda9d7 | 2240 | |
cfdda9d7 SW |
2241 | return 0; |
2242 | } | |
2243 | ||
2244 | /* | |
2245 | * Upcall from the adapter indicating data has been transmitted. | |
2246 | * For us it's just the single MPA request or reply. We can now free |
2247 | * the skb holding the mpa message. | |
2248 | */ | |
2249 | static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) | |
2250 | { | |
2251 | struct c4iw_ep *ep; | |
2252 | struct cpl_fw4_ack *hdr = cplhdr(skb); | |
2253 | u8 credits = hdr->credits; | |
2254 | unsigned int tid = GET_TID(hdr); | |
2255 | struct tid_info *t = dev->rdev.lldi.tids; | |
2256 | ||
2257 | ||
2258 | ep = lookup_tid(t, tid); | |
2259 | PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); | |
2260 | if (credits == 0) { | |
aa1ad260 JP |
2261 | PDBG("%s 0 credit ack ep %p tid %u state %u\n", |
2262 | __func__, ep, ep->hwtid, state_read(&ep->com)); | |
cfdda9d7 SW |
2263 | return 0; |
2264 | } | |
2265 | ||
2266 | dst_confirm(ep->dst); | |
2267 | if (ep->mpa_skb) { | |
2268 | PDBG("%s last streaming msg ack ep %p tid %u state %u " | |
2269 | "initiator %u freeing skb\n", __func__, ep, ep->hwtid, | |
2270 | state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0); | |
2271 | kfree_skb(ep->mpa_skb); | |
2272 | ep->mpa_skb = NULL; | |
2273 | } | |
2274 | return 0; | |
2275 | } | |
2276 | ||
cfdda9d7 SW |
2277 | int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) |
2278 | { | |
2279 | int err; | |
2280 | struct c4iw_ep *ep = to_ep(cm_id); | |
2281 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
2282 | ||
2283 | if (state_read(&ep->com) == DEAD) { | |
2284 | c4iw_put_ep(&ep->com); | |
2285 | return -ECONNRESET; | |
2286 | } | |
2287 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | |
2288 | if (mpa_rev == 0) | |
2289 | abort_connection(ep, NULL, GFP_KERNEL); | |
2290 | else { | |
2291 | err = send_mpa_reject(ep, pdata, pdata_len); | |
2292 | err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); | |
2293 | } | |
2294 | c4iw_put_ep(&ep->com); | |
2295 | return 0; | |
2296 | } | |
2297 | ||
2298 | int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |
2299 | { | |
2300 | int err; | |
2301 | struct c4iw_qp_attributes attrs; | |
2302 | enum c4iw_qp_attr_mask mask; | |
2303 | struct c4iw_ep *ep = to_ep(cm_id); | |
2304 | struct c4iw_dev *h = to_c4iw_dev(cm_id->device); | |
2305 | struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); | |
2306 | ||
2307 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | |
2308 | if (state_read(&ep->com) == DEAD) { | |
2309 | err = -ECONNRESET; | |
2310 | goto err; | |
2311 | } | |
2312 | ||
2313 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | |
2314 | BUG_ON(!qp); | |
2315 | ||
be4c9bad RD |
2316 | if ((conn_param->ord > c4iw_max_read_depth) || |
2317 | (conn_param->ird > c4iw_max_read_depth)) { | |
cfdda9d7 SW |
2318 | abort_connection(ep, NULL, GFP_KERNEL); |
2319 | err = -EINVAL; | |
2320 | goto err; | |
2321 | } | |
2322 | ||
d2fe99e8 KS |
2323 | if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { |
2324 | if (conn_param->ord > ep->ird) { | |
2325 | ep->ird = conn_param->ird; | |
2326 | ep->ord = conn_param->ord; | |
2327 | send_mpa_reject(ep, conn_param->private_data, | |
2328 | conn_param->private_data_len); | |
2329 | abort_connection(ep, NULL, GFP_KERNEL); | |
2330 | err = -ENOMEM; | |
2331 | goto err; | |
2332 | } | |
2333 | if (conn_param->ird > ep->ord) { | |
2334 | if (!ep->ord) | |
2335 | conn_param->ird = 1; | |
2336 | else { | |
2337 | abort_connection(ep, NULL, GFP_KERNEL); | |
2338 | err = -ENOMEM; | |
2339 | goto err; | |
2340 | } | |
2341 | } | |
cfdda9d7 | 2342 | |
d2fe99e8 | 2343 | } |
cfdda9d7 SW |
2344 | ep->ird = conn_param->ird; |
2345 | ep->ord = conn_param->ord; | |
2346 | ||
d2fe99e8 KS |
2347 | if (ep->mpa_attr.version != 2) |
2348 | if (peer2peer && ep->ird == 0) | |
2349 | ep->ird = 1; | |
cfdda9d7 SW |
2350 | |
2351 | PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); | |
2352 | ||
d2fe99e8 KS |
2353 | cm_id->add_ref(cm_id); |
2354 | ep->com.cm_id = cm_id; | |
2355 | ep->com.qp = qp; | |
2356 | ||
cfdda9d7 SW |
2357 | /* bind QP to EP and move to RTS */ |
2358 | attrs.mpa_attr = ep->mpa_attr; | |
2359 | attrs.max_ird = ep->ird; | |
2360 | attrs.max_ord = ep->ord; | |
2361 | attrs.llp_stream_handle = ep; | |
2362 | attrs.next_state = C4IW_QP_STATE_RTS; | |
2363 | ||
2364 | /* bind QP and TID with INIT_WR */ | |
2365 | mask = C4IW_QP_ATTR_NEXT_STATE | | |
2366 | C4IW_QP_ATTR_LLP_STREAM_HANDLE | | |
2367 | C4IW_QP_ATTR_MPA_ATTR | | |
2368 | C4IW_QP_ATTR_MAX_IRD | | |
2369 | C4IW_QP_ATTR_MAX_ORD; | |
2370 | ||
2371 | err = c4iw_modify_qp(ep->com.qp->rhp, | |
2372 | ep->com.qp, mask, &attrs, 1); | |
2373 | if (err) | |
2374 | goto err1; | |
2375 | err = send_mpa_reply(ep, conn_param->private_data, | |
2376 | conn_param->private_data_len); | |
2377 | if (err) | |
2378 | goto err1; | |
2379 | ||
2380 | state_set(&ep->com, FPDU_MODE); | |
2381 | established_upcall(ep); | |
2382 | c4iw_put_ep(&ep->com); | |
2383 | return 0; | |
2384 | err1: | |
2385 | ep->com.cm_id = NULL; | |
2386 | ep->com.qp = NULL; | |
2387 | cm_id->rem_ref(cm_id); | |
2388 | err: | |
2389 | c4iw_put_ep(&ep->com); | |
2390 | return err; | |
2391 | } | |
2392 | ||
2393 | int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |
2394 | { | |
cfdda9d7 SW |
2395 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); |
2396 | struct c4iw_ep *ep; | |
2397 | struct rtable *rt; | |
3786cf18 | 2398 | int err = 0; |
cfdda9d7 | 2399 | |
be4c9bad RD |
2400 | if ((conn_param->ord > c4iw_max_read_depth) || |
2401 | (conn_param->ird > c4iw_max_read_depth)) { | |
2402 | err = -EINVAL; | |
2403 | goto out; | |
2404 | } | |
cfdda9d7 SW |
2405 | ep = alloc_ep(sizeof(*ep), GFP_KERNEL); |
2406 | if (!ep) { | |
2407 | printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); | |
2408 | err = -ENOMEM; | |
2409 | goto out; | |
2410 | } | |
2411 | init_timer(&ep->timer); | |
2412 | ep->plen = conn_param->private_data_len; | |
2413 | if (ep->plen) | |
2414 | memcpy(ep->mpa_pkt + sizeof(struct mpa_message), | |
2415 | conn_param->private_data, ep->plen); | |
2416 | ep->ird = conn_param->ird; | |
2417 | ep->ord = conn_param->ord; | |
2418 | ||
2419 | if (peer2peer && ep->ord == 0) | |
2420 | ep->ord = 1; | |
2421 | ||
2422 | cm_id->add_ref(cm_id); | |
2423 | ep->com.dev = dev; | |
2424 | ep->com.cm_id = cm_id; | |
2425 | ep->com.qp = get_qhp(dev, conn_param->qpn); | |
2426 | BUG_ON(!ep->com.qp); | |
2427 | PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, | |
2428 | ep->com.qp, cm_id); | |
2429 | ||
2430 | /* | |
2431 | * Allocate an active TID to initiate a TCP connection. | |
2432 | */ | |
2433 | ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); | |
2434 | if (ep->atid == -1) { | |
2435 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); | |
2436 | err = -ENOMEM; | |
2437 | goto fail2; | |
2438 | } | |
2439 | ||
2440 | PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, | |
2441 | ntohl(cm_id->local_addr.sin_addr.s_addr), | |
2442 | ntohs(cm_id->local_addr.sin_port), | |
2443 | ntohl(cm_id->remote_addr.sin_addr.s_addr), | |
2444 | ntohs(cm_id->remote_addr.sin_port)); | |
2445 | ||
2446 | /* find a route */ | |
2447 | rt = find_route(dev, | |
2448 | cm_id->local_addr.sin_addr.s_addr, | |
2449 | cm_id->remote_addr.sin_addr.s_addr, | |
2450 | cm_id->local_addr.sin_port, | |
2451 | cm_id->remote_addr.sin_port, 0); | |
2452 | if (!rt) { | |
2453 | printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); | |
2454 | err = -EHOSTUNREACH; | |
2455 | goto fail3; | |
2456 | } | |
d8d1f30b | 2457 | ep->dst = &rt->dst; |
cfdda9d7 | 2458 | |
3786cf18 DM |
2459 | err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr, |
2460 | ep->dst, ep->com.dev, true); | |
2461 | if (err) { | |
cfdda9d7 | 2462 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); |
cfdda9d7 SW |
2463 | goto fail4; |
2464 | } | |
2465 | ||
2466 | PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", | |
2467 | __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, | |
2468 | ep->l2t->idx); | |
2469 | ||
2470 | state_set(&ep->com, CONNECTING); | |
2471 | ep->tos = 0; | |
2472 | ep->com.local_addr = cm_id->local_addr; | |
2473 | ep->com.remote_addr = cm_id->remote_addr; | |
2474 | ||
2475 | /* send connect request to rnic */ | |
2476 | err = send_connect(ep); | |
2477 | if (!err) | |
2478 | goto out; | |
2479 | ||
2480 | cxgb4_l2t_release(ep->l2t); | |
2481 | fail4: | |
2482 | dst_release(ep->dst); | |
2483 | fail3: | |
2484 | cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); | |
2485 | fail2: | |
2486 | cm_id->rem_ref(cm_id); | |
2487 | c4iw_put_ep(&ep->com); | |
2488 | out: | |
2489 | return err; | |
2490 | } | |
2491 | ||
2492 | int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |
2493 | { | |
2494 | int err = 0; | |
2495 | struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); | |
2496 | struct c4iw_listen_ep *ep; | |
2497 | ||
cfdda9d7 SW |
2498 | might_sleep(); |
2499 | ||
2500 | ep = alloc_ep(sizeof(*ep), GFP_KERNEL); | |
2501 | if (!ep) { | |
2502 | printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); | |
2503 | err = -ENOMEM; | |
2504 | goto fail1; | |
2505 | } | |
2506 | PDBG("%s ep %p\n", __func__, ep); | |
2507 | cm_id->add_ref(cm_id); | |
2508 | ep->com.cm_id = cm_id; | |
2509 | ep->com.dev = dev; | |
2510 | ep->backlog = backlog; | |
2511 | ep->com.local_addr = cm_id->local_addr; | |
2512 | ||
2513 | /* | |
2514 | * Allocate a server TID. | |
2515 | */ | |
1cab775c VP |
2516 | if (dev->rdev.lldi.enable_fw_ofld_conn) |
2517 | ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep); | |
2518 | else | |
2519 | ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); | |
2520 | ||
cfdda9d7 | 2521 | if (ep->stid == -1) { |
be4c9bad | 2522 | printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); |
cfdda9d7 SW |
2523 | err = -ENOMEM; |
2524 | goto fail2; | |
2525 | } | |
cfdda9d7 | 2526 | state_set(&ep->com, LISTEN); |
1cab775c VP |
2527 | if (dev->rdev.lldi.enable_fw_ofld_conn) { |
2528 | do { | |
2529 | err = cxgb4_create_server_filter( | |
2530 | ep->com.dev->rdev.lldi.ports[0], ep->stid, | |
2531 | ep->com.local_addr.sin_addr.s_addr, | |
2532 | ep->com.local_addr.sin_port, | |
2533 | ep->com.dev->rdev.lldi.rxq_ids[0]); | |
2534 | if (err == -EBUSY) { | |
2535 | set_current_state(TASK_UNINTERRUPTIBLE); | |
2536 | schedule_timeout(usecs_to_jiffies(100)); | |
2537 | } | |
2538 | } while (err == -EBUSY); | |
2539 | } else { | |
2540 | c4iw_init_wr_wait(&ep->com.wr_wait); | |
2541 | err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], | |
2542 | ep->stid, ep->com.local_addr.sin_addr.s_addr, | |
2543 | ep->com.local_addr.sin_port, | |
2544 | ep->com.dev->rdev.lldi.rxq_ids[0]); | |
2545 | if (!err) | |
2546 | err = c4iw_wait_for_reply(&ep->com.dev->rdev, | |
2547 | &ep->com.wr_wait, | |
2548 | 0, 0, __func__); | |
2549 | } | |
cfdda9d7 SW |
2550 | if (!err) { |
2551 | cm_id->provider_data = ep; | |
2552 | goto out; | |
2553 | } | |
1cab775c VP |
2554 | pr_err("%s cxgb4_create_server/filter failed err %d " \ |
2555 | "stid %d laddr %08x lport %d\n", \ | |
2556 | __func__, err, ep->stid, | |
2557 | ntohl(ep->com.local_addr.sin_addr.s_addr), | |
2558 | ntohs(ep->com.local_addr.sin_port)); | |
cfdda9d7 SW |
2559 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); |
2560 | fail2: | |
2561 | cm_id->rem_ref(cm_id); | |
2562 | c4iw_put_ep(&ep->com); | |
2563 | fail1: | |
2564 | out: | |
2565 | return err; | |
2566 | } | |
2567 | ||
2568 | int c4iw_destroy_listen(struct iw_cm_id *cm_id) | |
2569 | { | |
2570 | int err; | |
2571 | struct c4iw_listen_ep *ep = to_listen_ep(cm_id); | |
2572 | ||
2573 | PDBG("%s ep %p\n", __func__, ep); | |
2574 | ||
2575 | might_sleep(); | |
2576 | state_set(&ep->com, DEAD); | |
1cab775c VP |
2577 | if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) { |
2578 | err = cxgb4_remove_server_filter( | |
2579 | ep->com.dev->rdev.lldi.ports[0], ep->stid, | |
2580 | ep->com.dev->rdev.lldi.rxq_ids[0], 0); | |
2581 | } else { | |
2582 | c4iw_init_wr_wait(&ep->com.wr_wait); | |
2583 | err = listen_stop(ep); | |
2584 | if (err) | |
2585 | goto done; | |
2586 | err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, | |
2587 | 0, 0, __func__); | |
2588 | } | |
cfdda9d7 SW |
2589 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); |
2590 | done: | |
cfdda9d7 SW |
2591 | cm_id->rem_ref(cm_id); |
2592 | c4iw_put_ep(&ep->com); | |
2593 | return err; | |
2594 | } | |
2595 | ||
2596 | int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) | |
2597 | { | |
2598 | int ret = 0; | |
cfdda9d7 SW |
2599 | int close = 0; |
2600 | int fatal = 0; | |
2601 | struct c4iw_rdev *rdev; | |
cfdda9d7 | 2602 | |
2f5b48c3 | 2603 | mutex_lock(&ep->com.mutex); |
cfdda9d7 SW |
2604 | |
2605 | PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, | |
2606 | states[ep->com.state], abrupt); | |
2607 | ||
2608 | rdev = &ep->com.dev->rdev; | |
2609 | if (c4iw_fatal_error(rdev)) { | |
2610 | fatal = 1; | |
2611 | close_complete_upcall(ep); | |
2612 | ep->com.state = DEAD; | |
2613 | } | |
2614 | switch (ep->com.state) { | |
2615 | case MPA_REQ_WAIT: | |
2616 | case MPA_REQ_SENT: | |
2617 | case MPA_REQ_RCVD: | |
2618 | case MPA_REP_SENT: | |
2619 | case FPDU_MODE: | |
2620 | close = 1; | |
2621 | if (abrupt) | |
2622 | ep->com.state = ABORTING; | |
2623 | else { | |
2624 | ep->com.state = CLOSING; | |
ca5a2202 | 2625 | start_ep_timer(ep); |
cfdda9d7 SW |
2626 | } |
2627 | set_bit(CLOSE_SENT, &ep->com.flags); | |
2628 | break; | |
2629 | case CLOSING: | |
2630 | if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { | |
2631 | close = 1; | |
2632 | if (abrupt) { | |
ca5a2202 | 2633 | stop_ep_timer(ep); |
cfdda9d7 SW |
2634 | ep->com.state = ABORTING; |
2635 | } else | |
2636 | ep->com.state = MORIBUND; | |
2637 | } | |
2638 | break; | |
2639 | case MORIBUND: | |
2640 | case ABORTING: | |
2641 | case DEAD: | |
2642 | PDBG("%s ignoring disconnect ep %p state %u\n", | |
2643 | __func__, ep, ep->com.state); | |
2644 | break; | |
2645 | default: | |
2646 | BUG(); | |
2647 | break; | |
2648 | } | |
2649 | ||
cfdda9d7 | 2650 | if (close) { |
8da7e7a5 SW |
2651 | if (abrupt) { |
2652 | close_complete_upcall(ep); | |
2653 | ret = send_abort(ep, NULL, gfp); | |
2654 | } else | |
cfdda9d7 SW |
2655 | ret = send_halfclose(ep, gfp); |
2656 | if (ret) | |
2657 | fatal = 1; | |
2658 | } | |
8da7e7a5 | 2659 | mutex_unlock(&ep->com.mutex); |
cfdda9d7 SW |
2660 | if (fatal) |
2661 | release_ep_resources(ep); | |
2662 | return ret; | |
2663 | } | |
2664 | ||
1cab775c VP |
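| /* |
| * Completion of a FW_OFLD_CONNECTION_WR issued for an active open: |
| * report the firmware's return value to the ULP. |
| */ |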
2665 | static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, |
2666 | struct cpl_fw6_msg_ofld_connection_wr_rpl *req) | |
2667 | { | |
2668 | struct c4iw_ep *ep; | |
2669 | ||
2670 | ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid); | |
2671 | if (!ep) | |
2672 | return; | |
2673 | ||
2674 | switch (req->retval) { | |
2675 | case FW_ENOMEM: | |
2676 | case FW_EADDRINUSE: | |
2677 | PDBG("%s ofld conn wr ret %d\n", __func__, req->retval); | |
2678 | break; | |
2679 | default: | |
2680 | pr_info("%s unexpected ofld conn wr retval %d\n", | |
2681 | __func__, req->retval); | |
2682 | break; | |
2683 | } | |
2684 | connect_reply_upcall(ep, status2errno(req->retval)); | |
2685 | } | |
2686 | ||
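| /* |
| * Completion of a FW_OFLD_CONNECTION_WR issued for a passive open: |
| * on success, recover the synthesized cpl_pass_accept_req from the |
| * cookie, fill in the newly assigned TID and run it through |
| * pass_accept_req(). |
| */ |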
2687 | static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, | |
2688 | struct cpl_fw6_msg_ofld_connection_wr_rpl *req) | |
2689 | { | |
2690 | struct sk_buff *rpl_skb; | |
2691 | struct cpl_pass_accept_req *cpl; | |
2692 | int ret; | |
2693 | ||
2694 | rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie); | |
2695 | BUG_ON(!rpl_skb); | |
2696 | if (req->retval) { | |
2697 | PDBG("%s passive open failure %d\n", __func__, req->retval); | |
2698 | kfree_skb(rpl_skb); | |
2699 | } else { | |
2700 | cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); | |
2701 | OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, | |
2702 | htonl(req->tid))); | |
2703 | ret = pass_accept_req(dev, rpl_skb); | |
2704 | if (!ret) | |
2705 | kfree_skb(rpl_skb); | |
2706 | } | |
2707 | return; | |
2708 | } | |
2709 | ||
2710 | static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) | |
2f5b48c3 SW |
2711 | { |
2712 | struct cpl_fw6_msg *rpl = cplhdr(skb); | |
1cab775c VP |
2713 | struct cpl_fw6_msg_ofld_connection_wr_rpl *req; |
2714 | ||
2715 | switch (rpl->type) { | |
2716 | case FW6_TYPE_CQE: | |
2717 | c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); | |
2718 | break; | |
2719 | case FW6_TYPE_OFLD_CONNECTION_WR_RPL: | |
2720 | req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; | |
2721 | switch (req->t_state) { | |
2722 | case TCP_SYN_SENT: | |
2723 | active_ofld_conn_reply(dev, skb, req); | |
2724 | break; | |
2725 | case TCP_SYN_RECV: | |
2726 | passive_ofld_conn_reply(dev, skb, req); | |
2727 | break; | |
2728 | default: | |
2729 | pr_err("%s unexpected ofld conn wr state %d\n", | |
2730 | __func__, req->t_state); | |
2731 | break; | |
2732 | } | |
2733 | break; | |
2734 | } | |
2735 | return 0; | |
2736 | } | |
2737 | ||
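| /* |
| * Convert a received SYN (cpl_rx_pkt) into a synthesized |
| * cpl_pass_accept_req in place, parsing the TCP options from the SYN |
| * so the packet can be fed through the normal passive-open path. |
| */ |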
2738 | static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) | |
2739 | { | |
2740 | u32 l2info; | |
2741 | u16 vlantag, len, hdr_len; | |
2742 | u8 intf; | |
2743 | struct cpl_rx_pkt *cpl = cplhdr(skb); | |
2744 | struct cpl_pass_accept_req *req; | |
2745 | struct tcp_options_received tmp_opt; | |
2746 | ||
2747 | /* Store values from cpl_rx_pkt in temporary location. */ | |
2748 | vlantag = cpl->vlan; | |
2749 | len = cpl->len; | |
2750 | l2info = cpl->l2info; | |
2751 | hdr_len = cpl->hdr_len; | |
2752 | intf = cpl->iff; | |
2753 | ||
2754 | __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); | |
2755 | ||
2756 | /* | |
2757 | * We need to parse the TCP options from the SYN packet |
2758 | * to generate the cpl_pass_accept_req. |
2759 | */ | |
2760 | memset(&tmp_opt, 0, sizeof(tmp_opt)); | |
2761 | tcp_clear_options(&tmp_opt); | |
2762 | tcp_parse_options(skb, &tmp_opt, 0, 0, NULL); | |
2763 | ||
2764 | req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); | |
2765 | memset(req, 0, sizeof(*req)); | |
2766 | req->l2info = cpu_to_be16(V_SYN_INTF(intf) | | |
2767 | V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) | | |
2768 | F_SYN_XACT_MATCH); | |
2769 | req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) | | |
2770 | V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) | | |
2771 | V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) | | |
2772 | V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info)))); | |
2773 | req->vlan = vlantag; | |
2774 | req->len = len; | |
2775 | req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) | | |
2776 | PASS_OPEN_TOS(tos)); | |
2777 | req->tcpopt.mss = htons(tmp_opt.mss_clamp); | |
2778 | if (tmp_opt.wscale_ok) | |
2779 | req->tcpopt.wsf = tmp_opt.snd_wscale; | |
2780 | req->tcpopt.tstamp = tmp_opt.saw_tstamp; | |
2781 | if (tmp_opt.sack_ok) | |
2782 | req->tcpopt.sack = 1; | |
2783 | OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0)); | |
2784 | return; | |
2785 | } | |
2786 | ||
2787 | static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, | |
2788 | __be32 laddr, __be16 lport, | |
2789 | __be32 raddr, __be16 rport, | |
2790 | u32 rcv_isn, u32 filter, u16 window, | |
2791 | u32 rss_qid, u8 port_id) | |
2792 | { | |
2793 | struct sk_buff *req_skb; | |
2794 | struct fw_ofld_connection_wr *req; | |
2795 | struct cpl_pass_accept_req *cpl = cplhdr(skb); | |
2796 | ||
2797 | req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); | |
2798 | req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); | |
2799 | memset(req, 0, sizeof(*req)); | |
2800 | req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1)); | |
2801 | req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); | |
2802 | req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL); | |
2803 | req->le.filter = filter; | |
2804 | req->le.lport = lport; | |
2805 | req->le.pport = rport; | |
2806 | req->le.u.ipv4.lip = laddr; | |
2807 | req->le.u.ipv4.pip = raddr; | |
2808 | req->tcb.rcv_nxt = htonl(rcv_isn + 1); | |
2809 | req->tcb.rcv_adv = htons(window); | |
2810 | req->tcb.t_state_to_astid = | |
2811 | htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) | | |
2812 | V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) | | |
2813 | V_FW_OFLD_CONNECTION_WR_ASTID( | |
2814 | GET_PASS_OPEN_TID(ntohl(cpl->tos_stid)))); | |
2815 | ||
2816 | /* | |
2817 | * We store the qid in opt2 which will be used by the firmware | |
2818 | * to send us the wr response. | |
2819 | */ | |
2820 | req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid)); | |
2821 | ||
2822 | /* | |
2823 | * We initialize the MSS index in the TCB to 0xF, so that |
2824 | * when the driver sends the cpl_pass_accept_rpl, the TCB |
2825 | * picks up the correct value. If this were 0, TP would |
2826 | * ignore any value > 0 for the MSS index. |
2827 | */ | |
2828 | req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF)); | |
2829 | req->cookie = cpu_to_be64((u64)skb); | |
2830 | ||
2831 | set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); | |
2832 | cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); | |
2833 | } | |
2834 | ||
2835 | /* | |
2836 | * Handler for the CPL_RX_PKT message. We need to handle cpl_rx_pkt |
2837 | * messages when a filter is being used instead of a server to |
2838 | * redirect a SYN packet. When packets hit the filter they are redirected |
2839 | * to the offload queue and the driver tries to establish the connection |
2840 | * using a firmware work request. |
2841 | */ | |
2842 | static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) | |
2843 | { | |
2844 | int stid; | |
2845 | unsigned int filter; | |
2846 | struct ethhdr *eh = NULL; | |
2847 | struct vlan_ethhdr *vlan_eh = NULL; | |
2848 | struct iphdr *iph; | |
2849 | struct tcphdr *tcph; | |
2850 | struct rss_header *rss = (void *)skb->data; | |
2851 | struct cpl_rx_pkt *cpl = (void *)skb->data; | |
2852 | struct cpl_pass_accept_req *req = (void *)(rss + 1); | |
2853 | struct l2t_entry *e; | |
2854 | struct dst_entry *dst; | |
2855 | struct rtable *rt; | |
2856 | struct c4iw_ep *lep; | |
2857 | u16 window; | |
2858 | struct port_info *pi; | |
2859 | struct net_device *pdev; | |
2860 | u16 rss_qid; | |
2861 | int step; | |
2862 | u32 tx_chan; | |
2863 | struct neighbour *neigh; | |
2864 | ||
2865 | /* Drop all non-SYN packets */ | |
2866 | if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN))) | |
2867 | goto reject; | |
2868 | ||
2869 | /* | |
2870 | * Drop all packets which did not hit the filter. | |
2871 | * Unlikely to happen. | |
2872 | */ | |
2873 | if (!(rss->filter_hit && rss->filter_tid)) | |
2874 | goto reject; | |
2875 | ||
2876 | /* | |
2877 | * Calculate the server TID from the filter hit index in cpl_rx_pkt. |
2878 | */ | |
2879 | stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base | |
2880 | + dev->rdev.lldi.tids->nstids; | |
2881 | ||
2882 | lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); | |
2883 | if (!lep) { | |
2884 | PDBG("%s connect request on invalid stid %d\n", __func__, stid); | |
2885 | goto reject; | |
2886 | } | |
2887 | ||
2888 | if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) { | |
2889 | eh = (struct ethhdr *)(req + 1); | |
2890 | iph = (struct iphdr *)(eh + 1); | |
2891 | } else { | |
2892 | vlan_eh = (struct vlan_ethhdr *)(req + 1); | |
2893 | iph = (struct iphdr *)(vlan_eh + 1); | |
2894 | skb->vlan_tci = ntohs(cpl->vlan); | |
2895 | } | |
2896 | ||
2897 | if (iph->version != 0x4) | |
2898 | goto reject; | |
2899 | ||
2900 | tcph = (struct tcphdr *)(iph + 1); | |
2901 | skb_set_network_header(skb, (void *)iph - (void *)rss); | |
2902 | skb_set_transport_header(skb, (void *)tcph - (void *)rss); | |
2903 | skb_get(skb); | |
2904 | ||
2905 | PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__, | |
2906 | ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), | |
2907 | ntohs(tcph->source), iph->tos); | |
2908 | ||
2909 | rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source, | |
2910 | iph->tos); | |
2911 | if (!rt) { | |
2912 | pr_err("%s - failed to find dst entry!\n", | |
2913 | __func__); | |
2914 | goto reject; | |
2915 | } | |
2916 | dst = &rt->dst; | |
2917 | neigh = dst_neigh_lookup_skb(dst, skb); | |
2918 | ||
2919 | if (neigh->dev->flags & IFF_LOOPBACK) { | |
2920 | pdev = ip_dev_find(&init_net, iph->daddr); | |
2921 | e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, | |
2922 | pdev, 0); | |
2923 | pi = (struct port_info *)netdev_priv(pdev); | |
2924 | tx_chan = cxgb4_port_chan(pdev); | |
2925 | dev_put(pdev); | |
2926 | } else { | |
2927 | e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, | |
2928 | neigh->dev, 0); | |
2929 | pi = (struct port_info *)netdev_priv(neigh->dev); | |
2930 | tx_chan = cxgb4_port_chan(neigh->dev); | |
2931 | } | |
2932 | if (!e) { | |
2933 | pr_err("%s - failed to allocate l2t entry!\n", | |
2934 | __func__); | |
2935 | goto free_dst; | |
2936 | } | |
2937 | ||
2938 | step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; | |
2939 | rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; | |
2940 | window = htons(tcph->window); | |
2941 | ||
2942 | /* Calculate the filter portion for the LE region. */ |
2943 | filter = cpu_to_be32(select_ntuple(dev, dst, e)); | |
2944 | ||
2945 | /* | |
2946 | * Synthesize the cpl_pass_accept_req. We have everything except the | |
2947 | * TID.  Once the firmware sends a reply with the TID, we update the TID |
2948 | * field in the cpl and pass it through the regular cpl_pass_accept_req path. |
2949 | */ | |
2950 | build_cpl_pass_accept_req(skb, stid, iph->tos); | |
2951 | send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr, | |
2952 | tcph->source, ntohl(tcph->seq), filter, window, | |
2953 | rss_qid, pi->port_id); | |
2954 | cxgb4_l2t_release(e); | |
2955 | free_dst: | |
2956 | dst_release(dst); | |
2957 | reject: | |
2f5b48c3 SW |
2958 | return 0; |
2959 | } | |
2960 | ||
be4c9bad RD |
2961 | /* |
2962 | * These are the real handlers that are called from a | |
2963 | * work queue. | |
2964 | */ | |
2965 | static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { | |
2966 | [CPL_ACT_ESTABLISH] = act_establish, | |
2967 | [CPL_ACT_OPEN_RPL] = act_open_rpl, | |
2968 | [CPL_RX_DATA] = rx_data, | |
2969 | [CPL_ABORT_RPL_RSS] = abort_rpl, | |
2970 | [CPL_ABORT_RPL] = abort_rpl, | |
2971 | [CPL_PASS_OPEN_RPL] = pass_open_rpl, | |
2972 | [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl, | |
2973 | [CPL_PASS_ACCEPT_REQ] = pass_accept_req, | |
2974 | [CPL_PASS_ESTABLISH] = pass_establish, | |
2975 | [CPL_PEER_CLOSE] = peer_close, | |
2976 | [CPL_ABORT_REQ_RSS] = peer_abort, | |
2977 | [CPL_CLOSE_CON_RPL] = close_con_rpl, | |
2978 | [CPL_RDMA_TERMINATE] = terminate, | |
2f5b48c3 | 2979 | [CPL_FW4_ACK] = fw4_ack, |
1cab775c VP |
2980 | [CPL_FW6_MSG] = deferred_fw6_msg, |
2981 | [CPL_RX_PKT] = rx_pkt | |
be4c9bad RD |
2982 | }; |
2983 | ||
2984 | static void process_timeout(struct c4iw_ep *ep) | |
2985 | { | |
2986 | struct c4iw_qp_attributes attrs; | |
2987 | int abort = 1; | |
2988 | ||
2f5b48c3 | 2989 | mutex_lock(&ep->com.mutex); |
be4c9bad RD |
2990 | PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, |
2991 | ep->com.state); | |
2992 | switch (ep->com.state) { | |
2993 | case MPA_REQ_SENT: | |
2994 | __state_set(&ep->com, ABORTING); | |
2995 | connect_reply_upcall(ep, -ETIMEDOUT); | |
2996 | break; | |
2997 | case MPA_REQ_WAIT: | |
2998 | __state_set(&ep->com, ABORTING); | |
2999 | break; | |
3000 | case CLOSING: | |
3001 | case MORIBUND: | |
3002 | if (ep->com.cm_id && ep->com.qp) { | |
3003 | attrs.next_state = C4IW_QP_STATE_ERROR; | |
3004 | c4iw_modify_qp(ep->com.qp->rhp, | |
3005 | ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, | |
3006 | &attrs, 1); | |
3007 | } | |
3008 | __state_set(&ep->com, ABORTING); | |
3009 | break; | |
3010 | default: | |
76f267b7 | 3011 | WARN(1, "%s unexpected state ep %p tid %u state %u\n", |
be4c9bad | 3012 | __func__, ep, ep->hwtid, ep->com.state); |
be4c9bad RD |
3013 | abort = 0; |
3014 | } | |
2f5b48c3 | 3015 | mutex_unlock(&ep->com.mutex); |
be4c9bad RD |
3016 | if (abort) |
3017 | abort_connection(ep, NULL, GFP_KERNEL); | |
3018 | c4iw_put_ep(&ep->com); | |
3019 | } | |
3020 | ||
3021 | static void process_timedout_eps(void) | |
3022 | { | |
3023 | struct c4iw_ep *ep; | |
3024 | ||
3025 | spin_lock_irq(&timeout_lock); | |
3026 | while (!list_empty(&timeout_list)) { | |
3027 | struct list_head *tmp; | |
3028 | ||
3029 | tmp = timeout_list.next; | |
3030 | list_del(tmp); | |
3031 | spin_unlock_irq(&timeout_lock); | |
3032 | ep = list_entry(tmp, struct c4iw_ep, entry); | |
3033 | process_timeout(ep); | |
3034 | spin_lock_irq(&timeout_lock); | |
3035 | } | |
3036 | spin_unlock_irq(&timeout_lock); | |
3037 | } | |
3038 | ||
3039 | static void process_work(struct work_struct *work) | |
3040 | { | |
3041 | struct sk_buff *skb = NULL; | |
3042 | struct c4iw_dev *dev; | |
c1d7356c | 3043 | struct cpl_act_establish *rpl; |
be4c9bad RD |
3044 | unsigned int opcode; |
3045 | int ret; | |
3046 | ||
3047 | while ((skb = skb_dequeue(&rxq))) { | |
3048 | rpl = cplhdr(skb); | |
3049 | dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); | |
3050 | opcode = rpl->ot.opcode; | |
3051 | ||
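/*
 * The opcode sits at the same offset in every CPL header, so casting to
 * cpl_act_establish above is just a convenient way to read it (an
 * assumption based on the shared "ot" field). A nonzero return means the
 * handler has disposed of the skb itself; a zero return lets it be freed
 * here.
 */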
3052 | BUG_ON(!work_handlers[opcode]); | |
3053 | ret = work_handlers[opcode](dev, skb); | |
3054 | if (!ret) | |
3055 | kfree_skb(skb); | |
3056 | } | |
3057 | process_timedout_eps(); | |
3058 | } | |
3059 | ||
3060 | static DECLARE_WORK(skb_work, process_work); | |
3061 | ||
3062 | static void ep_timeout(unsigned long arg) | |
3063 | { | |
3064 | struct c4iw_ep *ep = (struct c4iw_ep *)arg; | |
3065 | ||
3066 | spin_lock(&timeout_lock); | |
3067 | list_add_tail(&ep->entry, &timeout_list); | |
3068 | spin_unlock(&timeout_lock); | |
3069 | queue_work(workq, &skb_work); | |
3070 | } | |
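/*
 * The timer path piggy-backs on the same work item as the CPL receive path:
 * process_work() drains rxq first and then calls process_timedout_eps(), so
 * timeouts are handled in the same serialized context as other CM events.
 */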
3071 | ||
cfdda9d7 SW |
3072 | /* |
3073 | * All the CM events are handled on a work queue to have a safe context. | |
3074 | */ | |
3075 | static int sched(struct c4iw_dev *dev, struct sk_buff *skb) | |
3076 | { | |
3077 | ||
3078 | /* | |
3079 | * Save dev in the skb->cb area. | |
3080 | */ | |
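/*
 * The pointer is stored one pointer-size into skb->cb, apparently leaving
 * the first slot free for other users of the cb area; process_work() reads
 * it back from the same offset.
 */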
3081 | *((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev; | |
3082 | ||
3083 | /* | |
3084 | * Queue the skb and schedule the worker thread. | |
3085 | */ | |
3086 | skb_queue_tail(&rxq, skb); | |
3087 | queue_work(workq, &skb_work); | |
3088 | return 0; | |
3089 | } | |
3090 | ||
3091 | static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |
3092 | { | |
3093 | struct cpl_set_tcb_rpl *rpl = cplhdr(skb); | |
3094 | ||
3095 | if (rpl->status != CPL_ERR_NONE) { | |
3096 | printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " | |
3097 | "for tid %u\n", rpl->status, GET_TID(rpl)); | |
3098 | } | |
2f5b48c3 | 3099 | kfree_skb(skb); |
cfdda9d7 SW |
3100 | return 0; |
3101 | } | |
3102 | ||
be4c9bad RD |
3103 | static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) |
3104 | { | |
3105 | struct cpl_fw6_msg *rpl = cplhdr(skb); | |
3106 | struct c4iw_wr_wait *wr_waitp; | |
3107 | int ret; | |
3108 | ||
3109 | PDBG("%s type %u\n", __func__, rpl->type); | |
3110 | ||
3111 | switch (rpl->type) { | |
5be78ee9 | 3112 | case FW6_TYPE_WR_RPL: |
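/*
 * data[0] carries the completion status in its second-lowest byte and
 * data[1] carries the c4iw_wr_wait cookie presumably stashed in the
 * original work request, so the waiter can be woken here.
 */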
be4c9bad | 3113 | ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); |
c8e081a1 | 3114 | wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; |
be4c9bad | 3115 | PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); |
d9594d99 SW |
3116 | if (wr_waitp) |
3117 | c4iw_wake_up(wr_waitp, ret ? -ret : 0); | |
2f5b48c3 | 3118 | kfree_skb(skb); |
be4c9bad | 3119 | break; |
5be78ee9 | 3120 | case FW6_TYPE_CQE: |
5be78ee9 | 3121 | case FW6_TYPE_OFLD_CONNECTION_WR_RPL: |
1cab775c | 3122 | sched(dev, skb); |
5be78ee9 | 3123 | break; |
be4c9bad RD |
3124 | default: |
3125 | printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, | |
3126 | rpl->type); | |
2f5b48c3 | 3127 | kfree_skb(skb); |
be4c9bad RD |
3128 | break; |
3129 | } | |
3130 | return 0; | |
3131 | } | |
3132 | ||
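/*
 * Peer aborts are intercepted here in the LLD receive path instead of going
 * straight to the work queue: a thread blocked in rdma_init() or
 * rdma_fini() may be waiting for a reply that will now never arrive, so it
 * is woken immediately before the abort itself is deferred via sched().
 */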
8da7e7a5 SW |
3133 | static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) |
3134 | { | |
3135 | struct cpl_abort_req_rss *req = cplhdr(skb); | |
3136 | struct c4iw_ep *ep; | |
3137 | struct tid_info *t = dev->rdev.lldi.tids; | |
3138 | unsigned int tid = GET_TID(req); | |
3139 | ||
3140 | ep = lookup_tid(t, tid); | |
14b92228 SW |
3141 | if (!ep) { |
3142 | printk(KERN_WARNING MOD | |
3143 | "Abort on non-existent endpoint, tid %u\n", tid); | |
3144 | kfree_skb(skb); | |
3145 | return 0; | |
3146 | } | |
8da7e7a5 SW |
3147 | if (is_neg_adv_abort(req->status)) { |
3148 | PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, | |
3149 | ep->hwtid); | |
3150 | kfree_skb(skb); | |
3151 | return 0; | |
3152 | } | |
3153 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, | |
3154 | ep->com.state); | |
3155 | ||
3156 | /* | |
3157 | * Wake up any threads in rdma_init() or rdma_fini(). | |
3158 | */ | |
0f1dcfae | 3159 | c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); |
8da7e7a5 SW |
3160 | sched(dev, skb); |
3161 | return 0; | |
3162 | } | |
3163 | ||
be4c9bad RD |
3164 | /* |
3165 | * Most upcalls from the T4 Core go to sched() to | |
3166 | * schedule the processing on a work queue. | |
3167 | */ | |
3168 | c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = { | |
3169 | [CPL_ACT_ESTABLISH] = sched, | |
3170 | [CPL_ACT_OPEN_RPL] = sched, | |
3171 | [CPL_RX_DATA] = sched, | |
3172 | [CPL_ABORT_RPL_RSS] = sched, | |
3173 | [CPL_ABORT_RPL] = sched, | |
3174 | [CPL_PASS_OPEN_RPL] = sched, | |
3175 | [CPL_CLOSE_LISTSRV_RPL] = sched, | |
3176 | [CPL_PASS_ACCEPT_REQ] = sched, | |
3177 | [CPL_PASS_ESTABLISH] = sched, | |
3178 | [CPL_PEER_CLOSE] = sched, | |
3179 | [CPL_CLOSE_CON_RPL] = sched, | |
8da7e7a5 | 3180 | [CPL_ABORT_REQ_RSS] = peer_abort_intr, |
be4c9bad RD |
3181 | [CPL_RDMA_TERMINATE] = sched, |
3182 | [CPL_FW4_ACK] = sched, | |
3183 | [CPL_SET_TCB_RPL] = set_tcb_rpl, | |
1cab775c VP |
3184 | [CPL_FW6_MSG] = fw6_msg, |
3185 | [CPL_RX_PKT] = sched | |
be4c9bad RD |
3186 | }; |
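/*
 * Rough sketch (not part of this file) of how the lower-level driver is
 * expected to dispatch received CPLs into the table above; the real entry
 * point lives in device.c and differs in detail:
 *
 *	static int rx_handler_sketch(struct c4iw_dev *dev, struct sk_buff *skb)
 *	{
 *		u8 opcode = *(u8 *)skb->data;	// first byte of the CPL
 *
 *		if (c4iw_handlers[opcode])
 *			return c4iw_handlers[opcode](dev, skb);
 *		kfree_skb(skb);
 *		return 0;
 *	}
 */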
3187 | ||
cfdda9d7 SW |
3188 | int __init c4iw_cm_init(void) |
3189 | { | |
be4c9bad | 3190 | spin_lock_init(&timeout_lock); |
cfdda9d7 SW |
3191 | skb_queue_head_init(&rxq); |
3192 | ||
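/*
 * A single-threaded workqueue serializes all deferred CM processing, so the
 * work_handlers entries never run concurrently with one another.
 */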
3193 | workq = create_singlethread_workqueue("iw_cxgb4"); | |
3194 | if (!workq) | |
3195 | return -ENOMEM; | |
3196 | ||
cfdda9d7 SW |
3197 | return 0; |
3198 | } | |
3199 | ||
3200 | void __exit c4iw_cm_term(void) | |
3201 | { | |
be4c9bad | 3202 | WARN_ON(!list_empty(&timeout_list)); |
cfdda9d7 SW |
3203 | flush_workqueue(workq); |
3204 | destroy_workqueue(workq); | |
3205 | } |