/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>

#include "tcb.h"
#include "cxgb3_offload.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
64 | ||
65 | static int ep_timeout_secs = 10; | |
66 | module_param(ep_timeout_secs, int, 0444); | |
67 | MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout " | |
68 | "in seconds (default=10)"); | |
69 | ||
70 | static int mpa_rev = 1; | |
71 | module_param(mpa_rev, int, 0444); | |
72 | MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, " | |
73 | "1 is spec compliant. (default=1)"); | |
74 | ||
75 | static int markers_enabled = 0; | |
76 | module_param(markers_enabled, int, 0444); | |
77 | MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)"); | |
78 | ||
79 | static int crc_enabled = 1; | |
80 | module_param(crc_enabled, int, 0444); | |
81 | MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)"); | |
82 | ||
83 | static int rcv_win = 256 * 1024; | |
84 | module_param(rcv_win, int, 0444); | |
85 | MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256)"); | |
86 | ||
87 | static int snd_win = 32 * 1024; | |
88 | module_param(snd_win, int, 0444); | |
89 | MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)"); | |
90 | ||
91 | static unsigned int nocong = 0; | |
92 | module_param(nocong, uint, 0444); | |
93 | MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)"); | |
94 | ||
95 | static unsigned int cong_flavor = 1; | |
96 | module_param(cong_flavor, uint, 0444); | |
97 | MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)"); | |
98 | ||
static void process_work(struct work_struct *work);
static struct workqueue_struct *workq;
static DECLARE_WORK(skb_work, process_work);

static struct sk_buff_head rxq;
static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct iwch_ep *ep, int status);

static void start_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	if (timer_pending(&ep->timer)) {
		PDBG("%s stopped / restarted timer ep %p\n", __FUNCTION__, ep);
		del_timer_sync(&ep->timer);
	} else
		get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void stop_ep_timer(struct iwch_ep *ep)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	del_timer_sync(&ep->timer);
	put_ep(&ep->com);
}

static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	skb->priority = CPL_PRIORITY_SETUP;
	cxgb3_ofld_send(tdev, skb);
	return;
}

int iwch_quiesce_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE);

	skb->priority = CPL_PRIORITY_DATA;
	cxgb3_ofld_send(ep->com.tdev, skb);
	return 0;
}

int iwch_resume_tid(struct iwch_ep *ep)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;
	req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid));
	req->reply = 0;
	req->cpu_idx = 0;
	req->word = htons(W_TCB_RX_QUIESCE);
	req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE);
	req->val = 0;

	skb->priority = CPL_PRIORITY_DATA;
	cxgb3_ofld_send(ep->com.tdev, skb);
	return 0;
}

static void set_emss(struct iwch_ep *ep, u16 opt)
{
	PDBG("%s ep %p opt %u\n", __FUNCTION__, ep, opt);
	/* MTU from the adapter table, less 40 bytes of IP and TCP headers */
	ep->emss = T3C_DATA(ep->com.tdev)->mtus[G_TCPOPT_MSS(opt)] - 40;
	if (G_TCPOPT_TSTAMP(opt))
		ep->emss -= 12;	/* TCP timestamp option */
	if (ep->emss < 128)
		ep->emss = 128;
	PDBG("emss=%d\n", ep->emss);
}

static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
{
	unsigned long flags;
	enum iwch_ep_state state;

	spin_lock_irqsave(&epc->lock, flags);
	state = epc->state;
	spin_unlock_irqrestore(&epc->lock, flags);
	return state;
}

static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	epc->state = new;
}

static void state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
{
	unsigned long flags;

	spin_lock_irqsave(&epc->lock, flags);
	PDBG("%s - %s -> %s\n", __FUNCTION__, states[epc->state], states[new]);
	__state_set(epc, new);
	spin_unlock_irqrestore(&epc->lock, flags);
	return;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct iwch_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		spin_lock_init(&epc->lock);
		init_waitqueue_head(&epc->waitq);
	}
	PDBG("%s alloc ep %p\n", __FUNCTION__, epc);
	return epc;
}

void __free_ep(struct kref *kref)
{
	struct iwch_ep_common *epc;

	epc = container_of(kref, struct iwch_ep_common, kref);
	PDBG("%s ep %p state %s\n", __FUNCTION__, epc, states[state_read(epc)]);
	kfree(epc);
}

static void release_ep_resources(struct iwch_ep *ep)
{
	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	void *ep;
	struct t3cdev *tdev;
	int ret;

	while ((skb = skb_dequeue(&rxq))) {
		ep = *((void **) (skb->cb));
		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);

		/*
		 * ep was referenced in sched(), and is freed here.
		 */
		put_ep((struct iwch_ep_common *)ep);
	}
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
	}
	return skb;
}

static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
				 __be32 peer_ip, __be16 local_port,
				 __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			 .ip4_u = {
				   .daddr = peer_ip,
				   .saddr = local_ip,
				   .tos = tos}
			 },
		.proto = IPPROTO_TCP,
		.uli_u = {
			  .ports = {
				    .sport = local_port,
				    .dport = peer_port}
			  }
	};

	if (ip_route_output_flow(&rt, &fl, NULL, 0))
		return NULL;
	return rt;
}

static unsigned int find_best_mtu(const struct t3c_data *d, unsigned short mtu)
{
	int i = 0;

	while (i < d->nmtus - 1 && d->mtus[i + 1] <= mtu)
		++i;
	return i;
}

static void arp_failure_discard(struct t3cdev *dev, struct sk_buff *skb)
{
	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	printk(KERN_ERR MOD "ARP failure during connect\n");
	kfree_skb(skb);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req *req = cplhdr(skb);

	PDBG("%s t3cdev %p\n", __FUNCTION__, dev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(dev, skb);
}

static int send_halfclose(struct iwch_ep *ep, gfp_t gfp)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid));
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	struct cpl_abort_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(skb, sizeof(*req), gfp);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, abort_arp_failure);
	req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
	req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_connect(struct iwch_ep *ep)
{
	struct cpl_act_open_req *req;
	struct sk_buff *skb;
	u32 opt0h, opt0l, opt2;
	unsigned int mtu_idx;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __FUNCTION__);
		return -ENOMEM;
	}
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);
	skb->priority = CPL_PRIORITY_SETUP;
	set_arp_failure_handler(skb, act_open_req_arp_failure);

	req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid));
	req->local_port = ep->com.local_addr.sin_port;
	req->peer_port = ep->com.remote_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
	req->opt0h = htonl(opt0h);
	req->opt0l = htonl(opt0l);
	req->params = 0;
	req->opt2 = htonl(opt2);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;

	PDBG("%s ep %p pd_len %d\n", __FUNCTION__, ep, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) {
		kfree_skb(skb);
		skb = alloc_skb(mpalen + sizeof(*req), GFP_KERNEL);
		if (!skb) {
			connect_reply_upcall(ep, -ENOMEM);
			return;
		}
	}
	skb_trim(skb, 0);
	skb_reserve(skb, sizeof(*req));
	skb_put(skb, mpalen);
	skb->priority = CPL_PRIORITY_DATA;
	mpa = (struct mpa_message *) skb->data;
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev;

	if (ep->plen)
		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	start_ep_timer(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	return;
}

static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	skb->priority = CPL_PRIORITY_DATA;
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(mpalen);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct tx_data_wr *req;
	struct mpa_message *mpa;
	int len;
	struct sk_buff *skb;

	PDBG("%s ep %p plen %d\n", __FUNCTION__, ep, plen);

	mpalen = sizeof(*mpa) + plen;

	skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __FUNCTION__);
		return -ENOMEM;
	}
	skb->priority = CPL_PRIORITY_DATA;
	skb_reserve(skb, sizeof(*req));
	mpa = (struct mpa_message *) skb_put(skb, mpalen);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		     (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);
	if (plen)
		memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function tx_ack() will deref it.
	 */
	skb_get(skb);
	set_arp_failure_handler(skb, arp_failure_discard);
	skb_reset_transport_header(skb);
	len = skb->len;
	req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
	req->wr_lo = htonl(V_WR_TID(ep->hwtid));
	req->len = htonl(len);
	req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
			   V_TX_SNDBUF(snd_win>>15));
	req->flags = htonl(F_TX_INIT);
	req->sndseq = htonl(ep->snd_seq);
	ep->mpa_skb = skb;
	state_set(&ep->com, MPA_REP_SENT);
	l2t_send(ep->com.tdev, skb, ep->l2t);
	return 0;
}

static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, tid);

	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb3_insert_tid(ep->com.tdev, &t3c_client, ep, tid);

	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	cxgb3_free_atid(ep->com.tdev, ep->atid);

	/* start MPA negotiation */
	send_mpa_req(ep, skb);

	return 0;
}

static void abort_connection(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
	PDBG("%s ep %p\n", __FUNCTION__, ep);
	state_set(&ep->com, ABORTING);
	send_abort(ep, skb, gfp);
}

static void close_complete_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	if (ep->com.cm_id) {
		PDBG("close complete delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void peer_close_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		PDBG("peer close delivered ep %p cm_id %p tid %d\n",
		     ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static void peer_abort_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		PDBG("abort delivered ep %p cm_id %p tid %d\n", ep,
		     ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_reply_upcall(struct iwch_ep *ep, int status)
{
	struct iw_cm_event event;

	PDBG("%s ep %p status %d\n", __FUNCTION__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d status %d\n", __FUNCTION__, ep,
		     ep->hwtid, status);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
	if (status < 0) {
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}
}

static void connect_request_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
	if (state_read(&ep->parent_ep->com) != DEAD)
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
}

static void established_upcall(struct iwch_ep *ep)
{
	struct iw_cm_event event;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	if (ep->com.cm_id) {
		PDBG("%s ep %p tid %d\n", __FUNCTION__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}
}

static int update_rx_credits(struct iwch_ep *ep, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid));
	req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1));
	skb->priority = CPL_PRIORITY_ACK;
	cxgb3_ofld_send(ep->com.tdev, skb);
	return credits;
}

static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;
	struct iwch_qp_attributes attrs;
	enum iwch_qp_attr_mask mask;
	int err;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision != mpa_rev) {
		err = -EPROTO;
		goto err;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err;
	}

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.  And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = IWCH_QP_STATE_RTS;

	mask = IWCH_QP_ATTR_NEXT_STATE |
	       IWCH_QP_ATTR_LLP_STREAM_HANDLE | IWCH_QP_ATTR_MPA_ATTR |
	       IWCH_QP_ATTR_MAX_IRD | IWCH_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (!err)
		goto out;
err:
	abort_connection(ep, skb, GFP_KERNEL);
out:
	connect_reply_upcall(ep, err);
	return;
}

static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	u16 plen;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	stop_ep_timer(ep);
	if (state_read(&ep->com) != MPA_REQ_WAIT)
		return;

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	PDBG("%s enter (%s line %u)\n", __FUNCTION__, __FILE__, __LINE__);
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision != mpa_rev) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}

	/*
	 * Fail if plen does not account for the packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		abort_connection(ep, skb, GFP_KERNEL);
		return;
	}
	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start request message including private data.
	 */
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa_rev;
	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
	     "xmit_marker_enabled=%d, version=%d\n", __FUNCTION__,
	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);

	/* drive upcall */
	connect_request_upcall(ep);
	return;
}

static int rx_data(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_rx_data *hdr = cplhdr(skb);
	unsigned int dlen = ntohs(hdr->len);

	PDBG("%s ep %p dlen %u\n", __FUNCTION__, ep, dlen);

	skb_pull(skb, sizeof(*hdr));
	skb_trim(skb, dlen);

	ep->rcv_seq += dlen;
	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep, skb);
		break;
	case MPA_REQ_WAIT:
		process_mpa_request(ep, skb);
		break;
	case MPA_REP_SENT:
		break;
	default:
		printk(KERN_ERR MOD "%s Unexpected streaming data."
		       " ep %p state %d tid %d\n",
		       __FUNCTION__, ep, state_read(&ep->com), ep->hwtid);

		/*
		 * The ep will timeout and inform the ULP of the failure.
		 * See ep_timeout().
		 */
		break;
	}

	/* update RX credits */
	update_rx_credits(ep, dlen);

	return CPL_RET_BUF_DONE;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply.  We can now free
 * the skb holding the mpa message.
 */
static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_wr_ack *hdr = cplhdr(skb);
	unsigned int credits = ntohs(hdr->credits);

	PDBG("%s ep %p credits %u\n", __FUNCTION__, ep, credits);

	if (credits == 0)
		return CPL_RET_BUF_DONE;
	BUG_ON(credits != 1);
	BUG_ON(ep->mpa_skb == NULL);
	kfree_skb(ep->mpa_skb);
	ep->mpa_skb = NULL;
	dst_confirm(ep->dst);
	if (state_read(&ep->com) == MPA_REP_SENT) {
		ep->com.rpl_done = 1;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
	}
	return CPL_RET_BUF_DONE;
}

static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;

	PDBG("%s ep %p\n", __FUNCTION__, ep);

	/*
	 * We get 2 abort replies from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) {
		ep->flags |= ABORT_REQ_IN_PROGRESS;
		return CPL_RET_BUF_DONE;
	}

	close_complete_upcall(ep);
	state_set(&ep->com, DEAD);
	release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Return whether a failed active open has allocated a TID
 */
static inline int act_open_has_tid(int status)
{
	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
	       status != CPL_ERR_ARP_MISS;
}

static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_act_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %u errno %d\n", __FUNCTION__, ep, rpl->status,
	     status2errno(rpl->status));
	connect_reply_upcall(ep, status2errno(rpl->status));
	state_set(&ep->com, DEAD);
	if (ep->com.tdev->type == T3B && act_open_has_tid(rpl->status))
		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
	cxgb3_free_atid(ep->com.tdev, ep->atid);
	dst_release(ep->dst);
	l2t_release(L2DATA(ep->com.tdev), ep->l2t);
	put_ep(&ep->com);
	return CPL_RET_BUF_DONE;
}

static int listen_start(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_pass_open_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "t3c_listen_start failed to alloc skb!\n");
		return -ENOMEM;
	}

	req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid));
	req->local_port = ep->com.local_addr.sin_port;
	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
	req->peer_port = 0;
	req->peer_ip = 0;
	req->peer_netmask = 0;
	req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS);
	req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10));
	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));

	skb->priority = 1;
	cxgb3_ofld_send(ep->com.tdev, skb);
	return 0;
}

static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p status %d error %d\n", __FUNCTION__, ep,
	     rpl->status, status2errno(rpl->status));
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);

	return CPL_RET_BUF_DONE;
}

static int listen_stop(struct iwch_listen_ep *ep)
{
	struct sk_buff *skb;
	struct cpl_close_listserv_req *req;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __FUNCTION__);
		return -ENOMEM;
	}
	req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	req->cpu_idx = 0;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid));
	skb->priority = 1;
	cxgb3_ofld_send(ep->com.tdev, skb);
	return 0;
}

static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb,
			     void *ctx)
{
	struct iwch_listen_ep *ep = ctx;
	struct cpl_close_listserv_rpl *rpl = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->com.rpl_err = status2errno(rpl->status);
	ep->com.rpl_done = 1;
	wake_up(&ep->com.waitq);
	return CPL_RET_BUF_DONE;
}

static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb)
{
	struct cpl_pass_accept_rpl *rpl;
	unsigned int mtu_idx;
	u32 opt0h, opt0l, opt2;
	int wscale;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(*rpl));
	skb_get(skb);
	mtu_idx = find_best_mtu(T3C_DATA(ep->com.tdev), dst_mtu(ep->dst));
	wscale = compute_wscale(rcv_win);
	opt0h = V_NAGLE(0) |
	    V_NO_CONG(nocong) |
	    V_KEEP_ALIVE(1) |
	    F_TCAM_BYPASS |
	    V_WND_SCALE(wscale) |
	    V_MSS_IDX(mtu_idx) |
	    V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx);
	opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10);
	opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor);

	rpl = cplhdr(skb);
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, ep->hwtid));
	rpl->peer_ip = peer_ip;
	rpl->opt0h = htonl(opt0h);
	rpl->opt0l_status = htonl(opt0l | CPL_PASS_OPEN_ACCEPT);
	rpl->opt2 = htonl(opt2);
	rpl->rsvd = rpl->opt2;	/* workaround for HW bug */
	skb->priority = CPL_PRIORITY_SETUP;
	l2t_send(ep->com.tdev, skb, ep->l2t);

	return;
}

static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
		      struct sk_buff *skb)
{
	PDBG("%s t3cdev %p tid %u peer_ip %x\n", __FUNCTION__, tdev, hwtid,
	     peer_ip);
	BUG_ON(skb_cloned(skb));
	skb_trim(skb, sizeof(struct cpl_tid_release));
	skb_get(skb);

	if (tdev->type == T3B)
		release_tid(tdev, hwtid, skb);
	else {
		struct cpl_pass_accept_rpl *rpl;

		rpl = cplhdr(skb);
		skb->priority = CPL_PRIORITY_SETUP;
		rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						      hwtid));
		rpl->peer_ip = peer_ip;
		rpl->opt0h = htonl(F_TCAM_BYPASS);
		rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
		rpl->opt2 = 0;
		rpl->rsvd = rpl->opt2;
		cxgb3_ofld_send(tdev, skb);
	}
}

static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *child_ep, *parent_ep = ctx;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int hwtid = GET_TID(req);
	struct dst_entry *dst;
	struct l2t_entry *l2t;
	struct rtable *rt;
	struct iff_mac tim;

	PDBG("%s parent ep %p tid %u\n", __FUNCTION__, parent_ep, hwtid);

	if (state_read(&parent_ep->com) != LISTEN) {
		printk(KERN_ERR MOD "%s - listening ep not in LISTEN\n",
		       __FUNCTION__);
		goto reject;
	}

	/*
	 * Find the netdev for this connection request.
	 */
	tim.mac_addr = req->dst_mac;
	tim.vlan_tag = ntohs(req->vlan_tag);
	if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
		printk(KERN_ERR MOD
		       "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
		       __FUNCTION__,
		       req->dst_mac[0],
		       req->dst_mac[1],
		       req->dst_mac[2],
		       req->dst_mac[3],
		       req->dst_mac[4],
		       req->dst_mac[5]);
		goto reject;
	}

	/* Find output route */
	rt = find_route(tdev,
			req->local_ip,
			req->peer_ip,
			req->local_port,
			req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid)));
	if (!rt) {
		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
		       __FUNCTION__);
		goto reject;
	}
	dst = &rt->u.dst;
	l2t = t3_l2t_get(tdev, dst->neighbour, dst->neighbour->dev);
	if (!l2t) {
		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
		       __FUNCTION__);
		dst_release(dst);
		goto reject;
	}
	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
	if (!child_ep) {
		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
		       __FUNCTION__);
		l2t_release(L2DATA(tdev), l2t);
		dst_release(dst);
		goto reject;
	}
	state_set(&child_ep->com, CONNECTING);
	child_ep->com.tdev = tdev;
	child_ep->com.cm_id = NULL;
	child_ep->com.local_addr.sin_family = PF_INET;
	child_ep->com.local_addr.sin_port = req->local_port;
	child_ep->com.local_addr.sin_addr.s_addr = req->local_ip;
	child_ep->com.remote_addr.sin_family = PF_INET;
	child_ep->com.remote_addr.sin_port = req->peer_port;
	child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip;
	get_ep(&parent_ep->com);
	child_ep->parent_ep = parent_ep;
	child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid));
	child_ep->l2t = l2t;
	child_ep->dst = dst;
	child_ep->hwtid = hwtid;
	init_timer(&child_ep->timer);
	cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
	accept_cr(child_ep, req->peer_ip, skb);
	goto out;
reject:
	reject_cr(tdev, hwtid, req->peer_ip, skb);
out:
	return CPL_RET_BUF_DONE;
}

static int pass_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct cpl_pass_establish *req = cplhdr(skb);

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	ep->snd_seq = ntohl(req->snd_isn);
	ep->rcv_seq = ntohl(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	dst_confirm(ep->dst);
	state_set(&ep->com, MPA_REQ_WAIT);
	start_ep_timer(ep);

	return CPL_RET_BUF_DONE;
}

static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct iwch_ep *ep = ctx;
	struct iwch_qp_attributes attrs;
	unsigned long flags;
	int disconnect = 1;
	int release = 0;

	PDBG("%s ep %p\n", __FUNCTION__, ep);
	dst_confirm(ep->dst);

	spin_lock_irqsave(&ep->com.lock, flags);
	switch (ep->com.state) {
	case MPA_REQ_WAIT:
		__state_set(&ep->com, CLOSING);
		break;
	case MPA_REQ_SENT:
		__state_set(&ep->com, CLOSING);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		__state_set(&ep->com, CLOSING);
		get_ep(&ep->com);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case FPDU_MODE:
		start_ep_timer(ep);
		__state_set(&ep->com, CLOSING);
		attrs.next_state = IWCH_QP_STATE_CLOSING;
		iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;
	case ABORTING:
		disconnect = 0;
		break;
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		disconnect = 0;
		break;
	case MORIBUND:
		stop_ep_timer(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_IDLE;
			iwch_modify_qp(ep->com.qp->rhp, ep->com.qp,
				       IWCH_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep);
		__state_set(&ep->com, DEAD);
		release = 1;
		disconnect = 0;
		break;
	case DEAD:
		disconnect = 0;
		break;
	default:
		BUG_ON(1);
	}
	spin_unlock_irqrestore(&ep->com.lock, flags);
	if (disconnect)
		iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	if (release)
		release_ep_resources(ep);
	return CPL_RET_BUF_DONE;
}

/*
 * Returns whether an ABORT_REQ_RSS message is a negative advice.
 */
static int is_neg_adv_abort(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
	       status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct iwch_ep *ep = ctx;
	struct cpl_abort_rpl *rpl;
	struct sk_buff *rpl_skb;
	struct iwch_qp_attributes attrs;
	int ret;
	int state;

	if (is_neg_adv_abort(req->status)) {
		PDBG("%s neg_adv_abort ep %p tid %d\n", __FUNCTION__, ep,
		     ep->hwtid);
		t3_l2t_send_event(ep->com.tdev, ep->l2t);
		return CPL_RET_BUF_DONE;
	}

	/*
	 * We get 2 peer aborts from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
	if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) {
		ep->flags |= PEER_ABORT_IN_PROGRESS;
		return CPL_RET_BUF_DONE;
	}

	state = state_read(&ep->com);
	PDBG("%s ep %p state %u\n", __FUNCTION__, ep, state);
	switch (state) {
	case CONNECTING:
		break;
	case MPA_REQ_WAIT:
		stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		stop_ep_timer(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;
	case MPA_REP_SENT:
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		get_ep(&ep->com);
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = IWCH_QP_STATE_ERROR;
			ret = iwch_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __FUNCTION__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __FUNCTION__);
		return CPL_RET_BUF_DONE;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);

	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
	if (!rpl_skb) {
		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
		       __FUNCTION__);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
		put_ep(&ep->com);
		return CPL_RET_BUF_DONE;
	}
	rpl_skb->priority = CPL_PRIORITY_DATA;
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
	rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid));
	OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	cxgb3_ofld_send(ep->com.tdev, rpl_skb);
	if (state != ABORTING) {
		state_set(&ep->com, DEAD);
		release_ep_resources(ep);
	}
	return CPL_RET_BUF_DONE;
}

1568 | static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |
1569 | { | |
1570 | struct iwch_ep *ep = ctx; | |
1571 | struct iwch_qp_attributes attrs; | |
1572 | unsigned long flags; | |
1573 | int release = 0; | |
1574 | ||
1575 | PDBG("%s ep %p\n", __FUNCTION__, ep); | |
1576 | BUG_ON(!ep); | |
1577 | ||
1578 | /* The cm_id may be null if we failed to connect */ | |
1579 | spin_lock_irqsave(&ep->com.lock, flags); | |
1580 | switch (ep->com.state) { | |
1581 | case CLOSING: | |
b038ced7 SW |
1582 | __state_set(&ep->com, MORIBUND); |
1583 | break; | |
1584 | case MORIBUND: | |
1585 | stop_ep_timer(ep); | |
1586 | if ((ep->com.cm_id) && (ep->com.qp)) { | |
1587 | attrs.next_state = IWCH_QP_STATE_IDLE; | |
1588 | iwch_modify_qp(ep->com.qp->rhp, | |
1589 | ep->com.qp, | |
1590 | IWCH_QP_ATTR_NEXT_STATE, | |
1591 | &attrs, 1); | |
1592 | } | |
1593 | close_complete_upcall(ep); | |
1594 | __state_set(&ep->com, DEAD); | |
1595 | release = 1; | |
1596 | break; | |
42e31753 SW |
1597 | case ABORTING: |
1598 | break; | |
b038ced7 SW |
1599 | case DEAD: |
1600 | default: | |
1601 | BUG(); |
1602 | break; | |
1603 | } | |
1604 | spin_unlock_irqrestore(&ep->com.lock, flags); | |
1605 | if (release) | |
1606 | release_ep_resources(ep); | |
1607 | return CPL_RET_BUF_DONE; | |
1608 | } | |
1609 | ||
1610 | /* | |
1611 | * T3A does 3 things when a TERM is received: | |
1612 | * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet | |
1613 | * 2) generate an async event on the QP with the TERMINATE opcode | |
1614 | * 3) post a TERMINATE opcode CQE into the associated CQ. |
1615 | * | |
1616 | * For (1), we save the message in the qp for the consumer to read later. |
1617 | * For (2), we move the QP into TERMINATE, post a QP event and disconnect. | |
1618 | * For (3), we toss the CQE in cxio_poll_cq(). | |
1619 | * | |
1620 | * terminate() handles case (1)... | |
1621 | */ | |
1622 | static int terminate(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |
1623 | { | |
1624 | struct iwch_ep *ep = ctx; | |
1625 | ||
1626 | PDBG("%s ep %p\n", __FUNCTION__, ep); | |
1627 | skb_pull(skb, sizeof(struct cpl_rdma_terminate)); | |
1628 | PDBG("%s saving %d bytes of term msg\n", __FUNCTION__, skb->len); | |
d626f62b ACM |
1629 | skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, |
1630 | skb->len); | |
b038ced7 SW |
1631 | ep->com.qp->attr.terminate_msg_len = skb->len; |
1632 | ep->com.qp->attr.is_terminate_local = 0; | |
1633 | return CPL_RET_BUF_DONE; | |
1634 | } | |
1635 | ||
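| /* |
|  * Handle CPL_RDMA_EC_STATUS.  A non-zero status means the graceful |
|  * close failed, so the QP is forced into ERROR and the connection |
|  * is aborted. |
|  */ |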
1636 | static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |
1637 | { | |
1638 | struct cpl_rdma_ec_status *rep = cplhdr(skb); | |
1639 | struct iwch_ep *ep = ctx; | |
1640 | ||
1641 | PDBG("%s ep %p tid %u status %d\n", __FUNCTION__, ep, ep->hwtid, | |
1642 | rep->status); | |
1643 | if (rep->status) { | |
1644 | struct iwch_qp_attributes attrs; | |
1645 | ||
1646 | printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n", | |
1647 | __FUNCTION__, ep->hwtid); | |
2f236735 | 1648 | stop_ep_timer(ep); |
b038ced7 SW |
1649 | attrs.next_state = IWCH_QP_STATE_ERROR; |
1650 | iwch_modify_qp(ep->com.qp->rhp, | |
1651 | ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, | |
1652 | &attrs, 1); | |
1653 | abort_connection(ep, NULL, GFP_KERNEL); | |
1654 | } | |
1655 | return CPL_RET_BUF_DONE; | |
1656 | } | |
1657 | ||
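| /* |
|  * Endpoint timer expiry: an MPA exchange or close did not complete |
|  * within ep_timeout_secs.  Fail any pending connect, move a bound QP |
|  * to ERROR, then abort the connection. |
|  */ |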
1658 | static void ep_timeout(unsigned long arg) | |
1659 | { | |
1660 | struct iwch_ep *ep = (struct iwch_ep *)arg; | |
1661 | struct iwch_qp_attributes attrs; | |
1662 | unsigned long flags; | |
1663 | ||
1664 | spin_lock_irqsave(&ep->com.lock, flags); | |
1665 | PDBG("%s ep %p tid %u state %d\n", __FUNCTION__, ep, ep->hwtid, | |
1666 | ep->com.state); | |
1667 | switch (ep->com.state) { | |
1668 | case MPA_REQ_SENT: | |
1669 | connect_reply_upcall(ep, -ETIMEDOUT); | |
1670 | break; | |
1671 | case MPA_REQ_WAIT: | |
1672 | break; | |
42e31753 | 1673 | case CLOSING: |
b038ced7 SW |
1674 | case MORIBUND: |
1675 | if (ep->com.cm_id && ep->com.qp) { | |
1676 | attrs.next_state = IWCH_QP_STATE_ERROR; | |
1677 | iwch_modify_qp(ep->com.qp->rhp, | |
1678 | ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, | |
1679 | &attrs, 1); | |
1680 | } | |
1681 | break; | |
1682 | default: | |
1683 | BUG(); | |
1684 | } | |
1685 | __state_set(&ep->com, CLOSING); | |
1686 | spin_unlock_irqrestore(&ep->com.lock, flags); | |
1687 | abort_connection(ep, NULL, GFP_ATOMIC); | |
1688 | put_ep(&ep->com); | |
1689 | } | |
1690 | ||
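| /* |
|  * ULP rejects an inbound connection request.  With mpa_rev 0 (the |
|  * amso1100-compatible mode) there is no MPA reject message, so the |
|  * connection is simply aborted; otherwise an MPA reject carrying the |
|  * private data is sent, followed by a graceful disconnect. |
|  */ |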
1691 | int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) | |
1692 | { | |
1693 | int err; | |
1694 | struct iwch_ep *ep = to_ep(cm_id); | |
1695 | PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid); | |
1696 | ||
1697 | if (state_read(&ep->com) == DEAD) { | |
1698 | put_ep(&ep->com); | |
1699 | return -ECONNRESET; | |
1700 | } | |
1701 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | |
b038ced7 SW |
1702 | if (mpa_rev == 0) |
1703 | abort_connection(ep, NULL, GFP_KERNEL); | |
1704 | else { | |
1705 | err = send_mpa_reject(ep, pdata, pdata_len); | |
7d526e6b | 1706 | err = iwch_ep_disconnect(ep, 0, GFP_KERNEL); |
b038ced7 SW |
1707 | } |
1708 | return 0; | |
1709 | } | |
1710 | ||
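| /* |
|  * ULP accepts an inbound connection request: validate the requested |
|  * IRD/ORD against the device limits, bind the QP to the endpoint and |
|  * move it to RTS, send the MPA reply, then wait for the TX ack before |
|  * declaring the connection established. |
|  */ |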
1711 | int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |
1712 | { | |
1713 | int err; | |
1714 | struct iwch_qp_attributes attrs; | |
1715 | enum iwch_qp_attr_mask mask; | |
1716 | struct iwch_ep *ep = to_ep(cm_id); | |
1717 | struct iwch_dev *h = to_iwch_dev(cm_id->device); | |
1718 | struct iwch_qp *qp = get_qhp(h, conn_param->qpn); | |
1719 | ||
1720 | PDBG("%s ep %p tid %u\n", __FUNCTION__, ep, ep->hwtid); | |
de3d3530 | 1721 | if (state_read(&ep->com) == DEAD) |
b038ced7 | 1722 | return -ECONNRESET; |
b038ced7 SW |
1723 | |
1724 | BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); | |
1725 | BUG_ON(!qp); | |
1726 | ||
1727 | if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) || | |
1728 | (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) { | |
1729 | abort_connection(ep, NULL, GFP_KERNEL); | |
1730 | return -EINVAL; | |
1731 | } | |
1732 | ||
1733 | cm_id->add_ref(cm_id); | |
1734 | ep->com.cm_id = cm_id; | |
1735 | ep->com.qp = qp; | |
1736 | ||
1737 | ep->com.rpl_done = 0; | |
1738 | ep->com.rpl_err = 0; | |
1739 | ep->ird = conn_param->ird; | |
1740 | ep->ord = conn_param->ord; | |
1741 | PDBG("%s %d ird %d ord %d\n", __FUNCTION__, __LINE__, ep->ird, ep->ord); | |
de3d3530 | 1742 | |
b038ced7 | 1743 | get_ep(&ep->com); |
b038ced7 SW |
1744 | |
1745 | /* bind QP to EP and move to RTS */ | |
1746 | attrs.mpa_attr = ep->mpa_attr; | |
1747 | attrs.max_ird = ep->ird; |
1748 | attrs.max_ord = ep->ord; | |
1749 | attrs.llp_stream_handle = ep; | |
1750 | attrs.next_state = IWCH_QP_STATE_RTS; | |
1751 | ||
1752 | /* bind QP and TID with INIT_WR */ | |
1753 | mask = IWCH_QP_ATTR_NEXT_STATE | | |
1754 | IWCH_QP_ATTR_LLP_STREAM_HANDLE | | |
1755 | IWCH_QP_ATTR_MPA_ATTR | | |
1756 | IWCH_QP_ATTR_MAX_IRD | | |
1757 | IWCH_QP_ATTR_MAX_ORD; | |
1758 | ||
1759 | err = iwch_modify_qp(ep->com.qp->rhp, | |
1760 | ep->com.qp, mask, &attrs, 1); | |
de3d3530 SW |
1761 | if (err) |
1762 | goto err; | |
b038ced7 | 1763 | |
de3d3530 SW |
1764 | err = send_mpa_reply(ep, conn_param->private_data, |
1765 | conn_param->private_data_len); | |
1766 | if (err) | |
1767 | goto err; | |
1768 | ||
1769 | /* wait for wr_ack */ | |
1770 | wait_event(ep->com.waitq, ep->com.rpl_done); | |
1771 | err = ep->com.rpl_err; | |
1772 | if (err) | |
1773 | goto err; | |
1774 | ||
1775 | state_set(&ep->com, FPDU_MODE); | |
1776 | established_upcall(ep); | |
1777 | put_ep(&ep->com); | |
1778 | return 0; | |
1779 | err: | |
1780 | ep->com.cm_id = NULL; | |
1781 | ep->com.qp = NULL; | |
1782 | cm_id->rem_ref(cm_id); | |
b038ced7 SW |
1783 | put_ep(&ep->com); |
1784 | return err; | |
1785 | } | |
1786 | ||
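| /* |
|  * Active open: allocate an endpoint and an active TID, resolve a |
|  * route and an L2T entry to the peer, then hand the connect request |
|  * to the RNIC.  Failures unwind the allocations in reverse order. |
|  */ |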
1787 | int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |
1788 | { | |
1789 | int err = 0; | |
1790 | struct iwch_dev *h = to_iwch_dev(cm_id->device); | |
1791 | struct iwch_ep *ep; | |
1792 | struct rtable *rt; | |
1793 | ||
1794 | ep = alloc_ep(sizeof(*ep), GFP_KERNEL); | |
1795 | if (!ep) { | |
1796 | printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__); | |
1797 | err = -ENOMEM; | |
1798 | goto out; | |
1799 | } | |
1800 | init_timer(&ep->timer); | |
1801 | ep->plen = conn_param->private_data_len; | |
1802 | if (ep->plen) | |
1803 | memcpy(ep->mpa_pkt + sizeof(struct mpa_message), | |
1804 | conn_param->private_data, ep->plen); | |
1805 | ep->ird = conn_param->ird; | |
1806 | ep->ord = conn_param->ord; | |
1807 | ep->com.tdev = h->rdev.t3cdev_p; | |
1808 | ||
1809 | cm_id->add_ref(cm_id); | |
1810 | ep->com.cm_id = cm_id; | |
1811 | ep->com.qp = get_qhp(h, conn_param->qpn); | |
1812 | BUG_ON(!ep->com.qp); | |
1813 | PDBG("%s qpn 0x%x qp %p cm_id %p\n", __FUNCTION__, conn_param->qpn, | |
1814 | ep->com.qp, cm_id); | |
1815 | ||
1816 | /* | |
1817 | * Allocate an active TID to initiate a TCP connection. | |
1818 | */ | |
1819 | ep->atid = cxgb3_alloc_atid(h->rdev.t3cdev_p, &t3c_client, ep); | |
1820 | if (ep->atid == -1) { | |
1821 | printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __FUNCTION__); | |
1822 | err = -ENOMEM; | |
1823 | goto fail2; | |
1824 | } | |
1825 | ||
1826 | /* find a route */ | |
1827 | rt = find_route(h->rdev.t3cdev_p, | |
1828 | cm_id->local_addr.sin_addr.s_addr, | |
1829 | cm_id->remote_addr.sin_addr.s_addr, | |
1830 | cm_id->local_addr.sin_port, | |
1831 | cm_id->remote_addr.sin_port, IPTOS_LOWDELAY); | |
1832 | if (!rt) { | |
1833 | printk(KERN_ERR MOD "%s - cannot find route.\n", __FUNCTION__); | |
1834 | err = -EHOSTUNREACH; | |
1835 | goto fail3; | |
1836 | } | |
1837 | ep->dst = &rt->u.dst; | |
1838 | ||
1839 | /* get a l2t entry */ | |
1840 | ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst->neighbour, | |
1841 | ep->dst->neighbour->dev); | |
1842 | if (!ep->l2t) { | |
1843 | printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __FUNCTION__); | |
1844 | err = -ENOMEM; | |
1845 | goto fail4; | |
1846 | } | |
1847 | ||
1848 | state_set(&ep->com, CONNECTING); | |
1849 | ep->tos = IPTOS_LOWDELAY; | |
1850 | ep->com.local_addr = cm_id->local_addr; | |
1851 | ep->com.remote_addr = cm_id->remote_addr; | |
1852 | ||
1853 | /* send connect request to rnic */ | |
1854 | err = send_connect(ep); | |
1855 | if (!err) | |
1856 | goto out; | |
1857 | ||
1858 | l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); | |
1859 | fail4: | |
1860 | dst_release(ep->dst); | |
1861 | fail3: | |
1862 | cxgb3_free_atid(ep->com.tdev, ep->atid); | |
1863 | fail2: | |
1864 | put_ep(&ep->com); | |
1865 | out: | |
1866 | return err; | |
1867 | } | |
1868 | ||
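| /* |
|  * Passive open: allocate a listening endpoint and a server TID, start |
|  * the HW listener, then block until the CPL_PASS_OPEN_RPL arrives. |
|  */ |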
1869 | int iwch_create_listen(struct iw_cm_id *cm_id, int backlog) | |
1870 | { | |
1871 | int err = 0; | |
1872 | struct iwch_dev *h = to_iwch_dev(cm_id->device); | |
1873 | struct iwch_listen_ep *ep; | |
1874 | ||
1875 | ||
1876 | might_sleep(); | |
1877 | ||
1878 | ep = alloc_ep(sizeof(*ep), GFP_KERNEL); | |
1879 | if (!ep) { | |
1880 | printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __FUNCTION__); | |
1881 | err = -ENOMEM; | |
1882 | goto fail1; | |
1883 | } | |
1884 | PDBG("%s ep %p\n", __FUNCTION__, ep); | |
1885 | ep->com.tdev = h->rdev.t3cdev_p; | |
1886 | cm_id->add_ref(cm_id); | |
1887 | ep->com.cm_id = cm_id; | |
1888 | ep->backlog = backlog; | |
1889 | ep->com.local_addr = cm_id->local_addr; | |
1890 | ||
1891 | /* | |
1892 | * Allocate a server TID. | |
1893 | */ | |
1894 | ep->stid = cxgb3_alloc_stid(h->rdev.t3cdev_p, &t3c_client, ep); | |
1895 | if (ep->stid == -1) { | |
1896 | printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __FUNCTION__); |
1897 | err = -ENOMEM; | |
1898 | goto fail2; | |
1899 | } | |
1900 | ||
1901 | state_set(&ep->com, LISTEN); | |
1902 | err = listen_start(ep); | |
1903 | if (err) | |
1904 | goto fail3; | |
1905 | ||
1906 | /* wait for pass_open_rpl */ | |
1907 | wait_event(ep->com.waitq, ep->com.rpl_done); | |
1908 | err = ep->com.rpl_err; | |
1909 | if (!err) { | |
1910 | cm_id->provider_data = ep; | |
1911 | goto out; | |
1912 | } | |
1913 | fail3: | |
1914 | cxgb3_free_stid(ep->com.tdev, ep->stid); | |
1915 | fail2: | |
1b07db70 | 1916 | cm_id->rem_ref(cm_id); |
b038ced7 SW |
1917 | put_ep(&ep->com); |
1918 | fail1: | |
1919 | out: | |
1920 | return err; | |
1921 | } | |
1922 | ||
1923 | int iwch_destroy_listen(struct iw_cm_id *cm_id) | |
1924 | { | |
1925 | int err; | |
1926 | struct iwch_listen_ep *ep = to_listen_ep(cm_id); | |
1927 | ||
1928 | PDBG("%s ep %p\n", __FUNCTION__, ep); | |
1929 | ||
1930 | might_sleep(); | |
1931 | state_set(&ep->com, DEAD); | |
1932 | ep->com.rpl_done = 0; | |
1933 | ep->com.rpl_err = 0; | |
1934 | err = listen_stop(ep); | |
1935 | wait_event(ep->com.waitq, ep->com.rpl_done); | |
1936 | cxgb3_free_stid(ep->com.tdev, ep->stid); | |
1937 | err = ep->com.rpl_err; | |
1938 | cm_id->rem_ref(cm_id); | |
1939 | put_ep(&ep->com); | |
1940 | return err; | |
1941 | } | |
1942 | ||
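| /* |
|  * Begin connection teardown.  An abrupt disconnect sends an ABORT; a |
|  * graceful one sends a TCP half-close and starts the endpoint timer |
|  * so that a close that never completes is eventually aborted. |
|  */ |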
1943 | int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp) | |
1944 | { | |
1945 | int ret = 0; |
1946 | unsigned long flags; | |
1947 | int close = 0; | |
1948 | ||
1949 | spin_lock_irqsave(&ep->com.lock, flags); | |
1950 | ||
1951 | PDBG("%s ep %p state %s, abrupt %d\n", __FUNCTION__, ep, | |
1952 | states[ep->com.state], abrupt); | |
1953 | ||
1954 | if (ep->com.state == DEAD) { | |
1955 | PDBG("%s already dead ep %p\n", __FUNCTION__, ep); | |
1956 | goto out; | |
1957 | } | |
1958 | ||
1959 | if (abrupt) { | |
1960 | if (ep->com.state != ABORTING) { | |
1961 | ep->com.state = ABORTING; | |
1962 | close = 1; | |
1963 | } | |
1964 | goto out; | |
1965 | } | |
1966 | ||
1967 | switch (ep->com.state) { | |
1968 | case MPA_REQ_WAIT: | |
1969 | case MPA_REQ_SENT: | |
1970 | case MPA_REQ_RCVD: | |
1971 | case MPA_REP_SENT: | |
1972 | case FPDU_MODE: | |
42e31753 | 1973 | start_ep_timer(ep); |
b038ced7 SW |
1974 | ep->com.state = CLOSING; |
1975 | close = 1; | |
1976 | break; | |
1977 | case CLOSING: | |
b038ced7 SW |
1978 | ep->com.state = MORIBUND; |
1979 | close = 1; | |
1980 | break; | |
1981 | case MORIBUND: | |
1982 | break; | |
1983 | default: | |
1984 | BUG(); | |
1985 | break; | |
1986 | } | |
1987 | out: | |
1988 | spin_unlock_irqrestore(&ep->com.lock, flags); | |
1989 | if (close) { | |
1990 | if (abrupt) | |
1991 | ret = send_abort(ep, NULL, gfp); | |
1992 | else | |
1993 | ret = send_halfclose(ep, gfp); | |
1994 | } | |
1995 | return ret; | |
1996 | } | |
1997 | ||
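| /* |
|  * Route redirect callback: if this endpoint was using the old dst |
|  * entry, switch it over to the new dst and L2T entry.  Returns 1 if |
|  * the endpoint was redirected. |
|  */ |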
1998 | int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, | |
1999 | struct l2t_entry *l2t) | |
2000 | { | |
2001 | struct iwch_ep *ep = ctx; | |
2002 | ||
2003 | if (ep->dst != old) | |
2004 | return 0; | |
2005 | ||
2006 | PDBG("%s ep %p redirect to dst %p l2t %p\n", __FUNCTION__, ep, new, | |
2007 | l2t); | |
2008 | dst_hold(new); | |
2009 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | |
2010 | ep->l2t = l2t; | |
2011 | dst_release(old); | |
2012 | ep->dst = new; | |
2013 | return 1; | |
2014 | } | |
2015 | ||
2016 | /* | |
2017 | * All the CM events are handled on a work queue to have a safe context. | |
2018 | */ | |
2019 | static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |
2020 | { | |
2021 | struct iwch_ep_common *epc = ctx; | |
2022 | ||
2023 | get_ep(epc); | |
2024 | ||
2025 | /* | |
2026 | * Save ctx and tdev in the skb->cb area. | |
2027 | */ | |
2028 | *((void **) skb->cb) = ctx; | |
2029 | *((struct t3cdev **) (skb->cb + sizeof(void *))) = tdev; | |
2030 | ||
2031 | /* | |
2032 | * Queue the skb and schedule the worker thread. | |
2033 | */ | |
2034 | skb_queue_tail(&rxq, skb); | |
2035 | queue_work(workq, &skb_work); | |
2036 | return 0; | |
2037 | } | |
2038 | ||
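| /* |
|  * CPL_SET_TCB_RPL needs no work-queue deferral: just warn about an |
|  * unexpected status and let the caller free the buffer. |
|  */ |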
1ca19770 SW |
2039 | static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) |
2040 | { | |
2041 | struct cpl_set_tcb_rpl *rpl = cplhdr(skb); | |
2042 | ||
2043 | if (rpl->status != CPL_ERR_NONE) { | |
2044 | printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " | |
2045 | "for tid %u\n", rpl->status, GET_TID(rpl)); | |
2046 | } | |
2047 | return CPL_RET_BUF_DONE; | |
2048 | } | |
2049 | ||
b038ced7 SW |
2050 | int __init iwch_cm_init(void) |
2051 | { | |
2052 | skb_queue_head_init(&rxq); | |
2053 | ||
2054 | workq = create_singlethread_workqueue("iw_cxgb3"); | |
2055 | if (!workq) | |
2056 | return -ENOMEM; | |
2057 | ||
2058 | /* | |
2059 | * All upcalls from the T3 Core go to sched() to | |
2060 | * schedule the processing on a work queue. | |
2061 | */ | |
2062 | t3c_handlers[CPL_ACT_ESTABLISH] = sched; | |
2063 | t3c_handlers[CPL_ACT_OPEN_RPL] = sched; | |
2064 | t3c_handlers[CPL_RX_DATA] = sched; | |
2065 | t3c_handlers[CPL_TX_DMA_ACK] = sched; | |
2066 | t3c_handlers[CPL_ABORT_RPL_RSS] = sched; | |
2067 | t3c_handlers[CPL_ABORT_RPL] = sched; | |
2068 | t3c_handlers[CPL_PASS_OPEN_RPL] = sched; | |
2069 | t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched; | |
2070 | t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched; | |
2071 | t3c_handlers[CPL_PASS_ESTABLISH] = sched; | |
2072 | t3c_handlers[CPL_PEER_CLOSE] = sched; | |
2073 | t3c_handlers[CPL_CLOSE_CON_RPL] = sched; | |
2074 | t3c_handlers[CPL_ABORT_REQ_RSS] = sched; | |
2075 | t3c_handlers[CPL_RDMA_TERMINATE] = sched; | |
2076 | t3c_handlers[CPL_RDMA_EC_STATUS] = sched; | |
1ca19770 | 2077 | t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl; |
b038ced7 SW |
2078 | |
2079 | /* | |
2080 | * These are the real handlers that are called from a | |
2081 | * work queue. | |
2082 | */ | |
2083 | work_handlers[CPL_ACT_ESTABLISH] = act_establish; | |
2084 | work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl; | |
2085 | work_handlers[CPL_RX_DATA] = rx_data; | |
2086 | work_handlers[CPL_TX_DMA_ACK] = tx_ack; | |
2087 | work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl; | |
2088 | work_handlers[CPL_ABORT_RPL] = abort_rpl; | |
2089 | work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl; | |
2090 | work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl; | |
2091 | work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req; | |
2092 | work_handlers[CPL_PASS_ESTABLISH] = pass_establish; | |
2093 | work_handlers[CPL_PEER_CLOSE] = peer_close; | |
2094 | work_handlers[CPL_ABORT_REQ_RSS] = peer_abort; | |
2095 | work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl; | |
2096 | work_handlers[CPL_RDMA_TERMINATE] = terminate; | |
2097 | work_handlers[CPL_RDMA_EC_STATUS] = ec_status; | |
2098 | return 0; | |
2099 | } | |
2100 | ||
2101 | void __exit iwch_cm_term(void) | |
2102 | { | |
2103 | flush_workqueue(workq); | |
2104 | destroy_workqueue(workq); | |
2105 | } |