Commit | Line | Data |
---|---|---|
839fcaba MT |
1 | /* |
2 | * Copyright (c) 2006 Mellanox Technologies. All rights reserved | |
3 | * | |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | * | |
32 | * $Id$ | |
33 | */ | |
34 | ||
35 | #include <rdma/ib_cm.h> | |
36 | #include <rdma/ib_cache.h> | |
37 | #include <net/dst.h> | |
38 | #include <net/icmp.h> | |
39 | #include <linux/icmpv6.h> | |
518b1646 | 40 | #include <linux/delay.h> |
839fcaba MT |
41 | |
42 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA | |
43 | static int data_debug_level; | |
44 | ||
45 | module_param_named(cm_data_debug_level, data_debug_level, int, 0644); | |
46 | MODULE_PARM_DESC(cm_data_debug_level, | |
47 | "Enable data path debug tracing for connected mode if > 0"); | |
48 | #endif | |
49 | ||
50 | #include "ipoib.h" | |
51 | ||
52 | #define IPOIB_CM_IETF_ID 0x1000000000000000ULL | |
53 | ||
54 | #define IPOIB_CM_RX_UPDATE_TIME (256 * HZ) | |
55 | #define IPOIB_CM_RX_TIMEOUT (2 * 256 * HZ) | |
56 | #define IPOIB_CM_RX_DELAY (3 * 256 * HZ) | |
57 | #define IPOIB_CM_RX_UPDATE_MASK (0x3) | |
58 | ||
518b1646 MT |
59 | static struct ib_qp_attr ipoib_cm_err_attr = { |
60 | .qp_state = IB_QPS_ERR | |
61 | }; | |
62 | ||
63 | #define IPOIB_CM_RX_DRAIN_WRID 0x7fffffff | |
64 | ||
ec56dc0b MT |
65 | static struct ib_send_wr ipoib_cm_rx_drain_wr = { |
66 | .wr_id = IPOIB_CM_RX_DRAIN_WRID, | |
67 | .opcode = IB_WR_SEND, | |
518b1646 MT |
68 | }; |
69 | ||
839fcaba MT |
70 | static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, |
71 | struct ib_cm_event *event); | |
72 | ||
1812063b | 73 | static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags, |
839fcaba MT |
74 | u64 mapping[IPOIB_CM_RX_SG]) |
75 | { | |
76 | int i; | |
77 | ||
78 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); | |
79 | ||
1812063b | 80 | for (i = 0; i < frags; ++i) |
839fcaba MT |
81 | ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); |
82 | } | |
83 | ||
84 | static int ipoib_cm_post_receive(struct net_device *dev, int id) | |
85 | { | |
86 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
87 | struct ib_recv_wr *bad_wr; | |
88 | int i, ret; | |
89 | ||
1b524963 | 90 | priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV; |
839fcaba MT |
91 | |
92 | for (i = 0; i < IPOIB_CM_RX_SG; ++i) | |
93 | priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i]; | |
94 | ||
95 | ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr); | |
96 | if (unlikely(ret)) { | |
97 | ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret); | |
1812063b MT |
98 | ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, |
99 | priv->cm.srq_ring[id].mapping); | |
839fcaba MT |
100 | dev_kfree_skb_any(priv->cm.srq_ring[id].skb); |
101 | priv->cm.srq_ring[id].skb = NULL; | |
102 | } | |
103 | ||
104 | return ret; | |
105 | } | |
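A minimal userspace sketch of the `wr_id` tagging used above: the ring index is OR'ed with high flag bits so the completion handler can tell CM receives from other work. The `IPOIB_OP_*` values are defined in ipoib.h, which is not part of this listing, so the constants below are assumptions.

```c
#include <stdint.h>
#include <stdio.h>

#define IPOIB_OP_RECV (1UL << 31)	/* assumed value from ipoib.h */
#define IPOIB_OP_CM   (1UL << 30)	/* assumed value from ipoib.h */

int main(void)
{
	/* Tag ring slot 42 the way ipoib_cm_post_receive() does. */
	uint64_t wr_id = 42 | IPOIB_OP_CM | IPOIB_OP_RECV;

	/* The completion handler strips the flags to recover the index. */
	unsigned int id = wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);

	printf("tagged 0x%llx -> index %u\n", (unsigned long long)wr_id, id);
	return 0;
}
```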
106 | ||
1812063b MT |
107 | static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags, |
108 | u64 mapping[IPOIB_CM_RX_SG]) | |
839fcaba MT |
109 | { |
110 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
111 | struct sk_buff *skb; | |
112 | int i; | |
113 | ||
114 | skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12); | |
115 | if (unlikely(!skb)) | |
1812063b | 116 | return NULL; |
839fcaba MT |
117 | |
118 | /* | |
119 | * IPoIB adds a 4 byte header. So we need 12 more bytes to align the | |
120 | * IP header to a multiple of 16. | |
121 | */ | |
122 | skb_reserve(skb, 12); | |
123 | ||
124 | mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, | |
125 | DMA_FROM_DEVICE); | |
126 | if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) { | |
127 | dev_kfree_skb_any(skb); | |
1812063b | 128 | return NULL; |
839fcaba MT |
129 | } |
130 | ||
1812063b | 131 | for (i = 0; i < frags; i++) { |
839fcaba MT |
132 | struct page *page = alloc_page(GFP_ATOMIC); |
133 | ||
134 | if (!page) | |
135 | goto partial_error; | |
136 | skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE); | |
137 | ||
138 | mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page, | |
6371ea3d | 139 | 0, PAGE_SIZE, DMA_FROM_DEVICE); |
839fcaba MT |
140 | if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1]))) |
141 | goto partial_error; | |
142 | } | |
143 | ||
144 | priv->cm.srq_ring[id].skb = skb; | |
1812063b | 145 | return skb; |
839fcaba MT |
146 | |
147 | partial_error: | |
148 | ||
149 | ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); | |
150 | ||
841adfca RC |
151 | for (; i > 0; --i) |
152 | ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE); | |
839fcaba | 153 | |
8a2e65f8 | 154 | dev_kfree_skb_any(skb); |
1812063b | 155 | return NULL; |
839fcaba MT |
156 | } |
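The `skb_reserve(skb, 12)` above exists so that, once the 4-byte IPoIB encapsulation header is prepended, the IP header starts at a 16-byte boundary. A one-line check of that arithmetic:

```c
#include <assert.h>

int main(void)
{
	int reserve = 12;	/* bytes reserved in the fresh skb */
	int encap_len = 4;	/* IPOIB_ENCAP_LEN */

	/* header space consumed before the IP header is 16 bytes */
	assert((reserve + encap_len) % 16 == 0);
	return 0;
}
```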
157 | ||
518b1646 MT |
158 | static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv* priv) |
159 | { | |
ec56dc0b MT |
160 | struct ib_send_wr *bad_wr; |
161 | struct ipoib_cm_rx *p; | |
518b1646 | 162 | |
ec56dc0b | 163 | /* We only reserved 1 extra slot in CQ for drain WRs, so |
518b1646 MT |
164 | * make sure we have at most 1 outstanding WR. */ |
165 | if (list_empty(&priv->cm.rx_flush_list) || | |
166 | !list_empty(&priv->cm.rx_drain_list)) | |
167 | return; | |
168 | ||
ec56dc0b MT |
169 | /* |
170 | * QPs on flush list are in error state. This way, a "flush |
171 | * error" WC will be immediately generated for each WR we post. | |
172 | */ | |
173 | p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list); | |
174 | if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr)) | |
175 | ipoib_warn(priv, "failed to post drain wr\n"); | |
518b1646 MT |
176 | |
177 | list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list); | |
178 | } | |
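The drain trick: a QP already in the error state completes any posted send immediately with a flush error, so posting one sentinel send WR and waiting for its completion proves that every earlier receive completion for the flushed QPs has been polled from the shared CQ. A sketch of the sentinel arithmetic, with the `IPOIB_OP_*` flag values assumed as above:

```c
#include <stdio.h>

#define IPOIB_OP_RECV		(1UL << 31)	/* assumed */
#define IPOIB_OP_CM		(1UL << 30)	/* assumed */
#define IPOIB_CM_RX_DRAIN_WRID	0x7fffffff

int main(void)
{
	/* ipoib_cm_handle_rx_wc() strips the flag bits before comparing */
	unsigned long stripped =
		IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	unsigned int recvq_size = 256;	/* example ipoib_recvq_size */

	printf("sentinel after stripping: %#lx (collides with ring: %s)\n",
	       stripped, stripped < recvq_size ? "yes" : "no");
	return 0;
}
```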
179 | ||
180 | static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx) | |
181 | { | |
182 | struct ipoib_cm_rx *p = ctx; | |
183 | struct ipoib_dev_priv *priv = netdev_priv(p->dev); | |
184 | unsigned long flags; | |
185 | ||
186 | if (event->event != IB_EVENT_QP_LAST_WQE_REACHED) | |
187 | return; | |
188 | ||
189 | spin_lock_irqsave(&priv->lock, flags); | |
190 | list_move(&p->list, &priv->cm.rx_flush_list); | |
191 | p->state = IPOIB_CM_RX_FLUSH; | |
192 | ipoib_cm_start_rx_drain(priv); | |
193 | spin_unlock_irqrestore(&priv->lock, flags); | |
194 | } | |
195 | ||
839fcaba MT |
196 | static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev, |
197 | struct ipoib_cm_rx *p) | |
198 | { | |
199 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
200 | struct ib_qp_init_attr attr = { | |
518b1646 | 201 | .event_handler = ipoib_cm_rx_event_handler, |
ec56dc0b | 202 | .send_cq = priv->cq, /* For drain WR */ |
839fcaba MT |
203 | .recv_cq = priv->cq, |
204 | .srq = priv->cm.srq, | |
ec56dc0b | 205 | .cap.max_send_wr = 1, /* For drain WR */ |
839fcaba MT |
206 | .cap.max_send_sge = 1, /* FIXME: 0 seems not to work */ |
207 | .sq_sig_type = IB_SIGNAL_ALL_WR, | |
208 | .qp_type = IB_QPT_RC, | |
209 | .qp_context = p, | |
210 | }; | |
211 | return ib_create_qp(priv->pd, &attr); | |
212 | } | |
213 | ||
214 | static int ipoib_cm_modify_rx_qp(struct net_device *dev, | |
215 | struct ib_cm_id *cm_id, struct ib_qp *qp, | |
216 | unsigned psn) | |
217 | { | |
218 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
219 | struct ib_qp_attr qp_attr; | |
220 | int qp_attr_mask, ret; | |
221 | ||
222 | qp_attr.qp_state = IB_QPS_INIT; | |
223 | ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); | |
224 | if (ret) { | |
225 | ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret); | |
226 | return ret; | |
227 | } | |
228 | ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); | |
229 | if (ret) { | |
230 | ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret); | |
231 | return ret; | |
232 | } | |
233 | qp_attr.qp_state = IB_QPS_RTR; | |
234 | ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); | |
235 | if (ret) { | |
236 | ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); | |
237 | return ret; | |
238 | } | |
239 | qp_attr.rq_psn = psn; | |
240 | ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); | |
241 | if (ret) { | |
242 | ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); | |
243 | return ret; | |
244 | } | |
ec56dc0b MT |
245 | |
246 | /* | |
247 | * Current Mellanox HCA firmware won't generate completions | |
248 | * with error for drain WRs unless the QP has been moved to | |
249 | * RTS first. This work-around leaves a window where a QP has | |
250 | * moved to error asynchronously, but this will eventually get | |
251 | * fixed in firmware, so let's not error out if modify QP | |
252 | * fails. | |
253 | */ | |
254 | qp_attr.qp_state = IB_QPS_RTS; | |
255 | ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); | |
256 | if (ret) { | |
257 | ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); | |
258 | return 0; | |
259 | } | |
260 | ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); | |
261 | if (ret) { | |
262 | ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); | |
263 | return 0; | |
264 | } | |
265 | ||
839fcaba MT |
266 | return 0; |
267 | } | |
268 | ||
269 | static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id, | |
270 | struct ib_qp *qp, struct ib_cm_req_event_param *req, | |
271 | unsigned psn) | |
272 | { | |
273 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
274 | struct ipoib_cm_data data = {}; | |
275 | struct ib_cm_rep_param rep = {}; | |
276 | ||
277 | data.qpn = cpu_to_be32(priv->qp->qp_num); | |
278 | data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE); | |
279 | ||
280 | rep.private_data = &data; | |
281 | rep.private_data_len = sizeof data; | |
282 | rep.flow_control = 0; | |
283 | rep.rnr_retry_count = req->rnr_retry_count; | |
839fcaba MT |
284 | rep.srq = 1; |
285 | rep.qp_num = qp->qp_num; | |
286 | rep.starting_psn = psn; | |
287 | return ib_send_cm_rep(cm_id, &rep); | |
288 | } | |
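Both REQ and REP carry a small private-data blob with the sender's datagram QPN and its CM buffer size, in network byte order. A userspace sketch of the encoding; the field layout mirrors `struct ipoib_cm_data` from ipoib.h and the buffer size is an example value, so treat both as assumptions.

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct ipoib_cm_data {
	uint32_t qpn;	/* big-endian on the wire */
	uint32_t mtu;	/* big-endian on the wire */
};

int main(void)
{
	struct ipoib_cm_data data = {
		.qpn = htonl(0x48),	/* example datagram QPN */
		.mtu = htonl(65540),	/* example IPOIB_CM_BUF_SIZE */
	};

	/* the peer decodes with be32_to_cpu(), i.e. ntohl() here */
	printf("peer reads qpn=0x%x mtu=%u\n",
	       (unsigned)ntohl(data.qpn), (unsigned)ntohl(data.mtu));
	return 0;
}
```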
289 | ||
290 | static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |
291 | { | |
292 | struct net_device *dev = cm_id->context; | |
293 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
294 | struct ipoib_cm_rx *p; | |
839fcaba MT |
295 | unsigned psn; |
296 | int ret; | |
297 | ||
298 | ipoib_dbg(priv, "REQ arrived\n"); | |
299 | p = kzalloc(sizeof *p, GFP_KERNEL); | |
300 | if (!p) | |
301 | return -ENOMEM; | |
302 | p->dev = dev; | |
303 | p->id = cm_id; | |
3ec7393a MT |
304 | cm_id->context = p; |
305 | p->state = IPOIB_CM_RX_LIVE; | |
306 | p->jiffies = jiffies; | |
307 | INIT_LIST_HEAD(&p->list); | |
308 | ||
839fcaba MT |
309 | p->qp = ipoib_cm_create_rx_qp(dev, p); |
310 | if (IS_ERR(p->qp)) { | |
311 | ret = PTR_ERR(p->qp); | |
312 | goto err_qp; | |
313 | } | |
314 | ||
315 | psn = random32() & 0xffffff; | |
316 | ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn); | |
317 | if (ret) | |
318 | goto err_modify; | |
319 | ||
3ec7393a MT |
320 | spin_lock_irq(&priv->lock); |
321 | queue_delayed_work(ipoib_workqueue, | |
322 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); | |
323 | /* Add this entry to passive ids list head, but do not re-add it | |
324 | * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */ | |
325 | p->jiffies = jiffies; | |
326 | if (p->state == IPOIB_CM_RX_LIVE) | |
327 | list_move(&p->list, &priv->cm.passive_ids); | |
328 | spin_unlock_irq(&priv->lock); | |
329 | ||
839fcaba MT |
330 | ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn); |
331 | if (ret) { | |
332 | ipoib_warn(priv, "failed to send REP: %d\n", ret); | |
3ec7393a MT |
333 | if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) |
334 | ipoib_warn(priv, "unable to move qp to error state\n"); | |
839fcaba | 335 | } |
839fcaba MT |
336 | return 0; |
337 | ||
839fcaba MT |
338 | err_modify: |
339 | ib_destroy_qp(p->qp); | |
340 | err_qp: | |
341 | kfree(p); | |
342 | return ret; | |
343 | } | |
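The `random32() & 0xffffff` above reflects that IB packet sequence numbers are 24 bits wide; the mask keeps the random starting PSN in range. A trivial userspace equivalent, with `rand()` standing in for the kernel's `random32()`:

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int psn = (unsigned int)rand() & 0xffffff;

	printf("starting psn = 0x%06x (always < 1 << 24)\n", psn);
	return 0;
}
```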
344 | ||
345 | static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id, | |
346 | struct ib_cm_event *event) | |
347 | { | |
348 | struct ipoib_cm_rx *p; | |
349 | struct ipoib_dev_priv *priv; | |
839fcaba MT |
350 | |
351 | switch (event->event) { | |
352 | case IB_CM_REQ_RECEIVED: | |
353 | return ipoib_cm_req_handler(cm_id, event); | |
354 | case IB_CM_DREQ_RECEIVED: | |
355 | p = cm_id->context; | |
356 | ib_send_cm_drep(cm_id, NULL, 0); | |
357 | /* Fall through */ | |
358 | case IB_CM_REJ_RECEIVED: | |
359 | p = cm_id->context; | |
360 | priv = netdev_priv(p->dev); | |
518b1646 MT |
361 | if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE)) |
362 | ipoib_warn(priv, "unable to move qp to error state\n"); | |
363 | /* Fall through */ | |
839fcaba MT |
364 | default: |
365 | return 0; | |
366 | } | |
367 | } | |
368 | /* Adjust length of skb with fragments to match received data */ | |
369 | static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, | |
1812063b | 370 | unsigned int length, struct sk_buff *toskb) |
839fcaba MT |
371 | { |
372 | int i, num_frags; | |
373 | unsigned int size; | |
374 | ||
375 | /* put header into skb */ | |
376 | size = min(length, hdr_space); | |
377 | skb->tail += size; | |
378 | skb->len += size; | |
379 | length -= size; | |
380 | ||
381 | num_frags = skb_shinfo(skb)->nr_frags; | |
382 | for (i = 0; i < num_frags; i++) { | |
383 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
384 | ||
385 | if (length == 0) { | |
386 | /* don't need this page */ | |
1812063b | 387 | skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE); |
839fcaba MT |
388 | --skb_shinfo(skb)->nr_frags; |
389 | } else { | |
390 | size = min(length, (unsigned) PAGE_SIZE); | |
391 | ||
392 | frag->size = size; | |
393 | skb->data_len += size; | |
394 | skb->truesize += size; | |
395 | skb->len += size; | |
396 | length -= size; | |
397 | } | |
398 | } | |
399 | } | |
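skb_put_frags() consumes the received byte count first from the linear header area and then a page at a time from the fragments, donating any untouched trailing pages to the replacement skb. A userspace model of that split (the 1024-byte header space is an example; the real value is IPOIB_CM_HEAD_SIZE):

```c
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int hdr_space = 1024;	/* example IPOIB_CM_HEAD_SIZE */
	unsigned int length = 6000;	/* example wc->byte_len */
	unsigned int size, i;

	size = length < hdr_space ? length : hdr_space;
	printf("head: %u bytes\n", size);
	length -= size;

	for (i = 0; length > 0; i++) {
		size = length < PAGE_SIZE ? length : PAGE_SIZE;
		printf("frag %u: %u bytes\n", i, size);
		length -= size;
	}
	/* fragments beyond i would be handed to the fresh skb untouched */
	return 0;
}
```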
400 | ||
401 | void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |
402 | { | |
403 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
1b524963 | 404 | unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV); |
1812063b | 405 | struct sk_buff *skb, *newskb; |
839fcaba MT |
406 | struct ipoib_cm_rx *p; |
407 | unsigned long flags; | |
408 | u64 mapping[IPOIB_CM_RX_SG]; | |
1812063b | 409 | int frags; |
839fcaba | 410 | |
a89875fc RD |
411 | ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n", |
412 | wr_id, wc->status); | |
839fcaba MT |
413 | |
414 | if (unlikely(wr_id >= ipoib_recvq_size)) { | |
1b524963 | 415 | if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) { |
518b1646 MT |
416 | spin_lock_irqsave(&priv->lock, flags); |
417 | list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list); | |
418 | ipoib_cm_start_rx_drain(priv); | |
419 | queue_work(ipoib_workqueue, &priv->cm.rx_reap_task); | |
420 | spin_unlock_irqrestore(&priv->lock, flags); | |
421 | } else | |
422 | ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n", | |
423 | wr_id, ipoib_recvq_size); | |
839fcaba MT |
424 | return; |
425 | } | |
426 | ||
427 | skb = priv->cm.srq_ring[wr_id].skb; | |
428 | ||
429 | if (unlikely(wc->status != IB_WC_SUCCESS)) { | |
430 | ipoib_dbg(priv, "cm recv error " | |
431 | "(status=%d, wrid=%d vend_err %x)\n", | |
432 | wc->status, wr_id, wc->vendor_err); | |
de903512 | 433 | ++dev->stats.rx_dropped; |
839fcaba MT |
434 | goto repost; |
435 | } | |
436 | ||
fd312561 | 437 | if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) { |
839fcaba | 438 | p = wc->qp->qp_context; |
d6ef7d68 | 439 | if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) { |
839fcaba MT |
440 | spin_lock_irqsave(&priv->lock, flags); |
441 | p->jiffies = jiffies; | |
518b1646 MT |
442 | /* Move this entry to list head, but do not re-add it |
443 | * if it has been moved out of list. */ | |
444 | if (p->state == IPOIB_CM_RX_LIVE) | |
839fcaba MT |
445 | list_move(&p->list, &priv->cm.passive_ids); |
446 | spin_unlock_irqrestore(&priv->lock, flags); | |
839fcaba MT |
447 | } |
448 | } | |
449 | ||
1812063b MT |
450 | frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len, |
451 | (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE; | |
452 | ||
453 | newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping); | |
454 | if (unlikely(!newskb)) { | |
839fcaba MT |
455 | /* |
456 | * If we can't allocate a new RX buffer, dump | |
457 | * this packet and reuse the old buffer. | |
458 | */ | |
459 | ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id); | |
de903512 | 460 | ++dev->stats.rx_dropped; |
839fcaba MT |
461 | goto repost; |
462 | } | |
463 | ||
1812063b MT |
464 | ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping); |
465 | memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping); | |
839fcaba MT |
466 | |
467 | ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", | |
468 | wc->byte_len, wc->slid); | |
469 | ||
1812063b | 470 | skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb); |
839fcaba MT |
471 | |
472 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; | |
459a98ed | 473 | skb_reset_mac_header(skb); |
839fcaba MT |
474 | skb_pull(skb, IPOIB_ENCAP_LEN); |
475 | ||
476 | dev->last_rx = jiffies; | |
de903512 RD |
477 | ++dev->stats.rx_packets; |
478 | dev->stats.rx_bytes += skb->len; | |
839fcaba MT |
479 | |
480 | skb->dev = dev; | |
481 | /* XXX get correct PACKET_ type here */ | |
482 | skb->pkt_type = PACKET_HOST; | |
8d1cc86a | 483 | netif_receive_skb(skb); |
839fcaba MT |
484 | |
485 | repost: | |
486 | if (unlikely(ipoib_cm_post_receive(dev, wr_id))) | |
487 | ipoib_warn(priv, "ipoib_cm_post_receive failed " | |
488 | "for buf %d\n", wr_id); | |
489 | } | |
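The `frags` computation in the completion handler asks how many full pages the payload spilled into after the linear header. A minimal sketch of that arithmetic; `IPOIB_CM_HEAD_SIZE` is again an assumed example value:

```c
#include <stdio.h>

#define PAGE_SIZE	   4096u
#define PAGE_ALIGN(x)	   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define IPOIB_CM_HEAD_SIZE 1024u	/* assumed example */

static unsigned int frags_for(unsigned int byte_len)
{
	unsigned int head = byte_len < IPOIB_CM_HEAD_SIZE ?
			    byte_len : IPOIB_CM_HEAD_SIZE;

	return PAGE_ALIGN(byte_len - head) / PAGE_SIZE;
}

int main(void)
{
	/* 0, 1 and 2 pages respectively */
	printf("%u %u %u\n", frags_for(512), frags_for(1025),
	       frags_for(1024 + 2 * 4096));
	return 0;
}
```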
490 | ||
491 | static inline int post_send(struct ipoib_dev_priv *priv, | |
492 | struct ipoib_cm_tx *tx, | |
493 | unsigned int wr_id, | |
494 | u64 addr, int len) | |
495 | { | |
496 | struct ib_send_wr *bad_wr; | |
497 | ||
498 | priv->tx_sge.addr = addr; | |
499 | priv->tx_sge.length = len; | |
500 | ||
1b524963 | 501 | priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; |
839fcaba MT |
502 | |
503 | return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); | |
504 | } | |
505 | ||
506 | void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) | |
507 | { | |
508 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
509 | struct ipoib_tx_buf *tx_req; | |
510 | u64 addr; | |
511 | ||
512 | if (unlikely(skb->len > tx->mtu)) { | |
513 | ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", | |
514 | skb->len, tx->mtu); | |
de903512 RD |
515 | ++dev->stats.tx_dropped; |
516 | ++dev->stats.tx_errors; | |
77d8e1ef | 517 | ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN); |
839fcaba MT |
518 | return; |
519 | } | |
520 | ||
521 | ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n", | |
522 | tx->tx_head, skb->len, tx->qp->qp_num); | |
523 | ||
524 | /* | |
525 | * We put the skb into the tx_ring _before_ we call post_send() | |
526 | * because it's entirely possible that the completion handler will | |
527 | * run before we execute anything after the post_send(). That | |
528 | * means we have to make sure everything is properly recorded and | |
529 | * our state is consistent before we call post_send(). | |
530 | */ | |
531 | tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)]; | |
532 | tx_req->skb = skb; | |
533 | addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE); | |
534 | if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { | |
de903512 | 535 | ++dev->stats.tx_errors; |
839fcaba MT |
536 | dev_kfree_skb_any(skb); |
537 | return; | |
538 | } | |
539 | ||
540 | tx_req->mapping = addr; | |
541 | ||
542 | if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), | |
543 | addr, skb->len))) { | |
544 | ipoib_warn(priv, "post_send failed\n"); | |
de903512 | 545 | ++dev->stats.tx_errors; |
839fcaba MT |
546 | ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); |
547 | dev_kfree_skb_any(skb); | |
548 | } else { | |
549 | dev->trans_start = jiffies; | |
550 | ++tx->tx_head; | |
551 | ||
1b524963 | 552 | if (++priv->tx_outstanding == ipoib_sendq_size) { |
839fcaba MT |
553 | ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", |
554 | tx->qp->qp_num); | |
555 | netif_stop_queue(dev); | |
839fcaba MT |
556 | } |
557 | } | |
558 | } | |
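`tx->tx_head & (ipoib_sendq_size - 1)` works because the send queue size is a power of two: the AND wraps a monotonically increasing counter into a ring slot without a modulo. For example:

```c
#include <stdio.h>

int main(void)
{
	unsigned int sendq_size = 128;	/* example; must be a power of two */
	unsigned int head;

	for (head = 126; head < 131; head++)
		printf("head %u -> slot %u\n", head, head & (sendq_size - 1));
	return 0;
}
```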
559 | ||
1b524963 | 560 | void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) |
839fcaba MT |
561 | { |
562 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
1b524963 MT |
563 | struct ipoib_cm_tx *tx = wc->qp->qp_context; |
564 | unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM; | |
839fcaba MT |
565 | struct ipoib_tx_buf *tx_req; |
566 | unsigned long flags; | |
567 | ||
a89875fc RD |
568 | ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n", |
569 | wr_id, wc->status); | |
839fcaba MT |
570 | |
571 | if (unlikely(wr_id >= ipoib_sendq_size)) { | |
572 | ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n", | |
573 | wr_id, ipoib_sendq_size); | |
574 | return; | |
575 | } | |
576 | ||
577 | tx_req = &tx->tx_ring[wr_id]; | |
578 | ||
579 | ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); | |
580 | ||
581 | /* FIXME: is this right? Shouldn't we only increment on success? */ | |
de903512 RD |
582 | ++dev->stats.tx_packets; |
583 | dev->stats.tx_bytes += tx_req->skb->len; | |
839fcaba MT |
584 | |
585 | dev_kfree_skb_any(tx_req->skb); | |
586 | ||
587 | spin_lock_irqsave(&priv->tx_lock, flags); | |
588 | ++tx->tx_tail; | |
1b524963 MT |
589 | if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && |
590 | netif_queue_stopped(dev) && | |
591 | test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) | |
839fcaba | 592 | netif_wake_queue(dev); |
839fcaba MT |
593 | |
594 | if (wc->status != IB_WC_SUCCESS && | |
595 | wc->status != IB_WC_WR_FLUSH_ERR) { | |
596 | struct ipoib_neigh *neigh; | |
597 | ||
598 | ipoib_dbg(priv, "failed cm send event " | |
599 | "(status=%d, wrid=%d vend_err %x)\n", | |
600 | wc->status, wr_id, wc->vendor_err); | |
601 | ||
602 | spin_lock(&priv->lock); | |
603 | neigh = tx->neigh; | |
604 | ||
605 | if (neigh) { | |
606 | neigh->cm = NULL; | |
607 | list_del(&neigh->list); | |
608 | if (neigh->ah) | |
609 | ipoib_put_ah(neigh->ah); | |
610 | ipoib_neigh_free(dev, neigh); | |
611 | ||
612 | tx->neigh = NULL; | |
613 | } | |
614 | ||
839fcaba MT |
615 | if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { |
616 | list_move(&tx->list, &priv->cm.reap_list); | |
617 | queue_work(ipoib_workqueue, &priv->cm.reap_task); | |
618 | } | |
619 | ||
620 | clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); | |
621 | ||
622 | spin_unlock(&priv->lock); | |
623 | } | |
624 | ||
625 | spin_unlock_irqrestore(&priv->tx_lock, flags); | |
626 | } | |
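The stop/wake pair above is deliberately asymmetric: the net queue stops when `tx_outstanding` reaches the ring size but is only woken once it has drained back to half, which avoids bouncing the queue on every completion. A compressed model of that hysteresis:

```c
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int sendq_size = 8, outstanding = 0;
	bool stopped = false;
	int i;

	for (i = 0; i < 8; i++)			/* post until the ring fills */
		if (++outstanding == sendq_size)
			stopped = true;

	for (i = 0; i < 8; i++)			/* reap completions */
		if (--outstanding == sendq_size >> 1 && stopped) {
			stopped = false;
			printf("queue woken with %u outstanding\n",
			       outstanding);
		}
	return 0;
}
```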
627 | ||
839fcaba MT |
628 | int ipoib_cm_dev_open(struct net_device *dev) |
629 | { | |
630 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
631 | int ret; | |
632 | ||
633 | if (!IPOIB_CM_SUPPORTED(dev->dev_addr)) | |
634 | return 0; | |
635 | ||
636 | priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev); | |
637 | if (IS_ERR(priv->cm.id)) { | |
638 | printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name); | |
347fcfbe | 639 | ret = PTR_ERR(priv->cm.id); |
518b1646 | 640 | goto err_cm; |
839fcaba MT |
641 | } |
642 | ||
643 | ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), | |
644 | 0, NULL); | |
645 | if (ret) { | |
646 | printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name, | |
647 | IPOIB_CM_IETF_ID | priv->qp->qp_num); | |
518b1646 | 648 | goto err_listen; |
839fcaba | 649 | } |
518b1646 | 650 | |
839fcaba | 651 | return 0; |
518b1646 MT |
652 | |
653 | err_listen: | |
654 | ib_destroy_cm_id(priv->cm.id); | |
655 | err_cm: | |
656 | priv->cm.id = NULL; | |
518b1646 | 657 | return ret; |
839fcaba MT |
658 | } |
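The listen service ID is the IETF-assigned IPoIB CM prefix with the datagram QPN in the low bits, which is also how `ipoib_cm_send_req()` later addresses the remote side. A one-liner showing the composition:

```c
#include <stdint.h>
#include <stdio.h>

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

int main(void)
{
	uint32_t qpn = 0x48;	/* example datagram QPN */
	uint64_t service_id = IPOIB_CM_IETF_ID | qpn;

	printf("service id: 0x%016llx\n", (unsigned long long)service_id);
	return 0;
}
```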
659 | ||
660 | void ipoib_cm_dev_stop(struct net_device *dev) | |
661 | { | |
662 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
518b1646 MT |
663 | struct ipoib_cm_rx *p, *n; |
664 | unsigned long begin; | |
665 | LIST_HEAD(list); | |
666 | int ret; | |
839fcaba | 667 | |
347fcfbe | 668 | if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id) |
839fcaba MT |
669 | return; |
670 | ||
671 | ib_destroy_cm_id(priv->cm.id); | |
347fcfbe | 672 | priv->cm.id = NULL; |
518b1646 | 673 | |
37aebbde | 674 | spin_lock_irq(&priv->lock); |
839fcaba MT |
675 | while (!list_empty(&priv->cm.passive_ids)) { |
676 | p = list_entry(priv->cm.passive_ids.next, typeof(*p), list); | |
518b1646 MT |
677 | list_move(&p->list, &priv->cm.rx_error_list); |
678 | p->state = IPOIB_CM_RX_ERROR; | |
37aebbde | 679 | spin_unlock_irq(&priv->lock); |
518b1646 MT |
680 | ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); |
681 | if (ret) | |
682 | ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); | |
683 | spin_lock_irq(&priv->lock); | |
684 | } | |
685 | ||
686 | /* Wait for all RX to be drained */ | |
687 | begin = jiffies; | |
688 | ||
689 | while (!list_empty(&priv->cm.rx_error_list) || | |
690 | !list_empty(&priv->cm.rx_flush_list) || | |
691 | !list_empty(&priv->cm.rx_drain_list)) { | |
8fd357a6 | 692 | if (time_after(jiffies, begin + 5 * HZ)) { |
518b1646 MT |
693 | ipoib_warn(priv, "RX drain timing out\n"); |
694 | ||
695 | /* | |
696 | * assume the HW is wedged and just free up everything. | |
697 | */ | |
698 | list_splice_init(&priv->cm.rx_flush_list, &list); | |
699 | list_splice_init(&priv->cm.rx_error_list, &list); | |
700 | list_splice_init(&priv->cm.rx_drain_list, &list); | |
701 | break; | |
702 | } | |
703 | spin_unlock_irq(&priv->lock); | |
704 | msleep(1); | |
2dfbfc37 | 705 | ipoib_drain_cq(dev); |
518b1646 MT |
706 | spin_lock_irq(&priv->lock); |
707 | } | |
708 | ||
709 | list_splice_init(&priv->cm.rx_reap_list, &list); | |
710 | ||
711 | spin_unlock_irq(&priv->lock); | |
712 | ||
713 | list_for_each_entry_safe(p, n, &list, list) { | |
839fcaba MT |
714 | ib_destroy_cm_id(p->id); |
715 | ib_destroy_qp(p->qp); | |
716 | kfree(p); | |
839fcaba | 717 | } |
839fcaba MT |
718 | |
719 | cancel_delayed_work(&priv->cm.stale_task); | |
720 | } | |
721 | ||
722 | static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |
723 | { | |
724 | struct ipoib_cm_tx *p = cm_id->context; | |
725 | struct ipoib_dev_priv *priv = netdev_priv(p->dev); | |
726 | struct ipoib_cm_data *data = event->private_data; | |
727 | struct sk_buff_head skqueue; | |
728 | struct ib_qp_attr qp_attr; | |
729 | int qp_attr_mask, ret; | |
730 | struct sk_buff *skb; | |
839fcaba MT |
731 | |
732 | p->mtu = be32_to_cpu(data->mtu); | |
733 | ||
82c3aca6 MT |
734 | if (p->mtu <= IPOIB_ENCAP_LEN) { |
735 | ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n", | |
736 | p->mtu, IPOIB_ENCAP_LEN); | |
839fcaba MT |
737 | return -EINVAL; |
738 | } | |
739 | ||
740 | qp_attr.qp_state = IB_QPS_RTR; | |
741 | ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); | |
742 | if (ret) { | |
743 | ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret); | |
744 | return ret; | |
745 | } | |
746 | ||
747 | qp_attr.rq_psn = 0 /* FIXME */; | |
748 | ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask); | |
749 | if (ret) { | |
750 | ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret); | |
751 | return ret; | |
752 | } | |
753 | ||
754 | qp_attr.qp_state = IB_QPS_RTS; | |
755 | ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); | |
756 | if (ret) { | |
757 | ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret); | |
758 | return ret; | |
759 | } | |
760 | ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask); | |
761 | if (ret) { | |
762 | ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret); | |
763 | return ret; | |
764 | } | |
765 | ||
766 | skb_queue_head_init(&skqueue); | |
767 | ||
37aebbde | 768 | spin_lock_irq(&priv->lock); |
839fcaba MT |
769 | set_bit(IPOIB_FLAG_OPER_UP, &p->flags); |
770 | if (p->neigh) | |
771 | while ((skb = __skb_dequeue(&p->neigh->queue))) | |
772 | __skb_queue_tail(&skqueue, skb); | |
37aebbde | 773 | spin_unlock_irq(&priv->lock); |
839fcaba MT |
774 | |
775 | while ((skb = __skb_dequeue(&skqueue))) { | |
776 | skb->dev = p->dev; | |
777 | if (dev_queue_xmit(skb)) | |
778 | ipoib_warn(priv, "dev_queue_xmit failed " | |
779 | "to requeue packet\n"); | |
780 | } | |
781 | ||
782 | ret = ib_send_cm_rtu(cm_id, NULL, 0); | |
783 | if (ret) { | |
784 | ipoib_warn(priv, "failed to send RTU: %d\n", ret); | |
785 | return ret; | |
786 | } | |
787 | return 0; | |
788 | } | |
789 | ||
1b524963 | 790 | static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx) |
839fcaba MT |
791 | { |
792 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
ede6bc04 | 793 | struct ib_qp_init_attr attr = { |
1b524963 | 794 | .send_cq = priv->cq, |
ede6bc04 DB |
795 | .recv_cq = priv->cq, |
796 | .srq = priv->cm.srq, | |
797 | .cap.max_send_wr = ipoib_sendq_size, | |
798 | .cap.max_send_sge = 1, | |
799 | .sq_sig_type = IB_SIGNAL_ALL_WR, | |
800 | .qp_type = IB_QPT_RC, | |
1b524963 | 801 | .qp_context = tx |
ede6bc04 DB |
802 | }; |
803 | ||
839fcaba MT |
804 | return ib_create_qp(priv->pd, &attr); |
805 | } | |
806 | ||
807 | static int ipoib_cm_send_req(struct net_device *dev, | |
808 | struct ib_cm_id *id, struct ib_qp *qp, | |
809 | u32 qpn, | |
810 | struct ib_sa_path_rec *pathrec) | |
811 | { | |
812 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
813 | struct ipoib_cm_data data = {}; | |
814 | struct ib_cm_req_param req = {}; | |
815 | ||
816 | data.qpn = cpu_to_be32(priv->qp->qp_num); | |
817 | data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE); | |
818 | ||
819 | req.primary_path = pathrec; | |
820 | req.alternate_path = NULL; | |
821 | req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn); | |
822 | req.qp_num = qp->qp_num; | |
823 | req.qp_type = qp->qp_type; | |
824 | req.private_data = &data; | |
825 | req.private_data_len = sizeof data; | |
826 | req.flow_control = 0; | |
827 | ||
828 | req.starting_psn = 0; /* FIXME */ | |
829 | ||
830 | /* | |
831 | * Pick some arbitrary defaults here; we could make these | |
832 | * module parameters if anyone cared about setting them. | |
833 | */ | |
834 | req.responder_resources = 4; | |
835 | req.remote_cm_response_timeout = 20; | |
836 | req.local_cm_response_timeout = 20; | |
837 | req.retry_count = 0; /* RFC draft warns against retries */ | |
838 | req.rnr_retry_count = 0; /* RFC draft warns against retries */ | |
839 | req.max_cm_retries = 15; | |
840 | req.srq = 1; | |
841 | return ib_send_cm_req(id, &req); | |
842 | } | |
843 | ||
844 | static int ipoib_cm_modify_tx_init(struct net_device *dev, | |
845 | struct ib_cm_id *cm_id, struct ib_qp *qp) | |
846 | { | |
847 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
848 | struct ib_qp_attr qp_attr; | |
849 | int qp_attr_mask, ret; | |
850 | ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index); | |
851 | if (ret) { | |
852 | ipoib_warn(priv, "pkey 0x%x not in cache: %d\n", priv->pkey, ret); | |
853 | return ret; | |
854 | } | |
855 | ||
856 | qp_attr.qp_state = IB_QPS_INIT; | |
857 | qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE; | |
858 | qp_attr.port_num = priv->port; | |
859 | qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT; | |
860 | ||
861 | ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask); | |
862 | if (ret) { | |
863 | ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret); | |
864 | return ret; | |
865 | } | |
866 | return 0; | |
867 | } | |
868 | ||
869 | static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, | |
870 | struct ib_sa_path_rec *pathrec) | |
871 | { | |
872 | struct ipoib_dev_priv *priv = netdev_priv(p->dev); | |
873 | int ret; | |
874 | ||
875 | p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring, | |
876 | GFP_KERNEL); | |
877 | if (!p->tx_ring) { | |
878 | ipoib_warn(priv, "failed to allocate tx ring\n"); | |
879 | ret = -ENOMEM; | |
880 | goto err_tx; | |
881 | } | |
882 | ||
1b524963 | 883 | p->qp = ipoib_cm_create_tx_qp(p->dev, p); |
839fcaba MT |
884 | if (IS_ERR(p->qp)) { |
885 | ret = PTR_ERR(p->qp); | |
886 | ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret); | |
887 | goto err_qp; | |
888 | } | |
889 | ||
890 | p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p); | |
891 | if (IS_ERR(p->id)) { | |
892 | ret = PTR_ERR(p->id); | |
893 | ipoib_warn(priv, "failed to create tx cm id: %d\n", ret); | |
894 | goto err_id; | |
895 | } | |
896 | ||
897 | ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp); | |
898 | if (ret) { | |
899 | ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret); | |
900 | goto err_modify; | |
901 | } | |
902 | ||
903 | ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec); | |
904 | if (ret) { | |
905 | ipoib_warn(priv, "failed to send cm req: %d\n", ret); | |
906 | goto err_send_cm; | |
907 | } | |
908 | ||
909 | ipoib_dbg(priv, "Request connection 0x%x for gid " IPOIB_GID_FMT " qpn 0x%x\n", | |
910 | p->qp->qp_num, IPOIB_GID_ARG(pathrec->dgid), qpn); | |
911 | ||
912 | return 0; | |
913 | ||
914 | err_send_cm: | |
915 | err_modify: | |
916 | ib_destroy_cm_id(p->id); | |
917 | err_id: | |
918 | p->id = NULL; | |
919 | ib_destroy_qp(p->qp); | |
839fcaba MT |
920 | err_qp: |
921 | p->qp = NULL; | |
839fcaba MT |
922 | err_tx: |
923 | return ret; | |
924 | } | |
925 | ||
926 | static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) | |
927 | { | |
928 | struct ipoib_dev_priv *priv = netdev_priv(p->dev); | |
929 | struct ipoib_tx_buf *tx_req; | |
1b524963 MT |
930 | unsigned long flags; |
931 | unsigned long begin; | |
839fcaba MT |
932 | |
933 | ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", | |
934 | p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail); | |
935 | ||
936 | if (p->id) | |
937 | ib_destroy_cm_id(p->id); | |
938 | ||
839fcaba | 939 | if (p->tx_ring) { |
1b524963 MT |
940 | /* Wait for all sends to complete */ |
941 | begin = jiffies; | |
839fcaba | 942 | while ((int) p->tx_tail - (int) p->tx_head < 0) { |
1b524963 MT |
943 | if (time_after(jiffies, begin + 5 * HZ)) { |
944 | ipoib_warn(priv, "timing out; %d sends not completed\n", | |
945 | p->tx_head - p->tx_tail); | |
946 | goto timeout; | |
947 | } | |
948 | ||
949 | msleep(1); | |
839fcaba | 950 | } |
1b524963 MT |
951 | } |
952 | ||
953 | timeout: | |
839fcaba | 954 | |
1b524963 MT |
955 | while ((int) p->tx_tail - (int) p->tx_head < 0) { |
956 | tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; | |
957 | ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, | |
958 | DMA_TO_DEVICE); | |
959 | dev_kfree_skb_any(tx_req->skb); | |
960 | ++p->tx_tail; | |
961 | spin_lock_irqsave(&priv->tx_lock, flags); | |
962 | if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && | |
963 | netif_queue_stopped(p->dev) && | |
964 | test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) | |
965 | netif_wake_queue(p->dev); | |
966 | spin_unlock_irqrestore(&priv->tx_lock, flags); | |
839fcaba MT |
967 | } |
968 | ||
1b524963 MT |
969 | if (p->qp) |
970 | ib_destroy_qp(p->qp); | |
971 | ||
972 | kfree(p->tx_ring); | |
839fcaba MT |
973 | kfree(p); |
974 | } | |
975 | ||
976 | static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, | |
977 | struct ib_cm_event *event) | |
978 | { | |
979 | struct ipoib_cm_tx *tx = cm_id->context; | |
980 | struct ipoib_dev_priv *priv = netdev_priv(tx->dev); | |
981 | struct net_device *dev = priv->dev; | |
982 | struct ipoib_neigh *neigh; | |
839fcaba MT |
983 | int ret; |
984 | ||
985 | switch (event->event) { | |
986 | case IB_CM_DREQ_RECEIVED: | |
987 | ipoib_dbg(priv, "DREQ received.\n"); | |
988 | ib_send_cm_drep(cm_id, NULL, 0); | |
989 | break; | |
990 | case IB_CM_REP_RECEIVED: | |
991 | ipoib_dbg(priv, "REP received.\n"); | |
992 | ret = ipoib_cm_rep_handler(cm_id, event); | |
993 | if (ret) | |
994 | ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, | |
995 | NULL, 0, NULL, 0); | |
996 | break; | |
997 | case IB_CM_REQ_ERROR: | |
998 | case IB_CM_REJ_RECEIVED: | |
999 | case IB_CM_TIMEWAIT_EXIT: | |
1000 | ipoib_dbg(priv, "CM error %d.\n", event->event); | |
37aebbde | 1001 | spin_lock_irq(&priv->tx_lock); |
839fcaba MT |
1002 | spin_lock(&priv->lock); |
1003 | neigh = tx->neigh; | |
1004 | ||
1005 | if (neigh) { | |
1006 | neigh->cm = NULL; | |
1007 | list_del(&neigh->list); | |
1008 | if (neigh->ah) | |
1009 | ipoib_put_ah(neigh->ah); | |
1010 | ipoib_neigh_free(dev, neigh); | |
1011 | ||
1012 | tx->neigh = NULL; | |
1013 | } | |
1014 | ||
1015 | if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { | |
1016 | list_move(&tx->list, &priv->cm.reap_list); | |
1017 | queue_work(ipoib_workqueue, &priv->cm.reap_task); | |
1018 | } | |
1019 | ||
1020 | spin_unlock(&priv->lock); | |
37aebbde | 1021 | spin_unlock_irq(&priv->tx_lock); |
839fcaba MT |
1022 | break; |
1023 | default: | |
1024 | break; | |
1025 | } | |
1026 | ||
1027 | return 0; | |
1028 | } | |
1029 | ||
1030 | struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path, | |
1031 | struct ipoib_neigh *neigh) | |
1032 | { | |
1033 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
1034 | struct ipoib_cm_tx *tx; | |
1035 | ||
1036 | tx = kzalloc(sizeof *tx, GFP_ATOMIC); | |
1037 | if (!tx) | |
1038 | return NULL; | |
1039 | ||
1040 | neigh->cm = tx; | |
1041 | tx->neigh = neigh; | |
1042 | tx->path = path; | |
1043 | tx->dev = dev; | |
1044 | list_add(&tx->list, &priv->cm.start_list); | |
1045 | set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); | |
1046 | queue_work(ipoib_workqueue, &priv->cm.start_task); | |
1047 | return tx; | |
1048 | } | |
1049 | ||
1050 | void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) | |
1051 | { | |
1052 | struct ipoib_dev_priv *priv = netdev_priv(tx->dev); | |
1053 | if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { | |
1054 | list_move(&tx->list, &priv->cm.reap_list); | |
1055 | queue_work(ipoib_workqueue, &priv->cm.reap_task); | |
1056 | ipoib_dbg(priv, "Reap connection for gid " IPOIB_GID_FMT "\n", | |
1057 | IPOIB_GID_ARG(tx->neigh->dgid)); | |
1058 | tx->neigh = NULL; | |
1059 | } | |
1060 | } | |
1061 | ||
1062 | static void ipoib_cm_tx_start(struct work_struct *work) | |
1063 | { | |
1064 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | |
1065 | cm.start_task); | |
1066 | struct net_device *dev = priv->dev; | |
1067 | struct ipoib_neigh *neigh; | |
1068 | struct ipoib_cm_tx *p; | |
1069 | unsigned long flags; | |
1070 | int ret; | |
1071 | ||
1072 | struct ib_sa_path_rec pathrec; | |
1073 | u32 qpn; | |
1074 | ||
1075 | spin_lock_irqsave(&priv->tx_lock, flags); | |
1076 | spin_lock(&priv->lock); | |
1077 | while (!list_empty(&priv->cm.start_list)) { | |
1078 | p = list_entry(priv->cm.start_list.next, typeof(*p), list); | |
1079 | list_del_init(&p->list); | |
1080 | neigh = p->neigh; | |
1081 | qpn = IPOIB_QPN(neigh->neighbour->ha); | |
1082 | memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); | |
1083 | spin_unlock(&priv->lock); | |
1084 | spin_unlock_irqrestore(&priv->tx_lock, flags); | |
1085 | ret = ipoib_cm_tx_init(p, qpn, &pathrec); | |
1086 | spin_lock_irqsave(&priv->tx_lock, flags); | |
1087 | spin_lock(&priv->lock); | |
1088 | if (ret) { | |
1089 | neigh = p->neigh; | |
1090 | if (neigh) { | |
1091 | neigh->cm = NULL; | |
1092 | list_del(&neigh->list); | |
1093 | if (neigh->ah) | |
1094 | ipoib_put_ah(neigh->ah); | |
1095 | ipoib_neigh_free(dev, neigh); | |
1096 | } | |
1097 | list_del(&p->list); | |
1098 | kfree(p); | |
1099 | } | |
1100 | } | |
1101 | spin_unlock(&priv->lock); | |
1102 | spin_unlock_irqrestore(&priv->tx_lock, flags); | |
1103 | } | |
1104 | ||
1105 | static void ipoib_cm_tx_reap(struct work_struct *work) | |
1106 | { | |
1107 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | |
1108 | cm.reap_task); | |
1109 | struct ipoib_cm_tx *p; | |
839fcaba | 1110 | |
37aebbde | 1111 | spin_lock_irq(&priv->tx_lock); |
839fcaba MT |
1112 | spin_lock(&priv->lock); |
1113 | while (!list_empty(&priv->cm.reap_list)) { | |
1114 | p = list_entry(priv->cm.reap_list.next, typeof(*p), list); | |
1115 | list_del(&p->list); | |
1116 | spin_unlock(&priv->lock); | |
37aebbde | 1117 | spin_unlock_irq(&priv->tx_lock); |
839fcaba | 1118 | ipoib_cm_tx_destroy(p); |
37aebbde | 1119 | spin_lock_irq(&priv->tx_lock); |
839fcaba MT |
1120 | spin_lock(&priv->lock); |
1121 | } | |
1122 | spin_unlock(&priv->lock); | |
37aebbde | 1123 | spin_unlock_irq(&priv->tx_lock); |
839fcaba MT |
1124 | } |
1125 | ||
1126 | static void ipoib_cm_skb_reap(struct work_struct *work) | |
1127 | { | |
1128 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | |
1129 | cm.skb_task); | |
839fcaba | 1130 | struct sk_buff *skb; |
839fcaba MT |
1131 | |
1132 | unsigned mtu = priv->mcast_mtu; | |
1133 | ||
37aebbde | 1134 | spin_lock_irq(&priv->tx_lock); |
839fcaba MT |
1135 | spin_lock(&priv->lock); |
1136 | while ((skb = skb_dequeue(&priv->cm.skb_queue))) { | |
1137 | spin_unlock(&priv->lock); | |
37aebbde | 1138 | spin_unlock_irq(&priv->tx_lock); |
839fcaba MT |
1139 | if (skb->protocol == htons(ETH_P_IP)) |
1140 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); | |
1141 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | |
1142 | else if (skb->protocol == htons(ETH_P_IPV6)) | |
20089ca5 | 1143 | icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev); |
839fcaba MT |
1144 | #endif |
1145 | dev_kfree_skb_any(skb); | |
37aebbde | 1146 | spin_lock_irq(&priv->tx_lock); |
839fcaba MT |
1147 | spin_lock(&priv->lock); |
1148 | } | |
1149 | spin_unlock(&priv->lock); | |
37aebbde | 1150 | spin_unlock_irq(&priv->tx_lock); |
839fcaba MT |
1151 | } |
1152 | ||
1153 | void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb, | |
1154 | unsigned int mtu) | |
1155 | { | |
1156 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
1157 | int e = skb_queue_empty(&priv->cm.skb_queue); | |
1158 | ||
1159 | if (skb->dst) | |
1160 | skb->dst->ops->update_pmtu(skb->dst, mtu); | |
1161 | ||
1162 | skb_queue_tail(&priv->cm.skb_queue, skb); | |
1163 | if (e) | |
1164 | queue_work(ipoib_workqueue, &priv->cm.skb_task); | |
1165 | } | |
1166 | ||
518b1646 MT |
1167 | static void ipoib_cm_rx_reap(struct work_struct *work) |
1168 | { | |
1169 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | |
1170 | cm.rx_reap_task); | |
1171 | struct ipoib_cm_rx *p, *n; | |
1172 | LIST_HEAD(list); | |
1173 | ||
1174 | spin_lock_irq(&priv->lock); | |
1175 | list_splice_init(&priv->cm.rx_reap_list, &list); | |
1176 | spin_unlock_irq(&priv->lock); | |
1177 | ||
1178 | list_for_each_entry_safe(p, n, &list, list) { | |
1179 | ib_destroy_cm_id(p->id); | |
1180 | ib_destroy_qp(p->qp); | |
1181 | kfree(p); | |
1182 | } | |
1183 | } | |
1184 | ||
839fcaba MT |
1185 | static void ipoib_cm_stale_task(struct work_struct *work) |
1186 | { | |
1187 | struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, | |
1188 | cm.stale_task.work); | |
1189 | struct ipoib_cm_rx *p; | |
518b1646 | 1190 | int ret; |
839fcaba | 1191 | |
37aebbde | 1192 | spin_lock_irq(&priv->lock); |
839fcaba | 1193 | while (!list_empty(&priv->cm.passive_ids)) { |
518b1646 | 1194 | /* List is sorted by LRU, start from tail, |
839fcaba MT |
1195 | * stop when we see a recently used entry */ |
1196 | p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list); | |
60a596da | 1197 | if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT)) |
839fcaba | 1198 | break; |
518b1646 MT |
1199 | list_move(&p->list, &priv->cm.rx_error_list); |
1200 | p->state = IPOIB_CM_RX_ERROR; | |
37aebbde | 1201 | spin_unlock_irq(&priv->lock); |
518b1646 MT |
1202 | ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE); |
1203 | if (ret) | |
1204 | ipoib_warn(priv, "unable to move qp to error state: %d\n", ret); | |
37aebbde | 1205 | spin_lock_irq(&priv->lock); |
839fcaba | 1206 | } |
7c5b9ef8 MT |
1207 | |
1208 | if (!list_empty(&priv->cm.passive_ids)) | |
1209 | queue_delayed_work(ipoib_workqueue, | |
1210 | &priv->cm.stale_task, IPOIB_CM_RX_DELAY); | |
37aebbde | 1211 | spin_unlock_irq(&priv->lock); |
839fcaba MT |
1212 | } |
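The stale scan relies on `time_before_eq()`, which compares jiffies with a signed subtraction so the test keeps working across counter wraparound. A simplified 32-bit model of the macros from include/linux/jiffies.h:

```c
#include <stdint.h>
#include <stdio.h>

#define time_after(a, b)     ((int32_t)((b) - (a)) < 0)
#define time_before_eq(a, b) (!time_after(a, b))

int main(void)
{
	uint32_t now = 5;		/* jiffies just wrapped */
	uint32_t stamp = 0xfffffff0u;	/* taken 21 ticks earlier */

	/* still correct despite the wrap: "now" is after "stamp" */
	printf("time_after(now, stamp) = %d\n", time_after(now, stamp));
	return 0;
}
```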
1213 | ||
1214 | ||
1215 | static ssize_t show_mode(struct device *d, struct device_attribute *attr, | |
1216 | char *buf) | |
1217 | { | |
1218 | struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d)); | |
1219 | ||
1220 | if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags)) | |
1221 | return sprintf(buf, "connected\n"); | |
1222 | else | |
1223 | return sprintf(buf, "datagram\n"); | |
1224 | } | |
1225 | ||
1226 | static ssize_t set_mode(struct device *d, struct device_attribute *attr, | |
1227 | const char *buf, size_t count) | |
1228 | { | |
1229 | struct net_device *dev = to_net_dev(d); | |
1230 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
1231 | ||
1232 | /* flush paths if we switch modes so that connections are restarted */ | |
1233 | if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) { | |
1234 | set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); | |
1235 | ipoib_warn(priv, "enabling connected mode " | |
1236 | "will cause multicast packet drops\n"); | |
1237 | ipoib_flush_paths(dev); | |
1238 | return count; | |
1239 | } | |
1240 | ||
1241 | if (!strcmp(buf, "datagram\n")) { | |
1242 | clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags); | |
1243 | dev->mtu = min(priv->mcast_mtu, dev->mtu); | |
1244 | ipoib_flush_paths(dev); | |
1245 | return count; | |
1246 | } | |
1247 | ||
1248 | return -EINVAL; | |
1249 | } | |
1250 | ||
551fd612 | 1251 | static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode); |
839fcaba MT |
1252 | |
1253 | int ipoib_cm_add_mode_attr(struct net_device *dev) | |
1254 | { | |
1255 | return device_create_file(&dev->dev, &dev_attr_mode); | |
1256 | } | |
1257 | ||
1258 | int ipoib_cm_dev_init(struct net_device *dev) | |
1259 | { | |
1260 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
1261 | struct ib_srq_init_attr srq_init_attr = { | |
1262 | .attr = { | |
1263 | .max_wr = ipoib_recvq_size, | |
1264 | .max_sge = IPOIB_CM_RX_SG | |
1265 | } | |
1266 | }; | |
1267 | int ret, i; | |
1268 | ||
1269 | INIT_LIST_HEAD(&priv->cm.passive_ids); | |
1270 | INIT_LIST_HEAD(&priv->cm.reap_list); | |
1271 | INIT_LIST_HEAD(&priv->cm.start_list); | |
518b1646 MT |
1272 | INIT_LIST_HEAD(&priv->cm.rx_error_list); |
1273 | INIT_LIST_HEAD(&priv->cm.rx_flush_list); | |
1274 | INIT_LIST_HEAD(&priv->cm.rx_drain_list); | |
1275 | INIT_LIST_HEAD(&priv->cm.rx_reap_list); | |
839fcaba MT |
1276 | INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start); |
1277 | INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap); | |
1278 | INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap); | |
518b1646 | 1279 | INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap); |
839fcaba MT |
1280 | INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task); |
1281 | ||
1282 | skb_queue_head_init(&priv->cm.skb_queue); | |
1283 | ||
1284 | priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr); | |
1285 | if (IS_ERR(priv->cm.srq)) { | |
1286 | ret = PTR_ERR(priv->cm.srq); | |
1287 | priv->cm.srq = NULL; | |
1288 | return ret; | |
1289 | } | |
1290 | ||
1291 | priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring, | |
1292 | GFP_KERNEL); | |
1293 | if (!priv->cm.srq_ring) { | |
1294 | printk(KERN_WARNING "%s: failed to allocate CM ring (%d entries)\n", | |
1295 | priv->ca->name, ipoib_recvq_size); | |
1296 | ipoib_cm_dev_cleanup(dev); | |
1297 | return -ENOMEM; | |
1298 | } | |
1299 | ||
1300 | for (i = 0; i < IPOIB_CM_RX_SG; ++i) | |
1301 | priv->cm.rx_sge[i].lkey = priv->mr->lkey; | |
1302 | ||
1303 | priv->cm.rx_sge[0].length = IPOIB_CM_HEAD_SIZE; | |
1304 | for (i = 1; i < IPOIB_CM_RX_SG; ++i) | |
1305 | priv->cm.rx_sge[i].length = PAGE_SIZE; | |
1306 | priv->cm.rx_wr.next = NULL; | |
1307 | priv->cm.rx_wr.sg_list = priv->cm.rx_sge; | |
1308 | priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG; | |
1309 | ||
1310 | for (i = 0; i < ipoib_recvq_size; ++i) { | |
1812063b MT |
1311 | if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1, |
1312 | priv->cm.srq_ring[i].mapping)) { | |
839fcaba MT |
1313 | ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); |
1314 | ipoib_cm_dev_cleanup(dev); | |
1315 | return -ENOMEM; | |
1316 | } | |
1317 | if (ipoib_cm_post_receive(dev, i)) { | |
1318 | ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i); | |
1319 | ipoib_cm_dev_cleanup(dev); | |
1320 | return -EIO; | |
1321 | } | |
1322 | } | |
1323 | ||
1324 | priv->dev->dev_addr[0] = IPOIB_FLAGS_RC; | |
1325 | return 0; | |
1326 | } | |
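The receive scatter list built here bounds the largest CM message one SRQ receive can land: one linear header entry plus `IPOIB_CM_RX_SG - 1` full pages. Both constants below are assumed example values; the real ones come from ipoib.h.

```c
#include <stdio.h>

#define PAGE_SIZE	   4096u
#define IPOIB_CM_RX_SG	   16u		/* assumed */
#define IPOIB_CM_HEAD_SIZE 1024u	/* assumed */

int main(void)
{
	unsigned int max_recv = IPOIB_CM_HEAD_SIZE +
				(IPOIB_CM_RX_SG - 1) * PAGE_SIZE;

	printf("max CM receive: %u bytes\n", max_recv);
	return 0;
}
```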
1327 | ||
1328 | void ipoib_cm_dev_cleanup(struct net_device *dev) | |
1329 | { | |
1330 | struct ipoib_dev_priv *priv = netdev_priv(dev); | |
1331 | int i, ret; | |
1332 | ||
1333 | if (!priv->cm.srq) | |
1334 | return; | |
1335 | ||
1336 | ipoib_dbg(priv, "Cleanup ipoib connected mode.\n"); | |
1337 | ||
1338 | ret = ib_destroy_srq(priv->cm.srq); | |
1339 | if (ret) | |
1340 | ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret); | |
1341 | ||
1342 | priv->cm.srq = NULL; | |
1343 | if (!priv->cm.srq_ring) | |
1344 | return; | |
1345 | for (i = 0; i < ipoib_recvq_size; ++i) | |
1346 | if (priv->cm.srq_ring[i].skb) { | |
1812063b MT |
1347 | ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1, |
1348 | priv->cm.srq_ring[i].mapping); | |
839fcaba MT |
1349 | dev_kfree_skb_any(priv->cm.srq_ring[i].skb); |
1350 | priv->cm.srq_ring[i].skb = NULL; | |
1351 | } | |
1352 | kfree(priv->cm.srq_ring); | |
1353 | priv->cm.srq_ring = NULL; | |
1354 | } |