/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

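/*
 * Set up the constant parts of each recv work request in the ring: one
 * sge pointing at the preallocated header slot for that entry, and one
 * sge for the data fragment, whose address is filled in at refill time.
 */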
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_ibinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IB_RECV_SGE;

		sge = &recv->r_sge[0];
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

		sge = &recv->r_sge[1];
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = ic->i_mr->lkey;
	}
}

/*
 * The entire 'from' list, including the from element itself, is put on
 * to the tail of the 'to' list.
 */
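/*
 * Note that list_splice_tail() treats its first argument as a list_head
 * sentinel and leaves that element behind.  Since these lists are
 * anchored by a bare element (see rds_ib_recv_cache_put()), the element
 * excluded by the splice (from->prev) is added back explicitly so the
 * whole chain moves.
 */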
static void list_splice_entire_tail(struct list_head *from,
				    struct list_head *to)
{
	struct list_head *from_last = from->prev;

	list_splice_tail(from_last, to);
	list_add_tail(from_last, to);
}

static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
	struct list_head *tmp;

	tmp = xchg(&cache->xfer, NULL);
	if (tmp) {
		if (cache->ready)
			list_splice_entire_tail(tmp, cache->ready);
		else
			cache->ready = tmp;
	}
}

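/*
 * Allocate the per-cpu heads for one refill cache.  Each CPU gets its
 * own 'first' anchor and count; the shared 'xfer' and 'ready' lists
 * start out empty.
 */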
static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
{
	struct rds_ib_cache_head *head;
	int cpu;

	cache->percpu = alloc_percpu(struct rds_ib_cache_head);
	if (!cache->percpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		head->first = NULL;
		head->count = 0;
	}
	cache->xfer = NULL;
	cache->ready = NULL;

	return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
{
	int ret;

	ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
	if (!ret) {
		ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
		if (ret)
			free_percpu(ic->i_cache_incs.percpu);
	}

	return ret;
}

static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
					  struct list_head *caller_list)
{
	struct rds_ib_cache_head *head;
	int cpu;

	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(cache->percpu, cpu);
		if (head->first) {
			list_splice_entire_tail(head->first, caller_list);
			head->first = NULL;
		}
	}

	if (cache->ready) {
		list_splice_entire_tail(cache->ready, caller_list);
		cache->ready = NULL;
	}
}

void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
	struct rds_ib_incoming *inc;
	struct rds_ib_incoming *inc_tmp;
	struct rds_page_frag *frag;
	struct rds_page_frag *frag_tmp;
	LIST_HEAD(list);

	rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
	free_percpu(ic->i_cache_incs.percpu);

	list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
		list_del(&inc->ii_cache_entry);
		WARN_ON(!list_empty(&inc->ii_frags));
		kmem_cache_free(rds_ib_incoming_slab, inc);
	}

	rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
	rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
	free_percpu(ic->i_cache_frags.percpu);

	list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
		list_del(&frag->f_cache_entry);
		WARN_ON(!list_empty(&frag->f_item));
		kmem_cache_free(rds_ib_frag_slab, frag);
	}
}

/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);


/* Recycle frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_ib_connection *ic,
			     struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

	rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
}

/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;
	struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

	/* Free attached frags */
	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_ib_frag_free(ic, frag);
	}
	BUG_ON(!list_empty(&ibinc->ii_frags));

	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
	rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}

static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
				  struct rds_ib_recv_work *recv)
{
	if (recv->r_ibinc) {
		rds_inc_put(&recv->r_ibinc->ii_inc);
		recv->r_ibinc = NULL;
	}
	if (recv->r_frag) {
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
	}
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}

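/*
 * Get an inc for a new message, preferring a cached one; otherwise fall
 * back to the slab, bounded by rds_ib_sysctl_max_recv_allocation.
 */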
static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
						     gfp_t slab_mask)
{
	struct rds_ib_incoming *ibinc;
	struct list_head *cache_item;
	int avail_allocs;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
	if (cache_item) {
		ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
	} else {
		avail_allocs = atomic_add_unless(&rds_ib_allocation,
						 1, rds_ib_sysctl_max_recv_allocation);
		if (!avail_allocs) {
			rds_ib_stats_inc(s_ib_rx_alloc_limit);
			return NULL;
		}
		ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
		if (!ibinc) {
			atomic_dec(&rds_ib_allocation);
			return NULL;
		}
	}
	INIT_LIST_HEAD(&ibinc->ii_frags);
	rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);

	return ibinc;
}

static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
						    gfp_t slab_mask, gfp_t page_mask)
{
	struct rds_page_frag *frag;
	struct list_head *cache_item;
	int ret;

	cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
	if (cache_item) {
		frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
	} else {
		frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
		if (!frag)
			return NULL;

		ret = rds_page_remainder_alloc(&frag->f_sg,
					       RDS_FRAG_SIZE, page_mask);
		if (ret) {
			kmem_cache_free(rds_ib_frag_slab, frag);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&frag->f_item);

	return frag;
}

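/*
 * Give one recv work entry everything it needs to be posted: an inc (if
 * it doesn't already hold one), a DMA-mapped data fragment, and sges
 * pointing at the header slot and the fragment.
 */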
static int rds_ib_recv_refill_one(struct rds_connection *conn,
				  struct rds_ib_recv_work *recv, int prefill)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_sge *sge;
	int ret = -ENOMEM;
	gfp_t slab_mask = GFP_NOWAIT;
	gfp_t page_mask = GFP_NOWAIT;

	if (prefill) {
		slab_mask = GFP_KERNEL;
		page_mask = GFP_HIGHUSER;
	}

	if (!ic->i_cache_incs.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
	if (!ic->i_cache_frags.ready)
		rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

	/*
	 * ibinc was taken from recv if recv contained the start of a message.
	 * recvs that were continuations will still have this allocated.
	 */
	if (!recv->r_ibinc) {
		recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
		if (!recv->r_ibinc)
			goto out;
	}

	WARN_ON(recv->r_frag); /* leak! */
	recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
	if (!recv->r_frag)
		goto out;

	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
			    1, DMA_FROM_DEVICE);
	WARN_ON(ret != 1);

	sge = &recv->r_sge[0];
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	sge = &recv->r_sge[1];
	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
	sge->length = sg_dma_len(&recv->r_frag->f_sg);

	ret = 0;
out:
	return ret;
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.
 *
 * If posting fails due to temporary resource exhaustion, the ring entry is
 * released again and refilling is retried later from the recv completion path.
 */
void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_recv_work *recv;
	struct ib_recv_wr *failed_wr;
	unsigned int posted = 0;
	int ret = 0;
	u32 pos;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
			       pos);
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_ib_recv_refill_one(conn, recv, prefill);
		if (ret)
			break;

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
			 (long) sg_dma_address(&recv->r_frag->f_sg), ret);
		if (ret) {
			rds_ib_conn_error(conn, "recv post on "
			       "%pI4 returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_ib_advertise_credits(conn, posted);

	if (ret)
		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
}

/*
 * We want to recycle several types of recv allocations, like incs and frags.
 * To use this, the *_free() function passes in the ptr to a list_head within
 * the recyclee, as well as the cache to put it on.
 *
 * First, we put the memory on a percpu list. When this reaches a certain size,
 * we move it to an intermediate non-percpu list in a lockless manner, with some
 * xchg/cmpxchg wizardry.
 *
 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 * list_empty() will return true even when exactly one element is present.
 */
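/*
 * Roughly, a recycled item moves through three stages:
 *
 *	_free()	->  percpu chp->first	(per-cpu chain, irqs disabled)
 *		->  cache->xfer		(batched hand-off via xchg/cmpxchg)
 *		->  cache->ready	(drained by the refill path)
 */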
static void rds_ib_recv_cache_put(struct list_head *new_item,
				  struct rds_ib_refill_cache *cache)
{
	unsigned long flags;
	struct rds_ib_cache_head *chp;
	struct list_head *old;

	local_irq_save(flags);

	chp = per_cpu_ptr(cache->percpu, smp_processor_id());
	if (!chp->first)
		INIT_LIST_HEAD(new_item);
	else /* put on front */
		list_add_tail(new_item, chp->first);
	chp->first = new_item;
	chp->count++;

	if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
		goto end;

	/*
	 * Return our per-cpu first list to the cache's xfer by atomically
	 * grabbing the current xfer list, appending it to our per-cpu list,
	 * and then atomically returning that entire list back to the
	 * cache's xfer list as long as it's still empty.
	 */
	do {
		old = xchg(&cache->xfer, NULL);
		if (old)
			list_splice_entire_tail(old, chp->first);
		old = cmpxchg(&cache->xfer, NULL, chp->first);
	} while (old);

	chp->first = NULL;
	chp->count = 0;
end:
	local_irq_restore(flags);
}

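/*
 * Pop one item off the ready list.  Because the anchor is a bare
 * element, a ready list holding a single entry looks empty to
 * list_empty(); in that case the anchor itself is the last item.
 */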
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
	struct list_head *head = cache->ready;

	if (head) {
		if (!list_empty(head)) {
			cache->ready = head->next;
			list_del_init(head);
		} else
			cache->ready = NULL;
	}

	return head;
}

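/*
 * Copy a received message out of its chain of page fragments and into
 * the caller's iovec, advancing fragment and iovec offsets independently.
 */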
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			    size_t size)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct iovec *iov = first_iov;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	unsigned long iov_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (copied < size && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
			 "[%p, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);

		/* XXX needs + offset for multiple recvs per page */
		ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
					    frag->f_sg.offset + frag_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IB_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
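/*
 * i_ack_next is written from the recv path and read from the send path.
 * On platforms without a usable atomic64_t the 64-bit sequence number is
 * protected by i_ack_lock instead, hence the two implementations below.
 */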
#ifndef KERNEL_HAS_ATOMIC64
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_clear_bit();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_clear_bit();

	return atomic64_read(&ic->i_ack_next);
}
#endif


static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	seq = rds_ib_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_ib_stats_inc(s_ib_ack_send_failure);

		rds_ib_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1. We call rds_ib_attempt_ack from the recv completion handler
 *     to send an ACK-only frame.
 *     However, there can be only one such frame in the send queue
 *     at any time, so we may have to postpone it.
 *  2. When another (data) packet is transmitted while there's
 *     an ACK in the queue, we piggyback the ACK sequence number
 *     on the data packet.
 *  3. If the ACK WR is done sending, we get called from the
 *     send queue completion handler, and check whether there's
 *     another ACK pending (postponed because the WR was on the
 *     queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  - i_ack_flags, which keeps track of whether the ACK WR
 *    is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  - i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_ib_stats_inc(s_ib_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_ib_stats_inc(s_ib_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
	return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
static void rds_ib_cong_recv(struct rds_connection *conn,
			     struct rds_ib_incoming *ibinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0);

		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr, KM_SOFTIRQ0);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);

	rds_cong_map_updated(map, uncongested);
}

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
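/*
 * Ack state accumulated over one pass of the recv completion queue and
 * applied once the tasklet has drained it.
 */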
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};

static void rds_ib_process_recv(struct rds_connection *conn,
				struct rds_ib_recv_work *recv, u32 data_len,
				struct rds_ib_ack_state *state)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_incoming *ibinc = ic->i_ibinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
		 data_len);

	if (data_len < sizeof(struct rds_header)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	data_len -= sizeof(struct rds_header);

	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_ib_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet. It gets special treatment
		 * here because, historically, ACKs were rather special
		 * beasts.
		 */
		rds_ib_stats_inc(s_ib_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_ib_frag_free(ic, recv->r_frag);
		recv->r_frag = NULL;
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.. copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!ibinc) {
		ibinc = recv->r_ibinc;
		recv->r_ibinc = NULL;
		ic->i_ibinc = ibinc;

		hdr = &ibinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &ibinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_ib_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_ibinc = NULL;

		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_ib_cong_recv(conn, ibinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &ibinc->ii_inc, GFP_ATOMIC,
					  KM_SOFTIRQ0);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&ibinc->ii_inc);
	}
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_rx_cq_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}

static inline void rds_poll_cq(struct rds_ib_connection *ic,
			       struct rds_ib_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct ib_wc wc;
	struct rds_ib_recv_work *recv;

	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_rx_cq_event);

		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
			/* We expect errors as the qp is drained during shutdown */
			if (wc.status == IB_WC_SUCCESS) {
				rds_ib_process_recv(conn, recv, wc.byte_len, state);
			} else {
				rds_ib_conn_error(conn, "recv completion on "
				       "%pI4 had status %u, disconnecting and "
				       "reconnecting\n", &conn->c_faddr,
				       wc.status);
			}
		}

		rds_ib_ring_free(&ic->i_recv_ring, 1);
	}
}

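/*
 * Drain the CQ, re-arm the completion notification, then drain again to
 * pick up any completions that raced with the re-arm; only then apply
 * the accumulated ack state.
 */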
void rds_ib_recv_tasklet_fn(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_ack_state state = { 0, };

	rds_poll_cq(ic, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	rds_poll_cq(ic, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	if (rds_ib_ring_low(&ic->i_recv_ring))
		rds_ib_recv_refill(conn, 0);
}

int rds_ib_recv(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	return ret;
}

int __init rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to roughly a third of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
					sizeof(struct rds_ib_incoming),
					0, 0, NULL);
	if (!rds_ib_incoming_slab)
		goto out;

	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
					sizeof(struct rds_page_frag),
					0, 0, NULL);
	if (!rds_ib_frag_slab)
		kmem_cache_destroy(rds_ib_incoming_slab);
	else
		ret = 0;
out:
	return ret;
}

void rds_ib_recv_exit(void)
{
	kmem_cache_destroy(rds_ib_incoming_slab);
	kmem_cache_destroy(rds_ib_frag_slab);
}