/*
 * Copyright (c) 2007 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/types.h>
#include <linux/rbtree.h>

#include "rds.h"

/*
 * This file implements the receive side of the unconventional congestion
 * management in RDS.
 *
 * Messages waiting in the receive queue on the receiving socket are accounted
 * against the socket's SO_RCVBUF option value.  Only the payload bytes in the
 * message are accounted for.  If the number of bytes queued equals or exceeds
 * rcvbuf then the socket is congested.  All sends attempted to this socket's
 * address should block or, for nonblocking sockets, fail with -ENOBUFS (see
 * rds_cong_wait() below).
 *
 * Applications are expected to be reasonably tuned such that this situation
 * very rarely occurs.  An application that regularly triggers this
 * "back-pressure" is considered buggy.
 *
 * This is implemented by having each node maintain bitmaps which indicate
 * which ports on bound addresses are congested.  As a bitmap changes, it is
 * sent over every connection that terminates at the bitmap's local address.
 *
 * The bitmaps are allocated as connections are brought up.  This avoids
 * allocation in the interrupt handling path which queues messages on sockets.
 * The dense bitmaps let transports send the entire bitmap on any bitmap change
 * reasonably efficiently.  This is much easier to implement than some
 * finer-grained communication of per-port congestion.  The sender does a very
 * inexpensive bit test to see whether the port it's about to send to is
 * congested or not.
 */
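
/*
 * Sizing, for reference (the real constants live in rds.h): one bit per
 * 16-bit port works out to 65536 bits, or 8KB, per map
 * (RDS_CONG_MAP_BYTES), carved into RDS_CONG_MAP_PAGES pages of
 * RDS_CONG_MAP_PAGE_BITS bits each.  A port then maps to a bit as
 *
 *	i   = port / RDS_CONG_MAP_PAGE_BITS;	(page index)
 *	off = port % RDS_CONG_MAP_PAGE_BITS;	(bit within that page)
 *
 * which is exactly the arithmetic rds_cong_set_bit() and friends use below.
 */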
65 | ||
66 | /* | |
67 | * Interaction with poll is a tad tricky. We want all processes stuck in | |
68 | * poll to wake up and check whether a congested destination became uncongested. | |
69 | * The really sad thing is we have no idea which destinations the application | |
70 | * wants to send to - we don't even know which rds_connections are involved. | |
71 | * So until we implement a more flexible rds poll interface, we have to make | |
72 | * do with this: | |
73 | * We maintain a global counter that is incremented each time a congestion map | |
74 | * update is received. Each rds socket tracks this value, and if rds_poll | |
75 | * finds that the saved generation number is smaller than the global generation | |
76 | * number, it wakes up the process. | |
77 | */ | |
static atomic_t rds_cong_generation = ATOMIC_INIT(0);

/*
 * Congestion monitoring
 */
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);

/*
 * Yes, a global lock.  It's used so infrequently that it's worth keeping it
 * global to simplify the locking.  It's only used in the following
 * circumstances:
 *
 *  - on connection buildup to associate a conn with its maps
 *  - on map changes to inform conns of a new map to send
 *
 * It's sadly ordered under the socket callback lock and the connection lock.
 * Receive paths can mark ports congested from interrupt context, so the
 * lock masks interrupts.
 */
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;

static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
					       struct rds_cong_map *insert)
{
	struct rb_node **p = &rds_cong_tree.rb_node;
	struct rb_node *parent = NULL;
	struct rds_cong_map *map;

	while (*p) {
		parent = *p;
		map = rb_entry(parent, struct rds_cong_map, m_rb_node);

		if (addr < map->m_addr)
			p = &(*p)->rb_left;
		else if (addr > map->m_addr)
			p = &(*p)->rb_right;
		else
			return map;
	}

	if (insert) {
		rb_link_node(&insert->m_rb_node, parent, p);
		rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
	}
	return NULL;
}

/*
 * There is only ever one bitmap for any address.  Connections try to
 * allocate these bitmaps, getting pointers to them in the process.  The
 * bitmaps are only ever freed as the module is removed after all connections
 * have been freed.
 */
static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
{
	struct rds_cong_map *map;
	struct rds_cong_map *ret = NULL;
	unsigned long zp;
	unsigned long i;
	unsigned long flags;

	map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->m_addr = addr;
	init_waitqueue_head(&map->m_waitq);
	INIT_LIST_HEAD(&map->m_conn_list);

	for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
		zp = get_zeroed_page(GFP_KERNEL);
		if (zp == 0)
			goto out;
		map->m_page_addrs[i] = zp;
	}

	spin_lock_irqsave(&rds_cong_lock, flags);
	ret = rds_cong_tree_walk(addr, map);
	spin_unlock_irqrestore(&rds_cong_lock, flags);

	if (ret == NULL) {
		/* No existing map: ours was inserted into the tree. */
		ret = map;
		map = NULL;
	}

out:
	/* Lost the race to another inserter, or a page allocation failed. */
	if (map) {
		for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
			free_page(map->m_page_addrs[i]);
		kfree(map);
	}

	rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));

	return ret;
}

/*
 * Put the conn on its local map's list.  This is called when the conn is
 * really added to the hash.  It's nested under the rds_conn_lock, sadly.
 */
void rds_cong_add_conn(struct rds_connection *conn)
{
	unsigned long flags;

	rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_remove_conn(struct rds_connection *conn)
{
	unsigned long flags;

	rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
	spin_lock_irqsave(&rds_cong_lock, flags);
	list_del_init(&conn->c_map_item);
	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

int rds_cong_get_maps(struct rds_connection *conn)
{
	conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
	conn->c_fcong = rds_cong_from_addr(conn->c_faddr);

	if (conn->c_lcong == NULL || conn->c_fcong == NULL)
		return -ENOMEM;

	return 0;
}

void rds_cong_queue_updates(struct rds_cong_map *map)
{
	struct rds_connection *conn;
	unsigned long flags;

	spin_lock_irqsave(&rds_cong_lock, flags);

	list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
		if (!test_and_set_bit(0, &conn->c_map_queued)) {
			rds_stats_inc(s_cong_update_queued);
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);
		}
	}

	spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
	rdsdebug("waking map %p for %pI4\n", map, &map->m_addr);
	rds_stats_inc(s_cong_update_received);
	atomic_inc(&rds_cong_generation);
	if (waitqueue_active(&map->m_waitq))
		wake_up(&map->m_waitq);
	if (waitqueue_active(&rds_poll_waitq))
		wake_up_all(&rds_poll_waitq);

	if (portmask && !list_empty(&rds_cong_monitor)) {
		unsigned long flags;
		struct rds_sock *rs;

		read_lock_irqsave(&rds_cong_monitor_lock, flags);
		list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
			spin_lock(&rs->rs_lock);
			rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
			rs->rs_cong_mask &= ~portmask;
			spin_unlock(&rs->rs_lock);
			if (rs->rs_cong_notify)
				rds_wake_sk_sleep(rs);
		}
		read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
	}
}

int rds_cong_updated_since(unsigned long *recent)
{
	unsigned long gen = atomic_read(&rds_cong_generation);

	if (likely(*recent == gen))
		return 0;
	*recent = gen;
	return 1;
}
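
/*
 * Illustrative sketch, not part of this file: a poll implementation can
 * keep the saved generation in the socket and combine it with the global
 * counter like this.  The rs_cong_track field name and the event mask
 * chosen here are assumptions made for the example.
 */
#if 0
static unsigned int example_poll_cong(struct rds_sock *rs)
{
	unsigned int mask = 0;

	/* Every map update bumps rds_cong_generation and wakes
	 * rds_poll_waitq; each polling socket then rechecks here. */
	if (rds_cong_updated_since(&rs->rs_cong_track))
		mask |= (POLLIN | POLLRDNORM);

	return mask;
}
#endif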
265 | ||
266 | /* | |
267 | * We're called under the locking that protects the sockets receive buffer | |
268 | * consumption. This makes it a lot easier for the caller to only call us | |
269 | * when it knows that an existing set bit needs to be cleared, and vice versa. | |
270 | * We can't block and we need to deal with concurrent sockets working against | |
271 | * the same per-address map. | |
272 | */ | |
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	rdsdebug("setting congestion for %pI4:%u in map %p\n",
		 &map->m_addr, ntohs(port), map);

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	generic___set_le_bit(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	rdsdebug("clearing congestion for %pI4:%u in map %p\n",
		 &map->m_addr, ntohs(port), map);

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	generic___clear_le_bit(off, (void *)map->m_page_addrs[i]);
}

static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
	unsigned long i;
	unsigned long off;

	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

	return generic_test_le_bit(off, (void *)map->m_page_addrs[i]);
}
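
/*
 * Illustrative sketch, not part of this file: the receive path is expected
 * to drive the helpers above when a socket's queued payload crosses its
 * rcvbuf limit (see the comment at the top of this file).  The function
 * name and the now_congested decision are assumptions made for the example.
 */
#if 0
static void example_rcvbuf_crossed(struct rds_sock *rs,
				   struct rds_cong_map *map,
				   int now_congested)
{
	/* The caller serializes rcvbuf accounting, so only real transitions
	 * are seen here (see the comment above rds_cong_set_bit()). */
	if (now_congested)
		rds_cong_set_bit(map, rs->rs_bound_port);
	else
		rds_cong_clear_bit(map, rs->rs_bound_port);

	/* Push the changed bitmap to every peer of this address. */
	rds_cong_queue_updates(map);
}
#endif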
311 | ||
312 | void rds_cong_add_socket(struct rds_sock *rs) | |
313 | { | |
314 | unsigned long flags; | |
315 | ||
316 | write_lock_irqsave(&rds_cong_monitor_lock, flags); | |
317 | if (list_empty(&rs->rs_cong_list)) | |
318 | list_add(&rs->rs_cong_list, &rds_cong_monitor); | |
319 | write_unlock_irqrestore(&rds_cong_monitor_lock, flags); | |
320 | } | |
321 | ||
void rds_cong_remove_socket(struct rds_sock *rs)
{
	unsigned long flags;
	struct rds_cong_map *map;

	write_lock_irqsave(&rds_cong_monitor_lock, flags);
	list_del_init(&rs->rs_cong_list);
	write_unlock_irqrestore(&rds_cong_monitor_lock, flags);

	/* update congestion map for now-closed port */
	spin_lock_irqsave(&rds_cong_lock, flags);
	map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
	spin_unlock_irqrestore(&rds_cong_lock, flags);

	if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
		rds_cong_clear_bit(map, rs->rs_bound_port);
		rds_cong_queue_updates(map);
	}
}

int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs)
{
	if (!rds_cong_test_bit(map, port))
		return 0;
	if (nonblock) {
		if (rs && rs->rs_cong_monitor) {
			unsigned long flags;

			/* It would have been nice to have an atomic set_bit on
			 * a uint64_t. */
			spin_lock_irqsave(&rs->rs_lock, flags);
			rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
			spin_unlock_irqrestore(&rs->rs_lock, flags);

			/* Test again - a congestion update may have arrived in
			 * the meantime. */
			if (!rds_cong_test_bit(map, port))
				return 0;
		}
		rds_stats_inc(s_cong_send_error);
		return -ENOBUFS;
	}

	rds_stats_inc(s_cong_send_blocked);
	rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));

	return wait_event_interruptible(map->m_waitq,
					!rds_cong_test_bit(map, port));
}
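
/*
 * Illustrative sketch, not part of this file: a sender gates on the remote
 * map before queueing a message.  The function name, calling convention and
 * dport variable are assumptions made for the example.
 */
#if 0
static int example_send_gate(struct rds_connection *conn, struct rds_sock *rs,
			     __be16 dport, int nonblock)
{
	/* c_fcong is the peer's map; a set bit means the destination port
	 * is congested.  Returns -ENOBUFS for nonblocking sends and
	 * -ERESTARTSYS if a blocking wait is interrupted. */
	return rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
}
#endif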
372 | ||
373 | void rds_cong_exit(void) | |
374 | { | |
375 | struct rb_node *node; | |
376 | struct rds_cong_map *map; | |
377 | unsigned long i; | |
378 | ||
379 | while ((node = rb_first(&rds_cong_tree))) { | |
380 | map = rb_entry(node, struct rds_cong_map, m_rb_node); | |
381 | rdsdebug("freeing map %p\n", map); | |
382 | rb_erase(&map->m_rb_node, &rds_cong_tree); | |
383 | for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++) | |
384 | free_page(map->m_page_addrs[i]); | |
385 | kfree(map); | |
386 | } | |
387 | } | |
388 | ||
389 | /* | |
390 | * Allocate a RDS message containing a congestion update. | |
391 | */ | |
392 | struct rds_message *rds_cong_update_alloc(struct rds_connection *conn) | |
393 | { | |
394 | struct rds_cong_map *map = conn->c_lcong; | |
395 | struct rds_message *rm; | |
396 | ||
397 | rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES); | |
398 | if (!IS_ERR(rm)) | |
399 | rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP; | |
400 | ||
401 | return rm; | |
402 | } |