/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */

/*
 * The idea behind this code is rather simple. Every second, for each server
 * namespace we have an SLV - server lock volume - which is calculated from
 * the current number of granted locks, the grant speed for the past period,
 * and so on - that is, the locking load. For simplicity, this SLV number may
 * be thought of as a flow definition. It is sent to clients at every
 * opportunity to let them know the current load situation on the server. By
 * default, at start-up, the server's SLV is set to a maximum value computed
 * as follows: allow one client to hold all ->pl_limit locks for 10h.
 *
 * Next, on clients, the number of cached locks is no longer limited
 * artificially in any way, as it was before. Instead, the client calculates
 * a CLV - client lock volume - for each lock and compares it with the last
 * SLV received from the server. The CLV is calculated as the number of locks
 * in the LRU * the lock's live time in seconds. If CLV > SLV, the lock is
 * canceled.
 *
 * The client also has an LVF - lock volume factor - which regulates how
 * sensitive the client should be to the last SLV from the server. The higher
 * the LVF, the more locks will be canceled on the client. Its default value
 * is 1. Setting the LVF to 2 means that the client will cancel locks twice
 * as fast.
 *
 * Locks on a client will be canceled more intensively in these cases:
 * (1) if SLV is smaller, that is, load is higher on the server;
 * (2) the client holds a lot of locks (the more locks a client holds, the
 *     bigger the chances that some of them should be canceled);
 * (3) the client has old locks (taken some time ago).
 *
 * Thus, in the flow paradigm that we use for a better understanding of the
 * SLV, the CLV is the volume of a particle in the flow described by the SLV.
 * Accordingly, as the flow gets thinner, more and more particles fall
 * outside of it, and as the particles are locks, they should be canceled.
 *
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas, such as
 * using the LVF, and many cleanups. The flow definition that makes the logic
 * easier to understand belongs to Nikita Danilov (nikita@clusterfs.com), as
 * do many cleanups and fixes. The design and implementation are by Yury
 * Umanets (umka@clusterfs.com).
 *
 * Glossary for terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side (tunable);
 *
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
 *
 * As can be seen from the list above, we have a few tunables which may
 * affect behavior considerably. They can all be modified via sysfs, and they
 * also make it possible to construct several pre-defined behavior policies.
 * If none of the predefined policies suits the workload pattern in use, a
 * new one may be "constructed" via the sysfs tunables.
 */
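
/*
 * Editor's illustrative sketch (not part of the original file): under the
 * scheme described above, the client-side decision reduces to roughly the
 * following, where lru_count and lock_age_sec are hypothetical names for
 * values the client tracks per namespace and per lock:
 *
 *	u64 clv = (u64)lru_count * lock_age_sec * lvf;
 *	if (clv > slv)
 *		cancel the lock;
 *
 * E.g. with 1000 locks in the LRU, a lock that has lived 200s and LVF == 1
 * gives CLV = 200,000; the lock is canceled once SLV drops below that.
 */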

#define DEBUG_SUBSYSTEM S_LDLM

#include "../include/lustre_dlm.h"
#include "../include/cl_object.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "ldlm_internal.h"

/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)

/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_MAX_GSP% of all locks is the default GP.
 */
#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_MAX_GSP) / 100)

/*
 * Max age for locks on clients.
 */
#define LDLM_POOL_MAX_AGE (36000)

/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)

static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
        return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
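
/*
 * Editor's note (not in the original): dru() is "divide and round up" by
 * 2^shift. For example, dru(1000, LDLM_POOL_SLV_SHIFT, 0) == 0 since
 * 1000 >> 10 truncates, while dru(1000, LDLM_POOL_SLV_SHIFT, 1) == 1.
 */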

static inline __u64 ldlm_pool_slv_max(__u32 L)
{
        /*
         * Allow to have all locks for 1 client for 10 hrs.
         * Formula is the following: limit * 10h / 1 client.
         */
        __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
        return lim;
}

static inline __u64 ldlm_pool_slv_min(__u32 L)
{
        return 1;
}

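/*
 * Editor's note (not in the original): with LDLM_POOL_MAX_AGE == 36000
 * (ten hours in seconds), a limit of, say, 100000 locks yields an initial
 * SLV ceiling of 3,600,000,000 - the volume one client would accumulate
 * by holding every allowed lock for ten hours.
 */
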
enum {
        LDLM_POOL_FIRST_STAT = 0,
        LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
        LDLM_POOL_GRANT_STAT,
        LDLM_POOL_CANCEL_STAT,
        LDLM_POOL_GRANT_RATE_STAT,
        LDLM_POOL_CANCEL_RATE_STAT,
        LDLM_POOL_GRANT_PLAN_STAT,
        LDLM_POOL_SLV_STAT,
        LDLM_POOL_SHRINK_REQTD_STAT,
        LDLM_POOL_SHRINK_FREED_STAT,
        LDLM_POOL_RECALC_STAT,
        LDLM_POOL_TIMING_STAT,
        LDLM_POOL_LAST_STAT
};

static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
{
        return container_of(pl, struct ldlm_namespace, ns_pool);
}

/**
 * Calculates suggested grant_step in % of available locks for passed
 * \a period. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
        /*
         * This yields a 1% grant step for anything below LDLM_POOL_GSP_STEP
         * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
         *
         * How this will affect execution is the following:
         *
         * - for a thread period of 1s we will have a grant_step of 1%, which
         *   is good from the pov of taking some load off the server and
         *   pushing it out to clients. This is so because a 1% grant_step
         *   means that the server will not allow clients to grab lots of
         *   locks in a short period of time while keeping all their old
         *   locks cached. Clients will always have to give some locks back
         *   if they want to take some new ones;
         *
         * - for a thread period of 10s (the default) we will have 23%, which
         *   means that clients will have enough room to take some new locks
         *   without giving any back. All locks from this 23% which were not
         *   taken by clients in the current period will contribute to SLV
         *   growth, and SLV growth means more locks cached on clients until
         *   the limit or the grant plan is reached.
         */
        return LDLM_POOL_MAX_GSP -
                ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
                 (t >> LDLM_POOL_GSP_STEP_SHIFT));
}

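/*
 * Editor's check of the formula above (not in the original): with
 * LDLM_POOL_MAX_GSP == 30, LDLM_POOL_MIN_GSP == 1 and
 * LDLM_POOL_GSP_STEP_SHIFT == 2, t == 1 gives 30 - (29 >> 0) = 1%,
 * while t == 10 gives 30 - (29 >> 2) = 23%, matching the 1% and 23%
 * figures quoted in the comment.
 */
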
/**
 * Returns current \a pl limit.
 */
static __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_limit);
}

/**
 * Sets passed \a limit to \a pl.
 */
static void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
        atomic_set(&pl->pl_limit, limit);
}

/**
 * Recalculates next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
        int grant_plan = pl->pl_grant_plan;
        __u64 slv = pl->pl_server_lock_volume;
        int granted = atomic_read(&pl->pl_granted);
        int grant_rate = atomic_read(&pl->pl_grant_rate);
        int cancel_rate = atomic_read(&pl->pl_cancel_rate);

        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
                            slv);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                            granted);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                            grant_rate);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                            grant_plan);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                            cancel_rate);
}

/**
 * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd to passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
        struct obd_device *obd;

        /*
         * Get new SLV and Limit from obd which is updated with coming
         * RPCs.
         */
        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL);
        read_lock(&obd->obd_pool_lock);
        pl->pl_server_lock_volume = obd->obd_pool_slv;
        ldlm_pool_set_limit(pl, obd->obd_pool_limit);
        read_unlock(&obd->obd_pool_lock);
}

/**
 * Recalculates client side pool \a pl according to current SLV and Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
        time64_t recalc_interval_sec;
        int ret;

        recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;

        spin_lock(&pl->pl_lock);
        /*
         * Check if we need to recalc lists now.
         */
        recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
        }

        /*
         * Make sure that pool knows last SLV and Limit from obd.
         */
        ldlm_cli_pool_pop_slv(pl);

        spin_unlock(&pl->pl_lock);

        /*
         * Do not cancel locks in case lru resize is disabled for this ns.
         */
        if (!ns_connect_lru_resize(ldlm_pl2ns(pl))) {
                ret = 0;
                goto out;
        }

        /*
         * While canceling locks on the client we do not need to maintain
         * sharp timing, we only want to cancel locks asap according to the
         * new SLV. This may be called when the SLV has changed a lot, which
         * is why we do not take pl->pl_recalc_time into account here.
         */
        ret = ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);

out:
        spin_lock(&pl->pl_lock);
        /*
         * Time of LRU resizing might be longer than period,
         * so update after LRU resizing rather than before it.
         */
        pl->pl_recalc_time = ktime_get_real_seconds();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
        return ret;
}

/**
 * This function is the main entry point for memory pressure handling on the
 * client side. Its main goal is to cancel some number of locks on the passed
 * \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
                                int nr, gfp_t gfp_mask)
{
        struct ldlm_namespace *ns;
        int unused;

        ns = ldlm_pl2ns(pl);

        /*
         * Do not cancel locks in case lru resize is disabled for this ns.
         */
        if (!ns_connect_lru_resize(ns))
                return 0;

        /*
         * Make sure that pool knows last SLV and Limit from obd.
         */
        ldlm_cli_pool_pop_slv(pl);

        spin_lock(&ns->ns_lock);
        unused = ns->ns_nr_unused;
        spin_unlock(&ns->ns_lock);

        if (nr == 0)
                return (unused / 100) * sysctl_vfs_cache_pressure;
        else
                return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}

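/*
 * Editor's note (not in the original): with the default
 * sysctl_vfs_cache_pressure of 100, the nr == 0 branch above reports
 * roughly "unused" freeable objects to the shrinker; raising that sysctl
 * makes the pool look proportionally larger and thus more attractive to
 * reclaim.
 */
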
static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
        .po_recalc = ldlm_cli_pool_recalc,
        .po_shrink = ldlm_cli_pool_shrink
};

/**
 * Pool recalc wrapper. Will call either client or server pool recalc callback
 * depending on what kind of pool \a pl is.
 */
static int ldlm_pool_recalc(struct ldlm_pool *pl)
{
        u32 recalc_interval_sec;
        int count;

        recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec <= 0)
                goto recalc;

        spin_lock(&pl->pl_lock);
        if (recalc_interval_sec > 0) {
                /*
                 * Update pool statistics every 1s.
                 */
                ldlm_pool_recalc_stats(pl);

                /*
                 * Zero out all rates and speed for the last period.
                 */
                atomic_set(&pl->pl_grant_rate, 0);
                atomic_set(&pl->pl_cancel_rate, 0);
        }
        spin_unlock(&pl->pl_lock);

recalc:
        if (pl->pl_ops->po_recalc != NULL) {
                count = pl->pl_ops->po_recalc(pl);
                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                                    count);
        }
        recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
                              pl->pl_recalc_period;
        if (recalc_interval_sec <= 0) {
                /* Prevent too frequent recalculation. */
                CDEBUG(D_DLMTRACE,
                       "Negative interval(%d), too short period(%lld)",
                       recalc_interval_sec,
                       (s64)pl->pl_recalc_period);
                recalc_interval_sec = 1;
        }

        return recalc_interval_sec;
}

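/*
 * Editor's note (not in the original): the value returned above is the
 * number of seconds until the next recalc is due; ldlm_pools_recalc()
 * takes the minimum across namespaces and ldlm_pools_thread_main() uses
 * it as its sleep interval.
 */
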
/*
 * Pool shrink wrapper. Will call either client or server pool shrink callback
 * depending on what kind of pool \a pl is. When nr == 0, just return the
 * number of freeable locks. Otherwise, return the number of canceled locks.
 */
static int ldlm_pool_shrink(struct ldlm_pool *pl, int nr, gfp_t gfp_mask)
{
        int cancel = 0;

        if (pl->pl_ops->po_shrink != NULL) {
                cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
                if (nr > 0) {
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_REQTD_STAT,
                                            nr);
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_FREED_STAT,
                                            cancel);
                        CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
                               pl->pl_name, nr, cancel);
                }
        }
        return cancel;
}

static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
        int granted, grant_rate, cancel_rate;
        int grant_speed, lvf;
        struct ldlm_pool *pl = m->private;
        __u64 slv, clv;
        __u32 limit;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_server_lock_volume;
        clv = pl->pl_client_lock_volume;
        limit = ldlm_pool_get_limit(pl);
        granted = atomic_read(&pl->pl_granted);
        grant_rate = atomic_read(&pl->pl_grant_rate);
        cancel_rate = atomic_read(&pl->pl_cancel_rate);
        grant_speed = grant_rate - cancel_rate;
        lvf = atomic_read(&pl->pl_lock_volume_factor);
        spin_unlock(&pl->pl_lock);

        seq_printf(m, "LDLM pool state (%s):\n"
                      " SLV: %llu\n"
                      " CLV: %llu\n"
                      " LVF: %d\n",
                   pl->pl_name, slv, clv, lvf);

        seq_printf(m, " GR: %d\n CR: %d\n GS: %d\n"
                      " G: %d\n L: %d\n",
                   grant_rate, cancel_rate, grant_speed,
                   granted, limit);

        return 0;
}

LPROC_SEQ_FOPS_RO(lprocfs_pool_state);

static ssize_t grant_speed_show(struct kobject *kobj, struct attribute *attr,
                                char *buf)
{
        struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
                                            pl_kobj);
        int grant_speed;

        spin_lock(&pl->pl_lock);
        /* serialize with ldlm_pool_recalc */
        grant_speed = atomic_read(&pl->pl_grant_rate) -
                      atomic_read(&pl->pl_cancel_rate);
        spin_unlock(&pl->pl_lock);
        return sprintf(buf, "%d\n", grant_speed);
}
LUSTRE_RO_ATTR(grant_speed);

LDLM_POOL_SYSFS_READER_SHOW(grant_plan, int);
LUSTRE_RO_ATTR(grant_plan);

LDLM_POOL_SYSFS_READER_SHOW(recalc_period, int);
LDLM_POOL_SYSFS_WRITER_STORE(recalc_period, int);
LUSTRE_RW_ATTR(recalc_period);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(server_lock_volume, u64);
LUSTRE_RO_ATTR(server_lock_volume);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(limit, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(limit, atomic);
LUSTRE_RW_ATTR(limit);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(granted, atomic);
LUSTRE_RO_ATTR(granted);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(cancel_rate, atomic);
LUSTRE_RO_ATTR(cancel_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(grant_rate, atomic);
LUSTRE_RO_ATTR(grant_rate);

LDLM_POOL_SYSFS_READER_NOLOCK_SHOW(lock_volume_factor, atomic);
LDLM_POOL_SYSFS_WRITER_NOLOCK_STORE(lock_volume_factor, atomic);
LUSTRE_RW_ATTR(lock_volume_factor);

#define LDLM_POOL_ADD_VAR(name, var, ops)                               \
do {                                                                    \
        snprintf(var_name, MAX_STRING_SIZE, #name);                     \
        pool_vars[0].data = var;                                        \
        pool_vars[0].fops = ops;                                        \
        ldebugfs_add_vars(pl->pl_debugfs_entry, pool_vars, NULL);       \
} while (0)

/* These are for pools in /sys/fs/lustre/ldlm/namespaces/.../pool */
static struct attribute *ldlm_pl_attrs[] = {
        &lustre_attr_grant_speed.attr,
        &lustre_attr_grant_plan.attr,
        &lustre_attr_recalc_period.attr,
        &lustre_attr_server_lock_volume.attr,
        &lustre_attr_limit.attr,
        &lustre_attr_granted.attr,
        &lustre_attr_cancel_rate.attr,
        &lustre_attr_grant_rate.attr,
        &lustre_attr_lock_volume_factor.attr,
        NULL,
};

static void ldlm_pl_release(struct kobject *kobj)
{
        struct ldlm_pool *pl = container_of(kobj, struct ldlm_pool,
                                            pl_kobj);
        complete(&pl->pl_kobj_unregister);
}

static struct kobj_type ldlm_pl_ktype = {
        .default_attrs  = ldlm_pl_attrs,
        .sysfs_ops      = &lustre_sysfs_ops,
        .release        = ldlm_pl_release,
};

static int ldlm_pool_sysfs_init(struct ldlm_pool *pl)
{
        struct ldlm_namespace *ns = ldlm_pl2ns(pl);
        int err;

        init_completion(&pl->pl_kobj_unregister);
        err = kobject_init_and_add(&pl->pl_kobj, &ldlm_pl_ktype, &ns->ns_kobj,
                                   "pool");

        return err;
}

static int ldlm_pool_debugfs_init(struct ldlm_pool *pl)
{
        struct ldlm_namespace *ns = ldlm_pl2ns(pl);
        struct dentry *debugfs_ns_parent;
        struct lprocfs_vars pool_vars[2];
        char *var_name = NULL;
        int rc = 0;

        var_name = kzalloc(MAX_STRING_SIZE + 1, GFP_NOFS);
        if (!var_name)
                return -ENOMEM;

        debugfs_ns_parent = ns->ns_debugfs_entry;
        if (IS_ERR_OR_NULL(debugfs_ns_parent)) {
                CERROR("%s: debugfs entry is not initialized\n",
                       ldlm_ns_name(ns));
                rc = -EINVAL;
                goto out_free_name;
        }
        pl->pl_debugfs_entry = ldebugfs_register("pool", debugfs_ns_parent,
                                                 NULL, NULL);
        if (IS_ERR(pl->pl_debugfs_entry)) {
                CERROR("LdebugFS failed in ldlm-pool-init\n");
                rc = PTR_ERR(pl->pl_debugfs_entry);
                pl->pl_debugfs_entry = NULL;
                goto out_free_name;
        }

        var_name[MAX_STRING_SIZE] = '\0';
        memset(pool_vars, 0, sizeof(pool_vars));
        pool_vars[0].name = var_name;

        LDLM_POOL_ADD_VAR(state, pl, &lprocfs_pool_state_fops);

        pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
                                           LDLM_POOL_FIRST_STAT, 0);
        if (!pl->pl_stats) {
                rc = -ENOMEM;
                goto out_free_name;
        }

        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "granted", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_plan", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "slv", "slv");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_request", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_timing", "sec");
        rc = ldebugfs_register_stats(pl->pl_debugfs_entry, "stats",
                                     pl->pl_stats);

out_free_name:
        kfree(var_name);
        return rc;
}

static void ldlm_pool_sysfs_fini(struct ldlm_pool *pl)
{
        kobject_put(&pl->pl_kobj);
        wait_for_completion(&pl->pl_kobj_unregister);
}

static void ldlm_pool_debugfs_fini(struct ldlm_pool *pl)
{
        if (pl->pl_stats != NULL) {
                lprocfs_free_stats(&pl->pl_stats);
                pl->pl_stats = NULL;
        }
        if (pl->pl_debugfs_entry != NULL) {
                ldebugfs_remove(&pl->pl_debugfs_entry);
                pl->pl_debugfs_entry = NULL;
        }
}

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client)
{
        int rc;

        spin_lock_init(&pl->pl_lock);
        atomic_set(&pl->pl_granted, 0);
        pl->pl_recalc_time = ktime_get_seconds();
        atomic_set(&pl->pl_lock_volume_factor, 1);

        atomic_set(&pl->pl_grant_rate, 0);
        atomic_set(&pl->pl_cancel_rate, 0);
        pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

        snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
                 ldlm_ns_name(ns), idx);

        ldlm_pool_set_limit(pl, 1);
        pl->pl_server_lock_volume = 0;
        pl->pl_ops = &ldlm_cli_pool_ops;
        pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
        pl->pl_client_lock_volume = 0;
        rc = ldlm_pool_debugfs_init(pl);
        if (rc)
                return rc;

        rc = ldlm_pool_sysfs_init(pl);
        if (rc)
                return rc;

        CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

        return rc;
}
EXPORT_SYMBOL(ldlm_pool_init);

void ldlm_pool_fini(struct ldlm_pool *pl)
{
        ldlm_pool_sysfs_fini(pl);
        ldlm_pool_debugfs_fini(pl);

        /*
         * Pool should not be used after this point. We can't free it here
         * as it lives in struct ldlm_namespace, but we are still interested
         * in catching any abnormal use.
         */
        POISON(pl, 0x5a, sizeof(*pl));
}
EXPORT_SYMBOL(ldlm_pool_fini);

/**
 * Add new taken ldlm lock \a lock into pool \a pl accounting.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /*
         * FLOCK locks are special in a sense that they are almost never
         * cancelled; instead, a special kind of lock is used to drop them.
         * Also, there is no LRU for flock locks, so there is no point in
         * tracking them anyway.
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK)
                return;

        atomic_inc(&pl->pl_granted);
        atomic_inc(&pl->pl_grant_rate);
        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
        /*
         * Do not do pool recalc for client side as all locks which
         * potentially may be canceled have already been packed into
         * enqueue/cancel RPCs. Also, we do not want to run out of stack
         * with too long call paths.
         */
}
EXPORT_SYMBOL(ldlm_pool_add);

/**
 * Remove ldlm lock \a lock from pool \a pl accounting.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /*
         * Filter out FLOCK locks. Read above comment in ldlm_pool_add().
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK)
                return;

        LASSERT(atomic_read(&pl->pl_granted) > 0);
        atomic_dec(&pl->pl_granted);
        atomic_inc(&pl->pl_cancel_rate);

        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);
}
EXPORT_SYMBOL(ldlm_pool_del);

/**
 * Returns current \a pl SLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
        __u64 slv;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_server_lock_volume;
        spin_unlock(&pl->pl_lock);
        return slv;
}

/**
 * Sets passed \a clv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
        spin_lock(&pl->pl_lock);
        pl->pl_client_lock_volume = clv;
        spin_unlock(&pl->pl_lock);
}

/**
 * Returns current LVF from \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_lock_volume_factor);
}

static int ldlm_pool_granted(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_granted);
}

static struct ptlrpc_thread *ldlm_pools_thread;
static struct completion ldlm_pools_comp;

/*
 * count locks from all namespaces (if possible). Returns number of
 * cached locks.
 */
static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
        int total = 0, nr_ns;
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL; /* loop detection */
        void *cookie;

        if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
                return 0;

        CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
               client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

        cookie = cl_env_reenter();

        /*
         * Find out how many resources we may release.
         */
        for (nr_ns = ldlm_namespace_nr_read(client);
             nr_ns > 0; nr_ns--) {
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        cl_env_reexit(cookie);
                        return 0;
                }
                ns = ldlm_namespace_first_locked(client);

                if (ns == ns_old) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }

                if (ldlm_ns_empty(ns)) {
                        ldlm_namespace_move_to_inactive_locked(ns, client);
                        mutex_unlock(ldlm_namespace_lock(client));
                        continue;
                }

                if (ns_old == NULL)
                        ns_old = ns;

                ldlm_namespace_get(ns);
                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));
                total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
                ldlm_namespace_put(ns);
        }

        cl_env_reexit(cookie);
        return total;
}

static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr, gfp_t gfp_mask)
{
        unsigned long freed = 0;
        int tmp, nr_ns;
        struct ldlm_namespace *ns;
        void *cookie;

        if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
                return -1;

        cookie = cl_env_reenter();

        /*
         * Shrink at least ldlm_namespace_nr_read(client) namespaces.
         */
        for (tmp = nr_ns = ldlm_namespace_nr_read(client);
             tmp > 0; tmp--) {
                int cancel, nr_locks;

                /*
                 * Do not call shrink under ldlm_namespace_lock(client)
                 */
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }
                ns = ldlm_namespace_first_locked(client);
                ldlm_namespace_get(ns);
                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));

                nr_locks = ldlm_pool_granted(&ns->ns_pool);
                /*
                 * We used to shrink proportionally, but with the new
                 * shrinker API we lost the total number of freeable locks.
                 */
                cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
                freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
                ldlm_namespace_put(ns);
        }
        cl_env_reexit(cookie);
        /*
         * We only decrease the SLV in the server pools shrinker; return
         * SHRINK_STOP to the kernel to avoid a needless loop. LU-1128.
         */
        return freed;
}

static unsigned long ldlm_pools_cli_count(struct shrinker *s,
                                          struct shrink_control *sc)
{
        return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
                                         struct shrink_control *sc)
{
        return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
                               sc->gfp_mask);
}

static int ldlm_pools_recalc(ldlm_side_t client)
{
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL;
        int nr;
        int time = 50; /* seconds of sleep if no active namespaces */

        /*
         * Recalc at least ldlm_namespace_nr_read(client) namespaces.
         */
        for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
                int skip;
                /*
                 * Lock the list, get first @ns in the list, getref, move it
                 * to the tail, unlock and call pool recalc. This way we avoid
                 * calling recalc under @ns lock, which is really good as we
                 * get rid of a potential deadlock on client nodes when
                 * canceling locks synchronously.
                 */
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }
                ns = ldlm_namespace_first_locked(client);

                if (ns_old == ns) { /* Full pass complete */
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }

                /* We got an empty namespace, need to move it back to the
                 * inactive list.
                 * The race with parallel resource creation is fine:
                 * - If they do namespace_get before our check, we fail the
                 *   check and they move this item to the end of the list
                 *   anyway
                 * - If we do the check and then they do namespace_get, then
                 *   we move the namespace to inactive and they will move
                 *   it back to active (synchronised by the lock, so no clash
                 *   there).
                 */
                if (ldlm_ns_empty(ns)) {
                        ldlm_namespace_move_to_inactive_locked(ns, client);
                        mutex_unlock(ldlm_namespace_lock(client));
                        continue;
                }

                if (ns_old == NULL)
                        ns_old = ns;

                spin_lock(&ns->ns_lock);
                /*
                 * Skip an ns which is being freed; we don't want to increase
                 * its refcount again, not even temporarily. bz21519 & LU-499.
                 */
                if (ns->ns_stopping) {
                        skip = 1;
                } else {
                        skip = 0;
                        ldlm_namespace_get(ns);
                }
                spin_unlock(&ns->ns_lock);

                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));

                /*
                 * After setup is done - recalc the pool.
                 */
                if (!skip) {
                        int ttime = ldlm_pool_recalc(&ns->ns_pool);

                        if (ttime < time)
                                time = ttime;

                        ldlm_namespace_put(ns);
                }
        }
        return time;
}

static int ldlm_pools_thread_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
        int c_time;

        thread_set_flags(thread, SVC_RUNNING);
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
               "ldlm_poold", current_pid());

        while (1) {
                struct l_wait_info lwi;

                /*
                 * Recalc all pools on this tick.
                 */
                c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);

                /*
                 * Wait until the next check time, or until we're
                 * stopped.
                 */
                lwi = LWI_TIMEOUT(cfs_time_seconds(c_time),
                                  NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
                             thread_is_event(thread),
                             &lwi);

                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                        break;
                thread_test_and_clear_flags(thread, SVC_EVENT);
        }

        thread_set_flags(thread, SVC_STOPPED);
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
               "ldlm_poold", current_pid());

        complete_and_exit(&ldlm_pools_comp, 0);
}

static int ldlm_pools_thread_start(void)
{
        struct l_wait_info lwi = { 0 };
        struct task_struct *task;

        if (ldlm_pools_thread != NULL)
                return -EALREADY;

        ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
        if (!ldlm_pools_thread)
                return -ENOMEM;

        init_completion(&ldlm_pools_comp);
        init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);

        task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
                           "ldlm_poold");
        if (IS_ERR(task)) {
                CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
                kfree(ldlm_pools_thread);
                ldlm_pools_thread = NULL;
                return PTR_ERR(task);
        }
        l_wait_event(ldlm_pools_thread->t_ctl_waitq,
                     thread_is_running(ldlm_pools_thread), &lwi);
        return 0;
}

static void ldlm_pools_thread_stop(void)
{
        if (ldlm_pools_thread == NULL)
                return;

        thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
        wake_up(&ldlm_pools_thread->t_ctl_waitq);

        /*
         * Make sure that pools thread is finished before freeing @thread.
         * This fixes possible race and oops due to accessing freed memory
         * in pools thread.
         */
        wait_for_completion(&ldlm_pools_comp);
        kfree(ldlm_pools_thread);
        ldlm_pools_thread = NULL;
}

static struct shrinker ldlm_pools_cli_shrinker = {
        .count_objects  = ldlm_pools_cli_count,
        .scan_objects   = ldlm_pools_cli_scan,
        .seeks          = DEFAULT_SEEKS,
};

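/*
 * Editor's note (not in the original): under the count_objects /
 * scan_objects shrinker API, ->count_objects reports how many objects are
 * freeable and ->scan_objects frees up to sc->nr_to_scan of them, so
 * ldlm_pools_count() and ldlm_pools_scan() implement the two halves of
 * what used to be a single shrink callback.
 */
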
int ldlm_pools_init(void)
{
        int rc;

        rc = ldlm_pools_thread_start();
        if (rc == 0)
                register_shrinker(&ldlm_pools_cli_shrinker);

        return rc;
}
EXPORT_SYMBOL(ldlm_pools_init);

void ldlm_pools_fini(void)
{
        if (ldlm_pools_thread)
                unregister_shrinker(&ldlm_pools_cli_shrinker);

        ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);