/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */

/*
 * The idea of this code is rather simple. Each second, for each server
 * namespace we have SLV - server lock volume - which is calculated from the
 * current number of granted locks, the grant speed for the past period, etc -
 * that is, the locking load. This SLV number may be thought of as a flow
 * definition for simplicity. It is sent to clients at every opportunity to
 * let them know the current load situation on the server. By default, at the
 * beginning, the SLV on the server is set to a maximum value computed as
 * follows: allow one client to hold all locks of limit ->pl_limit for 10h.
 *
 * Next, on clients, the number of cached locks is no longer limited
 * artificially in any way as it was before. Instead, the client calculates
 * CLV, that is, the client lock volume, for each lock and compares it with
 * the last SLV from the server. CLV is calculated as the number of locks in
 * LRU * lock live time in seconds. If CLV > SLV, the lock is canceled.
 *
 * The client has LVF, that is, a lock volume factor, which regulates how
 * sensitive the client should be to the last SLV from the server. The higher
 * LVF is, the more locks will be canceled on the client. The default value
 * is 1. Setting LVF to 2 means that the client will cancel locks twice as
 * fast.
 *
 * Locks on a client will be canceled more intensively in these cases:
 * (1) if SLV is smaller, that is, load is higher on the server;
 * (2) client has a lot of locks (the more locks are held by client, the
 *     bigger chances that some of them should be canceled);
 * (3) client has old locks (taken some time ago);
 *
 * Thus, according to the flow paradigm that we use for better understanding
 * of SLV, CLV is the volume of a particle in the flow described by SLV.
 * According to this, if the flow is getting thinner, more and more particles
 * fall outside of it, and as particles are locks, they should be canceled.
 *
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas like
 * using LVF and many cleanups. The flow definition allowing easier
 * understanding of the logic belongs to Nikita Danilov (nikita@clusterfs.com)
 * as well as many cleanups and fixes. Design and implementation are done by
 * Yury Umanets (umka@clusterfs.com).
 *
 * Glossary for terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side (tunable);
 *
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
 *
 * As may be seen from the list above, we have a few possible tunables which
 * may affect behavior a lot. They all may be modified via proc. However,
 * they also give the possibility of constructing a few pre-defined behavior
 * policies. If none of the predefined policies suits the working pattern
 * being used, a new one may be "constructed" via proc tunables.
 */
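
/*
 * Illustrative example (made-up numbers, not from a real deployment), using
 * the CLV formula stated above: suppose a client caches 1000 locks in its
 * LRU and the oldest one has lived for 3600 seconds. With LVF = 1 that
 * lock's CLV is roughly 1000 * 3600 = 3,600,000. If the last SLV received
 * from the server is 3,000,000, then CLV > SLV and the lock gets canceled.
 * With LVF = 2 the effective volume doubles to 7,200,000, so the lock would
 * be canceled even under a higher (less loaded) SLV.
 */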

#define DEBUG_SUBSYSTEM S_LDLM

#include "../include/lustre_dlm.h"
#include "../include/cl_object.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "ldlm_internal.h"

/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)

/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_GSP% of all locks is default GP.
 */
#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_MAX_GSP) / 100)

/*
 * Max age for locks on clients.
 */
#define LDLM_POOL_MAX_AGE (36000)

/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)

extern struct proc_dir_entry *ldlm_ns_proc_dir;

static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
	return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
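
/*
 * Usage sketch (values picked for illustration): dru() divides by 2^shift,
 * optionally rounding up. dru(1000, 10, 0) == 1000 >> 10 == 0 (plain
 * truncation), while dru(1000, 10, 1) == (1000 + 1023) >> 10 == 1.
 */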

static inline __u64 ldlm_pool_slv_max(__u32 L)
{
	/*
	 * Allow one client to hold all locks of limit L for 10 hrs.
	 * Formula is the following: limit * 10h / 1 client.
	 */
	__u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;

	return lim;
}

static inline __u64 ldlm_pool_slv_min(__u32 L)
{
	return 1;
}

enum {
	LDLM_POOL_FIRST_STAT = 0,
	LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
	LDLM_POOL_GRANT_STAT,
	LDLM_POOL_CANCEL_STAT,
	LDLM_POOL_GRANT_RATE_STAT,
	LDLM_POOL_CANCEL_RATE_STAT,
	LDLM_POOL_GRANT_PLAN_STAT,
	LDLM_POOL_SLV_STAT,
	LDLM_POOL_SHRINK_REQTD_STAT,
	LDLM_POOL_SHRINK_FREED_STAT,
	LDLM_POOL_RECALC_STAT,
	LDLM_POOL_TIMING_STAT,
	LDLM_POOL_LAST_STAT
};

static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
{
	return container_of(pl, struct ldlm_namespace, ns_pool);
}

/**
 * Calculates suggested grant_step in % of available locks for passed
 * \a t. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
	/*
	 * This yields 1% grant step for anything below LDLM_POOL_GSP_STEP
	 * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
	 *
	 * How this will affect execution is the following:
	 *
	 * - for thread period 1s we will have grant_step 1%, which is good
	 * from the pov of taking some load off the server and pushing it
	 * out to clients. It works like that because a 1% grant_step means
	 * that the server will not allow clients to get lots of locks in a
	 * short period of time and keep all old locks in their caches.
	 * Clients will always have to get some locks back if they want to
	 * take new ones;
	 *
	 * - for thread period 10s (which is default) we will have 23%, which
	 * means that clients will have enough room to take some new locks
	 * without giving some back. All locks from this 23% which were not
	 * taken by clients in the current period will contribute to SLV
	 * growing. SLV growing means more locks cached on clients until the
	 * limit or grant plan is reached.
	 */
	return LDLM_POOL_MAX_GSP -
		((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
		 (t >> LDLM_POOL_GSP_STEP_SHIFT));
}
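
/*
 * A few sample values, computed by hand from the formula above (with
 * LDLM_POOL_MAX_GSP = 30, LDLM_POOL_MIN_GSP = 1, step shift = 2):
 * t = 1   -> 30 - (29 >> 0) = 1%
 * t = 5   -> 30 - (29 >> 1) = 16%
 * t = 10  -> 30 - (29 >> 2) = 23%
 * t >= 20 -> 30 - 0 = 30% (the cap)
 */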

/**
 * Recalculates next grant limit on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
{
	int granted, grant_step, limit;

	limit = ldlm_pool_get_limit(pl);
	granted = atomic_read(&pl->pl_granted);

	grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
	grant_step = ((limit - granted) * grant_step) / 100;
	pl->pl_grant_plan = granted + grant_step;
	limit = (limit * 5) >> 2;
	if (pl->pl_grant_plan > limit)
		pl->pl_grant_plan = limit;
}
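
/*
 * Worked example with illustrative numbers: limit = 1000, granted = 800,
 * recalc period = 10s. t2gsp(10) = 23, so grant_step =
 * ((1000 - 800) * 23) / 100 = 46 and grant_plan = 846. The plan is finally
 * clamped to (1000 * 5) >> 2 = 1250, which is not hit here.
 */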

/**
 * Recalculates next SLV on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
	int granted;
	int grant_plan;
	int round_up;
	__u64 slv;
	__u64 slv_factor;
	__u64 grant_usage;
	__u32 limit;

	slv = pl->pl_server_lock_volume;
	grant_plan = pl->pl_grant_plan;
	limit = ldlm_pool_get_limit(pl);
	granted = atomic_read(&pl->pl_granted);
	round_up = granted < limit;

	grant_usage = max_t(int, limit - (granted - grant_plan), 1);

	/*
	 * Find out the SLV change factor, which is the ratio of grant usage
	 * to the limit. SLV changes as fast as the ratio of grant plan
	 * consumption. The more locks from the grant plan are not consumed
	 * by clients in the last interval (idle time), the faster SLV grows.
	 * Conversely, the more the grant plan is over-consumed (load time),
	 * the faster SLV drops.
	 */
	slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
	do_div(slv_factor, limit);
	slv = slv * slv_factor;
	slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);

	if (slv > ldlm_pool_slv_max(limit))
		slv = ldlm_pool_slv_max(limit);
	else if (slv < ldlm_pool_slv_min(limit))
		slv = ldlm_pool_slv_min(limit);

	pl->pl_server_lock_volume = slv;
}
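
/*
 * Fixed-point sketch with made-up numbers: limit = 1000, granted = 900,
 * grant_plan = 846, old SLV = 1,000,000. Then grant_usage =
 * 1000 - (900 - 846) = 946, slv_factor = (946 << 10) / 1000 = 968
 * (do_div truncates), and the new SLV = dru(1,000,000 * 968, 10, 1)
 * = 945,313 - about a 5.5% drop, because the grant plan was over-consumed.
 */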

/**
 * Recalculates next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
	int grant_plan = pl->pl_grant_plan;
	__u64 slv = pl->pl_server_lock_volume;
	int granted = atomic_read(&pl->pl_granted);
	int grant_rate = atomic_read(&pl->pl_grant_rate);
	int cancel_rate = atomic_read(&pl->pl_cancel_rate);

	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
			    slv);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			    granted);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			    grant_rate);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			    grant_plan);
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			    cancel_rate);
}

/**
 * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd.
 */
static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
{
	struct obd_device *obd;

	/*
	 * Set the new SLV in an obd field for using it later without
	 * accessing the pool. This is required to avoid a race between
	 * sending a reply to a client with the new SLV and cleanup of the
	 * server stack, in which we can't guarantee that the namespace is
	 * still alive. We know only that the obd is alive as long as a
	 * valid export is alive.
	 */
	obd = ldlm_pl2ns(pl)->ns_obd;
	LASSERT(obd != NULL);
	write_lock(&obd->obd_pool_lock);
	obd->obd_pool_slv = pl->pl_server_lock_volume;
	write_unlock(&obd->obd_pool_lock);
}

/**
 * Recalculates all pool fields on passed \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
{
	time_t recalc_interval_sec;

	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period)
		return 0;

	spin_lock(&pl->pl_lock);
	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period) {
		spin_unlock(&pl->pl_lock);
		return 0;
	}
	/*
	 * Recalc SLV after last period. This should be done
	 * _before_ recalculating new grant plan.
	 */
	ldlm_pool_recalc_slv(pl);

	/*
	 * Make sure that pool informed obd of last SLV changes.
	 */
	ldlm_srv_pool_push_slv(pl);

	/*
	 * Update grant_plan for new period.
	 */
	ldlm_pool_recalc_grant_plan(pl);

	pl->pl_recalc_time = get_seconds();
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			    recalc_interval_sec);
	spin_unlock(&pl->pl_lock);
	return 0;
}

/**
 * This function is used on the server side as the main entry point for
 * memory pressure handling. It decreases SLV on \a pl according to passed
 * \a nr and \a gfp_mask.
 *
 * Our goal here is to decrease SLV in such a way that clients hold
 * approximately \a nr fewer locks over the next 10h.
 */
static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
				int nr, gfp_t gfp_mask)
{
	__u32 limit;

	/*
	 * VM is asking how many entries may be potentially freed.
	 */
	if (nr == 0)
		return atomic_read(&pl->pl_granted);

	/*
	 * Clients may have already canceled locks while the server is in
	 * the shrinker and can't cancel anything. Let's catch this race.
	 */
	if (atomic_read(&pl->pl_granted) == 0)
		return 0;

	spin_lock(&pl->pl_lock);

	/*
	 * We want the shrinker to possibly cause cancellation of @nr locks
	 * from clients or grant approximately @nr fewer locks in the next
	 * intervals.
	 *
	 * This is why we decrease SLV by @nr. This effect will only last
	 * for one re-calc interval (1s these days), which should be enough
	 * to pass the decreased SLV to all clients. On the next recalc
	 * interval the pool will either increase SLV if the locking load is
	 * not high, or keep it on the same level or even decrease it again;
	 * thus, the shrinker-decreased SLV will affect the next recalc
	 * intervals and this way will lower the locking load.
	 */
	if (nr < pl->pl_server_lock_volume) {
		pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
	} else {
		limit = ldlm_pool_get_limit(pl);
		pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
	}

	/*
	 * Make sure that pool informed obd of last SLV changes.
	 */
	ldlm_srv_pool_push_slv(pl);
	spin_unlock(&pl->pl_lock);

	/*
	 * We did not really free any memory here so far; it may only be
	 * freed later, so we return 0 to not confuse VM.
	 */
	return 0;
}
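
/*
 * For example (illustrative numbers): with SLV = 1,000,000 and a shrink
 * request of nr = 1000, the new SLV becomes 999,000; if nr were at least as
 * large as the current SLV, SLV would be clamped to
 * ldlm_pool_slv_min(limit) == 1. Either way the function reports 0 freed
 * objects to the VM.
 */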

/**
 * Setup server side pool \a pl with passed \a limit.
 */
static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
{
	struct obd_device *obd;

	obd = ldlm_pl2ns(pl)->ns_obd;
	LASSERT(obd != NULL && obd != LP_POISON);
	LASSERT(obd->obd_type != LP_POISON);
	write_lock(&obd->obd_pool_lock);
	obd->obd_pool_limit = limit;
	write_unlock(&obd->obd_pool_lock);

	ldlm_pool_set_limit(pl, limit);
	return 0;
}

/**
 * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd to passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
	struct obd_device *obd;

	/*
	 * Get new SLV and Limit from obd which is updated with coming
	 * RPCs.
	 */
	obd = ldlm_pl2ns(pl)->ns_obd;
	LASSERT(obd != NULL);
	read_lock(&obd->obd_pool_lock);
	pl->pl_server_lock_volume = obd->obd_pool_slv;
	ldlm_pool_set_limit(pl, obd->obd_pool_limit);
	read_unlock(&obd->obd_pool_lock);
}

/**
 * Recalculates client side pool \a pl according to current SLV and Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
	time_t recalc_interval_sec;

	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period)
		return 0;

	spin_lock(&pl->pl_lock);
	/*
	 * Check if we need to recalc lists now.
	 */
	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec < pl->pl_recalc_period) {
		spin_unlock(&pl->pl_lock);
		return 0;
	}

	/*
	 * Make sure that pool knows last SLV and Limit from obd.
	 */
	ldlm_cli_pool_pop_slv(pl);

	pl->pl_recalc_time = get_seconds();
	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			    recalc_interval_sec);
	spin_unlock(&pl->pl_lock);

	/*
	 * Do not cancel locks in case lru resize is disabled for this ns.
	 */
	if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
		return 0;

	/*
	 * While canceling locks on the client we do not need to maintain
	 * sharp timing, we only want to cancel locks asap according to the
	 * new SLV. This may be called when SLV has changed a lot, which is
	 * why we do not take pl->pl_recalc_time into account here.
	 */
	return ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
}

/**
 * This function is the main entry point for memory pressure handling on the
 * client side. Its main goal is to cancel some number of locks on the
 * passed \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
				int nr, gfp_t gfp_mask)
{
	struct ldlm_namespace *ns;
	int unused;

	ns = ldlm_pl2ns(pl);

	/*
	 * Do not cancel locks in case lru resize is disabled for this ns.
	 */
	if (!ns_connect_lru_resize(ns))
		return 0;

	/*
	 * Make sure that pool knows last SLV and Limit from obd.
	 */
	ldlm_cli_pool_pop_slv(pl);

	spin_lock(&ns->ns_lock);
	unused = ns->ns_nr_unused;
	spin_unlock(&ns->ns_lock);

	if (nr == 0)
		return (unused / 100) * sysctl_vfs_cache_pressure;
	else
		return ldlm_cancel_lru(ns, nr, LCF_ASYNC, LDLM_CANCEL_SHRINK);
}
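
/*
 * Counting sketch (assumed values): with 5000 unused locks in the LRU and
 * the default sysctl_vfs_cache_pressure of 100, a count request (nr == 0)
 * reports (5000 / 100) * 100 = 5000 shrinkable objects; raising the sysctl
 * above 100 makes the cache look proportionally bigger to the VM.
 */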

static const struct ldlm_pool_ops ldlm_srv_pool_ops = {
	.po_recalc = ldlm_srv_pool_recalc,
	.po_shrink = ldlm_srv_pool_shrink,
	.po_setup  = ldlm_srv_pool_setup
};

static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
	.po_recalc = ldlm_cli_pool_recalc,
	.po_shrink = ldlm_cli_pool_shrink
};

/**
 * Pool recalc wrapper. Will call either client or server pool recalc
 * callback depending on what kind of pool \a pl is.
 */
int ldlm_pool_recalc(struct ldlm_pool *pl)
{
	time_t recalc_interval_sec;
	int count;

	recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
	if (recalc_interval_sec <= 0)
		goto recalc;

	spin_lock(&pl->pl_lock);
	if (recalc_interval_sec > 0) {
		/*
		 * Update pool statistics every 1s.
		 */
		ldlm_pool_recalc_stats(pl);

		/*
		 * Zero out all rates and speed for the last period.
		 */
		atomic_set(&pl->pl_grant_rate, 0);
		atomic_set(&pl->pl_cancel_rate, 0);
	}
	spin_unlock(&pl->pl_lock);

recalc:
	if (pl->pl_ops->po_recalc != NULL) {
		count = pl->pl_ops->po_recalc(pl);
		lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
				    count);
	}
	recalc_interval_sec = pl->pl_recalc_time - get_seconds() +
			      pl->pl_recalc_period;

	return recalc_interval_sec;
}
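
/*
 * Return-value sketch (hypothetical timing): with pl_recalc_period = 10s
 * and a recalc that last ran 3s ago, the function returns 10 - 3 = 7, i.e.
 * the number of seconds the pools thread may sleep before the next recalc
 * is due.
 */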

/*
 * Pool shrink wrapper. Will call either client or server pool shrink
 * callback depending on what kind of pool pl is. When nr == 0, just return
 * the number of freeable locks. Otherwise, return the number of canceled
 * locks.
 */
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
		     gfp_t gfp_mask)
{
	int cancel = 0;

	if (pl->pl_ops->po_shrink != NULL) {
		cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
		if (nr > 0) {
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_REQTD_STAT,
					    nr);
			lprocfs_counter_add(pl->pl_stats,
					    LDLM_POOL_SHRINK_FREED_STAT,
					    cancel);
			CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, shrunk %d\n",
			       pl->pl_name, nr, cancel);
		}
	}
	return cancel;
}
EXPORT_SYMBOL(ldlm_pool_shrink);

/**
 * Pool setup wrapper. Will call either client or server pool setup callback
 * depending on what kind of pool \a pl is.
 *
 * Sets passed \a limit into pool \a pl.
 */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
	if (pl->pl_ops->po_setup != NULL)
		return pl->pl_ops->po_setup(pl, limit);
	return 0;
}
EXPORT_SYMBOL(ldlm_pool_setup);

#if defined(CONFIG_PROC_FS)
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
	int granted, grant_rate, cancel_rate, grant_step;
	int grant_speed, grant_plan, lvf;
	struct ldlm_pool *pl = m->private;
	__u64 slv, clv;
	__u32 limit;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	clv = pl->pl_client_lock_volume;
	limit = ldlm_pool_get_limit(pl);
	grant_plan = pl->pl_grant_plan;
	granted = atomic_read(&pl->pl_granted);
	grant_rate = atomic_read(&pl->pl_grant_rate);
	cancel_rate = atomic_read(&pl->pl_cancel_rate);
	grant_speed = grant_rate - cancel_rate;
	lvf = atomic_read(&pl->pl_lock_volume_factor);
	grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
	spin_unlock(&pl->pl_lock);

	seq_printf(m, "LDLM pool state (%s):\n"
		      "  SLV: %llu\n"
		      "  CLV: %llu\n"
		      "  LVF: %d\n",
		   pl->pl_name, slv, clv, lvf);

	if (ns_is_server(ldlm_pl2ns(pl))) {
		seq_printf(m, "  GSP: %d%%\n"
			      "  GP:  %d\n",
			   grant_step, grant_plan);
	}
	seq_printf(m, "  GR:  %d\n" "  CR:  %d\n" "  GS:  %d\n"
		      "  G:   %d\n" "  L:   %d\n",
		   grant_rate, cancel_rate, grant_speed,
		   granted, limit);

	return 0;
}
LPROC_SEQ_FOPS_RO(lprocfs_pool_state);

static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
{
	struct ldlm_pool *pl = m->private;
	int grant_speed;

	spin_lock(&pl->pl_lock);
	/* serialize with ldlm_pool_recalc */
	grant_speed = atomic_read(&pl->pl_grant_rate) -
		      atomic_read(&pl->pl_cancel_rate);
	spin_unlock(&pl->pl_lock);
	return lprocfs_rd_uint(m, &grant_speed);
}

LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int);
LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);

LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
					       const char *buf,
					       size_t len, loff_t *off)
{
	struct seq_file *seq = file->private_data;

	return lprocfs_wr_recalc_period(file, buf, len, seq->private);
}
LPROC_SEQ_FOPS(lprocfs_recalc_period);

LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64);
LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic);
LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic);

LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);

#define LDLM_POOL_ADD_VAR(name, var, ops)				\
	do {								\
		snprintf(var_name, MAX_STRING_SIZE, #name);		\
		pool_vars[0].data = var;				\
		pool_vars[0].fops = ops;				\
		lprocfs_add_vars(pl->pl_proc_dir, pool_vars, NULL);	\
	} while (0)

static int ldlm_pool_proc_init(struct ldlm_pool *pl)
{
	struct ldlm_namespace *ns = ldlm_pl2ns(pl);
	struct proc_dir_entry *parent_ns_proc;
	struct lprocfs_vars pool_vars[2];
	char *var_name = NULL;
	int rc = 0;

	OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
	if (!var_name)
		return -ENOMEM;

	parent_ns_proc = ns->ns_proc_dir_entry;
	if (parent_ns_proc == NULL) {
		CERROR("%s: proc entry is not initialized\n",
		       ldlm_ns_name(ns));
		GOTO(out_free_name, rc = -EINVAL);
	}
	pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
					   NULL, NULL);
	if (IS_ERR(pl->pl_proc_dir)) {
		CERROR("LProcFS failed in ldlm-pool-init\n");
		rc = PTR_ERR(pl->pl_proc_dir);
		pl->pl_proc_dir = NULL;
		GOTO(out_free_name, rc);
	}

	var_name[MAX_STRING_SIZE] = '\0';
	memset(pool_vars, 0, sizeof(pool_vars));
	pool_vars[0].name = var_name;

	LDLM_POOL_ADD_VAR("server_lock_volume", &pl->pl_server_lock_volume,
			  &ldlm_pool_u64_fops);
	LDLM_POOL_ADD_VAR("limit", &pl->pl_limit, &ldlm_pool_rw_atomic_fops);
	LDLM_POOL_ADD_VAR("granted", &pl->pl_granted, &ldlm_pool_atomic_fops);
	LDLM_POOL_ADD_VAR("grant_speed", pl, &lprocfs_grant_speed_fops);
	LDLM_POOL_ADD_VAR("cancel_rate", &pl->pl_cancel_rate,
			  &ldlm_pool_atomic_fops);
	LDLM_POOL_ADD_VAR("grant_rate", &pl->pl_grant_rate,
			  &ldlm_pool_atomic_fops);
	LDLM_POOL_ADD_VAR("grant_plan", pl, &lprocfs_grant_plan_fops);
	LDLM_POOL_ADD_VAR("recalc_period", pl, &lprocfs_recalc_period_fops);
	LDLM_POOL_ADD_VAR("lock_volume_factor", &pl->pl_lock_volume_factor,
			  &ldlm_pool_rw_atomic_fops);
	LDLM_POOL_ADD_VAR("state", pl, &lprocfs_pool_state_fops);

	pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
					   LDLM_POOL_FIRST_STAT, 0);
	if (!pl->pl_stats)
		GOTO(out_free_name, rc = -ENOMEM);

	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "granted", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "cancel", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant_rate", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "cancel_rate", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "grant_plan", "locks/s");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "slv", "slv");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "shrink_request", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "shrink_freed", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "recalc_freed", "locks");
	lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
			     LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
			     "recalc_timing", "sec");
	rc = lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);

out_free_name:
	OBD_FREE(var_name, MAX_STRING_SIZE + 1);
	return rc;
}

static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
{
	if (pl->pl_stats != NULL) {
		lprocfs_free_stats(&pl->pl_stats);
		pl->pl_stats = NULL;
	}
	if (pl->pl_proc_dir != NULL) {
		lprocfs_remove(&pl->pl_proc_dir);
		pl->pl_proc_dir = NULL;
	}
}
#else /* !CONFIG_PROC_FS */
static int ldlm_pool_proc_init(struct ldlm_pool *pl)
{
	return 0;
}

static void ldlm_pool_proc_fini(struct ldlm_pool *pl) {}
#endif /* CONFIG_PROC_FS */

int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
		   int idx, ldlm_side_t client)
{
	int rc;

	spin_lock_init(&pl->pl_lock);
	atomic_set(&pl->pl_granted, 0);
	pl->pl_recalc_time = get_seconds();
	atomic_set(&pl->pl_lock_volume_factor, 1);

	atomic_set(&pl->pl_grant_rate, 0);
	atomic_set(&pl->pl_cancel_rate, 0);
	pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

	snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
		 ldlm_ns_name(ns), idx);

	if (client == LDLM_NAMESPACE_SERVER) {
		pl->pl_ops = &ldlm_srv_pool_ops;
		ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
		pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
		pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
	} else {
		ldlm_pool_set_limit(pl, 1);
		pl->pl_server_lock_volume = 0;
		pl->pl_ops = &ldlm_cli_pool_ops;
		pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
	}
	pl->pl_client_lock_volume = 0;
	rc = ldlm_pool_proc_init(pl);
	if (rc)
		return rc;

	CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

	return rc;
}
EXPORT_SYMBOL(ldlm_pool_init);

void ldlm_pool_fini(struct ldlm_pool *pl)
{
	ldlm_pool_proc_fini(pl);

	/*
	 * Pool should not be used after this point. We can't free it here
	 * as it lives in struct ldlm_namespace, but we are still interested
	 * in catching any abnormal use.
	 */
	POISON(pl, 0x5a, sizeof(*pl));
}
EXPORT_SYMBOL(ldlm_pool_fini);

/**
 * Add new taken ldlm lock \a lock into pool \a pl accounting.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * FLOCK locks are special in a sense that they are almost never
	 * canceled; instead, a special kind of lock is used to drop them.
	 * Also there is no LRU for flock locks, so no point in tracking
	 * them anyway.
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK)
		return;

	atomic_inc(&pl->pl_granted);
	atomic_inc(&pl->pl_grant_rate);
	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);
	/*
	 * Do not do pool recalc for the client side, as all locks which
	 * potentially may be canceled have already been packed into an
	 * enqueue/cancel rpc. Also we do not want to run out of stack
	 * with too long call paths.
	 */
	if (ns_is_server(ldlm_pl2ns(pl)))
		ldlm_pool_recalc(pl);
}
EXPORT_SYMBOL(ldlm_pool_add);

/**
 * Remove ldlm lock \a lock from pool \a pl accounting.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
	/*
	 * Filter out FLOCK locks. Read the comment in ldlm_pool_add().
	 */
	if (lock->l_resource->lr_type == LDLM_FLOCK)
		return;

	LASSERT(atomic_read(&pl->pl_granted) > 0);
	atomic_dec(&pl->pl_granted);
	atomic_inc(&pl->pl_cancel_rate);

	lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);

	if (ns_is_server(ldlm_pl2ns(pl)))
		ldlm_pool_recalc(pl);
}
EXPORT_SYMBOL(ldlm_pool_del);

/**
 * Returns current \a pl SLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
	__u64 slv;

	spin_lock(&pl->pl_lock);
	slv = pl->pl_server_lock_volume;
	spin_unlock(&pl->pl_lock);
	return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);

/**
 * Sets passed \a slv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
	spin_lock(&pl->pl_lock);
	pl->pl_server_lock_volume = slv;
	spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_slv);

/**
 * Returns current \a pl CLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
	__u64 clv;

	spin_lock(&pl->pl_lock);
	clv = pl->pl_client_lock_volume;
	spin_unlock(&pl->pl_lock);
	return clv;
}
EXPORT_SYMBOL(ldlm_pool_get_clv);

/**
 * Sets passed \a clv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
	spin_lock(&pl->pl_lock);
	pl->pl_client_lock_volume = clv;
	spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_clv);

/**
 * Returns current \a pl limit.
 */
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_limit);
}
EXPORT_SYMBOL(ldlm_pool_get_limit);

/**
 * Sets passed \a limit to \a pl.
 */
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
	atomic_set(&pl->pl_limit, limit);
}
EXPORT_SYMBOL(ldlm_pool_set_limit);

/**
 * Returns current LVF from \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_lock_volume_factor);
}
EXPORT_SYMBOL(ldlm_pool_get_lvf);

static int ldlm_pool_granted(struct ldlm_pool *pl)
{
	return atomic_read(&pl->pl_granted);
}

static struct ptlrpc_thread *ldlm_pools_thread;
static struct completion ldlm_pools_comp;

/*
 * Count locks from all namespaces (if possible). Returns the number of
 * cached locks.
 */
static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
	int total = 0, nr_ns;
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL; /* loop detection */
	void *cookie;

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return 0;

	CDEBUG(D_DLMTRACE, "Request to count %s locks from all pools\n",
	       client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

	cookie = cl_env_reenter();

	/*
	 * Find out how many resources we may release.
	 */
	for (nr_ns = ldlm_namespace_nr_read(client);
	     nr_ns > 0; nr_ns--) {
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			cl_env_reexit(cookie);
			return 0;
		}
		ns = ldlm_namespace_first_locked(client);

		if (ns == ns_old) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}

		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, client);
			mutex_unlock(ldlm_namespace_lock(client));
			continue;
		}

		if (ns_old == NULL)
			ns_old = ns;

		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));
		total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
		ldlm_namespace_put(ns);
	}

	cl_env_reexit(cookie);
	return total;
}

static unsigned long ldlm_pools_scan(ldlm_side_t client, int nr,
				     gfp_t gfp_mask)
{
	unsigned long freed = 0;
	int tmp, nr_ns;
	struct ldlm_namespace *ns;
	void *cookie;

	if (client == LDLM_NAMESPACE_CLIENT && !(gfp_mask & __GFP_FS))
		return -1;

	cookie = cl_env_reenter();

	/*
	 * Shrink at least ldlm_namespace_nr_read(client) namespaces.
	 */
	for (tmp = nr_ns = ldlm_namespace_nr_read(client);
	     tmp > 0; tmp--) {
		int cancel, nr_locks;

		/*
		 * Do not call shrink under ldlm_namespace_lock(client)
		 */
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}
		ns = ldlm_namespace_first_locked(client);
		ldlm_namespace_get(ns);
		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));

		nr_locks = ldlm_pool_granted(&ns->ns_pool);
		/*
		 * We used to shrink proportionally, but with the new
		 * shrinker API we lost the total number of freeable locks.
		 */
		cancel = 1 + min_t(int, nr_locks, nr / nr_ns);
		freed += ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
		ldlm_namespace_put(ns);
	}
	cl_env_reexit(cookie);
	/*
	 * we only decrease the SLV in server pools shrinker, return
	 * SHRINK_STOP to kernel to avoid needless loop. LU-1128
	 */
	return (client == LDLM_NAMESPACE_SERVER) ? SHRINK_STOP : freed;
}
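
/*
 * Split sketch (hypothetical request): with nr = 128 objects to scan across
 * nr_ns = 4 namespaces, each visited pool is asked to cancel
 * 1 + min(nr_locks, 128 / 4) = up to 33 locks, so the work is spread
 * roughly evenly instead of draining the first namespace on the list.
 */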

static unsigned long ldlm_pools_srv_count(struct shrinker *s,
					  struct shrink_control *sc)
{
	return ldlm_pools_count(LDLM_NAMESPACE_SERVER, sc->gfp_mask);
}

static unsigned long ldlm_pools_srv_scan(struct shrinker *s,
					 struct shrink_control *sc)
{
	return ldlm_pools_scan(LDLM_NAMESPACE_SERVER, sc->nr_to_scan,
			       sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_count(struct shrinker *s,
					  struct shrink_control *sc)
{
	return ldlm_pools_count(LDLM_NAMESPACE_CLIENT, sc->gfp_mask);
}

static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
					 struct shrink_control *sc)
{
	return ldlm_pools_scan(LDLM_NAMESPACE_CLIENT, sc->nr_to_scan,
			       sc->gfp_mask);
}

int ldlm_pools_recalc(ldlm_side_t client)
{
	__u32 nr_l = 0, nr_p = 0, l;
	struct ldlm_namespace *ns;
	struct ldlm_namespace *ns_old = NULL;
	int nr, equal = 0;
	int time = 50; /* seconds of sleep if no active namespaces */

	/*
	 * No need to setup pool limit for client pools.
	 */
	if (client == LDLM_NAMESPACE_SERVER) {
		/*
		 * Check all modest namespaces first.
		 */
		mutex_lock(ldlm_namespace_lock(client));
		list_for_each_entry(ns, ldlm_namespace_list(client),
				    ns_list_chain) {
			if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
				continue;

			l = ldlm_pool_granted(&ns->ns_pool);
			if (l == 0)
				l = 1;

			/*
			 * Set the modest pools limit equal to their avg
			 * granted locks + ~6%.
			 */
			l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
			ldlm_pool_setup(&ns->ns_pool, l);
			nr_l += l;
			nr_p++;
		}

		/*
		 * Make sure that modest namespaces did not eat more than
		 * 2/3 of limit.
		 */
		if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
			CWARN("\"Modest\" pools eat out 2/3 of server locks limit (%d of %lu). This means that you have too many clients for this amount of server RAM. Upgrade server!\n",
			      nr_l, LDLM_POOL_HOST_L);
			equal = 1;
		}

		/*
		 * The rest is given to greedy namespaces.
		 */
		list_for_each_entry(ns, ldlm_namespace_list(client),
				    ns_list_chain) {
			if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
				continue;

			if (equal) {
				/*
				 * In the case 2/3 of locks are eaten out by
				 * modest pools, we re-setup an equal limit
				 * for _all_ pools.
				 */
				l = LDLM_POOL_HOST_L /
					ldlm_namespace_nr_read(client);
			} else {
				/*
				 * All the rest of greedy pools will have
				 * all locks in equal parts.
				 */
				l = (LDLM_POOL_HOST_L - nr_l) /
					(ldlm_namespace_nr_read(client) -
					 nr_p);
			}
			ldlm_pool_setup(&ns->ns_pool, l);
		}
		mutex_unlock(ldlm_namespace_lock(client));
	}

	/*
	 * Recalc at least ldlm_namespace_nr_read(client) namespaces.
	 */
	for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
		int skip;
		/*
		 * Lock the list, get the first @ns in the list, getref, move
		 * it to the tail, unlock and call pool recalc. This way we
		 * avoid calling recalc under the @ns lock, which is really
		 * good as we get rid of a potential deadlock on client nodes
		 * when canceling locks synchronously.
		 */
		mutex_lock(ldlm_namespace_lock(client));
		if (list_empty(ldlm_namespace_list(client))) {
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}
		ns = ldlm_namespace_first_locked(client);

		if (ns_old == ns) { /* Full pass complete */
			mutex_unlock(ldlm_namespace_lock(client));
			break;
		}

		/* We got an empty namespace, need to move it back to the
		 * inactive list.
		 * The race with parallel resource creation is fine:
		 * - If they do namespace_get before our check, we fail the
		 *   check and they move this item to the end of the list
		 *   anyway
		 * - If we do the check and then they do namespace_get, then
		 *   we move the namespace to inactive and they will move
		 *   it back to active (synchronised by the lock, so no clash
		 *   there).
		 */
		if (ldlm_ns_empty(ns)) {
			ldlm_namespace_move_to_inactive_locked(ns, client);
			mutex_unlock(ldlm_namespace_lock(client));
			continue;
		}

		if (ns_old == NULL)
			ns_old = ns;

		spin_lock(&ns->ns_lock);
		/*
		 * skip ns which is being freed, and we don't want to
		 * increase its refcount again, not even temporarily.
		 * bz21519 & LU-499.
		 */
		if (ns->ns_stopping) {
			skip = 1;
		} else {
			skip = 0;
			ldlm_namespace_get(ns);
		}
		spin_unlock(&ns->ns_lock);

		ldlm_namespace_move_to_active_locked(ns, client);
		mutex_unlock(ldlm_namespace_lock(client));

		/*
		 * After setup is done - recalc the pool.
		 */
		if (!skip) {
			int ttime = ldlm_pool_recalc(&ns->ns_pool);

			if (ttime < time)
				time = ttime;

			ldlm_namespace_put(ns);
		}
	}
	return time;
}
EXPORT_SYMBOL(ldlm_pools_recalc);

static int ldlm_pools_thread_main(void *arg)
{
	struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
	int s_time, c_time;

	thread_set_flags(thread, SVC_RUNNING);
	wake_up(&thread->t_ctl_waitq);

	CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
	       "ldlm_poold", current_pid());

	while (1) {
		struct l_wait_info lwi;

		/*
		 * Recalc all pools on this tick.
		 */
		s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
		c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);

		/*
		 * Wait until the next check time, or until we're
		 * stopped.
		 */
		lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)),
				  NULL, NULL);
		l_wait_event(thread->t_ctl_waitq,
			     thread_is_stopping(thread) ||
			     thread_is_event(thread),
			     &lwi);

		if (thread_test_and_clear_flags(thread, SVC_STOPPING))
			break;
		else
			thread_test_and_clear_flags(thread, SVC_EVENT);
	}

	thread_set_flags(thread, SVC_STOPPED);
	wake_up(&thread->t_ctl_waitq);

	CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
	       "ldlm_poold", current_pid());

	complete_and_exit(&ldlm_pools_comp, 0);
}

static int ldlm_pools_thread_start(void)
{
	struct l_wait_info lwi = { 0 };
	struct task_struct *task;

	if (ldlm_pools_thread != NULL)
		return -EALREADY;

	OBD_ALLOC_PTR(ldlm_pools_thread);
	if (ldlm_pools_thread == NULL)
		return -ENOMEM;

	init_completion(&ldlm_pools_comp);
	init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);

	task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
			   "ldlm_poold");
	if (IS_ERR(task)) {
		CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
		OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
		ldlm_pools_thread = NULL;
		return PTR_ERR(task);
	}
	l_wait_event(ldlm_pools_thread->t_ctl_waitq,
		     thread_is_running(ldlm_pools_thread), &lwi);
	return 0;
}

static void ldlm_pools_thread_stop(void)
{
	if (ldlm_pools_thread == NULL)
		return;

	thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
	wake_up(&ldlm_pools_thread->t_ctl_waitq);

	/*
	 * Make sure that pools thread is finished before freeing @thread.
	 * This fixes possible race and oops due to accessing freed memory
	 * in pools thread.
	 */
	wait_for_completion(&ldlm_pools_comp);
	OBD_FREE_PTR(ldlm_pools_thread);
	ldlm_pools_thread = NULL;
}

static struct shrinker ldlm_pools_srv_shrinker = {
	.count_objects	= ldlm_pools_srv_count,
	.scan_objects	= ldlm_pools_srv_scan,
	.seeks		= DEFAULT_SEEKS,
};

static struct shrinker ldlm_pools_cli_shrinker = {
	.count_objects	= ldlm_pools_cli_count,
	.scan_objects	= ldlm_pools_cli_scan,
	.seeks		= DEFAULT_SEEKS,
};

int ldlm_pools_init(void)
{
	int rc;

	rc = ldlm_pools_thread_start();
	if (rc == 0) {
		register_shrinker(&ldlm_pools_srv_shrinker);
		register_shrinker(&ldlm_pools_cli_shrinker);
	}
	return rc;
}
EXPORT_SYMBOL(ldlm_pools_init);

void ldlm_pools_fini(void)
{
	unregister_shrinker(&ldlm_pools_srv_shrinker);
	unregister_shrinker(&ldlm_pools_cli_shrinker);
	ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);