/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_pool.c
 *
 * Author: Yury Umanets <umka@clusterfs.com>
 */
/*
 * Idea of this code is rather simple. Each second, for each server namespace
 * we have SLV - server lock volume which is calculated on the current number
 * of granted locks, grant speed for the past period, etc - that is, locking
 * load. This SLV number may be thought of as a flow definition for
 * simplicity. It is sent to clients with each occasion to let them know what
 * the current load situation on the server is. By default, at the beginning,
 * the SLV on the server is set to a max value, calculated as follows: allow
 * one client to have all locks of limit ->pl_limit for 10h.
 *
 * Next, on clients, the number of cached locks is not limited artificially
 * in any way as it was before. Instead, the client calculates CLV, that is,
 * client lock volume, for each lock and compares it with the last SLV from
 * the server. CLV is calculated as the number of locks in LRU * lock live
 * time in seconds. If CLV > SLV, the lock is canceled.
 *
 * The client has LVF, that is, lock volume factor, which regulates how
 * sensitive the client should be about the last SLV from the server. The
 * higher LVF is, the more locks will be canceled on the client. The default
 * value is 1. Setting LVF to 2 means that the client will cancel locks
 * twice as fast.
 *
 * Locks on a client will be canceled more intensively in these cases:
 * (1) if SLV is smaller, that is, load is higher on the server;
 * (2) client has a lot of locks (the more locks are held by client, the bigger
 *     chances that some of them should be canceled);
 * (3) client has old locks (taken some time ago);
 *
 * Thus, according to the flow paradigm that we use for better understanding
 * of SLV, CLV is the volume of a particle in the flow described by SLV.
 * According to this, if the flow is getting thinner, more and more particles
 * become outside of it, and as particles are locks, they should be canceled.
 * See the worked example after this comment block.
 *
 * The general idea of this belongs to Vitaly Fertman (vitaly@clusterfs.com).
 * Andreas Dilger (adilger@clusterfs.com) proposed a few nice ideas like using
 * LVF and many cleanups. The flow definition to allow easier understanding of
 * the logic belongs to Nikita Danilov (nikita@clusterfs.com) as well as many
 * cleanups and fixes. And design and implementation are done by Yury Umanets
 * (umka@clusterfs.com).
 *
 * Glossary for terms used:
 *
 * pl_limit - Number of allowed locks in pool. Applies to server and client
 * side.
 *
 * pl_granted - Number of granted locks (calculated);
 * pl_grant_rate - Number of granted locks for last T (calculated);
 * pl_cancel_rate - Number of canceled locks for last T (calculated);
 * pl_grant_speed - Grant speed (GR - CR) for last T (calculated);
 * pl_grant_plan - Planned number of granted locks for next T (calculated);
 * pl_server_lock_volume - Current server lock volume (calculated);
 *
 * As may be seen from the list above, we have a few possible tunables which
 * may affect behavior much. They all may be modified via proc. However, they
 * also give a possibility for constructing a few pre-defined behavior
 * policies. If none of the predefines is suitable for the working pattern
 * being used, a new one may be "constructed" via proc tunables.
 */
#define DEBUG_SUBSYSTEM S_LDLM

#include <lustre_dlm.h>
#include <cl_object.h>
#include <obd_class.h>
#include <obd_support.h>
#include "ldlm_internal.h"
/*
 * 50 ldlm locks for 1MB of RAM.
 */
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)

/*
 * Maximal possible grant step plan in %.
 */
#define LDLM_POOL_MAX_GSP (30)

/*
 * Minimal possible grant step plan in %.
 */
#define LDLM_POOL_MIN_GSP (1)

/*
 * This controls the speed of reaching LDLM_POOL_MAX_GSP
 * with increasing thread period.
 */
#define LDLM_POOL_GSP_STEP_SHIFT (2)

/*
 * LDLM_POOL_GSP% of all locks is default GP.
 */
#define LDLM_POOL_GP(L)   (((L) * LDLM_POOL_MAX_GSP) / 100)

/*
 * Max age for locks on clients.
 */
#define LDLM_POOL_MAX_AGE (36000)

/*
 * The granularity of SLV calculation.
 */
#define LDLM_POOL_SLV_SHIFT (10)
extern proc_dir_entry_t *ldlm_ns_proc_dir;
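/*
 * dru() below is a division by a power of two with optional round-up; it is
 * used for the SLV and limit arithmetic throughout this file. For example,
 * dru(1000, 10, 1) computes (1000 + 1023) >> 10 = 1, while dru(1000, 10, 0)
 * truncates: 1000 >> 10 = 0.
 */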
static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
        return (val + (round_up ? (1 << shift) - 1 : 0)) >> shift;
}
static inline __u64 ldlm_pool_slv_max(__u32 L)
{
        /*
         * Allow to have all locks for 1 client for 10 hrs.
         * Formula is the following: limit * 10h / 1 client.
         */
        __u64 lim = (__u64)L * LDLM_POOL_MAX_AGE / 1;
        return lim;
}
static inline __u64 ldlm_pool_slv_min(__u32 L)
{
        return 1;
}
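/*
 * For illustration (numbers invented): with limit L = 10000,
 * ldlm_pool_slv_max() returns 10000 * 36000 = 360,000,000 lock-seconds of
 * volume, enough for one client to keep every lock alive for 10 hours,
 * while ldlm_pool_slv_min() pins the floor at 1 so SLV never collapses
 * to zero.
 */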
enum {
        LDLM_POOL_FIRST_STAT = 0,
        LDLM_POOL_GRANTED_STAT = LDLM_POOL_FIRST_STAT,
        LDLM_POOL_GRANT_STAT,
        LDLM_POOL_CANCEL_STAT,
        LDLM_POOL_GRANT_RATE_STAT,
        LDLM_POOL_CANCEL_RATE_STAT,
        LDLM_POOL_GRANT_PLAN_STAT,
        LDLM_POOL_SLV_STAT,
        LDLM_POOL_SHRINK_REQTD_STAT,
        LDLM_POOL_SHRINK_FREED_STAT,
        LDLM_POOL_RECALC_STAT,
        LDLM_POOL_TIMING_STAT,
        LDLM_POOL_LAST_STAT
};
static inline struct ldlm_namespace *ldlm_pl2ns(struct ldlm_pool *pl)
{
        return container_of(pl, struct ldlm_namespace, ns_pool);
}
/**
 * Calculates suggested grant_step in % of available locks for passed
 * \a period. This is later used in grant_plan calculations.
 */
static inline int ldlm_pool_t2gsp(unsigned int t)
{
        /*
         * This yields 1% grant step for anything below LDLM_POOL_GSP_STEP
         * and up to 30% for anything higher than LDLM_POOL_GSP_STEP.
         *
         * How this will affect execution is the following:
         *
         * - for thread period 1s we will have grant_step 1%, which is good
         *   from the pov of taking some load off the server and pushing it
         *   out to clients. This is so because a 1% grant_step means that
         *   the server will not allow clients to get lots of locks in a
         *   short period of time and keep all old locks in their caches.
         *   Clients will always have to get some locks back if they want
         *   to take some new ones;
         *
         * - for thread period 10s (which is default) we will have 23%, which
         *   means that clients will have enough room to take some new locks
         *   without getting some back. All locks from this 23% which were
         *   not taken by clients in the current period will contribute to
         *   SLV growing. SLV growing means more locks cached on clients
         *   until the limit or grant plan is reached.
         */
        return LDLM_POOL_MAX_GSP -
               ((LDLM_POOL_MAX_GSP - LDLM_POOL_MIN_GSP) >>
                (t >> LDLM_POOL_GSP_STEP_SHIFT));
}
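/*
 * For illustration, evaluating the formula above: t = 1s gives
 * 30 - (29 >> 0) = 1%, t = 10s gives 30 - (29 >> 2) = 23%, and any
 * t >= 20s gives 30 - (29 >> 5) = 30%, i.e. the shift saturates at
 * LDLM_POOL_MAX_GSP; this matches the 1% and 23% figures quoted in the
 * comment above.
 */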
/**
 * Recalculates next grant limit on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_grant_plan(struct ldlm_pool *pl)
{
        int granted, grant_step, limit;

        limit = ldlm_pool_get_limit(pl);
        granted = atomic_read(&pl->pl_granted);

        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
        grant_step = ((limit - granted) * grant_step) / 100;
        pl->pl_grant_plan = granted + grant_step;
        limit = (limit * 5) >> 2;
        if (pl->pl_grant_plan > limit)
                pl->pl_grant_plan = limit;
}
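/*
 * Illustrative arithmetic for the plan above (numbers invented): with
 * limit = 1000, granted = 600 and a 10s recalc period (grant_step = 23%),
 * the plan becomes 600 + (1000 - 600) * 23 / 100 = 692 locks for the next
 * period, well under the (limit * 5) >> 2 = 1250 cap.
 */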
/**
 * Recalculates next SLV on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_slv(struct ldlm_pool *pl)
{
        int granted;
        int grant_plan;
        int round_up;
        __u64 slv;
        __u64 slv_factor;
        __u64 grant_usage;
        __u32 limit;

        slv = pl->pl_server_lock_volume;
        grant_plan = pl->pl_grant_plan;
        limit = ldlm_pool_get_limit(pl);
        granted = atomic_read(&pl->pl_granted);
        round_up = granted < limit;

        grant_usage = max_t(int, limit - (granted - grant_plan), 1);

        /*
         * Find out SLV change factor which is the ratio of grant usage
         * from limit. SLV changes as fast as the ratio of grant plan
         * consumption. The more locks from grant plan are not consumed
         * by clients in last interval (idle time), the faster grows
         * SLV. And the opposite, the more grant plan is over-consumed
         * (load time) the faster drops SLV.
         */
        slv_factor = (grant_usage << LDLM_POOL_SLV_SHIFT);
        do_div(slv_factor, limit);
        slv = slv * slv_factor;
        slv = dru(slv, LDLM_POOL_SLV_SHIFT, round_up);

        if (slv > ldlm_pool_slv_max(limit)) {
                slv = ldlm_pool_slv_max(limit);
        } else if (slv < ldlm_pool_slv_min(limit)) {
                slv = ldlm_pool_slv_min(limit);
        }

        pl->pl_server_lock_volume = slv;
}
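/*
 * For illustration (made-up numbers): limit = 1000, granted = 1200 and
 * grant_plan = 1100 give grant_usage = max(1000 - (1200 - 1100), 1) = 900,
 * slv_factor = (900 << 10) / 1000 = 921, and the new SLV is roughly
 * slv * 921 >> 10, about 90% of the old one; the pool is over-consumed,
 * so SLV drops and clients start canceling.
 */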
/**
 * Recalculates next stats on passed \a pl.
 *
 * \pre ->pl_lock is locked.
 */
static void ldlm_pool_recalc_stats(struct ldlm_pool *pl)
{
        int grant_plan = pl->pl_grant_plan;
        __u64 slv = pl->pl_server_lock_volume;
        int granted = atomic_read(&pl->pl_granted);
        int grant_rate = atomic_read(&pl->pl_grant_rate);
        int cancel_rate = atomic_read(&pl->pl_cancel_rate);

        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_SLV_STAT,
                            slv);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                            granted);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                            grant_rate);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                            grant_plan);
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                            cancel_rate);
}
/**
 * Sets current SLV into obd accessible via ldlm_pl2ns(pl)->ns_obd.
 */
static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
{
        struct obd_device *obd;

        /*
         * Set new SLV in obd field for using it later without accessing the
         * pool. This is required to avoid race between sending reply to client
         * with new SLV and cleanup server stack in which we can't guarantee
         * that namespace is still alive. We know only that obd is alive as
         * long as valid export is alive.
         */
        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL);
        write_lock(&obd->obd_pool_lock);
        obd->obd_pool_slv = pl->pl_server_lock_volume;
        write_unlock(&obd->obd_pool_lock);
}
/**
 * Recalculates all pool fields on passed \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;

        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;

        spin_lock(&pl->pl_lock);
        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
        }

        /*
         * Recalc SLV after last period. This should be done
         * _before_ recalculating new grant plan.
         */
        ldlm_pool_recalc_slv(pl);

        /*
         * Make sure that pool informed obd of last SLV changes.
         */
        ldlm_srv_pool_push_slv(pl);

        /*
         * Update grant_plan for new period.
         */
        ldlm_pool_recalc_grant_plan(pl);

        pl->pl_recalc_time = cfs_time_current_sec();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
        return 0;
}
/**
 * This function is used on server side as main entry point for memory
 * pressure handling. It decreases SLV on \a pl according to passed
 * \a nr and \a gfp_mask.
 *
 * Our goal here is to decrease SLV in such a way that clients hold \a nr
 * locks fewer in the next 10h.
 */
static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
                                int nr, unsigned int gfp_mask)
{
        __u32 limit;

        /*
         * VM is asking how many entries may be potentially freed.
         */
        if (nr == 0)
                return atomic_read(&pl->pl_granted);

        /*
         * Client already canceled locks but server is already in shrinker
         * and can't cancel anything. Let's catch this race.
         */
        if (atomic_read(&pl->pl_granted) == 0)
                return 0;

        spin_lock(&pl->pl_lock);

        /*
         * We want shrinker to possibly cause cancellation of @nr locks from
         * clients or grant approximately @nr locks smaller next intervals.
         *
         * This is why we decreased SLV by @nr. This effect will only be as
         * long as one re-calc interval (1s these days) and this should be
         * enough to pass this decreased SLV to all clients. On next recalc
         * interval pool will either increase SLV if locks load is not high
         * or will keep on same level or even decrease again, thus, shrinker
         * decreased SLV will affect next recalc intervals and this way will
         * make locking load lower.
         */
        if (nr < pl->pl_server_lock_volume) {
                pl->pl_server_lock_volume = pl->pl_server_lock_volume - nr;
        } else {
                limit = ldlm_pool_get_limit(pl);
                pl->pl_server_lock_volume = ldlm_pool_slv_min(limit);
        }

        /*
         * Make sure that pool informed obd of last SLV changes.
         */
        ldlm_srv_pool_push_slv(pl);
        spin_unlock(&pl->pl_lock);

        /*
         * We did not really free any memory here so far; it may only be
         * freed later, so return 0 to not confuse VM.
         */
        return 0;
}
/**
 * Setup server side pool \a pl with passed \a limit.
 */
static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
{
        struct obd_device *obd;

        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL && obd != LP_POISON);
        LASSERT(obd->obd_type != LP_POISON);
        write_lock(&obd->obd_pool_lock);
        obd->obd_pool_limit = limit;
        write_unlock(&obd->obd_pool_lock);

        ldlm_pool_set_limit(pl, limit);
        return 0;
}
/**
 * Sets SLV and Limit from ldlm_pl2ns(pl)->ns_obd to passed \a pl.
 */
static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
{
        struct obd_device *obd;

        /*
         * Get new SLV and Limit from obd which is updated with coming
         * RPCs.
         */
        obd = ldlm_pl2ns(pl)->ns_obd;
        LASSERT(obd != NULL);
        read_lock(&obd->obd_pool_lock);
        pl->pl_server_lock_volume = obd->obd_pool_slv;
        ldlm_pool_set_limit(pl, obd->obd_pool_limit);
        read_unlock(&obd->obd_pool_lock);
}
/**
 * Recalculates client side pool \a pl according to current SLV and Limit.
 */
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;

        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;

        spin_lock(&pl->pl_lock);
        /*
         * Check if we need to recalc lists now.
         */
        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
        }

        /*
         * Make sure that pool knows last SLV and Limit from obd.
         */
        ldlm_cli_pool_pop_slv(pl);

        pl->pl_recalc_time = cfs_time_current_sec();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);

        /*
         * Do not cancel locks in case lru resize is disabled for this ns.
         */
        if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
                return 0;

        /*
         * In the time of canceling locks on client we do not need to maintain
         * sharp timing, we only want to cancel locks asap according to new SLV.
         * It may be called when SLV has changed much, this is why we do not
         * take into account pl->pl_recalc_time here.
         */
        return ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
}
/**
 * This function is the main entry point for memory pressure handling on the
 * client side. Its main goal is to cancel some number of locks on the
 * passed \a pl according to \a nr and \a gfp_mask.
 */
static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
                                int nr, unsigned int gfp_mask)
{
        struct ldlm_namespace *ns;
        int canceled = 0, unused;

        ns = ldlm_pl2ns(pl);

        /*
         * Do not cancel locks in case lru resize is disabled for this ns.
         */
        if (!ns_connect_lru_resize(ns))
                return 0;

        /*
         * Make sure that pool knows last SLV and Limit from obd.
         */
        ldlm_cli_pool_pop_slv(pl);

        spin_lock(&ns->ns_lock);
        unused = ns->ns_nr_unused;
        spin_unlock(&ns->ns_lock);

        if (nr) {
                canceled = ldlm_cancel_lru(ns, nr, LCF_ASYNC,
                                           LDLM_CANCEL_SHRINK);
        }
        /*
         * Return the number of potentially reclaimable locks.
         */
        return ((unused - canceled) / 100) * sysctl_vfs_cache_pressure;
}
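/*
 * Note on the return value above: sysctl_vfs_cache_pressure defaults to
 * 100, so (unused - canceled) / 100 * 100 reports roughly one reclaimable
 * object per remaining unused lock; raising the sysctl makes this pool
 * look proportionally more shrinkable to the VM.
 */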
struct ldlm_pool_ops ldlm_srv_pool_ops = {
        .po_recalc = ldlm_srv_pool_recalc,
        .po_shrink = ldlm_srv_pool_shrink,
        .po_setup  = ldlm_srv_pool_setup
};

struct ldlm_pool_ops ldlm_cli_pool_ops = {
        .po_recalc = ldlm_cli_pool_recalc,
        .po_shrink = ldlm_cli_pool_shrink
};
/**
 * Pool recalc wrapper. Will call either client or server pool recalc callback
 * depending on what pool \a pl is used.
 */
int ldlm_pool_recalc(struct ldlm_pool *pl)
{
        time_t recalc_interval_sec;
        int count;

        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
        if (recalc_interval_sec <= 0)
                goto recalc;

        spin_lock(&pl->pl_lock);
        if (recalc_interval_sec > 0) {
                /*
                 * Update pool statistics every 1s.
                 */
                ldlm_pool_recalc_stats(pl);

                /*
                 * Zero out all rates and speed for the last period.
                 */
                atomic_set(&pl->pl_grant_rate, 0);
                atomic_set(&pl->pl_cancel_rate, 0);
        }
        spin_unlock(&pl->pl_lock);

 recalc:
        if (pl->pl_ops->po_recalc != NULL) {
                count = pl->pl_ops->po_recalc(pl);
                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                                    count);
        }
        recalc_interval_sec = pl->pl_recalc_time - cfs_time_current_sec() +
                              pl->pl_recalc_period;

        return recalc_interval_sec;
}
/**
 * Pool shrink wrapper. Will call either client or server pool shrink callback
 * depending on what pool \a pl is used.
 */
int ldlm_pool_shrink(struct ldlm_pool *pl, int nr,
                     unsigned int gfp_mask)
{
        int cancel = 0;

        if (pl->pl_ops->po_shrink != NULL) {
                cancel = pl->pl_ops->po_shrink(pl, nr, gfp_mask);
                if (nr > 0) {
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_REQTD_STAT,
                                            nr);
                        lprocfs_counter_add(pl->pl_stats,
                                            LDLM_POOL_SHRINK_FREED_STAT,
                                            cancel);
                        CDEBUG(D_DLMTRACE, "%s: request to shrink %d locks, "
                               "shrunk %d\n", pl->pl_name, nr, cancel);
                }
        }
        return cancel;
}
EXPORT_SYMBOL(ldlm_pool_shrink);
/**
 * Pool setup wrapper. Will call either client or server pool setup callback
 * depending on what pool \a pl is used.
 *
 * Sets passed \a limit into pool \a pl.
 */
int ldlm_pool_setup(struct ldlm_pool *pl, int limit)
{
        if (pl->pl_ops->po_setup != NULL)
                return(pl->pl_ops->po_setup(pl, limit));
        return 0;
}
EXPORT_SYMBOL(ldlm_pool_setup);
static int lprocfs_pool_state_seq_show(struct seq_file *m, void *unused)
{
        int granted, grant_rate, cancel_rate, grant_step;
        int grant_speed, grant_plan, lvf;
        struct ldlm_pool *pl = m->private;
        __u64 slv, clv;
        __u32 limit;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_server_lock_volume;
        clv = pl->pl_client_lock_volume;
        limit = ldlm_pool_get_limit(pl);
        grant_plan = pl->pl_grant_plan;
        granted = atomic_read(&pl->pl_granted);
        grant_rate = atomic_read(&pl->pl_grant_rate);
        cancel_rate = atomic_read(&pl->pl_cancel_rate);
        grant_speed = grant_rate - cancel_rate;
        lvf = atomic_read(&pl->pl_lock_volume_factor);
        grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
        spin_unlock(&pl->pl_lock);

        seq_printf(m, "LDLM pool state (%s):\n"
                      "  SLV: "LPU64"\n"
                      "  CLV: "LPU64"\n"
                      "  LVF: %d\n",
                   pl->pl_name, slv, clv, lvf);

        if (ns_is_server(ldlm_pl2ns(pl))) {
                seq_printf(m, "  GSP: %d%%\n"
                              "  GP:  %d\n",
                           grant_step, grant_plan);
        }
        seq_printf(m, "  GR:  %d\n" "  CR:  %d\n" "  GS:  %d\n"
                      "  G:   %d\n" "  L:   %d\n",
                   grant_rate, cancel_rate, grant_speed,
                   granted, limit);
        return 0;
}
LPROC_SEQ_FOPS_RO(lprocfs_pool_state);
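/*
 * Sample "state" output as produced by the seq_printf calls above (all
 * values invented for illustration, server-side namespace):
 *
 *   LDLM pool state (ldlm-pool-mdt-MDT0000-0):
 *     SLV: 360000000
 *     CLV: 0
 *     LVF: 1
 *     GSP: 23%
 *     GP:  692
 *     GR:  10
 *     CR:  4
 *     GS:  6
 *     G:   600
 *     L:   1000
 */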
static int lprocfs_grant_speed_seq_show(struct seq_file *m, void *unused)
{
        struct ldlm_pool *pl = m->private;
        int grant_speed;

        spin_lock(&pl->pl_lock);
        /* serialize with ldlm_pool_recalc */
        grant_speed = atomic_read(&pl->pl_grant_rate) -
                      atomic_read(&pl->pl_cancel_rate);
        spin_unlock(&pl->pl_lock);
        return lprocfs_rd_uint(m, &grant_speed);
}

LDLM_POOL_PROC_READER_SEQ_SHOW(grant_plan, int);
LPROC_SEQ_FOPS_RO(lprocfs_grant_plan);
LDLM_POOL_PROC_READER_SEQ_SHOW(recalc_period, int);
LDLM_POOL_PROC_WRITER(recalc_period, int);
static ssize_t lprocfs_recalc_period_seq_write(struct file *file,
                                               const char *buf,
                                               size_t len, loff_t *off)
{
        struct seq_file *seq = file->private_data;

        return lprocfs_wr_recalc_period(file, buf, len, seq->private);
}
LPROC_SEQ_FOPS(lprocfs_recalc_period);

LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, u64);
LPROC_SEQ_FOPS_RO_TYPE(ldlm_pool, atomic);
LPROC_SEQ_FOPS_RW_TYPE(ldlm_pool_rw, atomic);

LPROC_SEQ_FOPS_RO(lprocfs_grant_speed);

#define LDLM_POOL_ADD_VAR(name, var, ops)                       \
        do {                                                    \
                snprintf(var_name, MAX_STRING_SIZE, #name);     \
                pool_vars[0].data = var;                        \
                pool_vars[0].fops = ops;                        \
                lprocfs_add_vars(pl->pl_proc_dir, pool_vars, 0);\
        } while (0)
static int ldlm_pool_proc_init(struct ldlm_pool *pl)
{
        struct ldlm_namespace *ns = ldlm_pl2ns(pl);
        struct proc_dir_entry *parent_ns_proc;
        struct lprocfs_vars pool_vars[2];
        char *var_name = NULL;
        int rc = 0;

        OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
        if (!var_name)
                return -ENOMEM;

        parent_ns_proc = ns->ns_proc_dir_entry;
        if (parent_ns_proc == NULL) {
                CERROR("%s: proc entry is not initialized\n",
                       ldlm_ns_name(ns));
                GOTO(out_free_name, rc = -EINVAL);
        }
        pl->pl_proc_dir = lprocfs_register("pool", parent_ns_proc,
                                           NULL, NULL);
        if (IS_ERR(pl->pl_proc_dir)) {
                CERROR("LProcFS failed in ldlm-pool-init\n");
                rc = PTR_ERR(pl->pl_proc_dir);
                pl->pl_proc_dir = NULL;
                GOTO(out_free_name, rc);
        }

        var_name[MAX_STRING_SIZE] = '\0';
        memset(pool_vars, 0, sizeof(pool_vars));
        pool_vars[0].name = var_name;

        LDLM_POOL_ADD_VAR("server_lock_volume", &pl->pl_server_lock_volume,
                          &ldlm_pool_u64_fops);
        LDLM_POOL_ADD_VAR("limit", &pl->pl_limit, &ldlm_pool_rw_atomic_fops);
        LDLM_POOL_ADD_VAR("granted", &pl->pl_granted, &ldlm_pool_atomic_fops);
        LDLM_POOL_ADD_VAR("grant_speed", pl, &lprocfs_grant_speed_fops);
        LDLM_POOL_ADD_VAR("cancel_rate", &pl->pl_cancel_rate,
                          &ldlm_pool_atomic_fops);
        LDLM_POOL_ADD_VAR("grant_rate", &pl->pl_grant_rate,
                          &ldlm_pool_atomic_fops);
        LDLM_POOL_ADD_VAR("grant_plan", pl, &lprocfs_grant_plan_fops);
        LDLM_POOL_ADD_VAR("recalc_period", pl, &lprocfs_recalc_period_fops);
        LDLM_POOL_ADD_VAR("lock_volume_factor", &pl->pl_lock_volume_factor,
                          &ldlm_pool_rw_atomic_fops);
        LDLM_POOL_ADD_VAR("state", pl, &lprocfs_pool_state_fops);

        pl->pl_stats = lprocfs_alloc_stats(LDLM_POOL_LAST_STAT -
                                           LDLM_POOL_FIRST_STAT, 0);
        if (pl->pl_stats == NULL)
                GOTO(out_free_name, rc = -ENOMEM);

        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANTED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "granted", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_CANCEL_RATE_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "cancel_rate", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_GRANT_PLAN_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "grant_plan", "locks/s");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SLV_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "slv", "slv");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_REQTD_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_request", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_SHRINK_FREED_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "shrink_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_freed", "locks");
        lprocfs_counter_init(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             LPROCFS_CNTR_AVGMINMAX | LPROCFS_CNTR_STDDEV,
                             "recalc_timing", "sec");
        rc = lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);

out_free_name:
        OBD_FREE(var_name, MAX_STRING_SIZE + 1);
        return rc;
}
static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
{
        if (pl->pl_stats != NULL) {
                lprocfs_free_stats(&pl->pl_stats);
                pl->pl_stats = NULL;
        }
        if (pl->pl_proc_dir != NULL) {
                lprocfs_remove(&pl->pl_proc_dir);
                pl->pl_proc_dir = NULL;
        }
}
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                   int idx, ldlm_side_t client)
{
        int rc;

        spin_lock_init(&pl->pl_lock);
        atomic_set(&pl->pl_granted, 0);
        pl->pl_recalc_time = cfs_time_current_sec();
        atomic_set(&pl->pl_lock_volume_factor, 1);

        atomic_set(&pl->pl_grant_rate, 0);
        atomic_set(&pl->pl_cancel_rate, 0);
        pl->pl_grant_plan = LDLM_POOL_GP(LDLM_POOL_HOST_L);

        snprintf(pl->pl_name, sizeof(pl->pl_name), "ldlm-pool-%s-%d",
                 ldlm_ns_name(ns), idx);

        if (client == LDLM_NAMESPACE_SERVER) {
                pl->pl_ops = &ldlm_srv_pool_ops;
                ldlm_pool_set_limit(pl, LDLM_POOL_HOST_L);
                pl->pl_recalc_period = LDLM_POOL_SRV_DEF_RECALC_PERIOD;
                pl->pl_server_lock_volume = ldlm_pool_slv_max(LDLM_POOL_HOST_L);
        } else {
                ldlm_pool_set_limit(pl, 1);
                pl->pl_server_lock_volume = 0;
                pl->pl_ops = &ldlm_cli_pool_ops;
                pl->pl_recalc_period = LDLM_POOL_CLI_DEF_RECALC_PERIOD;
        }
        pl->pl_client_lock_volume = 0;
        rc = ldlm_pool_proc_init(pl);
        if (rc)
                return rc;

        CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);

        return rc;
}
EXPORT_SYMBOL(ldlm_pool_init);
void ldlm_pool_fini(struct ldlm_pool *pl)
{
        ldlm_pool_proc_fini(pl);

        /*
         * Pool should not be used after this point. We can't free it here as
         * it lives in struct ldlm_namespace, but still interested in catching
         * any abnormal using cases.
         */
        POISON(pl, 0x5a, sizeof(*pl));
}
EXPORT_SYMBOL(ldlm_pool_fini);
/**
 * Add new taken ldlm lock \a lock into pool \a pl accounting.
 */
void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /*
         * FLOCK locks are special in a sense that they are almost never
         * cancelled, instead special kind of lock is used to drop them.
         * Also there is no LRU for flock locks, so no point in tracking
         * them anyway.
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK)
                return;

        atomic_inc(&pl->pl_granted);
        atomic_inc(&pl->pl_grant_rate);
        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_GRANT_STAT);

        /*
         * Do not do pool recalc for client side as all locks which
         * potentially may be canceled have already been packed into
         * enqueue/cancel rpc. Also we do not want to run out of stack
         * with too long call paths.
         */
        if (ns_is_server(ldlm_pl2ns(pl)))
                ldlm_pool_recalc(pl);
}
EXPORT_SYMBOL(ldlm_pool_add);
/**
 * Remove ldlm lock \a lock from pool \a pl accounting.
 */
void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
{
        /*
         * Filter out FLOCK locks. Read above comment in ldlm_pool_add().
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK)
                return;

        LASSERT(atomic_read(&pl->pl_granted) > 0);
        atomic_dec(&pl->pl_granted);
        atomic_inc(&pl->pl_cancel_rate);

        lprocfs_counter_incr(pl->pl_stats, LDLM_POOL_CANCEL_STAT);

        if (ns_is_server(ldlm_pl2ns(pl)))
                ldlm_pool_recalc(pl);
}
EXPORT_SYMBOL(ldlm_pool_del);
/**
 * Returns current \a pl SLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
        __u64 slv;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_server_lock_volume;
        spin_unlock(&pl->pl_lock);
        return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);
/**
 * Sets passed \a slv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
        spin_lock(&pl->pl_lock);
        pl->pl_server_lock_volume = slv;
        spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_slv);
/**
 * Returns current \a pl CLV.
 *
 * \pre ->pl_lock is not locked.
 */
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
        __u64 slv;

        spin_lock(&pl->pl_lock);
        slv = pl->pl_client_lock_volume;
        spin_unlock(&pl->pl_lock);
        return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_clv);
/**
 * Sets passed \a clv to \a pl.
 *
 * \pre ->pl_lock is not locked.
 */
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
        spin_lock(&pl->pl_lock);
        pl->pl_client_lock_volume = clv;
        spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_clv);
/**
 * Returns current \a pl limit.
 */
__u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_limit);
}
EXPORT_SYMBOL(ldlm_pool_get_limit);
/**
 * Sets passed \a limit to \a pl.
 */
void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
{
        atomic_set(&pl->pl_limit, limit);
}
EXPORT_SYMBOL(ldlm_pool_set_limit);
/**
 * Returns current LVF from \a pl.
 */
__u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_lock_volume_factor);
}
EXPORT_SYMBOL(ldlm_pool_get_lvf);
static int ldlm_pool_granted(struct ldlm_pool *pl)
{
        return atomic_read(&pl->pl_granted);
}
static struct ptlrpc_thread *ldlm_pools_thread;
static struct shrinker *ldlm_pools_srv_shrinker;
static struct shrinker *ldlm_pools_cli_shrinker;
static struct completion ldlm_pools_comp;
/*
 * Cancel \a nr locks from all namespaces (if possible). Returns number of
 * cached locks after shrink is finished. All namespaces are asked to
 * cancel approximately equal amount of locks to keep balancing.
 */
static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                             unsigned int gfp_mask)
{
        int total = 0, cached = 0, nr_ns;
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL; /* loop detection */
        void *cookie;

        if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
            !(gfp_mask & __GFP_FS))
                return -1;

        CDEBUG(D_DLMTRACE, "Request to shrink %d %s locks from all pools\n",
               nr, client == LDLM_NAMESPACE_CLIENT ? "client" : "server");

        cookie = cl_env_reenter();

        /*
         * Find out how many resources we may release.
         */
        for (nr_ns = ldlm_namespace_nr_read(client);
             nr_ns > 0; nr_ns--) {
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        cl_env_reexit(cookie);
                        return 0;
                }
                ns = ldlm_namespace_first_locked(client);

                if (ns == ns_old) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }

                if (ldlm_ns_empty(ns)) {
                        ldlm_namespace_move_to_inactive_locked(ns, client);
                        mutex_unlock(ldlm_namespace_lock(client));
                        continue;
                }

                if (ns_old == NULL)
                        ns_old = ns;

                ldlm_namespace_get(ns);
                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));
                total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
                ldlm_namespace_put(ns);
        }

        if (nr == 0 || total == 0) {
                cl_env_reexit(cookie);
                return total;
        }

        /*
         * Shrink at least ldlm_namespace_nr(client) namespaces.
         */
        for (nr_ns = ldlm_namespace_nr_read(client) - nr_ns;
             nr_ns > 0; nr_ns--) {
                int cancel, nr_locks;

                /*
                 * Do not call shrink under ldlm_namespace_lock(client)
                 */
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        /*
                         * If list is empty, we can't return any @cached > 0,
                         * that probably would cause needless shrinker
                         * call.
                         */
                        cached = 0;
                        break;
                }
                ns = ldlm_namespace_first_locked(client);
                ldlm_namespace_get(ns);
                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));

                nr_locks = ldlm_pool_granted(&ns->ns_pool);
                cancel = 1 + nr_locks * nr / total;
                ldlm_pool_shrink(&ns->ns_pool, cancel, gfp_mask);
                cached += ldlm_pool_granted(&ns->ns_pool);
                ldlm_namespace_put(ns);
        }
        cl_env_reexit(cookie);
        /* we only decrease the SLV in server pools shrinker, return -1 to
         * kernel to avoid needless loop. LU-1128 */
        return (client == LDLM_NAMESPACE_SERVER) ? -1 : cached;
}
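/*
 * Illustration of the proportional split above (made-up numbers): with
 * nr = 100 locks requested and total = 1000 locks granted across all
 * pools, a namespace holding nr_locks = 300 is asked to cancel
 * 1 + 300 * 100 / 1000 = 31 locks, so bigger pools shed more.
 */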
static int ldlm_pools_srv_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        return ldlm_pools_shrink(LDLM_NAMESPACE_SERVER,
                                 shrink_param(sc, nr_to_scan),
                                 shrink_param(sc, gfp_mask));
}

static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        return ldlm_pools_shrink(LDLM_NAMESPACE_CLIENT,
                                 shrink_param(sc, nr_to_scan),
                                 shrink_param(sc, gfp_mask));
}
int ldlm_pools_recalc(ldlm_side_t client)
{
        __u32 nr_l = 0, nr_p = 0, l;
        struct ldlm_namespace *ns;
        struct ldlm_namespace *ns_old = NULL;
        int nr, equal = 0;
        int time = 50; /* seconds of sleep if no active namespaces */

        /*
         * No need to setup pool limit for client pools.
         */
        if (client == LDLM_NAMESPACE_SERVER) {
                /*
                 * Check all modest namespaces first.
                 */
                mutex_lock(ldlm_namespace_lock(client));
                list_for_each_entry(ns, ldlm_namespace_list(client),
                                    ns_list_chain)
                {
                        if (ns->ns_appetite != LDLM_NAMESPACE_MODEST)
                                continue;

                        l = ldlm_pool_granted(&ns->ns_pool);
                        if (l == 0)
                                l = 1;

                        /*
                         * Set the modest pools limit equal to their avg
                         * granted locks + ~6%.
                         */
                        l += dru(l, LDLM_POOLS_MODEST_MARGIN_SHIFT, 0);
                        ldlm_pool_setup(&ns->ns_pool, l);
                        nr_l += l;
                        nr_p++;
                }

                /*
                 * Make sure that modest namespaces did not eat more than 2/3
                 * of limit.
                 */
                if (nr_l >= 2 * (LDLM_POOL_HOST_L / 3)) {
                        CWARN("\"Modest\" pools eat out 2/3 of server locks "
                              "limit (%d of %lu). This means that you have too "
                              "many clients for this amount of server RAM. "
                              "Upgrade server!\n", nr_l, LDLM_POOL_HOST_L);
                        equal = 1;
                }

                /*
                 * The rest is given to greedy namespaces.
                 */
                list_for_each_entry(ns, ldlm_namespace_list(client),
                                    ns_list_chain)
                {
                        if (!equal && ns->ns_appetite != LDLM_NAMESPACE_GREEDY)
                                continue;

                        if (equal) {
                                /*
                                 * In the case 2/3 locks are eaten out by
                                 * modest pools, we re-setup equal limit
                                 * for _all_ pools.
                                 */
                                l = LDLM_POOL_HOST_L /
                                        ldlm_namespace_nr_read(client);
                        } else {
                                /*
                                 * All the rest of greedy pools will have
                                 * all locks in equal parts.
                                 */
                                l = (LDLM_POOL_HOST_L - nr_l) /
                                        (ldlm_namespace_nr_read(client) -
                                         nr_p);
                        }
                        ldlm_pool_setup(&ns->ns_pool, l);
                }
                mutex_unlock(ldlm_namespace_lock(client));
        }

        /*
         * Recalc at least ldlm_namespace_nr(client) namespaces.
         */
        for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
                int skip;
                /*
                 * Lock the list, get first @ns in the list, getref, move it
                 * to the tail, unlock and call pool recalc. This way we avoid
                 * calling recalc under @ns lock, which is really good as we
                 * get rid of potential deadlock on client nodes when
                 * canceling locks synchronously.
                 */
                mutex_lock(ldlm_namespace_lock(client));
                if (list_empty(ldlm_namespace_list(client))) {
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }
                ns = ldlm_namespace_first_locked(client);

                if (ns_old == ns) { /* Full pass complete */
                        mutex_unlock(ldlm_namespace_lock(client));
                        break;
                }

                /* We got an empty namespace, need to move it back to inactive
                 * list.
                 * The race with parallel resource creation is fine:
                 * - If they do namespace_get before our check, we fail the
                 *   check and they move this item to the end of the list anyway
                 * - If we do the check and then they do namespace_get, then
                 *   we move the namespace to inactive and they will move
                 *   it back to active (synchronised by the lock, so no clash
                 *   there).
                 */
                if (ldlm_ns_empty(ns)) {
                        ldlm_namespace_move_to_inactive_locked(ns, client);
                        mutex_unlock(ldlm_namespace_lock(client));
                        continue;
                }

                if (ns_old == NULL)
                        ns_old = ns;

                spin_lock(&ns->ns_lock);
                /*
                 * skip ns which is being freed, and we don't want to increase
                 * its refcount again, not even temporarily. bz21519 & LU-499.
                 */
                if (ns->ns_stopping) {
                        skip = 1;
                } else {
                        skip = 0;
                        ldlm_namespace_get(ns);
                }
                spin_unlock(&ns->ns_lock);

                ldlm_namespace_move_to_active_locked(ns, client);
                mutex_unlock(ldlm_namespace_lock(client));

                /*
                 * After setup is done - recalc the pool.
                 */
                if (!skip) {
                        int ttime = ldlm_pool_recalc(&ns->ns_pool);

                        if (ttime < time)
                                time = ttime;

                        ldlm_namespace_put(ns);
                }
        }
        return time;
}
EXPORT_SYMBOL(ldlm_pools_recalc);
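/*
 * Worked example for the modest/greedy split above (illustrative numbers,
 * assuming the historical LDLM_POOLS_MODEST_MARGIN_SHIFT of 4, i.e. a
 * margin of l/16 ~ 6%): with LDLM_POOL_HOST_L = 90000 and two modest
 * namespaces averaging 3000 granted locks each, every modest pool gets
 * 3000 + 187 = 3187 and nr_l = 6374; each greedy namespace then gets
 * (90000 - 6374) / (total namespaces - 2) locks. Only if modest pools
 * claimed over 2 * (90000 / 3) = 60000 would the equal split kick in.
 */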
static int ldlm_pools_thread_main(void *arg)
{
        struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;

        thread_set_flags(thread, SVC_RUNNING);
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
               "ldlm_poold", current_pid());

        while (1) {
                struct l_wait_info lwi;
                int s_time, c_time;

                /*
                 * Recalc all pools on this tick.
                 */
                s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
                c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);

                /*
                 * Wait until the next check time, or until we're
                 * stopped.
                 */
                lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)),
                                  NULL, NULL);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopping(thread) ||
                             thread_is_event(thread),
                             &lwi);

                if (thread_test_and_clear_flags(thread, SVC_STOPPING))
                        break;
                else
                        thread_test_and_clear_flags(thread, SVC_EVENT);
        }

        thread_set_flags(thread, SVC_STOPPED);
        wake_up(&thread->t_ctl_waitq);

        CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
               "ldlm_poold", current_pid());

        complete_and_exit(&ldlm_pools_comp, 0);
}
static int ldlm_pools_thread_start(void)
{
        struct l_wait_info lwi = { 0 };
        struct task_struct *task;

        if (ldlm_pools_thread != NULL)
                return -EALREADY;

        OBD_ALLOC_PTR(ldlm_pools_thread);
        if (ldlm_pools_thread == NULL)
                return -ENOMEM;

        init_completion(&ldlm_pools_comp);
        init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);

        task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
                           "ldlm_poold");
        if (IS_ERR(task)) {
                CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
                OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
                ldlm_pools_thread = NULL;
                return PTR_ERR(task);
        }
        l_wait_event(ldlm_pools_thread->t_ctl_waitq,
                     thread_is_running(ldlm_pools_thread), &lwi);
        return 0;
}
static void ldlm_pools_thread_stop(void)
{
        if (ldlm_pools_thread == NULL)
                return;

        thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
        wake_up(&ldlm_pools_thread->t_ctl_waitq);

        /*
         * Make sure that pools thread is finished before freeing @thread.
         * This fixes possible race and oops due to accessing freed memory
         * in pools thread.
         */
        wait_for_completion(&ldlm_pools_comp);
        OBD_FREE_PTR(ldlm_pools_thread);
        ldlm_pools_thread = NULL;
}
int ldlm_pools_init(void)
{
        int rc;

        rc = ldlm_pools_thread_start();
        if (rc == 0) {
                ldlm_pools_srv_shrinker =
                        set_shrinker(DEFAULT_SEEKS,
                                     ldlm_pools_srv_shrink);
                ldlm_pools_cli_shrinker =
                        set_shrinker(DEFAULT_SEEKS,
                                     ldlm_pools_cli_shrink);
        }
        return rc;
}
EXPORT_SYMBOL(ldlm_pools_init);
void ldlm_pools_fini(void)
{
        if (ldlm_pools_srv_shrinker != NULL) {
                remove_shrinker(ldlm_pools_srv_shrinker);
                ldlm_pools_srv_shrinker = NULL;
        }
        if (ldlm_pools_cli_shrinker != NULL) {
                remove_shrinker(ldlm_pools_cli_shrinker);
                ldlm_pools_cli_shrinker = NULL;
        }
        ldlm_pools_thread_stop();
}
EXPORT_SYMBOL(ldlm_pools_fini);