staging: lustre: update Intel copyright messages 2015
drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/ptlrpcd.c
 */

/** \defgroup ptlrpcd PortalRPC daemon
 *
 * ptlrpcd is a special thread with its own request set to which other users
 * may add requests when they do not want to wait for their completion.
 * ptlrpcd takes care of sending such requests and then processing their
 * replies and calling completion callbacks as necessary.
 * The callbacks are called directly from ptlrpcd context.
 * It is important never to block significantly (especially on RPCs!) within
 * such a completion handler, or a deadlock might occur where ptlrpcd enters
 * a callback that attempts to send another RPC and waits for it to return,
 * during which time ptlrpcd is completely blocked; e.g. if an import fails,
 * recovery cannot progress because connection requests are also sent by
 * ptlrpcd.
 *
 * @{
 */

#define DEBUG_SUBSYSTEM S_RPC

#include "../../include/linux/libcfs/libcfs.h"

#include "../include/lustre_net.h"
#include "../include/lustre_lib.h"
#include "../include/lustre_ha.h"
#include "../include/obd_class.h" /* for obd_zombie */
#include "../include/obd_support.h" /* for OBD_FAIL_CHECK */
#include "../include/cl_object.h" /* cl_env_{get,put}() */
#include "../include/lprocfs_status.h"

#include "ptlrpc_internal.h"

/* One of these per CPT. */
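/*
 * pd_threads[] is a flexible array of pd_nthreads per-thread control
 * structures; pd_cursor drives the round-robin selection in
 * ptlrpcd_select_pc(), and pd_groupsize is the partner-group size used
 * by ptlrpcd_partners().
 */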
struct ptlrpcd {
	int pd_size;
	int pd_index;
	int pd_cpt;
	int pd_cursor;
	int pd_nthreads;
	int pd_groupsize;
	struct ptlrpcd_ctl pd_threads[0];
};

/*
 * max_ptlrpcds is obsolete, but retained to ensure that the kernel
 * module will load on a system where it has been tuned.
 * A value other than 0 implies it was tuned, in which case the value
 * is used to derive a setting for ptlrpcd_per_cpt_max.
 */
static int max_ptlrpcds;
module_param(max_ptlrpcds, int, 0644);
MODULE_PARM_DESC(max_ptlrpcds, "Max ptlrpcd thread count to be started.");

/*
 * ptlrpcd_bind_policy is obsolete, but retained to ensure that
 * the kernel module will load on a system where it has been tuned.
 * A value other than 0 implies it was tuned, in which case the value
 * is used to derive a setting for ptlrpcd_partner_group_size.
 */
static int ptlrpcd_bind_policy;
module_param(ptlrpcd_bind_policy, int, 0644);
MODULE_PARM_DESC(ptlrpcd_bind_policy,
		 "Ptlrpcd threads binding mode (obsolete).");

/*
 * ptlrpcd_per_cpt_max: The maximum number of ptlrpcd threads to run
 * in a CPT.
 */
static int ptlrpcd_per_cpt_max;
module_param(ptlrpcd_per_cpt_max, int, 0644);
MODULE_PARM_DESC(ptlrpcd_per_cpt_max,
		 "Max ptlrpcd thread count to be started per cpt.");

/*
 * ptlrpcd_partner_group_size: The desired number of threads in each
 * ptlrpcd partner thread group. Default is 2, corresponding to the
 * old PDB_POLICY_PAIR. A negative value makes all ptlrpcd threads in
 * a CPT partners of each other.
 */
static int ptlrpcd_partner_group_size;
module_param(ptlrpcd_partner_group_size, int, 0644);
MODULE_PARM_DESC(ptlrpcd_partner_group_size,
		 "Number of ptlrpcd threads in a partner group.");

/*
 * ptlrpcd_cpts: A CPT string describing the CPU partitions that
 * ptlrpcd threads should run on. Used to make ptlrpcd threads run on
 * a subset of all CPTs.
 *
 * ptlrpcd_cpts=2
 * ptlrpcd_cpts=[2]
 * run ptlrpcd threads only on CPT 2.
 *
 * ptlrpcd_cpts=0-3
 * ptlrpcd_cpts=[0-3]
 * run ptlrpcd threads on CPTs 0, 1, 2, and 3.
 *
 * ptlrpcd_cpts=[0-3,5,7]
 * run ptlrpcd threads on CPTS 0, 1, 2, 3, 5, and 7.
 */
static char *ptlrpcd_cpts;
module_param(ptlrpcd_cpts, charp, 0644);
MODULE_PARM_DESC(ptlrpcd_cpts,
		 "CPU partitions ptlrpcd threads should run in");

/* ptlrpcds_cpt_idx maps cpt numbers to an index in the ptlrpcds array. */
static int *ptlrpcds_cpt_idx;

/* ptlrpcds_num is the number of entries in the ptlrpcds array. */
static int ptlrpcds_num;
static struct ptlrpcd **ptlrpcds;

/*
 * In addition to the regular thread pool above, there is a single
 * global recovery thread. Recovery isn't critical for performance,
 * and doesn't block, but must always be able to proceed, and it is
 * possible that all normal ptlrpcd threads are blocked. Hence the
 * need for a dedicated thread.
 */
static struct ptlrpcd_ctl ptlrpcd_rcv;

struct mutex ptlrpcd_mutex;
static int ptlrpcd_users;

void ptlrpcd_wake(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *rq_set = req->rq_set;

	LASSERT(rq_set != NULL);

	wake_up(&rq_set->set_waitq);
}
EXPORT_SYMBOL(ptlrpcd_wake);

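/*
 * Pick the ptlrpcd thread that will handle this request: requests whose
 * import is not FULL go to the dedicated recovery thread, everything else
 * is spread round-robin over the threads of the current CPT's pool.
 */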
static struct ptlrpcd_ctl *
ptlrpcd_select_pc(struct ptlrpc_request *req)
{
	struct ptlrpcd *pd;
	int cpt;
	int idx;

	if (req != NULL && req->rq_send_state != LUSTRE_IMP_FULL)
		return &ptlrpcd_rcv;

	cpt = cfs_cpt_current(cfs_cpt_table, 1);
	if (!ptlrpcds_cpt_idx)
		idx = cpt;
	else
		idx = ptlrpcds_cpt_idx[cpt];
	pd = ptlrpcds[idx];

	/* We do not care whether it is strict load balance. */
	idx = pd->pd_cursor;
	if (++idx == pd->pd_nthreads)
		idx = 0;
	pd->pd_cursor = idx;

	return &pd->pd_threads[idx];
}

/**
 * Return transferred RPCs count.
 */
static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
			       struct ptlrpc_request_set *src)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	int rc = 0;

	spin_lock(&src->set_new_req_lock);
	if (likely(!list_empty(&src->set_new_requests))) {
		list_for_each_safe(pos, tmp, &src->set_new_requests) {
			req = list_entry(pos, struct ptlrpc_request,
					 rq_set_chain);
			req->rq_set = des;
		}
		list_splice_init(&src->set_new_requests,
				 &des->set_requests);
		rc = atomic_read(&src->set_new_count);
		atomic_add(rc, &des->set_remaining);
		atomic_set(&src->set_new_count, 0);
	}
	spin_unlock(&src->set_new_req_lock);
	return rc;
}

/**
 * Requests that are added to the ptlrpcd queue are sent via
 * ptlrpcd_check->ptlrpc_check_set().
 */
void ptlrpcd_add_req(struct ptlrpc_request *req)
{
	struct ptlrpcd_ctl *pc;

	if (req->rq_reqmsg)
		lustre_msg_set_jobid(req->rq_reqmsg, NULL);

	spin_lock(&req->rq_lock);
	if (req->rq_invalid_rqset) {
		struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
						     back_to_sleep, NULL);

		req->rq_invalid_rqset = 0;
		spin_unlock(&req->rq_lock);
		l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
	} else if (req->rq_set) {
		/* If we have a valid "rq_set", just reuse it to avoid double
		 * linking. */
		LASSERT(req->rq_phase == RQ_PHASE_NEW);
		LASSERT(req->rq_send_state == LUSTRE_IMP_REPLAY);

		/* ptlrpc_check_set will decrease the count */
		atomic_inc(&req->rq_set->set_remaining);
		spin_unlock(&req->rq_lock);
		wake_up(&req->rq_set->set_waitq);
		return;
	} else {
		spin_unlock(&req->rq_lock);
	}

	pc = ptlrpcd_select_pc(req);

	DEBUG_REQ(D_INFO, req, "add req [%p] to pc [%s:%d]",
		  req, pc->pc_name, pc->pc_index);

	ptlrpc_set_add_new_req(pc, req);
}
EXPORT_SYMBOL(ptlrpcd_add_req);
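/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * the caller typically installs an interpret callback and hands the request
 * off, letting ptlrpcd send it and run the callback on completion:
 *
 *	req->rq_interpret_reply = my_interpret_cb;  (hypothetical callback)
 *	ptlrpcd_add_req(req);
 */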

static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set)
{
	atomic_inc(&set->set_refcount);
}

/**
 * Check if there is more work to do on ptlrpcd set.
 * Returns 1 if yes.
 */
static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
{
	struct list_head *tmp, *pos;
	struct ptlrpc_request *req;
	struct ptlrpc_request_set *set = pc->pc_set;
	int rc = 0;
	int rc2;

	if (atomic_read(&set->set_new_count)) {
		spin_lock(&set->set_new_req_lock);
		if (likely(!list_empty(&set->set_new_requests))) {
			list_splice_init(&set->set_new_requests,
					 &set->set_requests);
			atomic_add(atomic_read(&set->set_new_count),
				   &set->set_remaining);
			atomic_set(&set->set_new_count, 0);
			/*
			 * Need to calculate its timeout.
			 */
			rc = 1;
		}
		spin_unlock(&set->set_new_req_lock);
	}

	/* We should call lu_env_refill() before handling new requests to
	 * make sure that the env key the requests depend on really exists.
	 */
	rc2 = lu_env_refill(env);
	if (rc2 != 0) {
		/*
		 * XXX This is a very awkward situation, because
		 * execution can neither continue (request
		 * interpreters assume that env is set up), nor repeat
		 * the loop (as this potentially results in a tight
		 * loop of -ENOMEM's).
		 *
		 * Fortunately, refill only ever does something when
		 * new modules are loaded, i.e., early during boot up.
		 */
		CERROR("Failure to refill session: %d\n", rc2);
		return rc;
	}

	if (atomic_read(&set->set_remaining))
		rc |= ptlrpc_check_set(env, set);

	/* NB: ptlrpc_check_set() has already moved completed requests to the
	 * head of set::set_requests */
	list_for_each_safe(pos, tmp, &set->set_requests) {
		req = list_entry(pos, struct ptlrpc_request, rq_set_chain);
		if (req->rq_phase != RQ_PHASE_COMPLETE)
			break;

		list_del_init(&req->rq_set_chain);
		req->rq_set = NULL;
		ptlrpc_req_finished(req);
	}

	if (rc == 0) {
		/*
		 * If new requests have been added, make sure to wake up.
		 */
		rc = atomic_read(&set->set_new_count);

		/* If we have nothing to do, check whether we can take some
		 * work from our partner threads. */
		if (rc == 0 && pc->pc_npartners > 0) {
			struct ptlrpcd_ctl *partner;
			struct ptlrpc_request_set *ps;
			int first = pc->pc_cursor;

			do {
				partner = pc->pc_partners[pc->pc_cursor++];
				if (pc->pc_cursor >= pc->pc_npartners)
					pc->pc_cursor = 0;
				if (partner == NULL)
					continue;

				spin_lock(&partner->pc_lock);
				ps = partner->pc_set;
				if (ps == NULL) {
					spin_unlock(&partner->pc_lock);
					continue;
				}

				ptlrpc_reqset_get(ps);
				spin_unlock(&partner->pc_lock);

				if (atomic_read(&ps->set_new_count)) {
					rc = ptlrpcd_steal_rqset(set, ps);
					if (rc > 0)
						CDEBUG(D_RPCTRACE, "transfer %d async RPCs [%d->%d]\n",
						       rc, partner->pc_index,
						       pc->pc_index);
				}
				ptlrpc_reqset_put(ps);
			} while (rc == 0 && pc->pc_cursor != first);
		}
	}

	return rc;
}

/**
 * Main ptlrpcd thread.
 * ptlrpc's code paths like to execute in process context, so we have this
 * thread which spins on a set which contains the rpcs and sends them.
 *
 */
static int ptlrpcd(void *arg)
{
	struct ptlrpcd_ctl *pc = arg;
	struct ptlrpc_request_set *set;
	struct lu_env env = { .le_ses = NULL };
	int rc = 0;
	int exit = 0;

	unshare_fs_struct();
	if (cfs_cpt_bind(cfs_cpt_table, pc->pc_cpt) != 0)
		CWARN("Failed to bind %s on CPT %d\n", pc->pc_name, pc->pc_cpt);

	/*
	 * Allocate the request set after the thread has been bound
	 * above. This is safe because no requests will be queued
	 * until all ptlrpcd threads have confirmed that they have
	 * successfully started.
	 */
	set = ptlrpc_prep_set();
	if (!set) {
		rc = -ENOMEM;
		goto failed;
	}
	spin_lock(&pc->pc_lock);
	pc->pc_set = set;
	spin_unlock(&pc->pc_lock);
	/*
	 * XXX So far only "client" ptlrpcd uses an environment. In
	 * the future, the ptlrpcd thread (or a thread-set) has to be given
	 * an argument, describing its "scope".
	 */
	rc = lu_context_init(&env.le_ctx,
			     LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
	if (rc != 0)
		goto failed;

	complete(&pc->pc_starting);

	/*
	 * This mainloop strongly resembles ptlrpc_set_wait() except that our
	 * set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
	 * there are requests in the set. New requests come in on the set's
	 * new_req_list and ptlrpcd_check() moves them into the set.
	 */
	do {
		struct l_wait_info lwi;
		int timeout;

		timeout = ptlrpc_set_next_timeout(set);
		lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
				  ptlrpc_expired_set, set);

		lu_context_enter(&env.le_ctx);
		l_wait_event(set->set_waitq,
			     ptlrpcd_check(&env, pc), &lwi);
		lu_context_exit(&env.le_ctx);

		/*
		 * Abort inflight rpcs for forced stop case.
		 */
		if (test_bit(LIOD_STOP, &pc->pc_flags)) {
			if (test_bit(LIOD_FORCE, &pc->pc_flags))
				ptlrpc_abort_set(set);
			exit++;
		}

		/*
		 * Let's make one more loop to make sure that ptlrpcd_check()
		 * copied all raced new rpcs into the set so we can kill them.
		 */
	} while (exit < 2);

	/*
	 * Wait for inflight requests to drain.
	 */
	if (!list_empty(&set->set_requests))
		ptlrpc_set_wait(set);
	lu_context_fini(&env.le_ctx);

	complete(&pc->pc_finishing);

	return 0;
failed:
	pc->pc_error = rc;
	complete(&pc->pc_starting);
	return rc;
}

static void ptlrpcd_ctl_init(struct ptlrpcd_ctl *pc, int index, int cpt)
{
	pc->pc_index = index;
	pc->pc_cpt = cpt;
	init_completion(&pc->pc_starting);
	init_completion(&pc->pc_finishing);
	spin_lock_init(&pc->pc_lock);

	if (index < 0) {
		/* Recovery thread. */
		snprintf(pc->pc_name, sizeof(pc->pc_name), "ptlrpcd_rcv");
	} else {
		/* Regular thread. */
		snprintf(pc->pc_name, sizeof(pc->pc_name),
			 "ptlrpcd_%02d_%02d", cpt, index);
	}
}
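
/*
 * Thread naming follows from the snprintf() formats above: the recovery
 * thread is "ptlrpcd_rcv", and regular threads are "ptlrpcd_<cpt>_<index>",
 * e.g. "ptlrpcd_00_01" for the second thread bound to CPT 0.
 */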

/* XXX: We want multiple CPU cores to share the async RPC load. So we
 *      start many ptlrpcd threads. We also want to reduce the ptlrpcd
 *      overhead caused by data transfer across CPU cores. So we bind
 *      all ptlrpcd threads to a CPT, in the expectation that CPTs
 *      will be defined in a way that matches these boundaries. Within
 *      a CPT a ptlrpcd thread can be scheduled on any available core.
 *
 *      Each ptlrpcd thread has its own request queue. This can cause
 *      response delay if the thread is already busy. To help with
 *      this we define partner threads: these are other threads bound
 *      to the same CPT which will check for work in each other's
 *      request queues if they have no work to do.
 *
 *      The desired number of partner threads can be tuned by setting
 *      ptlrpcd_partner_group_size. The default is to create pairs of
 *      partner threads.
 */
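/*
 * Illustration of the grouping below: with pd_groupsize == 2, thread 5
 * computes first = 5 - 5 % 2 = 4, so its only partner is thread 4 (and
 * vice versa); with pd_groupsize == 3, threads 3, 4 and 5 form one group.
 */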
static int ptlrpcd_partners(struct ptlrpcd *pd, int index)
{
	struct ptlrpcd_ctl *pc;
	struct ptlrpcd_ctl **ppc;
	int first;
	int i;
	int rc = 0;
	int size;

	LASSERT(index >= 0 && index < pd->pd_nthreads);
	pc = &pd->pd_threads[index];
	pc->pc_npartners = pd->pd_groupsize - 1;

	if (pc->pc_npartners <= 0)
		goto out;

	size = sizeof(struct ptlrpcd_ctl *) * pc->pc_npartners;
	pc->pc_partners = kzalloc_node(size, GFP_NOFS,
				       cfs_cpt_spread_node(cfs_cpt_table,
							   pc->pc_cpt));
	if (!pc->pc_partners) {
		pc->pc_npartners = 0;
		rc = -ENOMEM;
		goto out;
	}

	first = index - index % pd->pd_groupsize;
	ppc = pc->pc_partners;
	for (i = first; i < first + pd->pd_groupsize; i++) {
		if (i != index)
			*ppc++ = &pd->pd_threads[i];
	}
out:
	return rc;
}

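/*
 * Start one ptlrpcd thread: initialize its lu_context, spawn the kthread,
 * then wait on pc_starting until the new thread reports success or failure
 * through pc_error.
 */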
int ptlrpcd_start(struct ptlrpcd_ctl *pc)
{
	struct task_struct *task;
	int rc = 0;

	/*
	 * Do not allow starting a second thread for one pc.
	 */
	if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Starting second thread (%s) for same pc %p\n",
		      pc->pc_name, pc);
		return 0;
	}

	/*
	 * So far only "client" ptlrpcd uses an environment. In the future,
	 * ptlrpcd thread (or a thread-set) has to be given an argument,
	 * describing its "scope".
	 */
	rc = lu_context_init(&pc->pc_env.le_ctx, LCT_CL_THREAD|LCT_REMEMBER);
	if (rc != 0)
		goto out;

	task = kthread_run(ptlrpcd, pc, "%s", pc->pc_name);
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		goto out_set;
	}

	wait_for_completion(&pc->pc_starting);
	rc = pc->pc_error;
	if (rc != 0)
		goto out_set;

	return 0;

out_set:
	if (pc->pc_set != NULL) {
		struct ptlrpc_request_set *set = pc->pc_set;

		spin_lock(&pc->pc_lock);
		pc->pc_set = NULL;
		spin_unlock(&pc->pc_lock);
		ptlrpc_set_destroy(set);
	}
	lu_context_fini(&pc->pc_env.le_ctx);

out:
	clear_bit(LIOD_START, &pc->pc_flags);
	return rc;
}

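/*
 * Shutdown is two-phase (see ptlrpcd_fini()): ptlrpcd_stop() only raises the
 * LIOD_STOP/LIOD_FORCE flags and wakes the thread, while ptlrpcd_free() waits
 * for the thread to finish and releases its request set and partner array.
 */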
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		return;
	}

	set_bit(LIOD_STOP, &pc->pc_flags);
	if (force)
		set_bit(LIOD_FORCE, &pc->pc_flags);
	wake_up(&pc->pc_set->set_waitq);
}

void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
	struct ptlrpc_request_set *set = pc->pc_set;

	if (!test_bit(LIOD_START, &pc->pc_flags)) {
		CWARN("Thread for pc %p was not started\n", pc);
		goto out;
	}

	wait_for_completion(&pc->pc_finishing);
	lu_context_fini(&pc->pc_env.le_ctx);

	spin_lock(&pc->pc_lock);
	pc->pc_set = NULL;
	spin_unlock(&pc->pc_lock);
	ptlrpc_set_destroy(set);

	clear_bit(LIOD_START, &pc->pc_flags);
	clear_bit(LIOD_STOP, &pc->pc_flags);
	clear_bit(LIOD_FORCE, &pc->pc_flags);

out:
	if (pc->pc_npartners > 0) {
		LASSERT(pc->pc_partners != NULL);

		kfree(pc->pc_partners);
		pc->pc_partners = NULL;
	}
	pc->pc_npartners = 0;
	pc->pc_error = 0;
}

static void ptlrpcd_fini(void)
{
	int i;
	int j;

	if (ptlrpcds != NULL) {
		for (i = 0; i < ptlrpcds_num; i++) {
			if (!ptlrpcds[i])
				break;
			for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
				ptlrpcd_stop(&ptlrpcds[i]->pd_threads[j], 0);
			for (j = 0; j < ptlrpcds[i]->pd_nthreads; j++)
				ptlrpcd_free(&ptlrpcds[i]->pd_threads[j]);
			kfree(ptlrpcds[i]);
			ptlrpcds[i] = NULL;
		}
		kfree(ptlrpcds);
	}
	ptlrpcds_num = 0;

	ptlrpcd_stop(&ptlrpcd_rcv, 0);
	ptlrpcd_free(&ptlrpcd_rcv);

	kfree(ptlrpcds_cpt_idx);
	ptlrpcds_cpt_idx = NULL;
}

static int ptlrpcd_init(void)
{
	int nthreads;
	int groupsize;
	int size;
	int i;
	int j;
	int rc = 0;
	struct cfs_cpt_table *cptable;
	__u32 *cpts = NULL;
	int ncpts;
	int cpt;
	struct ptlrpcd *pd;

	/*
	 * Determine the CPTs that ptlrpcd threads will run on.
	 */
	cptable = cfs_cpt_table;
	ncpts = cfs_cpt_number(cptable);
	if (ptlrpcd_cpts) {
		struct cfs_expr_list *el;

		size = ncpts * sizeof(ptlrpcds_cpt_idx[0]);
		ptlrpcds_cpt_idx = kzalloc(size, GFP_KERNEL);
		if (!ptlrpcds_cpt_idx) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cfs_expr_list_parse(ptlrpcd_cpts,
					 strlen(ptlrpcd_cpts),
					 0, ncpts - 1, &el);

		if (rc != 0) {
			CERROR("ptlrpcd_cpts: invalid CPT pattern string: %s",
			       ptlrpcd_cpts);
			rc = -EINVAL;
			goto out;
		}

		rc = cfs_expr_list_values(el, ncpts, &cpts);
		cfs_expr_list_free(el);
		if (rc <= 0) {
			CERROR("ptlrpcd_cpts: failed to parse CPT array %s: %d\n",
			       ptlrpcd_cpts, rc);
			if (rc == 0)
				rc = -EINVAL;
			goto out;
		}

		/*
		 * Create the cpt-to-index map. When there is no match
		 * in the cpt table, pick a cpt at random. This could
		 * be changed to take the topology of the system into
		 * account.
		 */
		for (cpt = 0; cpt < ncpts; cpt++) {
			for (i = 0; i < rc; i++)
				if (cpts[i] == cpt)
					break;
			if (i >= rc)
				i = cpt % rc;
			ptlrpcds_cpt_idx[cpt] = i;
		}

		cfs_expr_list_values_free(cpts, rc);
		ncpts = rc;
	}
	ptlrpcds_num = ncpts;

	size = ncpts * sizeof(ptlrpcds[0]);
	ptlrpcds = kzalloc(size, GFP_KERNEL);
	if (!ptlrpcds) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * The max_ptlrpcds parameter is obsolete, but do something
	 * sane if it has been tuned, and complain if
	 * ptlrpcd_per_cpt_max has also been tuned.
	 */
	if (max_ptlrpcds != 0) {
		CWARN("max_ptlrpcds is obsolete.\n");
		if (ptlrpcd_per_cpt_max == 0) {
			ptlrpcd_per_cpt_max = max_ptlrpcds / ncpts;
			/* Round up if there is a remainder. */
			if (max_ptlrpcds % ncpts != 0)
				ptlrpcd_per_cpt_max++;
			CWARN("Setting ptlrpcd_per_cpt_max = %d\n",
			      ptlrpcd_per_cpt_max);
		} else {
			CWARN("ptlrpcd_per_cpt_max is also set!\n");
		}
	}

	/*
	 * The ptlrpcd_bind_policy parameter is obsolete, but do
	 * something sane if it has been tuned, and complain if
	 * ptlrpcd_partner_group_size is also tuned.
	 */
	if (ptlrpcd_bind_policy != 0) {
		CWARN("ptlrpcd_bind_policy is obsolete.\n");
		if (ptlrpcd_partner_group_size == 0) {
			switch (ptlrpcd_bind_policy) {
			case 1: /* PDB_POLICY_NONE */
			case 2: /* PDB_POLICY_FULL */
				ptlrpcd_partner_group_size = 1;
				break;
			case 3: /* PDB_POLICY_PAIR */
				ptlrpcd_partner_group_size = 2;
				break;
			case 4: /* PDB_POLICY_NEIGHBOR */
#ifdef CONFIG_NUMA
				ptlrpcd_partner_group_size = -1; /* CPT */
#else
				ptlrpcd_partner_group_size = 3; /* Triplets */
#endif
				break;
			default: /* Illegal value, use the default. */
				ptlrpcd_partner_group_size = 2;
				break;
			}
			CWARN("Setting ptlrpcd_partner_group_size = %d\n",
			      ptlrpcd_partner_group_size);
		} else {
			CWARN("ptlrpcd_partner_group_size is also set!\n");
		}
	}

	if (ptlrpcd_partner_group_size == 0)
		ptlrpcd_partner_group_size = 2;
	else if (ptlrpcd_partner_group_size < 0)
		ptlrpcd_partner_group_size = -1;
	else if (ptlrpcd_per_cpt_max > 0 &&
		 ptlrpcd_partner_group_size > ptlrpcd_per_cpt_max)
		ptlrpcd_partner_group_size = ptlrpcd_per_cpt_max;

	/*
	 * Start the recovery thread first.
	 */
	set_bit(LIOD_RECOVERY, &ptlrpcd_rcv.pc_flags);
	ptlrpcd_ctl_init(&ptlrpcd_rcv, -1, CFS_CPT_ANY);
	rc = ptlrpcd_start(&ptlrpcd_rcv);
	if (rc < 0)
		goto out;

	for (i = 0; i < ncpts; i++) {
		if (!cpts)
			cpt = i;
		else
			cpt = cpts[i];

		nthreads = cfs_cpt_weight(cptable, cpt);
		if (ptlrpcd_per_cpt_max > 0 && ptlrpcd_per_cpt_max < nthreads)
			nthreads = ptlrpcd_per_cpt_max;
		if (nthreads < 2)
			nthreads = 2;

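		/*
		 * Pick the partner group size, then round nthreads up to a
		 * whole number of groups so every thread ends up in a full
		 * group (e.g. nthreads = 7 with groupsize = 3 becomes 9).
		 */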
		if (ptlrpcd_partner_group_size <= 0) {
			groupsize = nthreads;
		} else if (nthreads <= ptlrpcd_partner_group_size) {
			groupsize = nthreads;
		} else {
			groupsize = ptlrpcd_partner_group_size;
			if (nthreads % groupsize != 0)
				nthreads += groupsize - (nthreads % groupsize);
		}

		size = offsetof(struct ptlrpcd, pd_threads[nthreads]);
		pd = kzalloc_node(size, GFP_NOFS,
				  cfs_cpt_spread_node(cfs_cpt_table, cpt));
		if (!pd) {
			rc = -ENOMEM;
			goto out;
		}
		pd->pd_size = size;
		pd->pd_index = i;
		pd->pd_cpt = cpt;
		pd->pd_cursor = 0;
		pd->pd_nthreads = nthreads;
		pd->pd_groupsize = groupsize;
		ptlrpcds[i] = pd;

		/*
		 * The ptlrpcd threads in a partner group can access
		 * each other's struct ptlrpcd_ctl, so these must be
		 * initialized before any thread is started.
		 */
		for (j = 0; j < nthreads; j++) {
			ptlrpcd_ctl_init(&pd->pd_threads[j], j, cpt);
			rc = ptlrpcd_partners(pd, j);
			if (rc < 0)
				goto out;
		}

		/* XXX: We start nthreads ptlrpc daemons.
		 * Each of them can process any non-recovery
		 * async RPC to improve overall async RPC
		 * efficiency.
		 *
		 * But there are some issues when async I/O RPCs
		 * and async non-I/O RPCs are processed in the same
		 * set. The ptlrpcd may be blocked by some async
		 * I/O RPC(s), which then prevents other async
		 * non-I/O RPC(s) from being processed in time.
		 *
		 * Maybe we should distinguish blocked async RPCs
		 * from non-blocked async RPCs, and process them
		 * in different ptlrpcd sets to avoid unnecessary
		 * dependency. But how to distribute the async RPC
		 * load among all the ptlrpc daemons becomes
		 * another trouble.
		 */
		for (j = 0; j < nthreads; j++) {
			rc = ptlrpcd_start(&pd->pd_threads[j]);
			if (rc < 0)
				goto out;
		}
	}
out:
	if (rc != 0)
		ptlrpcd_fini();

	return rc;
}

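/*
 * ptlrpcd users are reference-counted: the first ptlrpcd_addref() call
 * starts the thread pool via ptlrpcd_init(), and the last ptlrpcd_decref()
 * tears it down again via ptlrpcd_fini().
 */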
int ptlrpcd_addref(void)
{
	int rc = 0;

	mutex_lock(&ptlrpcd_mutex);
	if (++ptlrpcd_users == 1)
		rc = ptlrpcd_init();
	mutex_unlock(&ptlrpcd_mutex);
	return rc;
}
EXPORT_SYMBOL(ptlrpcd_addref);

void ptlrpcd_decref(void)
{
	mutex_lock(&ptlrpcd_mutex);
	if (--ptlrpcd_users == 0)
		ptlrpcd_fini();
	mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
/** @} ptlrpcd */