/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/** \defgroup PtlRPC Portal RPC and networking module.
 *
 * PortalRPC is the layer used by the rest of the Lustre code to achieve
 * network communications: establish connections with corresponding export
 * and import states, listen for a service, send and receive RPCs.
 * PortalRPC also includes the base recovery framework: packet resending and
 * replaying, reconnections, pinger.
 *
 * PortalRPC utilizes LNet as its transport layer.
 *
 * @{
 */


#ifndef _LUSTRE_NET_H
#define _LUSTRE_NET_H

/** \defgroup net net
 *
 * @{
 */

#include <linux/lustre_net.h>

#include <linux/libcfs/libcfs.h>
// #include <obd.h>
#include <linux/lnet/lnet.h>
#include <lustre/lustre_idl.h>
#include <lustre_ha.h>
#include <lustre_sec.h>
#include <lustre_import.h>
#include <lprocfs_status.h>
#include <lu_object.h>
#include <lustre_req_layout.h>

#include <obd_support.h>
#include <lustre_ver.h>

/* MD flags we _always_ use */
#define PTLRPC_MD_OPTIONS 0

/**
 * Max # of bulk operations in one request.
 * In order for the client and server to properly negotiate the maximum
 * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
 * value. The client is free to limit the actual RPC size for any bulk
 * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
#define PTLRPC_BULK_OPS_BITS	2
#define PTLRPC_BULK_OPS_COUNT	(1U << PTLRPC_BULK_OPS_BITS)
/**
 * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
 * should not be used on the server at all. Otherwise, it imposes a
 * protocol limitation on the maximum RPC size that can be used by any
 * RPC sent to that server in the future. Instead, the server should
 * use the negotiated per-client ocd_brw_size to determine the bulk
 * RPC count. */
#define PTLRPC_BULK_OPS_MASK	(~((__u64)PTLRPC_BULK_OPS_COUNT - 1))

/**
 * Define maxima for bulk I/O.
 *
 * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
 * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
 * currently supported maximum between peers at connect via ocd_brw_size.
 */
#define PTLRPC_MAX_BRW_BITS	(LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
#define PTLRPC_MAX_BRW_SIZE	(1 << PTLRPC_MAX_BRW_BITS)
#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)

#define ONE_MB_BRW_SIZE		(1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE		(1 << LNET_MTU_BITS)
#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define DT_MAX_BRW_SIZE		PTLRPC_MAX_BRW_SIZE
#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define OFD_MAX_BRW_SIZE	(1 << LNET_MTU_BITS)

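/*
 * Worked example (not from the original header): assuming the common
 * LNET_MTU_BITS = 20 (1 MB LNet MTU) and 4 KB pages (PAGE_CACHE_SHIFT = 12),
 * the maxima above evaluate to:
 *
 *   PTLRPC_MAX_BRW_BITS  = 20 + 2     = 22
 *   PTLRPC_MAX_BRW_SIZE  = 1 << 22    = 4 MB
 *   PTLRPC_MAX_BRW_PAGES = 4 MB >> 12 = 1024 pages
 *   PTLRPC_BULK_OPS_MASK = ~3ULL      (clears the two low bits)
 *
 * i.e. one BRW RPC is at most PTLRPC_BULK_OPS_COUNT (4) LNET_MTU-sized
 * RDMA transfers.
 */
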
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
#  error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
#  error "PTLRPC_MAX_BRW_SIZE too big"
# endif
# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
#  error "PTLRPC_MAX_BRW_PAGES too big"
# endif

#define PTLRPC_NTHRS_INIT	2

/**
 * Buffer Constants
 *
 * Constants determine how memory is used to buffer incoming service requests.
 *
 * ?_NBUFS		# buffers to allocate when growing the pool
 * ?_BUFSIZE		# bytes in a single request buffer
 * ?_MAXREQSIZE	# maximum request service will receive
 *
 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
 * of ?_NBUFS is added to the pool.
 *
 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
 * considered full when less than ?_MAXREQSIZE is left in them.
 */
/**
 * Thread Constants
 *
 * Constants determine how threads are created for a ptlrpc service.
 *
 * ?_NTHRS_INIT	# threads to create for each service partition when
 *			  initializing. If it's a non-affinity service and
 *			  there is only one partition, it's the overall #
 *			  threads for the service while initializing.
 * ?_NTHRS_BASE	# threads that should at least be created for each
 *			  ptlrpc partition to keep the service healthy.
 *			  It's the low-water mark of the threads upper-limit
 *			  for each partition.
 * ?_THR_FACTOR	# threads that can be added to the threads upper-limit
 *			  for each CPU core. This factor is only for reference;
 *			  we might decrease the value of the factor if the
 *			  number of cores per CPT is above a limit.
 * ?_NTHRS_MAX	# overall threads that can be created for a service.
 *			  It's a soft limit because if the service is running
 *			  on a machine with hundreds of cores and tens of
 *			  CPU partitions, we need to guarantee each partition
 *			  has ?_NTHRS_BASE threads, which means total threads
 *			  will be ?_NTHRS_BASE * number_of_cpts, which can
 *			  exceed ?_NTHRS_MAX.
 *
 * Examples
 *
 * #define MDS_NTHRS_INIT	2
 * #define MDS_NTHRS_BASE	64
 * #define MDS_NTHRS_FACTOR	8
 * #define MDS_NTHRS_MAX	1024
 *
 * Example 1):
 * ---------------------------------------------------------------------
 * Server(A) has 16 cores, the user configured it to 4 partitions so each
 * partition has 4 cores, then the actual number of service threads on each
 * partition is:
 *     MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
 *
 * Total number of threads for the service is:
 *     96 * partitions(4) = 384
 *
 * Example 2):
 * ---------------------------------------------------------------------
 * Server(B) has 32 cores, the user configured it to 4 partitions so each
 * partition has 8 cores, then the actual number of service threads on each
 * partition is:
 *     MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
 *
 * Total number of threads for the service is:
 *     128 * partitions(4) = 512
 *
 * Example 3):
 * ---------------------------------------------------------------------
 * Server(B) has 96 cores, the user configured it to 8 partitions so each
 * partition has 12 cores, then the actual number of service threads on each
 * partition is:
 *     MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
 *
 * Total number of threads for the service is:
 *     160 * partitions(8) = 1280
 *
 * However, this is above the soft limit MDS_NTHRS_MAX, so we choose this
 * number as the upper limit of threads for each partition:
 *     MDS_NTHRS_MAX(1024) / partitions(8) = 128
 *
 * Example 4):
 * ---------------------------------------------------------------------
 * Server(C) has a thousand cores and the user configured it to 32 partitions:
 *     MDS_NTHRS_BASE(64) * 32 = 2048
 *
 * which is already above the soft limit MDS_NTHRS_MAX(1024), but we still
 * need to guarantee that each partition has at least MDS_NTHRS_BASE(64)
 * threads to keep the service healthy, so the total number of threads will
 * just be 2048.
 *
 * NB: we don't suggest choosing a server with that many cores, because the
 * backend filesystem itself, buffer cache, or underlying network stack might
 * have SMP scalability issues at that scale.
 *
 * If the user already has a fat machine with hundreds or thousands of cores,
 * there are two choices for configuration:
 * a) create a CPU table from a subset of all CPUs and run Lustre on
 *    top of this subset
 * b) bind service threads to a few partitions, see the module parameters
 *    of MDS and OSS for details
 *
 * NB: these calculations (and the examples below) are simplified to help
 * understanding; the real implementation is a little more complex,
 * please see ptlrpc_server_nthreads_check() for details.
 *
 */
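
/*
 * Illustrative sketch (not part of the original header): a simplified
 * version of the per-partition thread-count calculation described above.
 * The real logic lives in ptlrpc_server_nthreads_check(); the names below
 * (nthrs_base/factor/max, ncpts, cores_per_cpt) are hypothetical.
 */
static inline int ptlrpc_example_nthrs_per_cpt(int nthrs_base, int factor,
					       int nthrs_max, int ncpts,
					       int cores_per_cpt)
{
	/* scale the base up by the number of cores in the partition */
	int nthrs = nthrs_base + cores_per_cpt * factor;

	/* apply the soft limit, distributed evenly across partitions... */
	if (nthrs * ncpts > nthrs_max)
		nthrs = nthrs_max / ncpts;

	/* ...but never drop below the per-partition low-water mark */
	return max(nthrs, nthrs_base);
}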

 /*
  * LDLM threads constants:
  *
  * Given 8 as factor and 24 as base threads number
  *
  * example 1)
  * On a 4-core machine we will have 24 + 8 * 4 = 56 threads.
  *
  * example 2)
  * On an 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56
  * threads for each partition and the total thread count will be 112.
  *
  * example 3)
  * On a 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
  * threads for each partition to keep the service healthy, so the total
  * thread count should be 24 * 8 = 192.
  *
  * So with these constants, the thread count will be at a similar level
  * to old versions, unless the target machine has over a hundred cores
  */
#define LDLM_THR_FACTOR		8
#define LDLM_NTHRS_INIT		PTLRPC_NTHRS_INIT
#define LDLM_NTHRS_BASE		24
#define LDLM_NTHRS_MAX		(num_online_cpus() == 1 ? 64 : 128)

#define LDLM_BL_THREADS		LDLM_NTHRS_AUTO_INIT
#define LDLM_CLIENT_NBUFS	1
#define LDLM_SERVER_NBUFS	64
#define LDLM_BUFSIZE		(8 * 1024)
#define LDLM_MAXREQSIZE		(5 * 1024)
#define LDLM_MAXREPSIZE		(1024)

#define OST_MAXREQSIZE		(5 * 1024)

/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)

/**
 * Structure to define a single portal connection.
 */
struct ptlrpc_connection {
	/** linkage for connections hash table */
	struct hlist_node	c_hash;
	/** Our own lnet nid for this connection */
	lnet_nid_t		c_self;
	/** Remote side nid for this connection */
	lnet_process_id_t	c_peer;
	/** UUID of the other side */
	struct obd_uuid		c_remote_uuid;
	/** reference counter for this connection */
	atomic_t		c_refcount;
};

/** Client definition for PortalRPC */
struct ptlrpc_client {
	/** What lnet portal does this client send messages to by default */
	__u32	cli_request_portal;
	/** What portal do we expect replies on */
	__u32	cli_reply_portal;
	/** Name of the client */
	char	*cli_name;
};

/** state flags of requests */
/* XXX only ones left are those used by the bulk descs as well! */
#define PTL_RPC_FL_INTR		(1 << 0) /* reply wait was interrupted by user */
#define PTL_RPC_FL_TIMEOUT	(1 << 7) /* request timed out waiting for reply */

#define REQ_MAX_ACK_LOCKS 8

union ptlrpc_async_args {
	/**
	 * Scratchpad for passing args to completion interpreter. Users
	 * cast to the struct of their choosing, and CLASSERT that this is
	 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
	 * a pointer to it here. The pointer_arg ensures this struct is at
	 * least big enough for that.
	 */
	void	*pointer_arg[11];
	__u64	space[7];
};
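
/*
 * Illustrative sketch (not part of the original header) of the intended
 * usage pattern: an interpreter overlays its own argument struct (here the
 * hypothetical struct my_async_args) on the scratchpad and CLASSERTs at
 * compile time that it fits.
 */
struct my_async_args {			/* hypothetical example struct */
	struct obd_export	*aa_exp;
	int			aa_flags;
};

static inline void example_pack_args(struct ptlrpc_request *req)
{
	struct my_async_args *aa;

	/* compile-time check that the scratchpad is big enough */
	CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	aa->aa_flags = 0;
}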

struct ptlrpc_request_set;
typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);

/**
 * Definition of request set structure.
 * A request set is a list of requests (not necessarily to the same target)
 * that once populated with RPCs could be sent in parallel.
 * There are two kinds of request sets: general purpose, and with a dedicated
 * serving thread. An example of the latter is the ptlrpcd set.
 * For general purpose sets, once the set has started sending it is
 * impossible to add new requests to it.
 * Provides a way to call "completion callbacks" when all requests in the set
 * have returned.
 */
struct ptlrpc_request_set {
	atomic_t		set_refcount;
	/** number of in queue requests */
	atomic_t		set_new_count;
	/** number of uncompleted requests */
	atomic_t		set_remaining;
	/** wait queue to wait on for request events */
	wait_queue_head_t	set_waitq;
	wait_queue_head_t	*set_wakeup_ptr;
	/** List of requests in the set */
	struct list_head	set_requests;
	/**
	 * List of completion callbacks to be called when the set is completed
	 * This is only used if \a set_interpret is NULL.
	 * Links struct ptlrpc_set_cbdata.
	 */
	struct list_head	set_cblist;
	/** Completion callback, if only one. */
	set_interpreter_func	set_interpret;
	/** opaque argument passed to completion \a set_interpret callback. */
	void			*set_arg;
	/**
	 * Lock for \a set_new_requests manipulations
	 * locked so that any old caller can communicate requests to
	 * the set holder who can then fold them into the lock-free set
	 */
	spinlock_t		set_new_req_lock;
	/** List of new yet unsent requests. Only used with ptlrpcd now. */
	struct list_head	set_new_requests;

	/** rq_status of requests that have been freed already */
	int			set_rc;
	/** Additional fields used by the flow control extension */
	/** Maximum number of RPCs in flight */
	int			set_max_inflight;
	/** Callback function used to generate RPCs */
	set_producer_func	set_producer;
	/** opaque argument passed to the producer callback */
	void			*set_producer_arg;
};
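
/*
 * Illustrative sketch (not part of the original header): the typical
 * client-side life cycle of a general purpose request set, using the
 * standard ptlrpc set API (see the callers in client.c for real usage).
 */
static inline int example_send_in_parallel(struct ptlrpc_request *req1,
					   struct ptlrpc_request *req2)
{
	struct ptlrpc_request_set *set;
	int rc;

	set = ptlrpc_prep_set();
	if (set == NULL)
		return -ENOMEM;

	/* the requests may target different imports/servers */
	ptlrpc_set_add_req(set, req1);
	ptlrpc_set_add_req(set, req2);

	/* sends everything and blocks until set_remaining drops to 0 */
	rc = ptlrpc_set_wait(set);

	/* drops the set refcount and frees the completed requests */
	ptlrpc_set_destroy(set);
	return rc;
}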

/**
 * Description of a single ptlrpc_set callback
 */
struct ptlrpc_set_cbdata {
	/** List linkage item */
	struct list_head	psc_item;
	/** Pointer to interpreting function */
	set_interpreter_func	psc_interpret;
	/** Opaque argument to pass to the callback */
	void			*psc_data;
};

struct ptlrpc_bulk_desc;
struct ptlrpc_service_part;
struct ptlrpc_service;

/**
 * ptlrpc callback & work item stuff
 */
struct ptlrpc_cb_id {
	void (*cbid_fn)(lnet_event_t *ev);	/* specific callback fn */
	void *cbid_arg;				/* additional arg */
};

/** Maximum number of locks to fit into reply state */
#define RS_MAX_LOCKS 8
#define RS_DEBUG     0

/**
 * Structure to define reply state on the server
 * Reply state holds various reply message information. Also for "difficult"
 * replies (rep-ack case) we store the state after sending the reply and wait
 * for the client to acknowledge its reception. In these cases locks could be
 * added to the state for replay/failover consistency guarantees.
 */
struct ptlrpc_reply_state {
	/** Callback description */
	struct ptlrpc_cb_id	rs_cb_id;
	/** Linkage for list of all reply states in a system */
	struct list_head	rs_list;
	/** Linkage for list of all reply states on same export */
	struct list_head	rs_exp_list;
	/** Linkage for list of all reply states for same obd */
	struct list_head	rs_obd_list;
#if RS_DEBUG
	struct list_head	rs_debug_list;
#endif
	/** A spinlock to protect the reply state flags */
	spinlock_t		rs_lock;
	/** Reply state flags */
	unsigned long		rs_difficult:1;	/* ACK/commit stuff */
	unsigned long		rs_no_ack:1;	/* no ACK, even for
						   difficult requests */
	unsigned long		rs_scheduled:1;	/* being handled? */
	unsigned long		rs_scheduled_ever:1; /* any schedule attempts? */
	unsigned long		rs_handled:1;	/* been handled yet? */
	unsigned long		rs_on_net:1;	/* reply_out_callback pending? */
	unsigned long		rs_prealloc:1;	/* rs from prealloc list */
	unsigned long		rs_committed:1;	/* the transaction was committed
						   and the rs was dispatched
						   by ptlrpc_commit_replies */
	/** Size of the state */
	int			rs_size;
	/** opcode */
	__u32			rs_opc;
	/** Transaction number */
	__u64			rs_transno;
	/** xid */
	__u64			rs_xid;
	struct obd_export	*rs_export;
	struct ptlrpc_service_part *rs_svcpt;
	/** Lnet metadata handle for the reply */
	lnet_handle_md_t	rs_md_h;
	atomic_t		rs_refcount;

	/** Context for the service thread */
	struct ptlrpc_svc_ctx	*rs_svc_ctx;
	/** Reply buffer (actually sent to the client), encoded if needed */
	struct lustre_msg	*rs_repbuf;	/* wrapper */
	/** Size of the reply buffer */
	int			rs_repbuf_len;	/* wrapper buf length */
	/** Size of the reply message */
	int			rs_repdata_len;	/* wrapper msg length */
	/**
	 * Actual reply message. Its content is encrypted (if needed) to
	 * produce the reply buffer for actual sending. In the simple case
	 * of no network encryption we just set \a rs_repbuf to \a rs_msg
	 */
	struct lustre_msg	*rs_msg;	/* reply message */

	/** Number of locks awaiting client ACK */
	int			rs_nlocks;
	/** Handles of locks awaiting client reply ACK */
	struct lustre_handle	rs_locks[RS_MAX_LOCKS];
	/** Lock modes of locks in \a rs_locks */
	ldlm_mode_t		rs_modes[RS_MAX_LOCKS];
};

struct ptlrpc_thread;

/** RPC stages */
enum rq_phase {
	RQ_PHASE_NEW		= 0xebc0de00,
	RQ_PHASE_RPC		= 0xebc0de01,
	RQ_PHASE_BULK		= 0xebc0de02,
	RQ_PHASE_INTERPRET	= 0xebc0de03,
	RQ_PHASE_COMPLETE	= 0xebc0de04,
	RQ_PHASE_UNREGISTERING	= 0xebc0de05,
	RQ_PHASE_UNDEFINED	= 0xebc0de06
};
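
/*
 * Illustrative note (not from the original header): on the client a
 * request normally advances through these phases as
 *
 *	NEW -> RPC -> [BULK ->] INTERPRET -> COMPLETE
 *
 * with UNREGISTERING entered when reply/bulk MDs must still be unlinked
 * before the request can be finalized. The distinctive magic values make
 * a corrupted rq_phase easy to spot in a crash dump.
 */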

/** Type of request interpreter call-back */
typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
				    struct ptlrpc_request *req,
				    void *arg, int rc);

/**
 * Definition of request pool structure.
 * The pool is used to store empty preallocated requests for the case
 * when we would actually need to send something without performing
 * any allocations (to avoid e.g. OOM).
 */
struct ptlrpc_request_pool {
	/** Locks the list */
	spinlock_t		prp_lock;
	/** list of ptlrpc_request structs */
	struct list_head	prp_req_list;
	/** Maximum message size that would fit into a request from this pool */
	int			prp_rq_size;
	/** Function to allocate more requests for this pool */
	void (*prp_populate)(struct ptlrpc_request_pool *, int);
};
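
/*
 * Illustrative sketch (not part of the original header): a pool is
 * created with an initial request count, a fixed message size, and a
 * populate callback that refills it, e.g.
 *
 *	pool = ptlrpc_init_rq_pool(4, LDLM_MAXREQSIZE,
 *				   example_populate_pool);
 *
 * (callback name hypothetical). Request allocation paths can then fall
 * back to the pool when regular allocation fails, so a request can still
 * be sent under memory pressure.
 */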

struct lu_context;
struct lu_env;

struct ldlm_lock;

/**
 * \defgroup nrs Network Request Scheduler
 * @{
 */
struct ptlrpc_nrs_policy;
struct ptlrpc_nrs_resource;
struct ptlrpc_nrs_request;

/**
 * NRS control operations.
 *
 * These are common for all policies.
 */
enum ptlrpc_nrs_ctl {
	/**
	 * Not a valid opcode.
	 */
	PTLRPC_NRS_CTL_INVALID,
	/**
	 * Activate the policy.
	 */
	PTLRPC_NRS_CTL_START,
	/**
	 * Reserved for multiple primary policies, which may be a possibility
	 * in the future.
	 */
	PTLRPC_NRS_CTL_STOP,
	/**
	 * Policies can start using opcodes from this value and onwards for
	 * their own purposes; the assigned value itself is arbitrary.
	 */
	PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
};

/**
 * ORR policy operations
 */
enum nrs_ctl_orr {
	NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
	NRS_CTL_ORR_WR_QUANTUM,
	NRS_CTL_ORR_RD_OFF_TYPE,
	NRS_CTL_ORR_WR_OFF_TYPE,
	NRS_CTL_ORR_RD_SUPP_REQ,
	NRS_CTL_ORR_WR_SUPP_REQ,
};

/**
 * NRS policy operations.
 *
 * These determine the behaviour of a policy, and are called in response to
 * NRS core events.
 */
struct ptlrpc_nrs_pol_ops {
	/**
	 * Called during policy registration; this operation is optional.
	 *
	 * \param[in,out] policy The policy being initialized
	 */
	int	(*op_policy_init) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Called during policy unregistration; this operation is optional.
	 *
	 * \param[in,out] policy The policy being unregistered/finalized
	 */
	void	(*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Called when activating a policy via lprocfs; policies allocate and
	 * initialize their resources here; this operation is optional.
	 *
	 * \param[in,out] policy The policy being started
	 *
	 * \see nrs_policy_start_locked()
	 */
	int	(*op_policy_start) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Called when deactivating a policy via lprocfs; policies deallocate
	 * their resources here; this operation is optional
	 *
	 * \param[in,out] policy The policy being stopped
	 *
	 * \see nrs_policy_stop0()
	 */
	void	(*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Used for policy-specific operations; i.e. not generic ones like
	 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
	 * to an ioctl; this operation is optional.
	 *
	 * \param[in,out] policy The policy carrying out operation \a opc
	 * \param[in]	  opc	 The command operation being carried out
	 * \param[in,out] arg	 A generic buffer for communication between the
	 *			 user and the control operation
	 *
	 * \retval -ve error
	 * \retval   0 success
	 *
	 * \see ptlrpc_nrs_policy_control()
	 */
	int	(*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
				  enum ptlrpc_nrs_ctl opc, void *arg);

	/**
	 * Called when obtaining references to the resources of the resource
	 * hierarchy for a request that has arrived for handling at the PTLRPC
	 * service. Policies should return -ve for requests they do not wish
	 * to handle. This operation is mandatory.
	 *
	 * \param[in,out] policy     The policy we're getting resources for.
	 * \param[in,out] nrq	     The request we are getting resources for.
	 * \param[in]	  parent     The parent resource of the resource being
	 *			     requested; set to NULL if none.
	 * \param[out]	  resp	     The resource is to be returned here; the
	 *			     fallback policy in an NRS head should
	 *			     \e always return a non-NULL pointer value.
	 * \param[in]	  moving_req When set, signifies that this is an attempt
	 *			     to obtain resources for a request being moved
	 *			     to the high-priority NRS head by
	 *			     ldlm_lock_reorder_req().
	 *			     This implies two things:
	 *			     1. We are under obd_export::exp_rpc_lock and
	 *				so should not sleep.
	 *			     2. We should not perform non-idempotent
	 *				operations, and can skip idempotent
	 *				operations that were already carried out
	 *				when resources were first taken for the
	 *				request in ptlrpc_nrs_req_initialize().
	 *
	 * \retval 0, +ve The level of the returned resource in the resource
	 *		  hierarchy; currently only 0 (for a non-leaf resource)
	 *		  and 1 (for a leaf resource) are supported by the
	 *		  framework.
	 * \retval -ve	  error
	 *
	 * \see ptlrpc_nrs_req_initialize()
	 * \see ptlrpc_nrs_hpreq_add_nolock()
	 * \see ptlrpc_nrs_req_hp_move()
	 */
	int	(*op_res_get) (struct ptlrpc_nrs_policy *policy,
			       struct ptlrpc_nrs_request *nrq,
			       const struct ptlrpc_nrs_resource *parent,
			       struct ptlrpc_nrs_resource **resp,
			       bool moving_req);
	/**
	 * Called when releasing references taken for resources in the resource
	 * hierarchy for the request; this operation is optional.
	 *
	 * \param[in,out] policy The policy the resource belongs to
	 * \param[in]	  res	 The resource to be freed
	 *
	 * \see ptlrpc_nrs_req_finalize()
	 * \see ptlrpc_nrs_hpreq_add_nolock()
	 * \see ptlrpc_nrs_req_hp_move()
	 */
	void	(*op_res_put) (struct ptlrpc_nrs_policy *policy,
			       const struct ptlrpc_nrs_resource *res);

	/**
	 * Obtains a request for handling from the policy, and optionally
	 * removes the request from the policy; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy to poll
	 * \param[in]	  peek	 When set, signifies that we just want to
	 *			 examine the request, and not handle it, so the
	 *			 request is not removed from the policy.
	 * \param[in]	  force	 When set, it will force a policy to return a
	 *			 request if it has one queued.
	 *
	 * \retval NULL	sNo request available for handling
	 * \retval valid-pointer The request polled for handling
	 *
	 * \see ptlrpc_nrs_req_get_nolock()
	 */
	struct ptlrpc_nrs_request *
		(*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
			       bool force);
	/**
	 * Called when attempting to add a request to a policy for later
	 * handling; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy on which to enqueue \a nrq
	 * \param[in,out] nrq	 The request to enqueue
	 *
	 * \retval 0	success
	 * \retval != 0	error
	 *
	 * \see ptlrpc_nrs_req_add_nolock()
	 */
	int	(*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
				   struct ptlrpc_nrs_request *nrq);
	/**
	 * Removes a request from the policy's set of pending requests. Normally
	 * called after a request has been polled successfully from the policy
	 * for handling; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy the request \a nrq belongs to
	 * \param[in,out] nrq	 The request to dequeue
	 *
	 * \see ptlrpc_nrs_req_del_nolock()
	 */
	void	(*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
				   struct ptlrpc_nrs_request *nrq);
	/**
	 * Called after the request has been handled. Could be used for
	 * job/resource control; this operation is optional.
	 *
	 * \param[in,out] policy The policy that is finishing handling of
	 *			 request \a nrq
	 * \param[in,out] nrq	 The request
	 *
	 * \pre spin_is_locked(&svcpt->scp_req_lock)
	 *
	 * \see ptlrpc_nrs_req_stop_nolock()
	 */
	void	(*op_req_stop) (struct ptlrpc_nrs_policy *policy,
				struct ptlrpc_nrs_request *nrq);
	/**
	 * Registers the policy's lprocfs interface with a PTLRPC service.
	 *
	 * \param[in] svc The service
	 *
	 * \retval 0	success
	 * \retval != 0	error
	 */
	int	(*op_lprocfs_init) (struct ptlrpc_service *svc);
	/**
	 * Unregisters the policy's lprocfs interface from a PTLRPC service.
	 *
	 * In cases of failed policy registration in
	 * \e ptlrpc_nrs_policy_register(), this function may be called for a
	 * service which has not registered the policy successfully, so
	 * implementations of this method should make sure their operations are
	 * safe in such cases.
	 *
	 * \param[in] svc The service
	 */
	void	(*op_lprocfs_fini) (struct ptlrpc_service *svc);
};

/**
 * Policy flags
 */
enum nrs_policy_flags {
	/**
	 * Fallback policy, use this flag only on a single supported policy per
	 * service. The flag cannot be used on policies that use
	 * \e PTLRPC_NRS_FL_REG_EXTERN
	 */
	PTLRPC_NRS_FL_FALLBACK		= (1 << 0),
	/**
	 * Start policy immediately after registering.
	 */
	PTLRPC_NRS_FL_REG_START		= (1 << 1),
	/**
	 * This is a policy registering from a module different to the one NRS
	 * core ships in (currently ptlrpc).
	 */
	PTLRPC_NRS_FL_REG_EXTERN	= (1 << 2),
};

/**
 * NRS queue type.
 *
 * Denotes whether an NRS instance is for handling normal or high-priority
 * RPCs, or whether an operation pertains to one or both of the NRS instances
 * in a service.
 */
enum ptlrpc_nrs_queue_type {
	PTLRPC_NRS_QUEUE_REG	= (1 << 0),
	PTLRPC_NRS_QUEUE_HP	= (1 << 1),
	PTLRPC_NRS_QUEUE_BOTH	= (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
};

/**
 * NRS head
 *
 * A PTLRPC service has at least one NRS head instance for handling normal
 * priority RPCs, and may optionally have a second NRS head instance for
 * handling high-priority RPCs. Each NRS head maintains a list of available
 * policies, of which one and only one policy is acting as the fallback policy,
 * and optionally a different policy may be acting as the primary policy. For
 * all RPCs handled by this NRS head instance, NRS core will first attempt to
 * enqueue the RPC using the primary policy (if any). The fallback policy is
 * used in the following cases:
 * - when there was no primary policy in the
 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
 *   was initialized.
 * - when the primary policy that was at the
 *   ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
 *   RPC was initialized signaled that it did not wish, or for some other
 *   reason was not able, to handle the request, by returning a non-valid NRS
 *   resource reference.
 * - when the primary policy that was at the
 *   ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
 *   RPC was initialized fails later during the request enqueueing stage.
 *
 * \see nrs_resource_get_safe()
 * \see nrs_request_enqueue()
 */
struct ptlrpc_nrs {
	spinlock_t			nrs_lock;
	/** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
	/**
	 * List of registered policies
	 */
	struct list_head		nrs_policy_list;
	/**
	 * List of policies with queued requests. Policies that have any
	 * outstanding requests are queued here, and this list is queried
	 * in a round-robin manner from NRS core when obtaining a request
	 * for handling. This ensures that requests from policies that at some
	 * point transition away from the
	 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
	 */
	struct list_head		nrs_policy_queued;
	/**
	 * Service partition for this NRS head
	 */
	struct ptlrpc_service_part	*nrs_svcpt;
	/**
	 * Primary policy, which is the preferred policy for handling RPCs
	 */
	struct ptlrpc_nrs_policy	*nrs_policy_primary;
	/**
	 * Fallback policy, which is the backup policy for handling RPCs
	 */
	struct ptlrpc_nrs_policy	*nrs_policy_fallback;
	/**
	 * This NRS head handles either HP or regular requests
	 */
	enum ptlrpc_nrs_queue_type	nrs_queue_type;
	/**
	 * # queued requests from all policies in this NRS head
	 */
	unsigned long			nrs_req_queued;
	/**
	 * # scheduled requests from all policies in this NRS head
	 */
	unsigned long			nrs_req_started;
	/**
	 * # policies on this NRS
	 */
	unsigned			nrs_num_pols;
	/**
	 * This NRS head is in the process of starting a policy
	 */
	unsigned			nrs_policy_starting:1;
	/**
	 * In the process of shutting down the whole NRS head; used during
	 * unregistration
	 */
	unsigned			nrs_stopping:1;
};

#define NRS_POL_NAME_MAX 16

struct ptlrpc_nrs_pol_desc;

/**
 * Service compatibility predicate; this determines whether a policy is adequate
 * for handling RPCs of a particular PTLRPC service.
 *
 * XXX: This should give the same result during policy registration and
 * unregistration, and for all partitions of a service; so the result should
 * not depend on temporal service or other properties that may influence the
 * result.
 */
typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
				       const struct ptlrpc_nrs_pol_desc *desc);

struct ptlrpc_nrs_pol_conf {
	/**
	 * Human-readable policy name
	 */
	char				nc_name[NRS_POL_NAME_MAX];
	/**
	 * NRS operations for this policy
	 */
	const struct ptlrpc_nrs_pol_ops	*nc_ops;
	/**
	 * Service compatibility predicate
	 */
	nrs_pol_desc_compat_t		nc_compat;
	/**
	 * Set for policies that support a single ptlrpc service, i.e. ones that
	 * have \a pd_compat set to nrs_policy_compat_one(). The variable value
	 * depicts the name of the single service that such policies are
	 * compatible with.
	 */
	const char			*nc_compat_svc_name;
	/**
	 * Owner module for this policy descriptor; policies registering from a
	 * different module to the one the NRS framework is held within
	 * (currently ptlrpc), should set this field to THIS_MODULE.
	 */
	struct module			*nc_owner;
	/**
	 * Policy registration flags; a bitmask of \e nrs_policy_flags
	 */
	unsigned			nc_flags;
};
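
/*
 * Illustrative sketch (not part of the original header): how an external
 * module might describe and register a policy. The handler names and the
 * policy name are hypothetical; ptlrpc_nrs_policy_register() is the entry
 * point used by the in-tree policies.
 */
static const struct ptlrpc_nrs_pol_ops example_pol_ops = {
	.op_res_get	= example_res_get,	/* hypothetical handlers */
	.op_req_get	= example_req_get,
	.op_req_enqueue	= example_req_enqueue,
	.op_req_dequeue	= example_req_dequeue,
};

static struct ptlrpc_nrs_pol_conf example_pol_conf = {
	.nc_name	= "example",
	.nc_ops		= &example_pol_ops,
	.nc_compat	= nrs_policy_compat_all, /* compatible with any service */
	.nc_owner	= THIS_MODULE,		 /* external module */
	.nc_flags	= PTLRPC_NRS_FL_REG_EXTERN,
};

/* typically called from the module's init function:
 *	rc = ptlrpc_nrs_policy_register(&example_pol_conf);
 */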

/**
 * NRS policy registering descriptor
 *
 * Is used to hold a description of a policy that can be passed to NRS core in
 * order to register the policy with NRS heads in different PTLRPC services.
 */
struct ptlrpc_nrs_pol_desc {
	/**
	 * Human-readable policy name
	 */
	char				pd_name[NRS_POL_NAME_MAX];
	/**
	 * Link into nrs_core::nrs_policies
	 */
	struct list_head		pd_list;
	/**
	 * NRS operations for this policy
	 */
	const struct ptlrpc_nrs_pol_ops	*pd_ops;
	/**
	 * Service compatibility predicate
	 */
	nrs_pol_desc_compat_t		pd_compat;
	/**
	 * Set for policies that are compatible with only one PTLRPC service.
	 *
	 * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
	 */
	const char			*pd_compat_svc_name;
	/**
	 * Owner module for this policy descriptor.
	 *
	 * We need to hold a reference to the module whenever we might make use
	 * of any of the module's contents, i.e.
	 * - If one or more instances of the policy are at a state where they
	 *   might be handling a request, i.e.
	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
	 *   call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
	 *   is taken on the module when
	 *   \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
	 *   becomes 0, so that we hold only one reference to the module maximum
	 *   at any time.
	 *
	 * We do not need to hold a reference to the module, even though we
	 * might use code and data from the module, in the following cases:
	 * - During external policy registration, because this should happen in
	 *   the module's init() function, in which case the module is safe from
	 *   removal because a reference is being held on the module by the
	 *   kernel, and iirc kmod (and I guess module-init-tools also) will
	 *   serialize any racing processes properly anyway.
	 * - During external policy unregistration, because this should happen
	 *   in a module's exit() function, and any attempts to start a policy
	 *   instance would need to take a reference on the module, and this is
	 *   not possible once we have reached the point where the exit()
	 *   handler is called.
	 * - During service registration and unregistration, as service setup
	 *   and cleanup, and policy registration, unregistration and policy
	 *   instance starting, are serialized by \e nrs_core::nrs_mutex, so
	 *   as long as users adhere to the convention of registering policies
	 *   in init() and unregistering them in module exit() functions, there
	 *   should not be a race between these operations.
	 * - During any policy-specific lprocfs operations, because a reference
	 *   is held by the kernel on a proc entry that has been entered by a
	 *   syscall, so as long as proc entries are removed during
	 *   unregistration time, unregistration and lprocfs operations will be
	 *   properly serialized.
	 */
	struct module			*pd_owner;
	/**
	 * Bitmask of \e nrs_policy_flags
	 */
	unsigned			pd_flags;
	/**
	 * # of references on this descriptor
	 */
	atomic_t			pd_refs;
};

/**
 * NRS policy state
 *
 * Policies transition from one state to the other during their lifetime
 */
enum ptlrpc_nrs_pol_state {
	/**
	 * Not a valid policy state.
	 */
	NRS_POL_STATE_INVALID,
	/**
	 * Policies are at this state either at the start of their life, or
	 * transition here when the user selects a different policy to act
	 * as the primary one.
	 */
	NRS_POL_STATE_STOPPED,
	/**
	 * Policy is in the process of stopping
	 */
	NRS_POL_STATE_STOPPING,
	/**
	 * Policy is in the process of starting
	 */
	NRS_POL_STATE_STARTING,
	/**
	 * A policy is in this state in two cases:
	 * - it is the fallback policy, which is always in this state.
	 * - it has been activated by the user; i.e. it is the primary policy,
	 */
	NRS_POL_STATE_STARTED,
};

/**
 * NRS policy information
 *
 * Used for obtaining information on the status of a policy via lprocfs
 */
struct ptlrpc_nrs_pol_info {
	/**
	 * Policy name
	 */
	char				pi_name[NRS_POL_NAME_MAX];
	/**
	 * Current policy state
	 */
	enum ptlrpc_nrs_pol_state	pi_state;
	/**
	 * # RPCs enqueued for later dispatching by the policy
	 */
	long				pi_req_queued;
	/**
	 * # RPCs started for dispatch by the policy
	 */
	long				pi_req_started;
	/**
	 * Is this a fallback policy?
	 */
	unsigned			pi_fallback:1;
};

/**
 * NRS policy
 *
 * There is one instance of this for each policy in each NRS head of each
 * PTLRPC service partition.
 */
struct ptlrpc_nrs_policy {
	/**
	 * Linkage into the NRS head's list of policies,
	 * ptlrpc_nrs:nrs_policy_list
	 */
	struct list_head		pol_list;
	/**
	 * Linkage into the NRS head's list of policies with enqueued
	 * requests ptlrpc_nrs:nrs_policy_queued
	 */
	struct list_head		pol_list_queued;
	/**
	 * Current state of this policy
	 */
	enum ptlrpc_nrs_pol_state	pol_state;
	/**
	 * Bitmask of nrs_policy_flags
	 */
	unsigned			pol_flags;
	/**
	 * # RPCs enqueued for later dispatching by the policy
	 */
	long				pol_req_queued;
	/**
	 * # RPCs started for dispatch by the policy
	 */
	long				pol_req_started;
	/**
	 * Usage reference count taken on the policy instance
	 */
	long				pol_ref;
	/**
	 * The NRS head this policy has been created at
	 */
	struct ptlrpc_nrs		*pol_nrs;
	/**
	 * Private policy data; varies by policy type
	 */
	void				*pol_private;
	/**
	 * Policy descriptor for this policy instance.
	 */
	struct ptlrpc_nrs_pol_desc	*pol_desc;
};

/**
 * NRS resource
 *
 * Resources are embedded into two types of NRS entities:
 * - Inside NRS policies, in the policy's private data in
 *   ptlrpc_nrs_policy::pol_private
 * - In objects that act as prime-level scheduling entities in different NRS
 *   policies; e.g. on a policy that performs round robin or similar order
 *   scheduling across client NIDs, there would be one NRS resource per unique
 *   client NID. On a policy which performs round robin scheduling across
 *   backend filesystem objects, there would be one resource associated with
 *   each of the backend filesystem objects partaking in the scheduling
 *   performed by the policy.
 *
 * NRS resources share a parent-child relationship, in which resources embedded
 * in policy instances are the parent entities, with all scheduling entities
 * a policy schedules across being the children, thus forming a simple resource
 * hierarchy. This hierarchy may be extended with one or more levels in the
 * future if the ability to have more than one primary policy is added.
 *
 * Upon request initialization, references to the then active NRS policies are
 * taken and used to later handle the dispatching of the request with one of
 * these policies.
 *
 * \see nrs_resource_get_safe()
 * \see ptlrpc_nrs_req_add()
 */
struct ptlrpc_nrs_resource {
	/**
	 * This NRS resource's parent; is NULL for resources embedded in NRS
	 * policy instances; i.e. those are top-level ones.
	 */
	struct ptlrpc_nrs_resource	*res_parent;
	/**
	 * The policy associated with this resource.
	 */
	struct ptlrpc_nrs_policy	*res_policy;
};

enum {
	NRS_RES_FALLBACK,
	NRS_RES_PRIMARY,
	NRS_RES_MAX
};

/* \name fifo
 *
 * FIFO policy
 *
 * This policy is a logical wrapper around previous, non-NRS functionality.
 * It dispatches RPCs in the same order as they arrive from the network. This
 * policy is currently used as the fallback policy, and the only enabled policy
 * on all NRS heads of all PTLRPC service partitions.
 * @{
 */

/**
 * Private data structure for the FIFO policy
 */
struct nrs_fifo_head {
	/**
	 * Resource object for policy instance.
	 */
	struct ptlrpc_nrs_resource	fh_res;
	/**
	 * List of queued requests.
	 */
	struct list_head		fh_list;
	/**
	 * For debugging purposes.
	 */
	__u64				fh_sequence;
};

struct nrs_fifo_req {
	struct list_head	fr_list;
	__u64			fr_sequence;
};

/** @} fifo */

/**
 * \name CRR-N
 *
 * CRR-N, Client Round Robin over NIDs
 * @{
 */

/**
 * Private data structure for the CRR-N NRS policy
 */
struct nrs_crrn_net {
	struct ptlrpc_nrs_resource	cn_res;
	cfs_binheap_t			*cn_binheap;
	struct cfs_hash			*cn_cli_hash;
	/**
	 * Used when a new scheduling round commences, in order to synchronize
	 * all clients with the new round number.
	 */
	__u64				cn_round;
	/**
	 * Determines the relevant ordering amongst request batches within a
	 * scheduling round.
	 */
	__u64				cn_sequence;
	/**
	 * Round Robin quantum; the maximum number of RPCs that each request
	 * batch for each client can have in a scheduling round.
	 */
	__u16				cn_quantum;
};

/**
 * Object representing a client in CRR-N, as identified by its NID
 */
struct nrs_crrn_client {
	struct ptlrpc_nrs_resource	cc_res;
	struct hlist_node		cc_hnode;
	lnet_nid_t			cc_nid;
	/**
	 * The round number against which this client is currently scheduling
	 * requests.
	 */
	__u64				cc_round;
	/**
	 * The sequence number used for requests scheduled by this client during
	 * the current round number.
	 */
	__u64				cc_sequence;
	atomic_t			cc_ref;
	/**
	 * Round Robin quantum; the maximum number of RPCs the client is allowed
	 * to schedule in a single batch of each round.
	 */
	__u16				cc_quantum;
	/**
	 * # of pending requests for this client, on all existing rounds
	 */
	__u16				cc_active;
};

/**
 * CRR-N NRS request definition
 */
struct nrs_crrn_req {
	/**
	 * Round number for this request; shared with all other requests in the
	 * same batch.
	 */
	__u64			cr_round;
	/**
	 * Sequence number for this request; shared with all other requests in
	 * the same batch.
	 */
	__u64			cr_sequence;
};
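
/*
 * Illustrative sketch (not part of the original header): CRR-N orders
 * requests by (round, sequence), so batches from different clients
 * interleave fairly. A binheap comparator to that effect might look like
 * the following; the in-tree version lives in the CRR-N policy code.
 */
static inline int example_crrn_req_compare(struct nrs_crrn_req *a,
					   struct nrs_crrn_req *b)
{
	/* earlier rounds are always served first... */
	if (a->cr_round != b->cr_round)
		return a->cr_round < b->cr_round ? -1 : 1;
	/* ...and within a round, arrival order (sequence) decides */
	return a->cr_sequence < b->cr_sequence ? -1 : 1;
}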

/**
 * CRR-N policy operations.
 */
enum nrs_ctl_crr {
	/**
	 * Read the RR quantum size of a CRR-N policy.
	 */
	NRS_CTL_CRRN_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
	/**
	 * Write the RR quantum size of a CRR-N policy.
	 */
	NRS_CTL_CRRN_WR_QUANTUM,
};

/** @} CRR-N */

/**
 * \name ORR/TRR
 *
 * ORR/TRR (Object-based Round Robin/Target-based Round Robin) NRS policies
 * @{
 */

/**
 * Lower and upper byte offsets of a brw RPC
 */
struct nrs_orr_req_range {
	__u64	or_start;
	__u64	or_end;
};

/**
 * RPC types supported by the ORR/TRR policies
 */
enum nrs_orr_supp {
	NOS_OST_READ	= (1 << 0),
	NOS_OST_WRITE	= (1 << 1),
	NOS_OST_RW	= (NOS_OST_READ | NOS_OST_WRITE),
	/**
	 * Default value for policies.
	 */
	NOS_DFLT	= NOS_OST_READ
};

/**
 * As unique keys for grouping RPCs together, we use the object's OST FID for
 * the ORR policy, and the OST index for the TRR policy.
 *
 * XXX: We waste some space for TRR policy instances by using a union, but it
 * allows us to consolidate some of the code between ORR and TRR, and these
 * policies will probably eventually merge into one anyway.
 */
struct nrs_orr_key {
	union {
		/** object FID for ORR */
		struct lu_fid	ok_fid;
		/** OST index for TRR */
		__u32		ok_idx;
	};
};

/**
 * The largest base string for unique hash/slab object names is
 * "nrs_orr_reg_", so 13 characters. We add 3 to this to be used for the CPT
 * id number, so this _should_ be more than enough for the maximum number of
 * CPTs on any system. If it does happen that this statement is incorrect,
 * nrs_orr_genobjname() will inevitably yield a non-unique name and cause
 * kmem_cache_create() to complain (on Linux), so the erroneous situation
 * will hopefully not go unnoticed.
 */
#define NRS_ORR_OBJ_NAME_MAX	(sizeof("nrs_orr_reg_") + 3)
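
/*
 * Illustrative sketch (not part of the original header) of the kind of
 * name construction nrs_orr_genobjname() performs: the base string plus
 * the CPT id, bounded by NRS_ORR_OBJ_NAME_MAX (the exact in-tree format
 * may differ).
 */
static inline void example_orr_genobjname(char *buf, int cpt_id)
{
	/* yields e.g. "nrs_orr_reg_0", "nrs_orr_reg_12", ... */
	snprintf(buf, NRS_ORR_OBJ_NAME_MAX, "nrs_orr_reg_%d", cpt_id);
}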

/**
 * Private data structure for the ORR and TRR NRS policies
 */
struct nrs_orr_data {
	struct ptlrpc_nrs_resource	od_res;
	cfs_binheap_t			*od_binheap;
	struct cfs_hash			*od_obj_hash;
	struct kmem_cache		*od_cache;
	/**
	 * Used when a new scheduling round commences, in order to synchronize
	 * all object or OST batches with the new round number.
	 */
	__u64				od_round;
	/**
	 * Determines the relevant ordering amongst request batches within a
	 * scheduling round.
	 */
	__u64				od_sequence;
	/**
	 * RPC types that are currently supported.
	 */
	enum nrs_orr_supp		od_supp;
	/**
	 * Round Robin quantum; the maximum number of RPCs that each request
	 * batch for each object or OST can have in a scheduling round.
	 */
	__u16				od_quantum;
	/**
	 * Whether to use physical disk offsets or logical file offsets.
	 */
	bool				od_physical;
	/**
	 * XXX: We need to provide a persistently allocated string to hold
	 * unique object names for this policy, since in currently supported
	 * versions of Linux by Lustre, kmem_cache_create() just sets a pointer
	 * to the name string provided. kstrdup() is used in the version of
	 * kmem_cache_create() in current Linux mainline, so we may be able to
	 * remove this in the future.
	 */
	char				od_objname[NRS_ORR_OBJ_NAME_MAX];
};

/**
 * Represents a backend-fs object or OST in the ORR and TRR policies
 * respectively
 */
struct nrs_orr_object {
	struct ptlrpc_nrs_resource	oo_res;
	struct hlist_node		oo_hnode;
	/**
	 * The round number against which requests are being scheduled for this
	 * object or OST
	 */
	__u64				oo_round;
	/**
	 * The sequence number used for requests scheduled for this object or
	 * OST during the current round number.
	 */
	__u64				oo_sequence;
	/**
	 * The key of the object or OST for which this structure instance is
	 * scheduling RPCs
	 */
	struct nrs_orr_key		oo_key;
	atomic_t			oo_ref;
	/**
	 * Round Robin quantum; the maximum number of RPCs that are allowed to
	 * be scheduled for the object or OST in a single batch of each round.
	 */
	__u16				oo_quantum;
	/**
	 * # of pending requests for this object or OST, on all existing rounds
	 */
	__u16				oo_active;
};

/**
 * ORR/TRR NRS request definition
 */
struct nrs_orr_req {
	/**
	 * The offset range this request covers
	 */
	struct nrs_orr_req_range	or_range;
	/**
	 * Round number for this request; shared with all other requests in the
	 * same batch.
	 */
	__u64				or_round;
	/**
	 * Sequence number for this request; shared with all other requests in
	 * the same batch.
	 */
	__u64				or_sequence;
	/**
	 * For debugging purposes.
	 */
	struct nrs_orr_key		or_key;
	/**
	 * An ORR policy instance has filled in request information while
	 * enqueueing the request on the service partition's regular NRS head.
	 */
	unsigned int			or_orr_set:1;
	/**
	 * A TRR policy instance has filled in request information while
	 * enqueueing the request on the service partition's regular NRS head.
	 */
	unsigned int			or_trr_set:1;
	/**
	 * Request offset ranges have been filled in with logical offset
	 * values.
	 */
	unsigned int			or_logical_set:1;
	/**
	 * Request offset ranges have been filled in with physical offset
	 * values.
	 */
	unsigned int			or_physical_set:1;
};

/** @} ORR/TRR */

/**
 * NRS request
 *
 * Instances of this object exist embedded within ptlrpc_request; the main
 * purpose of this object is to hold references to the request's resources
 * for the lifetime of the request, and to hold properties that policies
 * use for determining the request's scheduling priority.
 */
struct ptlrpc_nrs_request {
	/**
	 * The request's resource hierarchy.
	 */
	struct ptlrpc_nrs_resource	*nr_res_ptrs[NRS_RES_MAX];
	/**
	 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
	 * policy that was used to enqueue the request.
	 *
	 * \see nrs_request_enqueue()
	 */
	unsigned			nr_res_idx;
	unsigned			nr_initialized:1;
	unsigned			nr_enqueued:1;
	unsigned			nr_started:1;
	unsigned			nr_finalized:1;
	cfs_binheap_node_t		nr_node;

	/**
	 * Policy-specific fields, used for determining a request's scheduling
	 * priority, and other supporting functionality.
	 */
	union {
		/**
		 * Fields for the FIFO policy
		 */
		struct nrs_fifo_req	fifo;
		/**
		 * CRR-N request definition
		 */
		struct nrs_crrn_req	crr;
		/** ORR and TRR share the same request definition */
		struct nrs_orr_req	orr;
	} nr_u;
	/**
	 * Externally-registering policies may want to use this to allocate
	 * their own request properties.
	 */
	void				*ext;
};

/** @} nrs */

/**
 * Basic request prioritization operations structure.
 * The whole idea is centered around locks and RPCs that might affect locks.
 * When a lock is contended we try to give priority to RPCs that might lead
 * to the fastest release of that lock.
 * Currently implemented only for OSTs, in a way that gives all IO and
 * truncate RPCs coming from a locked region where a lock is contended
 * priority over other requests.
 */
struct ptlrpc_hpreq_ops {
	/**
	 * Check if the lock handle of the given lock is the same as
	 * taken from the request.
	 */
	int  (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
	/**
	 * Check if the request is a high priority one.
	 */
	int  (*hpreq_check)(struct ptlrpc_request *);
	/**
	 * Called after the request has been handled.
	 */
	void (*hpreq_fini)(struct ptlrpc_request *);
};
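
/*
 * Illustrative sketch (not part of the original header): how the server
 * side might consult these callbacks when deciding whether an incoming
 * request should go to the high-priority NRS head; the real logic lives
 * in the ptlrpc service code.
 */
static inline bool example_req_is_hp(struct ptlrpc_request *req)
{
	/* rq_ops is only set for request types that support HP handling */
	return req->rq_ops != NULL &&
	       req->rq_ops->hpreq_check != NULL &&
	       req->rq_ops->hpreq_check(req) > 0;
}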

/**
 * Represents a remote procedure call.
 *
 * This is a staple structure used by everybody wanting to send a request
 * in Lustre.
 */
struct ptlrpc_request {
	/* Request type: one of PTL_RPC_MSG_* */
	int			rq_type;
	/** Result of request processing */
	int			rq_status;
	/**
	 * Linkage item through which this request is included into
	 * sending/delayed lists on client and into rqbd list on server
	 */
	struct list_head	rq_list;
	/**
	 * Server side list of incoming unserved requests sorted by arrival
	 * time. Traversed from time to time to notice about-to-expire
	 * requests and send back "early replies" to clients to let them
	 * know the server is alive and well, just too busy to service their
	 * requests in time
	 */
	struct list_head	rq_timed_list;
	/** server-side history, used for debugging purposes. */
	struct list_head	rq_history_list;
	/** server-side per-export list */
	struct list_head	rq_exp_list;
	/** server-side hp handlers */
	struct ptlrpc_hpreq_ops	*rq_ops;

	/** initial thread servicing this request */
	struct ptlrpc_thread	*rq_svc_thread;

	/** history sequence # */
	__u64			rq_history_seq;
	/** \addtogroup nrs
	 * @{
	 */
	/** stub for NRS request */
	struct ptlrpc_nrs_request rq_nrq;
	/** @} nrs */
	/** the index of service's srv_at_array into which request is linked */
	time_t			rq_at_index;
	/** Lock to protect request flags and some other important bits, like
	 * rq_list
	 */
	spinlock_t		rq_lock;
	/** client-side flags are serialized by rq_lock */
	unsigned int rq_intr:1, rq_replied:1, rq_err:1,
		rq_timedout:1, rq_resend:1, rq_restart:1,
		/**
		 * when ->rq_replay is set, request is kept by the client even
		 * after the server commits the corresponding transaction. This
		 * is used for operations that require a sequence of multiple
		 * requests to be replayed. The only example currently is file
		 * open/close. When the last request in such a sequence is
		 * committed, ->rq_replay is cleared on all requests in the
		 * sequence.
		 */
		rq_replay:1,
		rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
		rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
		rq_early:1, rq_must_unlink:1,
		rq_memalloc:1,		/* req originated from "kswapd" */
		/* server-side flags */
		rq_packed_final:1,	/* packed final reply */
		rq_hp:1,		/* high priority RPC */
		rq_at_linked:1,		/* link into service's srv_at_array */
		rq_reply_truncate:1,
		rq_committed:1,
		/* whether the "rq_set" is a valid one */
		rq_invalid_rqset:1,
		rq_generation_set:1,
		/* do not resend request on -EINPROGRESS */
		rq_no_retry_einprogress:1,
		/* allow the req to be sent if the import is in recovery
		 * status */
		rq_allow_replay:1;
1608
1609 unsigned int rq_nr_resend;
1610
1611 enum rq_phase rq_phase; /* one of RQ_PHASE_* */
1612 enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
1613 atomic_t rq_refcount;/* client-side refcount for SENT race,
1614 server-side refcounf for multiple replies */
1615
1616 /** Portal to which this request would be sent */
1617 short rq_request_portal; /* XXX FIXME bug 249 */
1618 /** Portal where to wait for reply and where reply would be sent */
1619 short rq_reply_portal; /* XXX FIXME bug 249 */
1620
1621 /**
1622 * client-side:
1623 * !rq_truncate : # reply bytes actually received,
1624 * rq_truncate : required repbuf_len for resend
1625 */
1626 int rq_nob_received;
1627 /** Request length */
1628 int rq_reqlen;
1629 /** Reply length */
1630 int rq_replen;
1631 /** Request message - what client sent */
1632 struct lustre_msg *rq_reqmsg;
1633 /** Reply message - server response */
1634 struct lustre_msg *rq_repmsg;
1635 /** Transaction number */
1636 __u64 rq_transno;
1637 /** xid */
1638 __u64 rq_xid;
1639 /**
1640 * List item to for replay list. Not yet commited requests get linked
1641 * there.
1642 * Also see \a rq_replay comment above.
1643 */
1644 struct list_head rq_replay_list;
1645
1646 /**
1647 * security and encryption data
1648 * @{ */
1649 struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
1650 struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
1651 struct list_head rq_ctx_chain; /**< link to waited ctx */
1652
1653 struct sptlrpc_flavor rq_flvr; /**< for client & server */
1654 enum lustre_sec_part rq_sp_from;
1655
1656 /* client/server security flags */
1657 unsigned int
1658 rq_ctx_init:1, /* context initiation */
1659 rq_ctx_fini:1, /* context destroy */
1660 rq_bulk_read:1, /* request bulk read */
1661 rq_bulk_write:1, /* request bulk write */
1662 /* server authentication flags */
1663 rq_auth_gss:1, /* authenticated by gss */
1664 rq_auth_remote:1, /* authed as remote user */
1665 rq_auth_usr_root:1, /* authed as root */
1666 rq_auth_usr_mdt:1, /* authed as mdt */
1667 rq_auth_usr_ost:1, /* authed as ost */
1668 /* security tfm flags */
1669 rq_pack_udesc:1,
1670 rq_pack_bulk:1,
1671 /* doesn't expect reply FIXME */
1672 rq_no_reply:1,
1673 rq_pill_init:1; /* pill initialized */
1674
1675 uid_t rq_auth_uid; /* authed uid */
1676 uid_t rq_auth_mapped_uid; /* authed uid mapped to */
1677
1678 /* (server side), pointed directly into req buffer */
1679 struct ptlrpc_user_desc *rq_user_desc;
1680
1681 /* various buffer pointers */
1682 struct lustre_msg *rq_reqbuf; /* req wrapper */
1683 char *rq_repbuf; /* rep buffer */
1684 struct lustre_msg *rq_repdata; /* rep wrapper msg */
1685 struct lustre_msg *rq_clrbuf; /* only in priv mode */
1686 int rq_reqbuf_len; /* req wrapper buf len */
1687 int rq_reqdata_len; /* req wrapper msg len */
1688 int rq_repbuf_len; /* rep buffer len */
1689 int rq_repdata_len; /* rep wrapper msg len */
1690 int rq_clrbuf_len; /* only in priv mode */
1691 int rq_clrdata_len; /* only in priv mode */
1692
1693 /** early replies go to offset 0, regular replies go after that */
1694 unsigned int rq_reply_off;
1695
1696 /** @} */
1697
1698 /** Fields that help to see if request and reply were swabbed or not */
1699 __u32 rq_req_swab_mask;
1700 __u32 rq_rep_swab_mask;
1701
1702 /** What was import generation when this request was sent */
1703 int rq_import_generation;
1704 enum lustre_imp_state rq_send_state;
1705
1706 /** how many early replies (for stats) */
1707 int rq_early_count;
1708
1709 /** client+server request */
1710 lnet_handle_md_t rq_req_md_h;
1711 struct ptlrpc_cb_id rq_req_cbid;
1712 /** optional time limit for send attempts */
1713 cfs_duration_t rq_delay_limit;
1714 /** time request was first queued */
1715 cfs_time_t rq_queued_time;
1716
1717 /* server-side... */
1718 /** request arrival time */
1719 struct timeval rq_arrival_time;
1720 /** separated reply state */
1721 struct ptlrpc_reply_state *rq_reply_state;
1722 /** incoming request buffer */
1723 struct ptlrpc_request_buffer_desc *rq_rqbd;
1724
1725 /** client-only incoming reply */
1726 lnet_handle_md_t rq_reply_md_h;
1727 wait_queue_head_t rq_reply_waitq;
1728 struct ptlrpc_cb_id rq_reply_cbid;
1729
1730 /** our LNet NID */
1731 lnet_nid_t rq_self;
1732 /** Peer description (the other side) */
1733 lnet_process_id_t rq_peer;
1734 /** Server-side, export on which request was received */
1735 struct obd_export *rq_export;
1736 /** Client side, import where request is being sent */
1737 struct obd_import *rq_import;
1738
1739 /** Replay callback, called after request is replayed at recovery */
1740 void (*rq_replay_cb)(struct ptlrpc_request *);
1741 /**
1742 * Commit callback, called when request is committed and about to be
1743 * freed.
1744 */
1745 void (*rq_commit_cb)(struct ptlrpc_request *);
 1746	/** Opaque data for replay and commit callbacks. */
1747 void *rq_cb_data;
1748
1749 /** For bulk requests on client only: bulk descriptor */
1750 struct ptlrpc_bulk_desc *rq_bulk;
1751
1752 /** client outgoing req */
1753 /**
1754 * when request/reply sent (secs), or time when request should be sent
1755 */
1756 time_t rq_sent;
1757 /** time for request really sent out */
1758 time_t rq_real_sent;
1759
1760 /** when request must finish. volatile
1761 * so that servers' early reply updates to the deadline aren't
1762 * kept in per-cpu cache */
1763 volatile time_t rq_deadline;
1764 /** when req reply unlink must finish. */
1765 time_t rq_reply_deadline;
1766 /** when req bulk unlink must finish. */
1767 time_t rq_bulk_deadline;
1768 /**
1769 * service time estimate (secs)
 1770	 * If the request is not served by this time, it is marked as timed out.
1771 */
1772 int rq_timeout;
1773
1774 /** Multi-rpc bits */
1775 /** Per-request waitq introduced by bug 21938 for recovery waiting */
1776 wait_queue_head_t rq_set_waitq;
1777 /** Link item for request set lists */
1778 struct list_head rq_set_chain;
1779 /** Link back to the request set */
1780 struct ptlrpc_request_set *rq_set;
1781 /** Async completion handler, called when reply is received */
1782 ptlrpc_interpterer_t rq_interpret_reply;
1783 /** Async completion context */
1784 union ptlrpc_async_args rq_async_args;
1785
1786 /** Pool if request is from preallocated list */
1787 struct ptlrpc_request_pool *rq_pool;
1788
1789 struct lu_context rq_session;
1790 struct lu_context rq_recov_session;
1791
1792 /** request format description */
1793 struct req_capsule rq_pill;
1794};
1795
1796/**
 1797 * Call completion handler for rpc if any, return its status or original
1798 * rc if there was no handler defined for this request.
1799 */
1800static inline int ptlrpc_req_interpret(const struct lu_env *env,
1801 struct ptlrpc_request *req, int rc)
1802{
1803 if (req->rq_interpret_reply != NULL) {
1804 req->rq_status = req->rq_interpret_reply(env, req,
1805 &req->rq_async_args,
1806 rc);
1807 return req->rq_status;
1808 }
1809 return rc;
1810}
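
/*
 * Illustrative sketch, not part of the original header: a minimal
 * rq_interpret_reply callback with the ptlrpc_interpterer_t signature
 * that ptlrpc_req_interpret() invokes above.  The function name is
 * hypothetical; "args" points at req->rq_async_args.
 */
static inline int ptlrpc_example_interpret(const struct lu_env *env,
					   struct ptlrpc_request *req,
					   void *args, int rc)
{
	/* whatever is returned here is stored back in req->rq_status */
	return rc;
}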
1811
1812/** \addtogroup nrs
1813 * @{
1814 */
1815int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf);
1816int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf);
1817void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req);
1818void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
1819 struct ptlrpc_nrs_pol_info *info);
1820
1821/*
1822 * Can the request be moved from the regular NRS head to the high-priority NRS
1823 * head (of the same PTLRPC service partition), if any?
1824 *
1825 * For a reliable result, this should be checked under svcpt->scp_req lock.
1826 */
1827static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
1828{
1829 struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
1830
1831 /**
1832 * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
1833 * request has been enqueued first, and ptlrpc_nrs_request::nr_started
 1834	 * to make sure it has not been scheduled yet (analogous to the previous
 1835	 * non-NRS check of !list_empty(&ptlrpc_request::rq_list)).
1836 */
1837 return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
1838}
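
/*
 * Illustrative sketch, not part of the original header: an optimistic
 * pre-check before asking NRS to move a request to the high-priority
 * head.  For a reliable answer the check must be redone under
 * svcpt->scp_req_lock, as noted above; the helper name is hypothetical.
 */
static inline void ptlrpc_example_try_hp_move(struct ptlrpc_request *req)
{
	if (ptlrpc_nrs_req_can_move(req))
		ptlrpc_nrs_req_hp_move(req);
}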
1839/** @} nrs */
1840
1841/**
1842 * Returns 1 if request buffer at offset \a index was already swabbed
1843 */
1844static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
1845{
1846 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
1847 return req->rq_req_swab_mask & (1 << index);
1848}
1849
1850/**
1851 * Returns 1 if request reply buffer at offset \a index was already swabbed
1852 */
1853static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
1854{
1855 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
1856 return req->rq_rep_swab_mask & (1 << index);
1857}
1858
1859/**
1860 * Returns 1 if request needs to be swabbed into local cpu byteorder
1861 */
1862static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
1863{
1864 return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1865}
1866
1867/**
1868 * Returns 1 if request reply needs to be swabbed into local cpu byteorder
1869 */
1870static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
1871{
1872 return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1873}
1874
1875/**
1876 * Mark request buffer at offset \a index that it was already swabbed
1877 */
1878static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
1879{
1880 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
1881 LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
1882 req->rq_req_swab_mask |= 1 << index;
1883}
1884
1885/**
1886 * Mark request reply buffer at offset \a index that it was already swabbed
1887 */
1888static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
1889{
1890 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
1891 LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
1892 req->rq_rep_swab_mask |= 1 << index;
1893}
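
/*
 * Illustrative sketch, not part of the original header: the swab masks
 * are per-buffer bitmaps, so "swab once" logic is a test followed by a
 * set.  MSG_PTLRPC_BODY_OFF is assumed to be a valid buffer index here.
 */
static inline int ptlrpc_example_mark_body_swabbed(struct ptlrpc_request *req)
{
	if (lustre_req_swabbed(req, MSG_PTLRPC_BODY_OFF))
		return 0;	/* already swabbed earlier */
	lustre_set_req_swabbed(req, MSG_PTLRPC_BODY_OFF);
	return 1;		/* caller should byte-swap the buffer now */
}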
1894
1895/**
1896 * Convert numerical request phase value \a phase into text string description
1897 */
1898static inline const char *
1899ptlrpc_phase2str(enum rq_phase phase)
1900{
1901 switch (phase) {
1902 case RQ_PHASE_NEW:
1903 return "New";
1904 case RQ_PHASE_RPC:
1905 return "Rpc";
1906 case RQ_PHASE_BULK:
1907 return "Bulk";
1908 case RQ_PHASE_INTERPRET:
1909 return "Interpret";
1910 case RQ_PHASE_COMPLETE:
1911 return "Complete";
1912 case RQ_PHASE_UNREGISTERING:
1913 return "Unregistering";
1914 default:
1915 return "?Phase?";
1916 }
1917}
1918
1919/**
 1920 * Convert numerical request phase of the request \a req into text string
1921 * description
1922 */
1923static inline const char *
1924ptlrpc_rqphase2str(struct ptlrpc_request *req)
1925{
1926 return ptlrpc_phase2str(req->rq_phase);
1927}
1928
1929/**
1930 * Debugging functions and helpers to print request structure into debug log
1931 * @{
1932 */
1933/* Spare the preprocessor, spoil the bugs. */
1934#define FLAG(field, str) (field ? str : "")
1935
1936/** Convert bit flags into a string */
1937#define DEBUG_REQ_FLAGS(req) \
1938 ptlrpc_rqphase2str(req), \
1939 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
1940 FLAG(req->rq_err, "E"), \
1941 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
1942 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
1943 FLAG(req->rq_no_resend, "N"), \
1944 FLAG(req->rq_waiting, "W"), \
1945 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
1946 FLAG(req->rq_committed, "M")
1947
1948#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
1949
1950void _debug_req(struct ptlrpc_request *req,
1951 struct libcfs_debug_msg_data *data, const char *fmt, ...)
1952 __attribute__ ((format (printf, 3, 4)));
1953
1954/**
 1955 * Helper that decides if we need to print request according to current debug
1956 * level settings
1957 */
1958#define debug_req(msgdata, mask, cdls, req, fmt, a...) \
1959do { \
1960 CFS_CHECK_STACK(msgdata, mask, cdls); \
1961 \
1962 if (((mask) & D_CANTMASK) != 0 || \
1963 ((libcfs_debug & (mask)) != 0 && \
1964 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
1965 _debug_req((req), msgdata, fmt, ##a); \
 1966} while (0)
1967
1968/**
 1969 * This is the debug print function you need to use to print request structure
 1970 * content into the lustre debug log.
 1971 * For most callers (level is a constant) this is resolved at compile time. */
1972#define DEBUG_REQ(level, req, fmt, args...) \
1973do { \
1974 if ((level) & (D_ERROR | D_WARNING)) { \
 1975		static struct cfs_debug_limit_state cdls;		      \
1976 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
1977 debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
1978 } else { \
1979 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
1980 debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
1981 } \
1982} while (0)
1983/** @} */
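
/*
 * Illustrative usage sketch, not part of the original header: callers
 * pass only their own format and arguments; _debug_req() emits the
 * request state (phase and DEBUG_REQ_FLAGS) along with the message.
 * The helper name is hypothetical.
 */
static inline void ptlrpc_example_log_req(struct ptlrpc_request *req)
{
	DEBUG_REQ(D_WARNING, req, "slow request, timeout=%d", req->rq_timeout);
}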
1984
1985/**
1986 * Structure that defines a single page of a bulk transfer
1987 */
1988struct ptlrpc_bulk_page {
1989 /** Linkage to list of pages in a bulk */
1990 struct list_head bp_link;
1991 /**
1992 * Number of bytes in a page to transfer starting from \a bp_pageoffset
1993 */
1994 int bp_buflen;
1995 /** offset within a page */
1996 int bp_pageoffset;
1997 /** The page itself */
1998 struct page *bp_page;
1999};
2000
2001#define BULK_GET_SOURCE 0
2002#define BULK_PUT_SINK 1
2003#define BULK_GET_SINK 2
2004#define BULK_PUT_SOURCE 3
2005
2006/**
2007 * Definition of bulk descriptor.
 2008 * Bulks are special "two-phase" RPCs where the initial request message
 2009 * is sent first and is followed by a transfer (or receipt) of a large
 2010 * amount of data to be settled into pages referenced from the bulk descriptors.
 2011 * Bulk transfers (the actual data following the small requests) are done
2012 * on separate LNet portals.
2013 * In lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
2014 * Another user is readpage for MDT.
2015 */
2016struct ptlrpc_bulk_desc {
2017 /** completed with failure */
2018 unsigned long bd_failure:1;
2019 /** {put,get}{source,sink} */
2020 unsigned long bd_type:2;
2021 /** client side */
2022 unsigned long bd_registered:1;
2023 /** For serialization with callback */
2024 spinlock_t bd_lock;
2025 /** Import generation when request for this bulk was sent */
2026 int bd_import_generation;
2027 /** LNet portal for this bulk */
2028 __u32 bd_portal;
2029 /** Server side - export this bulk created for */
2030 struct obd_export *bd_export;
2031 /** Client side - import this bulk was sent on */
2032 struct obd_import *bd_import;
2033 /** Back pointer to the request */
2034 struct ptlrpc_request *bd_req;
2035 wait_queue_head_t bd_waitq; /* server side only WQ */
2036 int bd_iov_count; /* # entries in bd_iov */
2037 int bd_max_iov; /* allocated size of bd_iov */
2038 int bd_nob; /* # bytes covered */
2039 int bd_nob_transferred; /* # bytes GOT/PUT */
2040
2041 __u64 bd_last_xid;
2042
2043 struct ptlrpc_cb_id bd_cbid; /* network callback info */
2044 lnet_nid_t bd_sender; /* stash event::sender */
2045 int bd_md_count; /* # valid entries in bd_mds */
2046 int bd_md_max_brw; /* max entries in bd_mds */
2047 /** array of associated MDs */
2048 lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
2049
2050 /*
2051 * encrypt iov, size is either 0 or bd_iov_count.
2052 */
2053 lnet_kiov_t *bd_enc_iov;
2054
2055 lnet_kiov_t bd_iov[0];
2056};
2057
2058enum {
2059 SVC_STOPPED = 1 << 0,
2060 SVC_STOPPING = 1 << 1,
2061 SVC_STARTING = 1 << 2,
2062 SVC_RUNNING = 1 << 3,
2063 SVC_EVENT = 1 << 4,
2064 SVC_SIGNAL = 1 << 5,
2065};
2066
2067#define PTLRPC_THR_NAME_LEN 32
2068/**
2069 * Definition of server service thread structure
2070 */
2071struct ptlrpc_thread {
2072 /**
2073 * List of active threads in svc->srv_threads
2074 */
2075 struct list_head t_link;
2076 /**
2077 * thread-private data (preallocated memory)
2078 */
2079 void *t_data;
2080 __u32 t_flags;
2081 /**
2082 * service thread index, from ptlrpc_start_threads
2083 */
2084 unsigned int t_id;
2085 /**
2086 * service thread pid
2087 */
2088 pid_t t_pid;
2089 /**
2090 * put watchdog in the structure per thread b=14840
2091 *
 2092	 * The Lustre watchdog is removed for the client in the hope
 2093	 * that a generic watchdog can be merged into the kernel.
 2094	 * When that happens, we should add the code below back:
2095 *
2096 * struct lc_watchdog *t_watchdog;
 2097	 */
2098 /**
 2099	 * the svc this thread belongs to (b=18582)
2100 */
2101 struct ptlrpc_service_part *t_svcpt;
2102 wait_queue_head_t t_ctl_waitq;
2103 struct lu_env *t_env;
2104 char t_name[PTLRPC_THR_NAME_LEN];
2105};
2106
2107static inline int thread_is_init(struct ptlrpc_thread *thread)
2108{
2109 return thread->t_flags == 0;
2110}
2111
2112static inline int thread_is_stopped(struct ptlrpc_thread *thread)
2113{
2114 return !!(thread->t_flags & SVC_STOPPED);
2115}
2116
2117static inline int thread_is_stopping(struct ptlrpc_thread *thread)
2118{
2119 return !!(thread->t_flags & SVC_STOPPING);
2120}
2121
2122static inline int thread_is_starting(struct ptlrpc_thread *thread)
2123{
2124 return !!(thread->t_flags & SVC_STARTING);
2125}
2126
2127static inline int thread_is_running(struct ptlrpc_thread *thread)
2128{
2129 return !!(thread->t_flags & SVC_RUNNING);
2130}
2131
2132static inline int thread_is_event(struct ptlrpc_thread *thread)
2133{
2134 return !!(thread->t_flags & SVC_EVENT);
2135}
2136
2137static inline int thread_is_signal(struct ptlrpc_thread *thread)
2138{
2139 return !!(thread->t_flags & SVC_SIGNAL);
2140}
2141
2142static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
2143{
2144 thread->t_flags &= ~flags;
2145}
2146
2147static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
2148{
2149 thread->t_flags = flags;
2150}
2151
2152static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
2153{
2154 thread->t_flags |= flags;
2155}
2156
2157static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
2158 __u32 flags)
2159{
2160 if (thread->t_flags & flags) {
2161 thread->t_flags &= ~flags;
2162 return 1;
2163 }
2164 return 0;
2165}
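
/*
 * Illustrative sketch, not part of the original header: how a service
 * main loop consumes these helpers.  thread_set_flags() replaces the
 * whole flag word while thread_add_flags() ORs bits in; a thread keeps
 * serving while SVC_RUNNING is set and exits once SVC_STOPPING shows
 * up.  The helper name is hypothetical.
 */
static inline int ptlrpc_example_thread_should_stop(struct ptlrpc_thread *t)
{
	return thread_is_stopping(t) || !thread_is_running(t);
}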
2166
2167/**
2168 * Request buffer descriptor structure.
2169 * This is a structure that contains one posted request buffer for service.
 2170 * Once data lands in a buffer, the event callback creates the actual request
 2171 * and wakes one of the service threads to process the new incoming request.
2172 * More than one request can fit into the buffer.
2173 */
2174struct ptlrpc_request_buffer_desc {
2175 /** Link item for rqbds on a service */
2176 struct list_head rqbd_list;
2177 /** History of requests for this buffer */
2178 struct list_head rqbd_reqs;
2179 /** Back pointer to service for which this buffer is registered */
2180 struct ptlrpc_service_part *rqbd_svcpt;
2181 /** LNet descriptor */
2182 lnet_handle_md_t rqbd_md_h;
2183 int rqbd_refcount;
2184 /** The buffer itself */
2185 char *rqbd_buffer;
2186 struct ptlrpc_cb_id rqbd_cbid;
2187 /**
2188 * This "embedded" request structure is only used for the
2189 * last request to fit into the buffer
2190 */
2191 struct ptlrpc_request rqbd_req;
2192};
2193
2194typedef int (*svc_handler_t)(struct ptlrpc_request *req);
2195
2196struct ptlrpc_service_ops {
2197 /**
2198 * if non-NULL called during thread creation (ptlrpc_start_thread())
2199 * to initialize service specific per-thread state.
2200 */
2201 int (*so_thr_init)(struct ptlrpc_thread *thr);
2202 /**
2203 * if non-NULL called during thread shutdown (ptlrpc_main()) to
 2204	 * destroy state created by ->so_thr_init().
2205 */
2206 void (*so_thr_done)(struct ptlrpc_thread *thr);
2207 /**
2208 * Handler function for incoming requests for this service
2209 */
2210 int (*so_req_handler)(struct ptlrpc_request *req);
2211 /**
2212 * function to determine priority of the request, it's called
2213 * on every new request
2214 */
2215 int (*so_hpreq_handler)(struct ptlrpc_request *);
2216 /**
2217 * service-specific print fn
2218 */
2219 void (*so_req_printer)(void *, struct ptlrpc_request *);
2220};
2221
2222#ifndef __cfs_cacheline_aligned
 2223/* NB: put it here to reduce patch dependence */
2224# define __cfs_cacheline_aligned
2225#endif
2226
2227/**
2228 * How many high priority requests to serve before serving one normal
2229 * priority request
2230 */
2231#define PTLRPC_SVC_HP_RATIO 10
2232
2233/**
2234 * Definition of PortalRPC service.
 2235 * The service listens on a particular portal (like a tcp port)
 2236 * and performs actions for a specific server, like the IO service for OST
 2237 * or the general metadata service for MDS.
2238 */
2239struct ptlrpc_service {
2240 /** serialize /proc operations */
2241 spinlock_t srv_lock;
2242 /** most often accessed fields */
2243 /** chain thru all services */
2244 struct list_head srv_list;
2245 /** service operations table */
2246 struct ptlrpc_service_ops srv_ops;
2247 /** only statically allocated strings here; we don't clean them */
2248 char *srv_name;
2249 /** only statically allocated strings here; we don't clean them */
2250 char *srv_thread_name;
2251 /** service thread list */
2252 struct list_head srv_threads;
 2253	/** # of threads to create for each partition on initialization */
 2254	int				srv_nthrs_cpt_init;
 2255	/** limit on the number of threads for each partition */
2256 int srv_nthrs_cpt_limit;
2257 /** Root of /proc dir tree for this service */
 2258	struct proc_dir_entry		*srv_procroot;
2259 /** Pointer to statistic data for this service */
2260 struct lprocfs_stats *srv_stats;
2261 /** # hp per lp reqs to handle */
2262 int srv_hpreq_ratio;
2263 /** biggest request to receive */
2264 int srv_max_req_size;
2265 /** biggest reply to send */
2266 int srv_max_reply_size;
2267 /** size of individual buffers */
2268 int srv_buf_size;
2269 /** # buffers to allocate in 1 group */
2270 int srv_nbuf_per_group;
2271 /** Local portal on which to receive requests */
2272 __u32 srv_req_portal;
2273 /** Portal on the client to send replies to */
2274 __u32 srv_rep_portal;
2275 /**
2276 * Tags for lu_context associated with this thread, see struct
2277 * lu_context.
2278 */
2279 __u32 srv_ctx_tags;
2280 /** soft watchdog timeout multiplier */
2281 int srv_watchdog_factor;
2282 /** under unregister_service */
2283 unsigned srv_is_stopping:1;
2284
2285 /** max # request buffers in history per partition */
2286 int srv_hist_nrqbds_cpt_max;
2287 /** number of CPTs this service bound on */
2288 int srv_ncpts;
2289 /** CPTs array this service bound on */
2290 __u32 *srv_cpts;
 2291	/** 2^srv_cpt_bits >= cfs_cpt_number(srv_cptable) */
2292 int srv_cpt_bits;
2293 /** CPT table this service is running over */
2294 struct cfs_cpt_table *srv_cptable;
2295 /**
2296 * partition data for ptlrpc service
2297 */
2298 struct ptlrpc_service_part *srv_parts[0];
2299};
2300
2301/**
2302 * Definition of PortalRPC service partition data.
 2303 * Although a service only has one instance of it right now, we
 2304 * will have multiple instances very soon (one instance per CPT).
2305 *
2306 * it has four locks:
2307 * \a scp_lock
2308 * serialize operations on rqbd and requests waiting for preprocess
2309 * \a scp_req_lock
 2310 *	serialize operations on active requests sent to this portal
2311 * \a scp_at_lock
2312 * serialize adaptive timeout stuff
2313 * \a scp_rep_lock
2314 * serialize operations on RS list (reply states)
2315 *
2316 * We don't have any use-case to take two or more locks at the same time
2317 * for now, so there is no lock order issue.
2318 */
2319struct ptlrpc_service_part {
2320 /** back reference to owner */
2321 struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
2322 /* CPT id, reserved */
2323 int scp_cpt;
2324 /** always increasing number */
2325 int scp_thr_nextid;
2326 /** # of starting threads */
2327 int scp_nthrs_starting;
2328 /** # of stopping threads, reserved for shrinking threads */
2329 int scp_nthrs_stopping;
2330 /** # running threads */
2331 int scp_nthrs_running;
2332 /** service threads list */
2333 struct list_head scp_threads;
2334
2335 /**
2336 * serialize the following fields, used for protecting
2337 * rqbd list and incoming requests waiting for preprocess,
2338 * threads starting & stopping are also protected by this lock.
2339 */
2340 spinlock_t scp_lock __cfs_cacheline_aligned;
2341 /** total # req buffer descs allocated */
2342 int scp_nrqbds_total;
2343 /** # posted request buffers for receiving */
2344 int scp_nrqbds_posted;
2345 /** in progress of allocating rqbd */
2346 int scp_rqbd_allocating;
2347 /** # incoming reqs */
2348 int scp_nreqs_incoming;
2349 /** request buffers to be reposted */
2350 struct list_head scp_rqbd_idle;
2351 /** req buffers receiving */
2352 struct list_head scp_rqbd_posted;
2353 /** incoming reqs */
2354 struct list_head scp_req_incoming;
2355 /** timeout before re-posting reqs, in tick */
2356 cfs_duration_t scp_rqbd_timeout;
2357 /**
 2358	 * all threads sleep on this. This wait-queue is signalled when a new
 2359	 * incoming request arrives and when a difficult reply has to be handled.
2360 */
2361 wait_queue_head_t scp_waitq;
2362
2363 /** request history */
2364 struct list_head scp_hist_reqs;
2365 /** request buffer history */
2366 struct list_head scp_hist_rqbds;
2367 /** # request buffers in history */
2368 int scp_hist_nrqbds;
2369 /** sequence number for request */
2370 __u64 scp_hist_seq;
2371 /** highest seq culled from history */
2372 __u64 scp_hist_seq_culled;
2373
2374 /**
2375 * serialize the following fields, used for processing requests
2376 * sent to this portal
2377 */
2378 spinlock_t scp_req_lock __cfs_cacheline_aligned;
2379 /** # reqs in either of the NRS heads below */
2380 /** # reqs being served */
2381 int scp_nreqs_active;
2382 /** # HPreqs being served */
2383 int scp_nhreqs_active;
2384 /** # hp requests handled */
2385 int scp_hreq_count;
2386
2387 /** NRS head for regular requests */
2388 struct ptlrpc_nrs scp_nrs_reg;
2389 /** NRS head for HP requests; this is only valid for services that can
2390 * handle HP requests */
2391 struct ptlrpc_nrs *scp_nrs_hp;
2392
2393 /** AT stuff */
2394 /** @{ */
2395 /**
2396 * serialize the following fields, used for changes on
2397 * adaptive timeout
2398 */
2399 spinlock_t scp_at_lock __cfs_cacheline_aligned;
2400 /** estimated rpc service time */
2401 struct adaptive_timeout scp_at_estimate;
2402 /** reqs waiting for replies */
2403 struct ptlrpc_at_array scp_at_array;
2404 /** early reply timer */
 2405	struct timer_list		scp_at_timer;
2406 /** debug */
2407 cfs_time_t scp_at_checktime;
2408 /** check early replies */
2409 unsigned scp_at_check;
2410 /** @} */
2411
2412 /**
2413 * serialize the following fields, used for processing
2414 * replies for this portal
2415 */
2416 spinlock_t scp_rep_lock __cfs_cacheline_aligned;
2417 /** all the active replies */
2418 struct list_head scp_rep_active;
2419 /** List of free reply_states */
2420 struct list_head scp_rep_idle;
2421 /** waitq to run, when adding stuff to srv_free_rs_list */
2422 wait_queue_head_t scp_rep_waitq;
2423 /** # 'difficult' replies */
2424 atomic_t scp_nreps_difficult;
2425};
2426
2427#define ptlrpc_service_for_each_part(part, i, svc) \
2428 for (i = 0; \
2429 i < (svc)->srv_ncpts && \
2430 (svc)->srv_parts != NULL && \
2431 ((part) = (svc)->srv_parts[i]) != NULL; i++)
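
/*
 * Illustrative sketch, not part of the original header: summing the
 * active request count over every partition of a service with the
 * iterator above.  The helper name is hypothetical.
 */
static inline int ptlrpc_example_nreqs_active(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *part;
	int i;
	int active = 0;

	ptlrpc_service_for_each_part(part, i, svc)
		active += part->scp_nreqs_active;
	return active;
}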
2432
2433/**
2434 * Declaration of ptlrpcd control structure
2435 */
2436struct ptlrpcd_ctl {
2437 /**
2438 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
2439 */
2440 unsigned long pc_flags;
2441 /**
2442 * Thread lock protecting structure fields.
2443 */
2444 spinlock_t pc_lock;
2445 /**
2446 * Start completion.
2447 */
2448 struct completion pc_starting;
2449 /**
2450 * Stop completion.
2451 */
2452 struct completion pc_finishing;
2453 /**
2454 * Thread requests set.
2455 */
2456 struct ptlrpc_request_set *pc_set;
2457 /**
2458 * Thread name used in cfs_daemonize()
2459 */
2460 char pc_name[16];
2461 /**
2462 * Environment for request interpreters to run in.
2463 */
2464 struct lu_env pc_env;
2465 /**
2466 * Index of ptlrpcd thread in the array.
2467 */
2468 int pc_index;
2469 /**
2470 * Number of the ptlrpcd's partners.
2471 */
2472 int pc_npartners;
2473 /**
2474 * Pointer to the array of partners' ptlrpcd_ctl structure.
2475 */
2476 struct ptlrpcd_ctl **pc_partners;
2477 /**
2478 * Record the partner index to be processed next.
2479 */
2480 int pc_cursor;
2481};
2482
2483/* Bits for pc_flags */
2484enum ptlrpcd_ctl_flags {
2485 /**
2486 * Ptlrpc thread start flag.
2487 */
2488 LIOD_START = 1 << 0,
2489 /**
2490 * Ptlrpc thread stop flag.
2491 */
2492 LIOD_STOP = 1 << 1,
2493 /**
2494 * Ptlrpc thread force flag (only stop force so far).
2495 * This will cause aborting any inflight rpcs handled
2496 * by thread if LIOD_STOP is specified.
2497 */
2498 LIOD_FORCE = 1 << 2,
2499 /**
2500 * This is a recovery ptlrpc thread.
2501 */
2502 LIOD_RECOVERY = 1 << 3,
2503 /**
2504 * The ptlrpcd is bound to some CPU core.
2505 */
2506 LIOD_BIND = 1 << 4,
2507};
2508
2509/**
2510 * \addtogroup nrs
2511 * @{
2512 *
2513 * Service compatibility function; the policy is compatible with all services.
2514 *
2515 * \param[in] svc The service the policy is attempting to register with.
2516 * \param[in] desc The policy descriptor
2517 *
2518 * \retval true The policy is compatible with the service
2519 *
2520 * \see ptlrpc_nrs_pol_desc::pd_compat()
2521 */
2522static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
2523 const struct ptlrpc_nrs_pol_desc *desc)
2524{
2525 return true;
2526}
2527
2528/**
2529 * Service compatibility function; the policy is compatible with only a specific
2530 * service which is identified by its human-readable name at
2531 * ptlrpc_service::srv_name.
2532 *
2533 * \param[in] svc The service the policy is attempting to register with.
2534 * \param[in] desc The policy descriptor
2535 *
2536 * \retval false The policy is not compatible with the service
2537 * \retval true The policy is compatible with the service
2538 *
2539 * \see ptlrpc_nrs_pol_desc::pd_compat()
2540 */
2541static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
2542 const struct ptlrpc_nrs_pol_desc *desc)
2543{
2544 LASSERT(desc->pd_compat_svc_name != NULL);
2545 return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
2546}
2547
2548/** @} nrs */
2549
2550/* ptlrpc/events.c */
2551extern lnet_handle_eq_t ptlrpc_eq_h;
2552extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
2553 lnet_process_id_t *peer, lnet_nid_t *self);
2554/**
2555 * These callbacks are invoked by LNet when something happened to
2556 * underlying buffer
2557 * @{
2558 */
2559extern void request_out_callback(lnet_event_t *ev);
2560extern void reply_in_callback(lnet_event_t *ev);
2561extern void client_bulk_callback(lnet_event_t *ev);
2562extern void request_in_callback(lnet_event_t *ev);
2563extern void reply_out_callback(lnet_event_t *ev);
2564/** @} */
2565
2566/* ptlrpc/connection.c */
2567struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
2568 lnet_nid_t self,
2569 struct obd_uuid *uuid);
2570int ptlrpc_connection_put(struct ptlrpc_connection *c);
2571struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
2572int ptlrpc_connection_init(void);
2573void ptlrpc_connection_fini(void);
2574extern lnet_pid_t ptl_get_pid(void);
2575
2576/* ptlrpc/niobuf.c */
2577/**
2578 * Actual interfacing with LNet to put/get/register/unregister stuff
2579 * @{
2580 */
2581
2582int ptlrpc_register_bulk(struct ptlrpc_request *req);
2583int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
2584
2585static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
2586{
2587 struct ptlrpc_bulk_desc *desc;
2588 int rc;
2589
2590 LASSERT(req != NULL);
2591 desc = req->rq_bulk;
2592
2593 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
2594 req->rq_bulk_deadline > cfs_time_current_sec())
2595 return 1;
2596
2597 if (!desc)
2598 return 0;
2599
2600 spin_lock(&desc->bd_lock);
2601 rc = desc->bd_md_count;
2602 spin_unlock(&desc->bd_lock);
2603 return rc;
2604}
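
/*
 * Illustrative sketch, not part of the original header: a non-zero
 * return from ptlrpc_client_bulk_active() means LNet still references
 * MDs for the transfer, so the bulk pages must not be reused yet.  The
 * helper name is hypothetical.
 */
static inline int ptlrpc_example_bulk_idle(struct ptlrpc_request *req)
{
	return ptlrpc_client_bulk_active(req) == 0;
}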
2605
2606#define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
2607#define PTLRPC_REPLY_EARLY 0x02
2608int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
2609int ptlrpc_reply(struct ptlrpc_request *req);
2610int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
2611int ptlrpc_error(struct ptlrpc_request *req);
2612void ptlrpc_resend_req(struct ptlrpc_request *request);
2613int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
2614int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
2615int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
2616/** @} */
2617
2618/* ptlrpc/client.c */
2619/**
2620 * Client-side portals API. Everything to send requests, receive replies,
2621 * request queues, request management, etc.
2622 * @{
2623 */
2624void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
2625 struct ptlrpc_client *);
2626void ptlrpc_cleanup_client(struct obd_import *imp);
2627struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
2628
2629int ptlrpc_queue_wait(struct ptlrpc_request *req);
2630int ptlrpc_replay_req(struct ptlrpc_request *req);
2631int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
2632void ptlrpc_restart_req(struct ptlrpc_request *req);
2633void ptlrpc_abort_inflight(struct obd_import *imp);
2634void ptlrpc_cleanup_imp(struct obd_import *imp);
2635void ptlrpc_abort_set(struct ptlrpc_request_set *set);
2636
2637struct ptlrpc_request_set *ptlrpc_prep_set(void);
2638struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
2639 void *arg);
2640int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
2641 set_interpreter_func fn, void *data);
2642int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
2643int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
2644int ptlrpc_set_wait(struct ptlrpc_request_set *);
2645int ptlrpc_expired_set(void *data);
2646void ptlrpc_interrupted_set(void *data);
2647void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
2648void ptlrpc_set_destroy(struct ptlrpc_request_set *);
2649void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
2650void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
2651 struct ptlrpc_request *req);
2652
2653void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
2654void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
2655
2656struct ptlrpc_request_pool *
2657ptlrpc_init_rq_pool(int, int,
2658 void (*populate_pool)(struct ptlrpc_request_pool *, int));
2659
2660void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
2661struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
2662 const struct req_format *format);
2663struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
2664 struct ptlrpc_request_pool *,
2665 const struct req_format *format);
2666void ptlrpc_request_free(struct ptlrpc_request *request);
2667int ptlrpc_request_pack(struct ptlrpc_request *request,
2668 __u32 version, int opcode);
2669struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
2670 const struct req_format *format,
2671 __u32 version, int opcode);
2672int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
2673 __u32 version, int opcode, char **bufs,
2674 struct ptlrpc_cli_ctx *ctx);
2675struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
2676 int opcode, int count, __u32 *lengths,
2677 char **bufs);
2678struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
2679 __u32 version, int opcode,
2680 int count, __u32 *lengths, char **bufs,
2681 struct ptlrpc_request_pool *pool);
2682void ptlrpc_req_finished(struct ptlrpc_request *request);
2683void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
2684struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
2685struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
2686 unsigned npages, unsigned max_brw,
2687 unsigned type, unsigned portal);
2688void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
2689static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
2690{
2691 __ptlrpc_free_bulk(bulk, 1);
2692}
2693static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
2694{
2695 __ptlrpc_free_bulk(bulk, 0);
2696}
2697void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
2698 struct page *page, int pageoffset, int len, int);
2699static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
2700 struct page *page, int pageoffset,
2701 int len)
2702{
2703 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
2704}
2705
2706static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
2707 struct page *page, int pageoffset,
2708 int len)
2709{
2710 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
2711}
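
/*
 * Illustrative sketch, not part of the original header: attaching a
 * page fragment to a bulk descriptor.  The _pin variant holds a page
 * reference for the duration of the transfer; _nopin relies on the
 * caller to keep the page alive.  The helper name is hypothetical.
 */
static inline void ptlrpc_example_add_page(struct ptlrpc_bulk_desc *desc,
					   struct page *page, int len)
{
	/* transfer len bytes of the page, starting at offset 0 */
	ptlrpc_prep_bulk_page_pin(desc, page, 0, len);
}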
2712
2713void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
2714 struct obd_import *imp);
2715__u64 ptlrpc_next_xid(void);
2716__u64 ptlrpc_sample_next_xid(void);
2717__u64 ptlrpc_req_xid(struct ptlrpc_request *request);
2718
2719/* Set of routines to run a function in ptlrpcd context */
2720void *ptlrpcd_alloc_work(struct obd_import *imp,
2721 int (*cb)(const struct lu_env *, void *), void *data);
2722void ptlrpcd_destroy_work(void *handler);
2723int ptlrpcd_queue_work(void *handler);
2724
2725/** @} */
2726struct ptlrpc_service_buf_conf {
 2727	/* nbufs is the # of buffers to allocate when growing the pool */
 2728	unsigned int			bc_nbufs;
 2729	/* buffer size to post */
 2730	unsigned int			bc_buf_size;
 2731	/* portal to listen for requests on */
 2732	unsigned int			bc_req_portal;
 2733	/* portal to send replies to */
2734 unsigned int bc_rep_portal;
2735 /* maximum request size to be accepted for this service */
2736 unsigned int bc_req_max_size;
2737 /* maximum reply size this service can ever send */
2738 unsigned int bc_rep_max_size;
2739};
2740
2741struct ptlrpc_service_thr_conf {
2742 /* threadname should be 8 characters or less - 6 will be added on */
2743 char *tc_thr_name;
2744 /* threads increasing factor for each CPU */
2745 unsigned int tc_thr_factor;
2746 /* service threads # to start on each partition while initializing */
2747 unsigned int tc_nthrs_init;
2748 /*
 2749	 * low-water mark for the thread-count upper limit on each partition
 2750	 * while running; service availability may be impacted if the number
 2751	 * of threads is lower than this value. It can be ZERO if the service
 2752	 * doesn't require CPU affinity or there is only one partition.
 2753	 */
2754 unsigned int tc_nthrs_base;
2755 /* "soft" limit for total threads number */
2756 unsigned int tc_nthrs_max;
 2757	/* user-specified number of threads; it will be validated against
 2758	 * other members of this structure. */
2759 unsigned int tc_nthrs_user;
2760 /* set NUMA node affinity for service threads */
2761 unsigned int tc_cpu_affinity;
2762 /* Tags for lu_context associated with service thread */
2763 __u32 tc_ctx_tags;
2764};
2765
2766struct ptlrpc_service_cpt_conf {
2767 struct cfs_cpt_table *cc_cptable;
2768 /* string pattern to describe CPTs for a service */
2769 char *cc_pattern;
2770};
2771
2772struct ptlrpc_service_conf {
2773 /* service name */
2774 char *psc_name;
 2775	/* soft watchdog timeout multiplier to print stuck service traces */
2776 unsigned int psc_watchdog_factor;
2777 /* buffer information */
2778 struct ptlrpc_service_buf_conf psc_buf;
2779 /* thread information */
2780 struct ptlrpc_service_thr_conf psc_thr;
2781 /* CPU partition information */
2782 struct ptlrpc_service_cpt_conf psc_cpt;
2783 /* function table */
2784 struct ptlrpc_service_ops psc_ops;
2785};
2786
2787/* ptlrpc/service.c */
2788/**
2789 * Server-side services API. Register/unregister service, request state
2790 * management, service thread management
2791 *
2792 * @{
2793 */
2794void ptlrpc_save_lock(struct ptlrpc_request *req,
2795 struct lustre_handle *lock, int mode, int no_ack);
2796void ptlrpc_commit_replies(struct obd_export *exp);
2797void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
2798void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
2799int ptlrpc_hpreq_handler(struct ptlrpc_request *req);
2800struct ptlrpc_service *ptlrpc_register_service(
2801 struct ptlrpc_service_conf *conf,
2802 struct proc_dir_entry *proc_entry);
2803void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
2804
2805int ptlrpc_start_threads(struct ptlrpc_service *svc);
2806int ptlrpc_unregister_service(struct ptlrpc_service *service);
2807int liblustre_check_services(void *arg);
2808void ptlrpc_daemonize(char *name);
2809int ptlrpc_service_health_check(struct ptlrpc_service *);
2810void ptlrpc_server_drop_request(struct ptlrpc_request *req);
2811void ptlrpc_request_change_export(struct ptlrpc_request *req,
2812 struct obd_export *export);
2813
2814int ptlrpc_hr_init(void);
2815void ptlrpc_hr_fini(void);
2816
2817/** @} */
2818
2819/* ptlrpc/import.c */
2820/**
2821 * Import API
2822 * @{
2823 */
2824int ptlrpc_connect_import(struct obd_import *imp);
2825int ptlrpc_init_import(struct obd_import *imp);
2826int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
2827int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
2828void deuuidify(char *uuid, const char *prefix, char **uuid_start,
2829 int *uuid_len);
2830
2831/* ptlrpc/pack_generic.c */
2832int ptlrpc_reconnect_import(struct obd_import *imp);
2833/** @} */
2834
2835/**
2836 * ptlrpc msg buffer and swab interface
2837 *
2838 * @{
2839 */
2840int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
2841 int index);
2842void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
2843 int index);
2844int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
2845int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
2846
2847int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
2848void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
2849 char **bufs);
2850int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
2851 __u32 *lens, char **bufs);
2852int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
2853 char **bufs);
2854int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
2855 __u32 *lens, char **bufs, int flags);
2856#define LPRFL_EARLY_REPLY 1
2857int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
2858 char **bufs, int flags);
2859int lustre_shrink_msg(struct lustre_msg *msg, int segment,
2860 unsigned int newlen, int move_data);
2861void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
2862int __lustre_unpack_msg(struct lustre_msg *m, int len);
2863int lustre_msg_hdr_size(__u32 magic, int count);
2864int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
2865int lustre_msg_size_v2(int count, __u32 *lengths);
2866int lustre_packed_msg_size(struct lustre_msg *msg);
2867int lustre_msg_early_size(void);
2868void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
2869void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
2870int lustre_msg_buflen(struct lustre_msg *m, int n);
2871void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
2872int lustre_msg_bufcount(struct lustre_msg *m);
2873char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
2874__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
2875void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
2876__u32 lustre_msg_get_flags(struct lustre_msg *msg);
2877void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
2878void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
2879void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
2880__u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
2881void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
2882void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
2883struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
2884__u32 lustre_msg_get_type(struct lustre_msg *msg);
2885__u32 lustre_msg_get_version(struct lustre_msg *msg);
2886void lustre_msg_add_version(struct lustre_msg *msg, int version);
2887__u32 lustre_msg_get_opc(struct lustre_msg *msg);
2888__u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
2889__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
2890__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
2891__u64 lustre_msg_get_transno(struct lustre_msg *msg);
2892__u64 lustre_msg_get_slv(struct lustre_msg *msg);
2893__u32 lustre_msg_get_limit(struct lustre_msg *msg);
2894void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
2895void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
2896int lustre_msg_get_status(struct lustre_msg *msg);
2897__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
2898int lustre_msg_is_v1(struct lustre_msg *msg);
2899__u32 lustre_msg_get_magic(struct lustre_msg *msg);
2900__u32 lustre_msg_get_timeout(struct lustre_msg *msg);
2901__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
2902char *lustre_msg_get_jobid(struct lustre_msg *msg);
2903__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
2904#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
2905__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
2906#else
2907# warning "remove checksum compatibility support for b1_8"
2908__u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
2909#endif
2910void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle);
2911void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
2912void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
2913void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
2914void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
2915void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
2916void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
2917void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
2918void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
2919void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
2920void ptlrpc_request_set_replen(struct ptlrpc_request *req);
2921void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
2922void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
2923void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
2924void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
2925
2926static inline void
2927lustre_shrink_reply(struct ptlrpc_request *req, int segment,
2928 unsigned int newlen, int move_data)
2929{
2930 LASSERT(req->rq_reply_state);
2931 LASSERT(req->rq_repmsg);
2932 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
2933 newlen, move_data);
2934}
2935
2936#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
2937
2938static inline int ptlrpc_status_hton(int h)
2939{
2940 /*
2941 * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
2942 * ELDLM_LOCK_ABORTED, etc.
2943 */
2944 if (h < 0)
2945 return -lustre_errno_hton(-h);
2946 else
2947 return h;
2948}
2949
2950static inline int ptlrpc_status_ntoh(int n)
2951{
2952 /*
2953 * See the comment in ptlrpc_status_hton().
2954 */
2955 if (n < 0)
2956 return -lustre_errno_ntoh(-n);
2957 else
2958 return n;
2959}
2960
2961#else
2962
2963#define ptlrpc_status_hton(h) (h)
2964#define ptlrpc_status_ntoh(n) (n)
2965
2966#endif
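
/*
 * Illustrative sketch, not part of the original header: status codes
 * round-trip through the hton/ntoh pair.  Negative host errnos are
 * translated when CONFIG_LUSTRE_TRANSLATE_ERRNOS is set; positive
 * protocol codes (e.g. ELDLM_LOCK_ABORTED) pass through unchanged.
 * The helper name is hypothetical.
 */
static inline int ptlrpc_example_status_roundtrip(int host_rc)
{
	int wire = ptlrpc_status_hton(host_rc);

	/* equals host_rc for codes both sides can represent */
	return ptlrpc_status_ntoh(wire);
}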
2967/** @} */
2968
2969/** Change request phase of \a req to \a new_phase */
2970static inline void
2971ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
2972{
2973 if (req->rq_phase == new_phase)
2974 return;
2975
2976 if (new_phase == RQ_PHASE_UNREGISTERING) {
2977 req->rq_next_phase = req->rq_phase;
2978 if (req->rq_import)
2979 atomic_inc(&req->rq_import->imp_unregistering);
2980 }
2981
2982 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
2983 if (req->rq_import)
2984 atomic_dec(&req->rq_import->imp_unregistering);
2985 }
2986
2987 DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
2988 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
2989
2990 req->rq_phase = new_phase;
2991}
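
/*
 * Illustrative sketch, not part of the original header: a request whose
 * reply has arrived moves RPC -> INTERPRET.  UNREGISTERING is handled
 * specially above because the request must remember where to resume
 * (rq_next_phase) and keep imp_unregistering balanced.  The helper name
 * is hypothetical.
 */
static inline void ptlrpc_example_enter_interpret(struct ptlrpc_request *req)
{
	if (req->rq_phase == RQ_PHASE_RPC)
		ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
}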
2992
2993/**
2994 * Returns true if request \a req got early reply and hard deadline is not met
2995 */
2996static inline int
2997ptlrpc_client_early(struct ptlrpc_request *req)
2998{
2999 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3000 req->rq_reply_deadline > cfs_time_current_sec())
3001 return 0;
3002 return req->rq_early;
3003}
3004
3005/**
3006 * Returns true if we got real reply from server for this request
3007 */
3008static inline int
3009ptlrpc_client_replied(struct ptlrpc_request *req)
3010{
3011 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3012 req->rq_reply_deadline > cfs_time_current_sec())
3013 return 0;
3014 return req->rq_replied;
3015}
3016
3017/** Returns true if request \a req is in process of receiving server reply */
3018static inline int
3019ptlrpc_client_recv(struct ptlrpc_request *req)
3020{
3021 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3022 req->rq_reply_deadline > cfs_time_current_sec())
3023 return 1;
3024 return req->rq_receiving_reply;
3025}
3026
3027static inline int
3028ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
3029{
3030 int rc;
3031
3032 spin_lock(&req->rq_lock);
3033 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3034 req->rq_reply_deadline > cfs_time_current_sec()) {
3035 spin_unlock(&req->rq_lock);
3036 return 1;
3037 }
3038 rc = req->rq_receiving_reply || req->rq_must_unlink;
3039 spin_unlock(&req->rq_lock);
3040 return rc;
3041}
3042
3043static inline void
3044ptlrpc_client_wake_req(struct ptlrpc_request *req)
3045{
3046 if (req->rq_set == NULL)
3047 wake_up(&req->rq_reply_waitq);
3048 else
3049 wake_up(&req->rq_set->set_waitq);
3050}
3051
3052static inline void
3053ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
3054{
3055 LASSERT(atomic_read(&rs->rs_refcount) > 0);
3056 atomic_inc(&rs->rs_refcount);
3057}
3058
3059static inline void
3060ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
3061{
3062 LASSERT(atomic_read(&rs->rs_refcount) > 0);
3063 if (atomic_dec_and_test(&rs->rs_refcount))
3064 lustre_free_reply_state(rs);
3065}
3066
3067/* Should only be called once per req */
3068static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
3069{
3070 if (req->rq_reply_state == NULL)
3071 return; /* shouldn't occur */
3072 ptlrpc_rs_decref(req->rq_reply_state);
3073 req->rq_reply_state = NULL;
3074 req->rq_repmsg = NULL;
3075}
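
/*
 * Illustrative sketch, not part of the original header: the reply-state
 * refcount pairing.  Every ptlrpc_rs_addref() must be matched by a
 * ptlrpc_rs_decref(); the final decref frees the state through
 * lustre_free_reply_state().  The helper name is hypothetical.
 */
static inline void ptlrpc_example_rs_peek(struct ptlrpc_reply_state *rs)
{
	ptlrpc_rs_addref(rs);		/* take a temporary reference */
	/* ... inspect rs under the caller's locking rules ... */
	ptlrpc_rs_decref(rs);		/* may free rs if last reference */
}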
3076
3077static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
3078{
3079 return lustre_msg_get_magic(req->rq_reqmsg);
3080}
3081
3082static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
3083{
3084 switch (req->rq_reqmsg->lm_magic) {
3085 case LUSTRE_MSG_MAGIC_V2:
3086 return req->rq_reqmsg->lm_repsize;
3087 default:
3088 LASSERTF(0, "incorrect message magic: %08x\n",
3089 req->rq_reqmsg->lm_magic);
3090 return -EFAULT;
3091 }
3092}
3093
3094static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
3095{
3096 if (req->rq_delay_limit != 0 &&
3097 cfs_time_before(cfs_time_add(req->rq_queued_time,
3098 cfs_time_seconds(req->rq_delay_limit)),
3099 cfs_time_current())) {
3100 return 1;
3101 }
3102 return 0;
3103}
3104
3105static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
3106{
3107 if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
3108 spin_lock(&req->rq_lock);
3109 req->rq_no_resend = 1;
3110 spin_unlock(&req->rq_lock);
3111 }
3112 return req->rq_no_resend;
3113}
3114
3115static inline int
3116ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
3117{
3118 int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);
3119
3120 return svcpt->scp_service->srv_watchdog_factor *
3121 max_t(int, at, obd_timeout);
3122}
3123
3124static inline struct ptlrpc_service *
3125ptlrpc_req2svc(struct ptlrpc_request *req)
3126{
3127 LASSERT(req->rq_rqbd != NULL);
3128 return req->rq_rqbd->rqbd_svcpt->scp_service;
3129}
3130
3131/* ldlm/ldlm_lib.c */
3132/**
3133 * Target client logic
3134 * @{
3135 */
3136int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
3137int client_obd_cleanup(struct obd_device *obddev);
3138int client_connect_import(const struct lu_env *env,
3139 struct obd_export **exp, struct obd_device *obd,
3140 struct obd_uuid *cluuid, struct obd_connect_data *,
3141 void *localdata);
3142int client_disconnect_export(struct obd_export *exp);
3143int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
3144 int priority);
3145int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
3146int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
3147 struct obd_uuid *uuid);
3148int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
3149void client_destroy_import(struct obd_import *imp);
3150/** @} */
3151
3152
3153/* ptlrpc/pinger.c */
3154/**
3155 * Pinger API (client side only)
3156 * @{
3157 */
3158enum timeout_event {
3159 TIMEOUT_GRANT = 1
3160};
3161struct timeout_item;
3162typedef int (*timeout_cb_t)(struct timeout_item *, void *);
3163int ptlrpc_pinger_add_import(struct obd_import *imp);
3164int ptlrpc_pinger_del_import(struct obd_import *imp);
3165int ptlrpc_add_timeout_client(int time, enum timeout_event event,
3166 timeout_cb_t cb, void *data,
3167 struct list_head *obd_list);
3168int ptlrpc_del_timeout_client(struct list_head *obd_list,
3169 enum timeout_event event);
3170struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
3171int ptlrpc_obd_ping(struct obd_device *obd);
3172void ping_evictor_start(void);
3173void ping_evictor_stop(void);
3174void ptlrpc_pinger_ir_up(void);
3175void ptlrpc_pinger_ir_down(void);
3176/** @} */
3177int ptlrpc_pinger_suppress_pings(void);
3178
3179/* ptlrpc daemon bind policy */
3180typedef enum {
3181 /* all ptlrpcd threads are free mode */
3182 PDB_POLICY_NONE = 1,
3183 /* all ptlrpcd threads are bound mode */
3184 PDB_POLICY_FULL = 2,
3185 /* <free1 bound1> <free2 bound2> ... <freeN boundN> */
3186 PDB_POLICY_PAIR = 3,
 3187	/* <free1 bound1> <bound1 free2> ... <freeN boundN> <boundN free1>,
 3188	 * meaning each ptlrpcd[X] has two partners: thread[X-1] and thread[X+1].
 3189	 * If the kernel supports NUMA, ptlrpcd threads are bound and
 3190	 * grouped by NUMA node */
3191 PDB_POLICY_NEIGHBOR = 4,
3192} pdb_policy_t;
3193
 3194/* ptlrpc daemon load policy
 3195 * It is the caller's duty to specify how to push the async RPC into some
 3196 * ptlrpcd queue, but this is not enforced and is affected by
 3197 * "ptlrpcd_bind_policy". If that is "PDB_POLICY_FULL", the RPC will be
 3198 * processed by the selected ptlrpcd. Otherwise it may be processed by the
 3199 * selected ptlrpcd or its partner, depending on which is scheduled first. */
3200typedef enum {
3201 /* on the same CPU core as the caller */
3202 PDL_POLICY_SAME = 1,
3203 /* within the same CPU partition, but not the same core as the caller */
3204 PDL_POLICY_LOCAL = 2,
3205 /* round-robin on all CPU cores, but not the same core as the caller */
3206 PDL_POLICY_ROUND = 3,
3207 /* the specified CPU core is preferred, but not enforced */
3208 PDL_POLICY_PREFERRED = 4,
3209} pdl_policy_t;
3210
3211/* ptlrpc/ptlrpcd.c */
3212void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
3213void ptlrpcd_free(struct ptlrpcd_ctl *pc);
3214void ptlrpcd_wake(struct ptlrpc_request *req);
3215void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx);
3216void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
3217int ptlrpcd_addref(void);
3218void ptlrpcd_decref(void);
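
/*
 * Illustrative sketch, not part of the original header: handing an
 * async request to a ptlrpcd thread.  PDL_POLICY_ROUND spreads work
 * round-robin over CPU cores; the index argument is unused for this
 * policy, so -1 is passed by convention.  The helper name is
 * hypothetical.
 */
static inline void ptlrpc_example_send_async(struct ptlrpc_request *req)
{
	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
}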
3219
3220/* ptlrpc/lproc_ptlrpc.c */
3221/**
3222 * procfs output related functions
3223 * @{
3224 */
3225const char* ll_opcode2str(__u32 opcode);
3226#ifdef LPROCFS
3227void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
3228void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
3229void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
3230#else
3231static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
3232static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
3233static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
3234#endif
3235/** @} */
3236
3237/* ptlrpc/llog_client.c */
3238extern struct llog_operations llog_client_ops;
3239
3240/** @} net */
3241
3242#endif
3243/** @} PtlRPC */