/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/** \defgroup PtlRPC Portal RPC and networking module.
 *
 * PortalRPC is the layer used by the rest of the Lustre code to achieve
 * network communications: establish connections with corresponding export
 * and import states, listen for a service, send and receive RPCs.
 * PortalRPC also includes a base recovery framework: packet resending and
 * replaying, reconnections and pinger.
 *
 * PortalRPC utilizes LNet as its transport layer.
 *
 * @{
 */

#ifndef _LUSTRE_NET_H
#define _LUSTRE_NET_H

/** \defgroup net net
 *
 * @{
 */
#include "linux/lustre_net.h"

#include "../../include/linux/libcfs/libcfs.h"
// #include <obd.h>
#include "../../include/linux/lnet/lnet.h"
#include "lustre/lustre_idl.h"
#include "lustre_ha.h"
#include "lustre_sec.h"
#include "lustre_import.h"
#include "lprocfs_status.h"
#include "lu_object.h"
#include "lustre_req_layout.h"

#include "obd_support.h"
#include "lustre_ver.h"

/* MD flags we _always_ use */
#define PTLRPC_MD_OPTIONS 0

/**
 * Max # of bulk operations in one request.
 * In order for the client and server to properly negotiate the maximum
 * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
 * value. The client is free to limit the actual RPC size for any bulk
 * transfer via cl_max_pages_per_rpc to some non-power-of-two value.
 */
#define PTLRPC_BULK_OPS_BITS	2
#define PTLRPC_BULK_OPS_COUNT	(1U << PTLRPC_BULK_OPS_BITS)
/**
 * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
 * should not be used on the server at all. Otherwise, it imposes a
 * protocol limitation on the maximum RPC size that can be used by any
 * RPC sent to that server in the future. Instead, the server should
 * use the negotiated per-client ocd_brw_size to determine the bulk
 * RPC count.
 */
#define PTLRPC_BULK_OPS_MASK	(~((__u64)PTLRPC_BULK_OPS_COUNT - 1))
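/*
 * For example (an illustrative sketch, not an API provided by this header):
 * since a BRW request is carried by up to PTLRPC_BULK_OPS_COUNT transfers of
 * LNET_MTU bytes each, a server could derive the per-client bulk RPC count
 * from the negotiated ocd_brw_size roughly as
 *
 *	bulk_ops = ocd_brw_size >> LNET_MTU_BITS;
 *
 * instead of applying PTLRPC_BULK_OPS_MASK.
 */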

/**
 * Define maxima for bulk I/O.
 *
 * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
 * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
 * currently supported maximum between peers at connect via ocd_brw_size.
 */
#define PTLRPC_MAX_BRW_BITS	(LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
#define PTLRPC_MAX_BRW_SIZE	(1 << PTLRPC_MAX_BRW_BITS)
#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)

#define ONE_MB_BRW_SIZE		(1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE		(1 << LNET_MTU_BITS)
#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define DT_MAX_BRW_SIZE		PTLRPC_MAX_BRW_SIZE
#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define OFD_MAX_BRW_SIZE	(1 << LNET_MTU_BITS)

/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
#if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
#endif
#if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
#endif
#if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"
#endif
#if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_PAGES too big"
#endif

#define PTLRPC_NTHRS_INIT	2

/**
 * Buffer Constants
 *
 * Constants determine how memory is used to buffer incoming service requests.
 *
 * ?_NBUFS		# buffers to allocate when growing the pool
 * ?_BUFSIZE		# bytes in a single request buffer
 * ?_MAXREQSIZE		# maximum request service will receive
 *
 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
 * of ?_NBUFS is added to the pool.
 *
 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
 * considered full when less than ?_MAXREQSIZE is left in them.
 */
/**
 * Thread Constants
 *
 * Constants determine how threads are created for the ptlrpc service.
 *
 * ?_NTHRS_INIT		# threads to create for each service partition on
 *			initializing. If it's a non-affinity service and
 *			there is only one partition, it's the overall #
 *			threads for the service while initializing.
 * ?_NTHRS_BASE		# threads that should be created at least for each
 *			ptlrpc partition to keep the service healthy.
 *			It's the low-water mark of the threads upper-limit
 *			for each partition.
 * ?_THR_FACTOR		# threads that can be added to the threads upper-limit
 *			for each CPU core. This factor is only for reference;
 *			we might decrease the value of the factor if the
 *			number of cores per CPT is above a limit.
 * ?_NTHRS_MAX		# overall threads that can be created for a service.
 *			It's a soft limit because if the service is running
 *			on a machine with hundreds of cores and tens of
 *			CPU partitions, we need to guarantee each partition
 *			has ?_NTHRS_BASE threads, which means the total number
 *			of threads will be ?_NTHRS_BASE * number_of_cpts,
 *			which can exceed ?_NTHRS_MAX.
 *
 * Examples
 *
 * #define MDS_NTHRS_INIT	2
 * #define MDS_NTHRS_BASE	64
 * #define MDS_NTHRS_FACTOR	8
 * #define MDS_NTHRS_MAX	1024
 *
 * Example 1):
 * ---------------------------------------------------------------------
 * Server(A) has 16 cores, user configured it to 4 partitions so each
 * partition has 4 cores, then the actual number of service threads on each
 * partition is:
 *     MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
 *
 * Total number of threads for the service is:
 *     96 * partitions(4) = 384
 *
 * Example 2):
 * ---------------------------------------------------------------------
 * Server(B) has 32 cores, user configured it to 4 partitions so each
 * partition has 8 cores, then the actual number of service threads on each
 * partition is:
 *     MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
 *
 * Total number of threads for the service is:
 *     128 * partitions(4) = 512
 *
 * Example 3):
 * ---------------------------------------------------------------------
 * Server(B) has 96 cores, user configured it to 8 partitions so each
 * partition has 12 cores, then the actual number of service threads on each
 * partition is:
 *     MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
 *
 * Total number of threads for the service is:
 *     160 * partitions(8) = 1280
 *
 * However, this is above the soft limit MDS_NTHRS_MAX, so we choose this
 * number as the upper limit of threads for each partition:
 *     MDS_NTHRS_MAX(1024) / partitions(8) = 128
 *
 * Example 4):
 * ---------------------------------------------------------------------
 * Server(C) has a thousand cores and user configured it to 32 partitions:
 *     MDS_NTHRS_BASE(64) * 32 = 2048
 *
 * which is already above the soft limit MDS_NTHRS_MAX(1024), but we still
 * need to guarantee that each partition has at least MDS_NTHRS_BASE(64)
 * threads to keep the service healthy, so the total number of threads will
 * just be 2048.
 *
 * NB: we don't suggest choosing a server with that many cores because the
 * backend filesystem itself, buffer cache, or underlying network stack might
 * have some SMP scalability issues at that large scale.
 *
 * If a user already has a fat machine with hundreds or thousands of cores,
 * there are two choices for configuration:
 * a) create a CPU table from a subset of all CPUs and run Lustre on
 *    top of this subset
 * b) bind service threads on a few partitions, see the module parameters of
 *    MDS and OSS for details
 *
 * NB: these calculations (and the examples below) are simplified to help
 * understanding; the real implementation is a little more complex,
 * please see ptlrpc_server_nthreads_check() for details.
 */
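/*
 * The examples above reduce to the following arithmetic. This is only an
 * illustrative sketch under the simplifications stated above; the
 * authoritative logic lives in ptlrpc_server_nthreads_check():
 *
 *	nthrs = nthrs_base + cores_per_cpt * thr_factor;
 *	if (nthrs * ncpts > nthrs_max)
 *		nthrs = max(nthrs_max / ncpts, nthrs_base);
 *	total = nthrs * ncpts;
 */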

/*
 * LDLM threads constants:
 *
 * Given 8 as factor and 24 as base threads number
 *
 * example 1)
 * On 4-core machine we will have 24 + 8 * 4 = 56 threads.
 *
 * example 2)
 * On 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56
 * threads for each partition and total threads number will be 112.
 *
 * example 3)
 * On 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
 * threads for each partition to keep service healthy, so total threads
 * number should be 24 * 8 = 192.
 *
 * So with these constants, threads number will be at the similar level
 * of old versions, unless target machine has over a hundred cores
 */
#define LDLM_THR_FACTOR		8
#define LDLM_NTHRS_INIT		PTLRPC_NTHRS_INIT
#define LDLM_NTHRS_BASE		24
#define LDLM_NTHRS_MAX		(num_online_cpus() == 1 ? 64 : 128)

#define LDLM_BL_THREADS		LDLM_NTHRS_AUTO_INIT
#define LDLM_CLIENT_NBUFS	1
#define LDLM_SERVER_NBUFS	64
#define LDLM_BUFSIZE		(8 * 1024)
#define LDLM_MAXREQSIZE		(5 * 1024)
#define LDLM_MAXREPSIZE		(1024)

#define MDS_MAXREQSIZE		(5 * 1024)	/* >= 4736 */

#define OST_MAXREQSIZE		(5 * 1024)

/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)

/**
 * Structure to define a single portal connection.
 */
struct ptlrpc_connection {
	/** linkage for connections hash table */
	struct hlist_node	c_hash;
	/** Our own lnet nid for this connection */
	lnet_nid_t		c_self;
	/** Remote side nid for this connection */
	lnet_process_id_t	c_peer;
	/** UUID of the other side */
	struct obd_uuid		c_remote_uuid;
	/** reference counter for this connection */
	atomic_t		c_refcount;
};

/** Client definition for PortalRPC */
struct ptlrpc_client {
	/** What lnet portal does this client send messages to by default */
	__u32			cli_request_portal;
	/** What portal do we expect replies on */
	__u32			cli_reply_portal;
	/** Name of the client */
	char			*cli_name;
};

/** state flags of requests */
/* XXX only ones left are those used by the bulk descs as well! */
#define PTL_RPC_FL_INTR		(1 << 0) /* reply wait was interrupted by user */
#define PTL_RPC_FL_TIMEOUT	(1 << 7) /* request timed out waiting for reply */

#define REQ_MAX_ACK_LOCKS 8

union ptlrpc_async_args {
	/**
	 * Scratchpad for passing args to completion interpreter. Users
	 * cast to the struct of their choosing, and CLASSERT that this is
	 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
	 * a pointer to it here. The pointer_arg ensures this struct is at
	 * least big enough for that.
	 */
	void	*pointer_arg[11];
	__u64	space[7];
};
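/*
 * Typical usage, following the comment above (a sketch; "my_async_args" is
 * a made-up example type, not part of this header):
 *
 *	struct my_async_args {
 *		struct obd_import *aa_imp;
 *		int		   aa_flags;
 *	};
 *
 *	CLASSERT(sizeof(struct my_async_args) <=
 *		 sizeof(union ptlrpc_async_args));
 *	struct my_async_args *aa = ptlrpc_req_async_args(req);
 */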

struct ptlrpc_request_set;
typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);

/**
 * Definition of request set structure.
 * A request set is a list of requests (not necessarily to the same target)
 * that once populated with RPCs could be sent in parallel.
 * There are two kinds of request sets: general purpose and with a dedicated
 * serving thread. An example of the latter is the ptlrpcd set.
 * For general purpose sets, once the set has started sending it is impossible
 * to add new requests to it.
 * Provides a way to call "completion callbacks" when all requests in the set
 * have returned.
 */
struct ptlrpc_request_set {
	atomic_t		set_refcount;
	/** number of in queue requests */
	atomic_t		set_new_count;
	/** number of uncompleted requests */
	atomic_t		set_remaining;
	/** wait queue to wait on for request events */
	wait_queue_head_t	set_waitq;
	wait_queue_head_t	*set_wakeup_ptr;
	/** List of requests in the set */
	struct list_head	set_requests;
	/**
	 * List of completion callbacks to be called when the set is completed
	 * This is only used if \a set_interpret is NULL.
	 * Links struct ptlrpc_set_cbdata.
	 */
	struct list_head	set_cblist;
	/** Completion callback, if only one. */
	set_interpreter_func	set_interpret;
	/** opaque argument passed to completion \a set_interpret callback. */
	void			*set_arg;
	/**
	 * Lock for \a set_new_requests manipulations
	 * locked so that any old caller can communicate requests to
	 * the set holder who can then fold them into the lock-free set
	 */
	spinlock_t		set_new_req_lock;
	/** List of new yet unsent requests. Only used with ptlrpcd now. */
	struct list_head	set_new_requests;

	/** rq_status of requests that have been freed already */
	int			set_rc;
	/** Additional fields used by the flow control extension */
	/** Maximum number of RPCs in flight */
	int			set_max_inflight;
	/** Callback function used to generate RPCs */
	set_producer_func	set_producer;
	/** opaque argument passed to the producer callback */
	void			*set_producer_arg;
};
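/*
 * Typical client-side usage of a general purpose set (a sketch; the
 * ptlrpc_prep_set(), ptlrpc_set_add_req(), ptlrpc_set_wait() and
 * ptlrpc_set_destroy() helpers are provided by ptlrpc):
 *
 *	struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 *	if (set == NULL)
 *		return -ENOMEM;
 *	ptlrpc_set_add_req(set, req1);
 *	ptlrpc_set_add_req(set, req2);
 *	rc = ptlrpc_set_wait(set);	(sends in parallel, waits for all)
 *	ptlrpc_set_destroy(set);
 */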

/**
 * Description of a single ptlrpc_set callback
 */
struct ptlrpc_set_cbdata {
	/** List linkage item */
	struct list_head	psc_item;
	/** Pointer to interpreting function */
	set_interpreter_func	psc_interpret;
	/** Opaque argument to pass to the callback */
	void			*psc_data;
};

struct ptlrpc_bulk_desc;
struct ptlrpc_service_part;
struct ptlrpc_service;

/**
 * ptlrpc callback & work item stuff
 */
struct ptlrpc_cb_id {
	void (*cbid_fn)(lnet_event_t *ev);	/* specific callback fn */
	void *cbid_arg;				/* additional arg */
};

/** Maximum number of locks to fit into reply state */
#define RS_MAX_LOCKS 8
#define RS_DEBUG     0

/**
 * Structure to define reply state on the server
 * Reply state holds various reply message information. Also for "difficult"
 * replies (rep-ack case) we store the state after sending the reply and wait
 * for the client to acknowledge the reception. In these cases locks could be
 * added to the state for replay/failover consistency guarantees.
 */
struct ptlrpc_reply_state {
	/** Callback description */
	struct ptlrpc_cb_id	rs_cb_id;
	/** Linkage for list of all reply states in a system */
	struct list_head	rs_list;
	/** Linkage for list of all reply states on same export */
	struct list_head	rs_exp_list;
	/** Linkage for list of all reply states for same obd */
	struct list_head	rs_obd_list;
#if RS_DEBUG
	struct list_head	rs_debug_list;
#endif
	/** A spinlock to protect the reply state flags */
	spinlock_t		rs_lock;
	/** Reply state flags */
	unsigned long		rs_difficult:1;	/* ACK/commit stuff */
	unsigned long		rs_no_ack:1;	/* no ACK, even for
						   difficult requests */
	unsigned long		rs_scheduled:1;	/* being handled? */
	unsigned long		rs_scheduled_ever:1; /* any schedule attempts? */
	unsigned long		rs_handled:1;	/* been handled yet? */
	unsigned long		rs_on_net:1;	/* reply_out_callback pending? */
	unsigned long		rs_prealloc:1;	/* rs from prealloc list */
	unsigned long		rs_committed:1;	/* the transaction was committed
						   and the rs was dispatched
						   by ptlrpc_commit_replies */
	/** Size of the state */
	int			rs_size;
	/** opcode */
	__u32			rs_opc;
	/** Transaction number */
	__u64			rs_transno;
	/** xid */
	__u64			rs_xid;
	struct obd_export	*rs_export;
	struct ptlrpc_service_part *rs_svcpt;
	/** Lnet metadata handle for the reply */
	lnet_handle_md_t	rs_md_h;
	atomic_t		rs_refcount;

	/** Context for the service thread */
	struct ptlrpc_svc_ctx	*rs_svc_ctx;
	/** Reply buffer (actually sent to the client), encoded if needed */
	struct lustre_msg	*rs_repbuf;	/* wrapper */
	/** Size of the reply buffer */
	int			rs_repbuf_len;	/* wrapper buf length */
	/** Size of the reply message */
	int			rs_repdata_len;	/* wrapper msg length */
	/**
	 * Actual reply message. Its content is encrypted (if needed) to
	 * produce the reply buffer for actual sending. In the simple case
	 * of no network encryption we just set \a rs_repbuf to \a rs_msg
	 */
	struct lustre_msg	*rs_msg;	/* reply message */

	/** Number of locks awaiting client ACK */
	int			rs_nlocks;
	/** Handles of locks awaiting client reply ACK */
	struct lustre_handle	rs_locks[RS_MAX_LOCKS];
	/** Lock modes of locks in \a rs_locks */
	ldlm_mode_t		rs_modes[RS_MAX_LOCKS];
};

struct ptlrpc_thread;

/** RPC stages */
enum rq_phase {
	RQ_PHASE_NEW		= 0xebc0de00,
	RQ_PHASE_RPC		= 0xebc0de01,
	RQ_PHASE_BULK		= 0xebc0de02,
	RQ_PHASE_INTERPRET	= 0xebc0de03,
	RQ_PHASE_COMPLETE	= 0xebc0de04,
	RQ_PHASE_UNREGISTERING	= 0xebc0de05,
	RQ_PHASE_UNDEFINED	= 0xebc0de06
};
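/*
 * A client request normally advances NEW -> RPC -> BULK (only when there is
 * bulk I/O) -> INTERPRET -> COMPLETE. A debugging helper mapping a phase to
 * a printable name could look like the sketch below (the helper name is
 * hypothetical):
 *
 *	static inline const char *rq_phase2str(enum rq_phase phase)
 *	{
 *		switch (phase) {
 *		case RQ_PHASE_NEW:		return "New";
 *		case RQ_PHASE_RPC:		return "Rpc";
 *		case RQ_PHASE_BULK:		return "Bulk";
 *		case RQ_PHASE_INTERPRET:	return "Interpret";
 *		case RQ_PHASE_COMPLETE:		return "Complete";
 *		case RQ_PHASE_UNREGISTERING:	return "Unregistering";
 *		default:			return "?Phase?";
 *		}
 *	}
 */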

/** Type of request interpreter call-back */
typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
				    struct ptlrpc_request *req,
				    void *arg, int rc);

/**
 * Definition of request pool structure.
 * The pool is used to store empty preallocated requests for the case
 * when we would actually need to send something without performing
 * any allocations (to avoid e.g. OOM).
 */
struct ptlrpc_request_pool {
	/** Locks the list */
	spinlock_t		prp_lock;
	/** list of ptlrpc_request structs */
	struct list_head	prp_req_list;
	/** Maximum message size that would fit into a request from this pool */
	int			prp_rq_size;
	/** Function to allocate more requests for this pool */
	void (*prp_populate)(struct ptlrpc_request_pool *, int);
};
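/*
 * Taking a preallocated request from the pool is then plain list
 * manipulation under \a prp_lock; a simplified sketch:
 *
 *	spin_lock(&pool->prp_lock);
 *	if (!list_empty(&pool->prp_req_list)) {
 *		req = list_entry(pool->prp_req_list.next,
 *				 struct ptlrpc_request, rq_list);
 *		list_del_init(&req->rq_list);
 *	}
 *	spin_unlock(&pool->prp_lock);
 */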

struct lu_context;
struct lu_env;

struct ldlm_lock;

/**
 * \defgroup nrs Network Request Scheduler
 * @{
 */
struct ptlrpc_nrs_policy;
struct ptlrpc_nrs_resource;
struct ptlrpc_nrs_request;

/**
 * NRS control operations.
 *
 * These are common for all policies.
 */
enum ptlrpc_nrs_ctl {
	/**
	 * Not a valid opcode.
	 */
	PTLRPC_NRS_CTL_INVALID,
	/**
	 * Activate the policy.
	 */
	PTLRPC_NRS_CTL_START,
	/**
	 * Reserved for multiple primary policies, which may be a possibility
	 * in the future.
	 */
	PTLRPC_NRS_CTL_STOP,
	/**
	 * Policies can start using opcodes from this value and onwards for
	 * their own purposes; the assigned value itself is arbitrary.
	 */
	PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
};

/**
 * ORR policy operations
 */
enum nrs_ctl_orr {
	NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
	NRS_CTL_ORR_WR_QUANTUM,
	NRS_CTL_ORR_RD_OFF_TYPE,
	NRS_CTL_ORR_WR_OFF_TYPE,
	NRS_CTL_ORR_RD_SUPP_REQ,
	NRS_CTL_ORR_WR_SUPP_REQ,
};

/**
 * NRS policy operations.
 *
 * These determine the behaviour of a policy, and are called in response to
 * NRS core events.
 */
struct ptlrpc_nrs_pol_ops {
	/**
	 * Called during policy registration; this operation is optional.
	 *
	 * \param[in,out] policy The policy being initialized
	 */
	int	(*op_policy_init) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Called during policy unregistration; this operation is optional.
	 *
	 * \param[in,out] policy The policy being unregistered/finalized
	 */
	void	(*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Called when activating a policy via lprocfs; policies allocate and
	 * initialize their resources here; this operation is optional.
	 *
	 * \param[in,out] policy The policy being started
	 *
	 * \see nrs_policy_start_locked()
	 */
	int	(*op_policy_start) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Called when deactivating a policy via lprocfs; policies deallocate
	 * their resources here; this operation is optional
	 *
	 * \param[in,out] policy The policy being stopped
	 *
	 * \see nrs_policy_stop0()
	 */
	void	(*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Used for policy-specific operations; i.e. not generic ones like
	 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
	 * to an ioctl; this operation is optional.
	 *
	 * \param[in,out] policy The policy carrying out operation \a opc
	 * \param[in]	  opc	 The command operation being carried out
	 * \param[in,out] arg	 A generic buffer for communication between the
	 *			 user and the control operation
	 *
	 * \retval -ve error
	 * \retval   0 success
	 *
	 * \see ptlrpc_nrs_policy_control()
	 */
	int	(*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
				  enum ptlrpc_nrs_ctl opc, void *arg);

	/**
	 * Called when obtaining references to the resources of the resource
	 * hierarchy for a request that has arrived for handling at the PTLRPC
	 * service. Policies should return -ve for requests they do not wish
	 * to handle. This operation is mandatory.
	 *
	 * \param[in,out] policy	The policy we're getting resources for.
	 * \param[in,out] nrq		The request we are getting resources for.
	 * \param[in]	  parent	The parent resource of the resource being
	 *				requested; set to NULL if none.
	 * \param[out]	  resp		The resource is to be returned here; the
	 *				fallback policy in an NRS head should
	 *				\e always return a non-NULL pointer value.
	 * \param[in]	  moving_req	When set, signifies that this is an
	 *				attempt to obtain resources for a request
	 *				being moved to the high-priority NRS head
	 *				by ldlm_lock_reorder_req().
	 *				This implies two things:
	 *				1. We are under obd_export::exp_rpc_lock
	 *				and so should not sleep.
	 *				2. We should avoid performing
	 *				non-idempotent operations, and may skip
	 *				idempotent operations that were already
	 *				carried out when resources were first
	 *				taken for the request when it was
	 *				initialized in
	 *				ptlrpc_nrs_req_initialize().
	 *
	 * \retval 0, +ve The level of the returned resource in the resource
	 *		  hierarchy; currently only 0 (for a non-leaf resource)
	 *		  and 1 (for a leaf resource) are supported by the
	 *		  framework.
	 * \retval -ve	  error
	 *
	 * \see ptlrpc_nrs_req_initialize()
	 * \see ptlrpc_nrs_hpreq_add_nolock()
	 * \see ptlrpc_nrs_req_hp_move()
	 */
	int	(*op_res_get) (struct ptlrpc_nrs_policy *policy,
			       struct ptlrpc_nrs_request *nrq,
			       const struct ptlrpc_nrs_resource *parent,
			       struct ptlrpc_nrs_resource **resp,
			       bool moving_req);
	/**
	 * Called when releasing references taken for resources in the resource
	 * hierarchy for the request; this operation is optional.
	 *
	 * \param[in,out] policy The policy the resource belongs to
	 * \param[in]	  res	 The resource to be freed
	 *
	 * \see ptlrpc_nrs_req_finalize()
	 * \see ptlrpc_nrs_hpreq_add_nolock()
	 * \see ptlrpc_nrs_req_hp_move()
	 */
	void	(*op_res_put) (struct ptlrpc_nrs_policy *policy,
			       const struct ptlrpc_nrs_resource *res);

	/**
	 * Obtains a request for handling from the policy, and optionally
	 * removes the request from the policy; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy to poll
	 * \param[in]	  peek	 When set, signifies that we just want to
	 *			 examine the request, and not handle it, so the
	 *			 request is not removed from the policy.
	 * \param[in]	  force	 When set, it will force a policy to return a
	 *			 request if it has one queued.
	 *
	 * \retval NULL		 No request available for handling
	 * \retval valid-pointer The request polled for handling
	 *
	 * \see ptlrpc_nrs_req_get_nolock()
	 */
	struct ptlrpc_nrs_request *
		(*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
			       bool force);
	/**
	 * Called when attempting to add a request to a policy for later
	 * handling; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy on which to enqueue \a nrq
	 * \param[in,out] nrq	 The request to enqueue
	 *
	 * \retval 0	success
	 * \retval != 0	error
	 *
	 * \see ptlrpc_nrs_req_add_nolock()
	 */
	int	(*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
				   struct ptlrpc_nrs_request *nrq);
	/**
	 * Removes a request from the policy's set of pending requests. Normally
	 * called after a request has been polled successfully from the policy
	 * for handling; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy the request \a nrq belongs to
	 * \param[in,out] nrq	 The request to dequeue
	 *
	 * \see ptlrpc_nrs_req_del_nolock()
	 */
	void	(*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
				   struct ptlrpc_nrs_request *nrq);
	/**
	 * Called after the request has been carried out. Could be used for
	 * job/resource control; this operation is optional.
	 *
	 * \param[in,out] policy The policy that is finishing handling request
	 *			 \a nrq
	 * \param[in,out] nrq	 The request
	 *
	 * \pre assert_spin_locked(&svcpt->scp_req_lock)
	 *
	 * \see ptlrpc_nrs_req_stop_nolock()
	 */
	void	(*op_req_stop) (struct ptlrpc_nrs_policy *policy,
				struct ptlrpc_nrs_request *nrq);
	/**
	 * Registers the policy's lprocfs interface with a PTLRPC service.
	 *
	 * \param[in] svc The service
	 *
	 * \retval 0	success
	 * \retval != 0	error
	 */
	int	(*op_lprocfs_init) (struct ptlrpc_service *svc);
	/**
	 * Unregisters the policy's lprocfs interface from a PTLRPC service.
	 *
	 * In cases of failed policy registration in
	 * \e ptlrpc_nrs_policy_register(), this function may be called for a
	 * service which has not registered the policy successfully, so
	 * implementations of this method should make sure their operations are
	 * safe in such cases.
	 *
	 * \param[in] svc The service
	 */
	void	(*op_lprocfs_fini) (struct ptlrpc_service *svc);
};
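/*
 * A minimal policy therefore only has to supply the four mandatory methods;
 * a hypothetical ops table (the "nrs_example_*" handler names are invented
 * for illustration) would look like:
 *
 *	static const struct ptlrpc_nrs_pol_ops nrs_example_ops = {
 *		.op_res_get	= nrs_example_res_get,
 *		.op_req_get	= nrs_example_req_get,
 *		.op_req_enqueue	= nrs_example_req_enqueue,
 *		.op_req_dequeue	= nrs_example_req_dequeue,
 *	};
 *
 * with the optional methods added only as the policy needs them.
 */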

/**
 * Policy flags
 */
enum nrs_policy_flags {
	/**
	 * Fallback policy, use this flag only on a single supported policy per
	 * service. The flag cannot be used on policies that use
	 * \e PTLRPC_NRS_FL_REG_EXTERN
	 */
	PTLRPC_NRS_FL_FALLBACK		= (1 << 0),
	/**
	 * Start policy immediately after registering.
	 */
	PTLRPC_NRS_FL_REG_START		= (1 << 1),
	/**
	 * This is a policy registering from a module different to the one the
	 * NRS core ships in (currently ptlrpc).
	 */
	PTLRPC_NRS_FL_REG_EXTERN	= (1 << 2),
};

/**
 * NRS queue type.
 *
 * Denotes whether an NRS instance is for handling normal or high-priority
 * RPCs, or whether an operation pertains to one or both of the NRS instances
 * in a service.
 */
enum ptlrpc_nrs_queue_type {
	PTLRPC_NRS_QUEUE_REG	= (1 << 0),
	PTLRPC_NRS_QUEUE_HP	= (1 << 1),
	PTLRPC_NRS_QUEUE_BOTH	= (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
};

/**
 * NRS head
 *
 * A PTLRPC service has at least one NRS head instance for handling normal
 * priority RPCs, and may optionally have a second NRS head instance for
 * handling high-priority RPCs. Each NRS head maintains a list of available
 * policies, of which one and only one policy is acting as the fallback policy,
 * and optionally a different policy may be acting as the primary policy. For
 * all RPCs handled by this NRS head instance, NRS core will first attempt to
 * enqueue the RPC using the primary policy (if any). The fallback policy is
 * used in the following cases:
 * - when there was no primary policy in the
 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
 *   was initialized.
 * - when the primary policy that was at the
 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the
 *   RPC was initialized denoted that it did not wish, or for some other
 *   reason was not able, to handle the request, by returning a non-valid NRS
 *   resource reference.
 * - when the primary policy that was at the
 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the
 *   RPC was initialized fails later during the request enqueueing stage.
 *
 * \see nrs_resource_get_safe()
 * \see nrs_request_enqueue()
 */
struct ptlrpc_nrs {
	spinlock_t		nrs_lock;
	/** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
	/**
	 * List of registered policies
	 */
	struct list_head	nrs_policy_list;
	/**
	 * List of policies with queued requests. Policies that have any
	 * outstanding requests are queued here, and this list is queried
	 * in a round-robin manner from NRS core when obtaining a request
	 * for handling. This ensures that requests from policies that at some
	 * point transition away from the
	 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
	 */
	struct list_head	nrs_policy_queued;
	/**
	 * Service partition for this NRS head
	 */
	struct ptlrpc_service_part *nrs_svcpt;
	/**
	 * Primary policy, which is the preferred policy for handling RPCs
	 */
	struct ptlrpc_nrs_policy *nrs_policy_primary;
	/**
	 * Fallback policy, which is the backup policy for handling RPCs
	 */
	struct ptlrpc_nrs_policy *nrs_policy_fallback;
	/**
	 * This NRS head handles either HP or regular requests
	 */
	enum ptlrpc_nrs_queue_type nrs_queue_type;
	/**
	 * # queued requests from all policies in this NRS head
	 */
	unsigned long		nrs_req_queued;
	/**
	 * # scheduled requests from all policies in this NRS head
	 */
	unsigned long		nrs_req_started;
	/**
	 * # policies on this NRS
	 */
	unsigned		nrs_num_pols;
	/**
	 * This NRS head is in the process of starting a policy
	 */
	unsigned		nrs_policy_starting:1;
	/**
	 * In the process of shutting down the whole NRS head; used during
	 * unregistration
	 */
	unsigned		nrs_stopping:1;
};

#define NRS_POL_NAME_MAX 16

struct ptlrpc_nrs_pol_desc;

/**
 * Service compatibility predicate; this determines whether a policy is
 * adequate for handling RPCs of a particular PTLRPC service.
 *
 * XXX: This should give the same result during policy registration and
 * unregistration, and for all partitions of a service; so the result should
 * not depend on temporal service or other properties that may influence the
 * result.
 */
typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
				       const struct ptlrpc_nrs_pol_desc *desc);

struct ptlrpc_nrs_pol_conf {
	/**
	 * Human-readable policy name
	 */
	char				nc_name[NRS_POL_NAME_MAX];
	/**
	 * NRS operations for this policy
	 */
	const struct ptlrpc_nrs_pol_ops	*nc_ops;
	/**
	 * Service compatibility predicate
	 */
	nrs_pol_desc_compat_t		nc_compat;
	/**
	 * Set for policies that support a single ptlrpc service, i.e. ones
	 * that have \a pd_compat set to nrs_policy_compat_one(). The variable
	 * value depicts the name of the single service that such policies are
	 * compatible with.
	 */
	const char			*nc_compat_svc_name;
	/**
	 * Owner module for this policy descriptor; policies registering from
	 * a different module to the one the NRS framework is held within
	 * (currently ptlrpc), should set this field to THIS_MODULE.
	 */
	struct module			*nc_owner;
	/**
	 * Policy registration flags; a bitmask of \e nrs_policy_flags
	 */
	unsigned			nc_flags;
};
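/*
 * An external module would then register itself roughly as follows (a
 * sketch; the "nrs_example_*" names are invented for illustration, and
 * ptlrpc_nrs_policy_register() is the registration entry point referred to
 * elsewhere in this header):
 *
 *	static struct ptlrpc_nrs_pol_conf nrs_conf_example = {
 *		.nc_name	= "example",
 *		.nc_ops		= &nrs_example_ops,
 *		.nc_compat	= nrs_example_compat,
 *		.nc_owner	= THIS_MODULE,
 *		.nc_flags	= PTLRPC_NRS_FL_REG_EXTERN,
 *	};
 *
 *	rc = ptlrpc_nrs_policy_register(&nrs_conf_example);
 */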

/**
 * NRS policy registering descriptor
 *
 * Is used to hold a description of a policy that can be passed to NRS core in
 * order to register the policy with NRS heads in different PTLRPC services.
 */
struct ptlrpc_nrs_pol_desc {
	/**
	 * Human-readable policy name
	 */
	char				pd_name[NRS_POL_NAME_MAX];
	/**
	 * Link into nrs_core::nrs_policies
	 */
	struct list_head		pd_list;
	/**
	 * NRS operations for this policy
	 */
	const struct ptlrpc_nrs_pol_ops	*pd_ops;
	/**
	 * Service compatibility predicate
	 */
	nrs_pol_desc_compat_t		pd_compat;
	/**
	 * Set for policies that are compatible with only one PTLRPC service.
	 *
	 * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
	 */
	const char			*pd_compat_svc_name;
	/**
	 * Owner module for this policy descriptor.
	 *
	 * We need to hold a reference to the module whenever we might make use
	 * of any of the module's contents, i.e.
	 * - If one or more instances of the policy are at a state where they
	 *   might be handling a request, i.e.
	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
	 *   call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
	 *   is taken on the module when
	 *   \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
	 *   becomes 0, so that we hold only one reference to the module maximum
	 *   at any time.
	 *
	 *   We do not need to hold a reference to the module, even though we
	 *   might use code and data from the module, in the following cases:
	 * - During external policy registration, because this should happen in
	 *   the module's init() function, in which case the module is safe from
	 *   removal because a reference is being held on the module by the
	 *   kernel, and iirc kmod (and I guess module-init-tools also) will
	 *   serialize any racing processes properly anyway.
	 * - During external policy unregistration, because this should happen
	 *   in a module's exit() function, and any attempts to start a policy
	 *   instance would need to take a reference on the module, and this is
	 *   not possible once we have reached the point where the exit()
	 *   handler is called.
	 * - During service registration and unregistration, as service setup
	 *   and cleanup, and policy registration, unregistration and policy
	 *   instance starting, are serialized by \e nrs_core::nrs_mutex, so
	 *   as long as users adhere to the convention of registering policies
	 *   in init() and unregistering them in module exit() functions, there
	 *   should not be a race between these operations.
	 * - During any policy-specific lprocfs operations, because a reference
	 *   is held by the kernel on a proc entry that has been entered by a
	 *   syscall, so as long as proc entries are removed at unregistration
	 *   time, unregistration and lprocfs operations will be properly
	 *   serialized.
	 */
	struct module			*pd_owner;
	/**
	 * Bitmask of \e nrs_policy_flags
	 */
	unsigned			pd_flags;
	/**
	 * # of references on this descriptor
	 */
	atomic_t			pd_refs;
};

/**
 * NRS policy state
 *
 * Policies transition from one state to the other during their lifetime
 */
enum ptlrpc_nrs_pol_state {
	/**
	 * Not a valid policy state.
	 */
	NRS_POL_STATE_INVALID,
	/**
	 * Policies are at this state either at the start of their life, or
	 * transition here when the user selects a different policy to act
	 * as the primary one.
	 */
	NRS_POL_STATE_STOPPED,
	/**
	 * Policy is in the process of stopping
	 */
	NRS_POL_STATE_STOPPING,
	/**
	 * Policy is in the process of starting
	 */
	NRS_POL_STATE_STARTING,
	/**
	 * A policy is in this state in two cases:
	 * - it is the fallback policy, which is always in this state.
	 * - it has been activated by the user; i.e. it is the primary policy.
	 */
	NRS_POL_STATE_STARTED,
};

/**
 * NRS policy information
 *
 * Used for obtaining information on the status of a policy via lprocfs
 */
struct ptlrpc_nrs_pol_info {
	/**
	 * Policy name
	 */
	char				pi_name[NRS_POL_NAME_MAX];
	/**
	 * Current policy state
	 */
	enum ptlrpc_nrs_pol_state	pi_state;
	/**
	 * # RPCs enqueued for later dispatching by the policy
	 */
	long				pi_req_queued;
	/**
	 * # RPCs started for dispatch by the policy
	 */
	long				pi_req_started;
	/**
	 * Is this a fallback policy?
	 */
	unsigned			pi_fallback:1;
};

/**
 * NRS policy
 *
 * There is one instance of this for each policy in each NRS head of each
 * PTLRPC service partition.
 */
struct ptlrpc_nrs_policy {
	/**
	 * Linkage into the NRS head's list of policies,
	 * ptlrpc_nrs:nrs_policy_list
	 */
	struct list_head		pol_list;
	/**
	 * Linkage into the NRS head's list of policies with enqueued
	 * requests ptlrpc_nrs:nrs_policy_queued
	 */
	struct list_head		pol_list_queued;
	/**
	 * Current state of this policy
	 */
	enum ptlrpc_nrs_pol_state	pol_state;
	/**
	 * Bitmask of nrs_policy_flags
	 */
	unsigned			pol_flags;
	/**
	 * # RPCs enqueued for later dispatching by the policy
	 */
	long				pol_req_queued;
	/**
	 * # RPCs started for dispatch by the policy
	 */
	long				pol_req_started;
	/**
	 * Usage reference count taken on the policy instance
	 */
	long				pol_ref;
	/**
	 * The NRS head at which this policy has been created
	 */
	struct ptlrpc_nrs		*pol_nrs;
	/**
	 * Private policy data; varies by policy type
	 */
	void				*pol_private;
	/**
	 * Policy descriptor for this policy instance.
	 */
	struct ptlrpc_nrs_pol_desc	*pol_desc;
};
1104/**
1105 * NRS resource
1106 *
1107 * Resources are embedded into two types of NRS entities:
1108 * - Inside NRS policies, in the policy's private data in
1109 * ptlrpc_nrs_policy::pol_private
1110 * - In objects that act as prime-level scheduling entities in different NRS
1111 * policies; e.g. on a policy that performs round robin or similar order
1112 * scheduling across client NIDs, there would be one NRS resource per unique
1113 * client NID. On a policy which performs round robin scheduling across
1114 * backend filesystem objects, there would be one resource associated with
1115 * each of the backend filesystem objects partaking in the scheduling
1116 * performed by the policy.
1117 *
1118 * NRS resources share a parent-child relationship, in which resources embedded
1119 * in policy instances are the parent entities, with all scheduling entities
1120 * a policy schedules across being the children, thus forming a simple resource
1121 * hierarchy. This hierarchy may be extended with one or more levels in the
1122 * future if the ability to have more than one primary policy is added.
1123 *
1124 * Upon request initialization, references to the then active NRS policies are
1125 * taken and used to later handle the dispatching of the request with one of
1126 * these policies.
1127 *
1128 * \see nrs_resource_get_safe()
1129 * \see ptlrpc_nrs_req_add()
1130 */
1131struct ptlrpc_nrs_resource {
1132 /**
1133 * This NRS resource's parent; is NULL for resources embedded in NRS
1134 * policy instances; i.e. those are top-level ones.
1135 */
1136 struct ptlrpc_nrs_resource *res_parent;
1137 /**
1138 * The policy associated with this resource.
1139 */
1140 struct ptlrpc_nrs_policy *res_policy;
1141};
1142
1143enum {
1144 NRS_RES_FALLBACK,
1145 NRS_RES_PRIMARY,
1146 NRS_RES_MAX
1147};
1148
1149/* \name fifo
1150 *
1151 * FIFO policy
1152 *
1153 * This policy is a logical wrapper around previous, non-NRS functionality.
1154 * It dispatches RPCs in the same order as they arrive from the network. This
1155 * policy is currently used as the fallback policy, and the only enabled policy
1156 * on all NRS heads of all PTLRPC service partitions.
1157 * @{
1158 */
1159
1160/**
1161 * Private data structure for the FIFO policy
1162 */
1163struct nrs_fifo_head {
1164 /**
1165 * Resource object for policy instance.
1166 */
1167 struct ptlrpc_nrs_resource fh_res;
1168 /**
1169 * List of queued requests.
1170 */
1171 struct list_head fh_list;
1172 /**
1173 * For debugging purposes.
1174 */
1175 __u64 fh_sequence;
1176};
1177
1178struct nrs_fifo_req {
1179 struct list_head fr_list;
1180 __u64 fr_sequence;
1181};
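/*
 * Enqueueing into the FIFO policy is then essentially (a simplified sketch
 * of what the policy's op_req_enqueue handler does):
 *
 *	nrq->nr_u.fifo.fr_sequence = head->fh_sequence++;
 *	list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);
 *
 * and op_req_get simply peels requests off the front of \a fh_list.
 */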

/** @} fifo */

/**
 * \name CRR-N
 *
 * CRR-N, Client Round Robin over NIDs
 * @{
 */

/**
 * Private data structure for the CRR-N NRS policy
 */
struct nrs_crrn_net {
	struct ptlrpc_nrs_resource	cn_res;
	cfs_binheap_t			*cn_binheap;
	struct cfs_hash			*cn_cli_hash;
	/**
	 * Used when a new scheduling round commences, in order to synchronize
	 * all clients with the new round number.
	 */
	__u64				cn_round;
	/**
	 * Determines the relevant ordering amongst request batches within a
	 * scheduling round.
	 */
	__u64				cn_sequence;
	/**
	 * Round Robin quantum; the maximum number of RPCs that each request
	 * batch for each client can have in a scheduling round.
	 */
	__u16				cn_quantum;
};

/**
 * Object representing a client in CRR-N, as identified by its NID
 */
struct nrs_crrn_client {
	struct ptlrpc_nrs_resource	cc_res;
	struct hlist_node		cc_hnode;
	lnet_nid_t			cc_nid;
	/**
	 * The round number against which this client is currently scheduling
	 * requests.
	 */
	__u64				cc_round;
	/**
	 * The sequence number used for requests scheduled by this client during
	 * the current round number.
	 */
	__u64				cc_sequence;
	atomic_t			cc_ref;
	/**
	 * Round Robin quantum; the maximum number of RPCs the client is allowed
	 * to schedule in a single batch of each round.
	 */
	__u16				cc_quantum;
	/**
	 * # of pending requests for this client, on all existing rounds
	 */
	__u16				cc_active;
};

/**
 * CRR-N NRS request definition
 */
struct nrs_crrn_req {
	/**
	 * Round number for this request; shared with all other requests in the
	 * same batch.
	 */
	__u64			cr_round;
	/**
	 * Sequence number for this request; shared with all other requests in
	 * the same batch.
	 */
	__u64			cr_sequence;
};

/**
 * CRR-N policy operations.
 */
enum nrs_ctl_crr {
	/**
	 * Read the RR quantum size of a CRR-N policy.
	 */
	NRS_CTL_CRRN_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
	/**
	 * Write the RR quantum size of a CRR-N policy.
	 */
	NRS_CTL_CRRN_WR_QUANTUM,
};

/** @} CRR-N */

/**
 * \name ORR/TRR
 *
 * ORR/TRR (Object-based Round Robin/Target-based Round Robin) NRS policies
 * @{
 */

/**
 * Lower and upper byte offsets of a brw RPC
 */
struct nrs_orr_req_range {
	__u64	or_start;
	__u64	or_end;
};

/**
 * RPC types supported by the ORR/TRR policies
 */
enum nrs_orr_supp {
	NOS_OST_READ	= (1 << 0),
	NOS_OST_WRITE	= (1 << 1),
	NOS_OST_RW	= (NOS_OST_READ | NOS_OST_WRITE),
	/**
	 * Default value for policies.
	 */
	NOS_DFLT	= NOS_OST_READ
};

/**
 * As unique keys for grouping RPCs together, we use the object's OST FID for
 * the ORR policy, and the OST index for the TRR policy.
 *
 * XXX: We waste some space for TRR policy instances by using a union, but it
 * allows us to consolidate some of the code between ORR and TRR, and these
 * policies will probably eventually merge into one anyway.
 */
struct nrs_orr_key {
	union {
		/** object FID for ORR */
		struct lu_fid	ok_fid;
		/** OST index for TRR */
		__u32		ok_idx;
	};
};

/**
 * The largest base string for unique hash/slab object names is
 * "nrs_orr_reg_", so 13 characters. We add 3 to this to be used for the CPT
 * id number, so this _should_ be more than enough for the maximum number of
 * CPTs on any system. If it does happen that this statement is incorrect,
 * nrs_orr_genobjname() will inevitably yield a non-unique name and cause
 * kmem_cache_create() to complain (on Linux), so the erroneous situation
 * will hopefully not go unnoticed.
 */
#define NRS_ORR_OBJ_NAME_MAX	(sizeof("nrs_orr_reg_") + 3)
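/*
 * i.e. the bound is sized for name generation along these lines (a sketch
 * of what nrs_orr_genobjname() is expected to do; the exact format string
 * is an assumption here):
 *
 *	snprintf(od->od_objname, NRS_ORR_OBJ_NAME_MAX, "nrs_orr_reg_%d",
 *		 cpt_id);
 */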

/**
 * Private data structure for the ORR and TRR NRS policies
 */
struct nrs_orr_data {
	struct ptlrpc_nrs_resource	od_res;
	cfs_binheap_t			*od_binheap;
	struct cfs_hash			*od_obj_hash;
	struct kmem_cache		*od_cache;
	/**
	 * Used when a new scheduling round commences, in order to synchronize
	 * all object or OST batches with the new round number.
	 */
	__u64				od_round;
	/**
	 * Determines the relevant ordering amongst request batches within a
	 * scheduling round.
	 */
	__u64				od_sequence;
	/**
	 * RPC types that are currently supported.
	 */
	enum nrs_orr_supp		od_supp;
	/**
	 * Round Robin quantum; the maximum number of RPCs that each request
	 * batch for each object or OST can have in a scheduling round.
	 */
	__u16				od_quantum;
	/**
	 * Whether to use physical disk offsets or logical file offsets.
	 */
	bool				od_physical;
	/**
	 * XXX: We need to provide a persistently allocated string to hold
	 * unique object names for this policy, since in the Linux versions
	 * currently supported by Lustre, kmem_cache_create() just sets a
	 * pointer to the name string provided. kstrdup() is used in the
	 * version of kmem_cache_create() in current Linux mainline, so we
	 * may be able to remove this in the future.
	 */
	char				od_objname[NRS_ORR_OBJ_NAME_MAX];
};

/**
 * Represents a backend-fs object or OST in the ORR and TRR policies
 * respectively
 */
struct nrs_orr_object {
	struct ptlrpc_nrs_resource	oo_res;
	struct hlist_node		oo_hnode;
	/**
	 * The round number against which requests are being scheduled for this
	 * object or OST
	 */
	__u64				oo_round;
	/**
	 * The sequence number used for requests scheduled for this object or
	 * OST during the current round number.
	 */
	__u64				oo_sequence;
	/**
	 * The key of the object or OST for which this structure instance is
	 * scheduling RPCs
	 */
	struct nrs_orr_key		oo_key;
	atomic_t			oo_ref;
	/**
	 * Round Robin quantum; the maximum number of RPCs that are allowed to
	 * be scheduled for the object or OST in a single batch of each round.
	 */
	__u16				oo_quantum;
	/**
	 * # of pending requests for this object or OST, on all existing rounds
	 */
	__u16				oo_active;
};

/**
 * ORR/TRR NRS request definition
 */
struct nrs_orr_req {
	/**
	 * The offset range this request covers
	 */
	struct nrs_orr_req_range	or_range;
	/**
	 * Round number for this request; shared with all other requests in the
	 * same batch.
	 */
	__u64				or_round;
	/**
	 * Sequence number for this request; shared with all other requests in
	 * the same batch.
	 */
	__u64				or_sequence;
	/**
	 * For debugging purposes.
	 */
	struct nrs_orr_key		or_key;
	/**
	 * An ORR policy instance has filled in request information while
	 * enqueueing the request on the service partition's regular NRS head.
	 */
	unsigned int			or_orr_set:1;
	/**
	 * A TRR policy instance has filled in request information while
	 * enqueueing the request on the service partition's regular NRS head.
	 */
	unsigned int			or_trr_set:1;
	/**
	 * Request offset ranges have been filled in with logical offset
	 * values.
	 */
	unsigned int			or_logical_set:1;
	/**
	 * Request offset ranges have been filled in with physical offset
	 * values.
	 */
	unsigned int			or_physical_set:1;
};

/** @} ORR/TRR */
/**
 * NRS request
 *
 * Instances of this object exist embedded within ptlrpc_request; the main
 * purpose of this object is to hold references to the request's resources
 * for the lifetime of the request, and to hold properties that policies
 * use for determining the request's scheduling priority.
 */
struct ptlrpc_nrs_request {
	/**
	 * The request's resource hierarchy.
	 */
	struct ptlrpc_nrs_resource	*nr_res_ptrs[NRS_RES_MAX];
	/**
	 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
	 * policy that was used to enqueue the request.
	 *
	 * \see nrs_request_enqueue()
	 */
	unsigned			nr_res_idx;
	unsigned			nr_initialized:1;
	unsigned			nr_enqueued:1;
	unsigned			nr_started:1;
	unsigned			nr_finalized:1;
	cfs_binheap_node_t		nr_node;

	/**
	 * Policy-specific fields, used for determining a request's scheduling
	 * priority, and other supporting functionality.
	 */
	union {
		/**
		 * Fields for the FIFO policy
		 */
		struct nrs_fifo_req	fifo;
		/**
		 * CRR-N request definition
		 */
		struct nrs_crrn_req	crr;
		/** ORR and TRR share the same request definition */
		struct nrs_orr_req	orr;
	} nr_u;
	/**
	 * Externally-registering policies may want to use this to allocate
	 * their own request properties.
	 */
	void				*ext;
};

/** @} nrs */

/**
 * Basic request prioritization operations structure.
 * The whole idea is centered around locks and RPCs that might affect locks.
 * When a lock is contended we try to give priority to RPCs that might lead
 * to the fastest release of that lock.
 * Currently implemented only for OSTs, in a way that gives all IO and
 * truncate RPCs coming from a locked region where a lock is contended
 * priority over other requests.
 */
struct ptlrpc_hpreq_ops {
	/**
	 * Check if the lock handle of the given lock is the same as
	 * taken from the request.
	 */
	int  (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
	/**
	 * Check if the request is a high priority one.
	 */
	int  (*hpreq_check)(struct ptlrpc_request *);
	/**
	 * Called after the request has been handled.
	 */
	void (*hpreq_fini)(struct ptlrpc_request *);
};

/**
 * Represents a remote procedure call.
 *
 * This is a staple structure used by everybody wanting to send a request
 * in Lustre.
 */
struct ptlrpc_request {
	/* Request type: one of PTL_RPC_MSG_* */
	int			rq_type;
	/** Result of request processing */
	int			rq_status;
	/**
	 * Linkage item through which this request is included into
	 * sending/delayed lists on client and into rqbd list on server
	 */
	struct list_head	rq_list;
	/**
	 * Server side list of incoming unserved requests sorted by arrival
	 * time. Traversed from time to time to notice requests that are
	 * about to expire and to send back "early replies" to clients to
	 * let them know the server is alive and well, just very busy
	 * servicing their requests in time
	 */
	struct list_head	rq_timed_list;
	/** server-side history, used for debugging purposes. */
	struct list_head	rq_history_list;
	/** server-side per-export list */
	struct list_head	rq_exp_list;
	/** server-side hp handlers */
	struct ptlrpc_hpreq_ops	*rq_ops;

	/** initial thread servicing this request */
	struct ptlrpc_thread	*rq_svc_thread;

	/** history sequence # */
	__u64			rq_history_seq;
	/** \addtogroup nrs
	 * @{
	 */
	/** stub for NRS request */
	struct ptlrpc_nrs_request rq_nrq;
	/** @} nrs */
	/** the index of service's srv_at_array into which request is linked */
	time_t			rq_at_index;
	/** Lock to protect request flags and some other important bits, like
	 * rq_list
	 */
	spinlock_t		rq_lock;
	/** client-side flags are serialized by rq_lock */
	unsigned int rq_intr:1, rq_replied:1, rq_err:1,
		rq_timedout:1, rq_resend:1, rq_restart:1,
		/**
		 * when ->rq_replay is set, request is kept by the client even
		 * after the server commits the corresponding transaction. This
		 * is used for operations that require a sequence of multiple
		 * requests to be replayed. The only example currently is file
		 * open/close. When the last request in such a sequence is
		 * committed, ->rq_replay is cleared on all requests in the
		 * sequence.
		 */
		rq_replay:1,
		rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
		rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
		rq_early:1,
		rq_req_unlink:1, rq_reply_unlink:1,
		rq_memalloc:1,		/* req originated from "kswapd" */
		/* server-side flags */
		rq_packed_final:1,	/* packed final reply */
		rq_hp:1,		/* high priority RPC */
		rq_at_linked:1,		/* link into service's srv_at_array */
		rq_reply_truncate:1,
		rq_committed:1,
		/* whether the "rq_set" is a valid one */
		rq_invalid_rqset:1,
		rq_generation_set:1,
		/* do not resend request on -EINPROGRESS */
		rq_no_retry_einprogress:1,
		/* allow the req to be sent if the import is in recovery
		 * status */
		rq_allow_replay:1;

	unsigned int		rq_nr_resend;

	enum rq_phase		rq_phase;	/* one of RQ_PHASE_* */
	enum rq_phase		rq_next_phase;	/* one of RQ_PHASE_* to be used
						   next */
	atomic_t		rq_refcount;	/* client-side refcount for SENT
						   race, server-side refcount
						   for multiple replies */

	/** Portal to which this request would be sent */
	short			rq_request_portal; /* XXX FIXME bug 249 */
	/** Portal where to wait for reply and where reply would be sent */
	short			rq_reply_portal;   /* XXX FIXME bug 249 */

	/**
	 * client-side:
	 * !rq_truncate : # reply bytes actually received,
	 * rq_truncate : required repbuf_len for resend
	 */
	int			rq_nob_received;
	/** Request length */
	int			rq_reqlen;
	/** Reply length */
	int			rq_replen;
	/** Request message - what client sent */
	struct lustre_msg	*rq_reqmsg;
	/** Reply message - server response */
	struct lustre_msg	*rq_repmsg;
	/** Transaction number */
	__u64			rq_transno;
	/** xid */
	__u64			rq_xid;
	/**
	 * List item for the replay list. Not yet committed requests get
	 * linked there.
	 * Also see \a rq_replay comment above.
	 */
	struct list_head	rq_replay_list;

	/**
	 * security and encryption data
	 * @{ */
	struct ptlrpc_cli_ctx	*rq_cli_ctx;	/**< client's half ctx */
	struct ptlrpc_svc_ctx	*rq_svc_ctx;	/**< server's half ctx */
	struct list_head	rq_ctx_chain;	/**< link to waited ctx */

	struct sptlrpc_flavor	rq_flvr;	/**< for client & server */
	enum lustre_sec_part	rq_sp_from;

	/* client/server security flags */
	unsigned int
		rq_ctx_init:1,		/* context initiation */
		rq_ctx_fini:1,		/* context destroy */
		rq_bulk_read:1,		/* request bulk read */
		rq_bulk_write:1,	/* request bulk write */
		/* server authentication flags */
		rq_auth_gss:1,		/* authenticated by gss */
		rq_auth_remote:1,	/* authed as remote user */
		rq_auth_usr_root:1,	/* authed as root */
		rq_auth_usr_mdt:1,	/* authed as mdt */
		rq_auth_usr_ost:1,	/* authed as ost */
		/* security tfm flags */
		rq_pack_udesc:1,
		rq_pack_bulk:1,
		/* doesn't expect reply FIXME */
		rq_no_reply:1,
		rq_pill_init:1;		/* pill initialized */

	uid_t			rq_auth_uid;	    /* authed uid */
	uid_t			rq_auth_mapped_uid; /* authed uid mapped to */

	/* (server side), pointed directly into req buffer */
	struct ptlrpc_user_desc	*rq_user_desc;

	/* various buffer pointers */
	struct lustre_msg	*rq_reqbuf;	/* req wrapper */
	char			*rq_repbuf;	/* rep buffer */
	struct lustre_msg	*rq_repdata;	/* rep wrapper msg */
	struct lustre_msg	*rq_clrbuf;	/* only in priv mode */
	int			rq_reqbuf_len;	/* req wrapper buf len */
	int			rq_reqdata_len;	/* req wrapper msg len */
	int			rq_repbuf_len;	/* rep buffer len */
	int			rq_repdata_len;	/* rep wrapper msg len */
	int			rq_clrbuf_len;	/* only in priv mode */
	int			rq_clrdata_len;	/* only in priv mode */

	/** early replies go to offset 0, regular replies go after that */
	unsigned int		rq_reply_off;

	/** @} */

	/** Fields that help to see if request and reply were swabbed or not */
	__u32			rq_req_swab_mask;
	__u32			rq_rep_swab_mask;
1704
1705 /** What was import generation when this request was sent */
1706 int rq_import_generation;
1707 enum lustre_imp_state rq_send_state;
1708
1709 /** how many early replies (for stats) */
1710 int rq_early_count;
1711
1712 /** client+server request */
1713 lnet_handle_md_t rq_req_md_h;
1714 struct ptlrpc_cb_id rq_req_cbid;
1715 /** optional time limit for send attempts */
1716 cfs_duration_t rq_delay_limit;
1717 /** time request was first queued */
1718 cfs_time_t rq_queued_time;
1719
1720 /* server-side... */
1721 /** request arrival time */
1722 struct timeval rq_arrival_time;
1723 /** separated reply state */
1724 struct ptlrpc_reply_state *rq_reply_state;
1725 /** incoming request buffer */
1726 struct ptlrpc_request_buffer_desc *rq_rqbd;
1727
1728 /** client-only incoming reply */
1729 lnet_handle_md_t rq_reply_md_h;
1730 wait_queue_head_t rq_reply_waitq;
1731 struct ptlrpc_cb_id rq_reply_cbid;
1732
1733 /** our LNet NID */
1734 lnet_nid_t rq_self;
1735 /** Peer description (the other side) */
1736 lnet_process_id_t rq_peer;
1737 /** Server-side, export on which request was received */
1738 struct obd_export *rq_export;
1739 /** Client side, import where request is being sent */
1740 struct obd_import *rq_import;
1741
1742 /** Replay callback, called after request is replayed at recovery */
1743 void (*rq_replay_cb)(struct ptlrpc_request *);
1744 /**
1745 * Commit callback, called when request is committed and about to be
1746 * freed.
1747 */
1748 void (*rq_commit_cb)(struct ptlrpc_request *);
1749	/** Opaque data for replay and commit callbacks. */
1750 void *rq_cb_data;
1751
1752 /** For bulk requests on client only: bulk descriptor */
1753 struct ptlrpc_bulk_desc *rq_bulk;
1754
1755 /** client outgoing req */
1756 /**
1757 * when request/reply sent (secs), or time when request should be sent
1758 */
1759 time_t rq_sent;
1760 /** time for request really sent out */
1761 time_t rq_real_sent;
1762
1763 /** when request must finish. volatile
1764 * so that servers' early reply updates to the deadline aren't
1765 * kept in per-cpu cache */
1766 volatile time_t rq_deadline;
1767 /** when req reply unlink must finish. */
1768 time_t rq_reply_deadline;
1769 /** when req bulk unlink must finish. */
1770 time_t rq_bulk_deadline;
1771 /**
1772 * service time estimate (secs)
1773	 * If the request is not served by this time, it is marked as timed out.
1774 */
1775 int rq_timeout;
1776
1777 /** Multi-rpc bits */
1778 /** Per-request waitq introduced by bug 21938 for recovery waiting */
1779 wait_queue_head_t rq_set_waitq;
1780 /** Link item for request set lists */
1781 struct list_head rq_set_chain;
1782 /** Link back to the request set */
1783 struct ptlrpc_request_set *rq_set;
1784 /** Async completion handler, called when reply is received */
1785 ptlrpc_interpterer_t rq_interpret_reply;
1786 /** Async completion context */
1787 union ptlrpc_async_args rq_async_args;
1788
1789 /** Pool if request is from preallocated list */
1790 struct ptlrpc_request_pool *rq_pool;
1791
1792 struct lu_context rq_session;
1793 struct lu_context rq_recov_session;
1794
1795 /** request format description */
1796 struct req_capsule rq_pill;
1797};
1798
1799/**
1800 * Call the completion handler for the RPC, if any; return its status, or the
1801 * original rc if no handler was defined for this request.
1802 */
1803static inline int ptlrpc_req_interpret(const struct lu_env *env,
1804 struct ptlrpc_request *req, int rc)
1805{
1806 if (req->rq_interpret_reply != NULL) {
1807 req->rq_status = req->rq_interpret_reply(env, req,
1808 &req->rq_async_args,
1809 rc);
1810 return req->rq_status;
1811 }
1812 return rc;
1813}
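
/*
 * Illustrative sketch (not part of the original header): an async
 * completion handler as invoked by ptlrpc_req_interpret() above.  The
 * handler name and argument struct are hypothetical; only the call
 * signature is taken from the code above.
 */
struct example_async_args {
	int	eaa_flags;
};

static inline int example_interpret(const struct lu_env *env,
				    struct ptlrpc_request *req,
				    void *args, int rc)
{
	struct example_async_args *eaa = args;

	/* inspect req->rq_repmsg and rc here; the return value becomes
	 * req->rq_status in ptlrpc_req_interpret() */
	return rc == 0 ? eaa->eaa_flags : rc;
}
/* a caller would set req->rq_interpret_reply = example_interpret and
 * stash its example_async_args inside req->rq_async_args before sending */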
1814
1815/** \addtogroup nrs
1816 * @{
1817 */
1818int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf);
1819int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf);
1820void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req);
1821void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
1822 struct ptlrpc_nrs_pol_info *info);
1823
1824/*
1825 * Can the request be moved from the regular NRS head to the high-priority NRS
1826 * head (of the same PTLRPC service partition), if any?
1827 *
1828 * For a reliable result, this should be checked under svcpt->scp_req lock.
1829 */
1830static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
1831{
1832 struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
1833
1834 /**
1835 * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
1836 * request has been enqueued first, and ptlrpc_nrs_request::nr_started
1837 * to make sure it has not been scheduled yet (analogous to previous
1838	 * (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list)).
1839 */
1840 return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
1841}
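
/*
 * Illustrative sketch (hypothetical helper, not in the original header):
 * performing the check under the lock, as the comment above requires.
 * The caller passes &svcpt->scp_req_lock of the service partition that
 * owns \a req.
 */
static inline bool
example_nrs_req_can_move_locked(spinlock_t *scp_req_lock,
				struct ptlrpc_request *req)
{
	bool can;

	spin_lock(scp_req_lock);
	can = ptlrpc_nrs_req_can_move(req);
	spin_unlock(scp_req_lock);
	return can;
}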
1842/** @} nrs */
1843
1844/**
1845 * Returns 1 if request buffer at offset \a index was already swabbed
1846 */
1847static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
1848{
1849 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
1850 return req->rq_req_swab_mask & (1 << index);
1851}
1852
1853/**
1854 * Returns 1 if request reply buffer at offset \a index was already swabbed
1855 */
1856static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
1857{
1858 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
1859 return req->rq_rep_swab_mask & (1 << index);
1860}
1861
1862/**
1863 * Returns 1 if request needs to be swabbed into local cpu byteorder
1864 */
1865static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
1866{
1867 return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1868}
1869
1870/**
1871 * Returns 1 if request reply needs to be swabbed into local cpu byteorder
1872 */
1873static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
1874{
1875 return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1876}
1877
1878/**
1879 * Mark request buffer at offset \a index that it was already swabbed
1880 */
1881static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
1882{
1883 LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
1884 LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
1885 req->rq_req_swab_mask |= 1 << index;
1886}
1887
1888/**
1889 * Mark request reply buffer at offset \a index that it was already swabbed
1890 */
1891static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
1892{
1893 LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
1894 LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
1895 req->rq_rep_swab_mask |= 1 << index;
1896}
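
/*
 * Illustrative sketch (hypothetical helper): the usual "swab once"
 * pattern built from the mask helpers above.  A real caller would
 * byte-swap the reply buffer at \a index before setting the mask bit.
 */
static inline void example_rep_swab_once(struct ptlrpc_request *req, int index)
{
	if (lustre_rep_swabbed(req, index))
		return;		/* already in host byte order */
	/* ... __swab*() the reply buffer at offset \a index here ... */
	lustre_set_rep_swabbed(req, index);
}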
1897
1898/**
1899 * Convert numerical request phase value \a phase into text string description
1900 */
1901static inline const char *
1902ptlrpc_phase2str(enum rq_phase phase)
1903{
1904 switch (phase) {
1905 case RQ_PHASE_NEW:
1906 return "New";
1907 case RQ_PHASE_RPC:
1908 return "Rpc";
1909 case RQ_PHASE_BULK:
1910 return "Bulk";
1911 case RQ_PHASE_INTERPRET:
1912 return "Interpret";
1913 case RQ_PHASE_COMPLETE:
1914 return "Complete";
1915 case RQ_PHASE_UNREGISTERING:
1916 return "Unregistering";
1917 default:
1918 return "?Phase?";
1919 }
1920}
1921
1922/**
1923 * Convert numerical request phase of the request \a req into text string
1924 * description
1925 */
1926static inline const char *
1927ptlrpc_rqphase2str(struct ptlrpc_request *req)
1928{
1929 return ptlrpc_phase2str(req->rq_phase);
1930}
1931
1932/**
1933 * Debugging functions and helpers to print request structure into debug log
1934 * @{
1935 */
1936/* Spare the preprocessor, spoil the bugs. */
1937#define FLAG(field, str) (field ? str : "")
1938
1939/** Convert bit flags into a string */
1940#define DEBUG_REQ_FLAGS(req) \
1941 ptlrpc_rqphase2str(req), \
1942 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
1943 FLAG(req->rq_err, "E"), \
1944 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
1945 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
1946 FLAG(req->rq_no_resend, "N"), \
1947 FLAG(req->rq_waiting, "W"), \
1948 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
1949 FLAG(req->rq_committed, "M")
1950
1951#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
1952
1953void _debug_req(struct ptlrpc_request *req,
1954 struct libcfs_debug_msg_data *data, const char *fmt, ...)
1955 __attribute__ ((format (printf, 3, 4)));
1956
1957/**
1958 * Helper that decides if we need to print request according to current debug
1959 * level settings
1960 */
1961#define debug_req(msgdata, mask, cdls, req, fmt, a...) \
1962do { \
1963 CFS_CHECK_STACK(msgdata, mask, cdls); \
1964 \
1965 if (((mask) & D_CANTMASK) != 0 || \
1966 ((libcfs_debug & (mask)) != 0 && \
1967 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
1968 _debug_req((req), msgdata, fmt, ##a); \
1969} while (0)
1970
1971/**
1972 * This is the debug print function you need to use to print request structure
1973 * content into lustre debug log.
1974 * for most callers (level is a constant) this is resolved at compile time */
1975#define DEBUG_REQ(level, req, fmt, args...) \
1976do { \
1977 if ((level) & (D_ERROR | D_WARNING)) { \
1978		static struct cfs_debug_limit_state cdls;		      \
1979 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
1980 debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
1981 } else { \
1982 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
1983 debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
1984 } \
1985} while (0)
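
/*
 * Illustrative usage (hypothetical wrapper): D_WARNING takes the
 * rate-limited branch of DEBUG_REQ() above, so repeated timeouts will
 * not flood the log.
 */
static inline void example_log_timeout(struct ptlrpc_request *req, int rc)
{
	DEBUG_REQ(D_WARNING, req, "request timed out: rc = %d", rc);
}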
1986/** @} */
1987
1988/**
1989 * Structure that defines a single page of a bulk transfer
1990 */
1991struct ptlrpc_bulk_page {
1992 /** Linkage to list of pages in a bulk */
1993 struct list_head bp_link;
1994 /**
1995 * Number of bytes in a page to transfer starting from \a bp_pageoffset
1996 */
1997 int bp_buflen;
1998 /** offset within a page */
1999 int bp_pageoffset;
2000 /** The page itself */
2001 struct page *bp_page;
2002};
2003
2004#define BULK_GET_SOURCE 0
2005#define BULK_PUT_SINK 1
2006#define BULK_GET_SINK 2
2007#define BULK_PUT_SOURCE 3
2008
2009/**
2010 * Definition of bulk descriptor.
2011 * Bulks are special "Two phase" RPCs where initial request message
2012 * is sent first and is followed by a transfer (or receipt) of a large
2013 * amount of data to be settled into pages referenced from the bulk descriptors.
2014 * Bulks transfers (the actual data following the small requests) are done
2015 * on separate LNet portals.
2016 * In lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
2017 * Another user is readpage for MDT.
2018 */
2019struct ptlrpc_bulk_desc {
2020 /** completed with failure */
2021 unsigned long bd_failure:1;
2022 /** {put,get}{source,sink} */
2023 unsigned long bd_type:2;
2024 /** client side */
2025 unsigned long bd_registered:1;
2026 /** For serialization with callback */
2027 spinlock_t bd_lock;
2028 /** Import generation when request for this bulk was sent */
2029 int bd_import_generation;
2030 /** LNet portal for this bulk */
2031 __u32 bd_portal;
2032 /** Server side - export this bulk created for */
2033 struct obd_export *bd_export;
2034 /** Client side - import this bulk was sent on */
2035 struct obd_import *bd_import;
2036 /** Back pointer to the request */
2037 struct ptlrpc_request *bd_req;
2038 wait_queue_head_t bd_waitq; /* server side only WQ */
2039 int bd_iov_count; /* # entries in bd_iov */
2040 int bd_max_iov; /* allocated size of bd_iov */
2041 int bd_nob; /* # bytes covered */
2042 int bd_nob_transferred; /* # bytes GOT/PUT */
2043
2044 __u64 bd_last_xid;
2045
2046 struct ptlrpc_cb_id bd_cbid; /* network callback info */
2047 lnet_nid_t bd_sender; /* stash event::sender */
2048 int bd_md_count; /* # valid entries in bd_mds */
2049 int bd_md_max_brw; /* max entries in bd_mds */
2050 /** array of associated MDs */
2051 lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
2052
2053 /*
2054 * encrypt iov, size is either 0 or bd_iov_count.
2055 */
2056 lnet_kiov_t *bd_enc_iov;
2057
2058 lnet_kiov_t bd_iov[0];
2059};
2060
2061enum {
2062 SVC_STOPPED = 1 << 0,
2063 SVC_STOPPING = 1 << 1,
2064 SVC_STARTING = 1 << 2,
2065 SVC_RUNNING = 1 << 3,
2066 SVC_EVENT = 1 << 4,
2067 SVC_SIGNAL = 1 << 5,
2068};
2069
2070#define PTLRPC_THR_NAME_LEN 32
2071/**
2072 * Definition of server service thread structure
2073 */
2074struct ptlrpc_thread {
2075 /**
2076 * List of active threads in svc->srv_threads
2077 */
2078 struct list_head t_link;
2079 /**
2080 * thread-private data (preallocated memory)
2081 */
2082 void *t_data;
2083 __u32 t_flags;
2084 /**
2085 * service thread index, from ptlrpc_start_threads
2086 */
2087 unsigned int t_id;
2088 /**
2089 * service thread pid
2090 */
2091 pid_t t_pid;
2092 /**
2093 * put watchdog in the structure per thread b=14840
2094 *
2095	 * Lustre watchdog is removed for the client in the hope
2096	 * that a generic watchdog can be merged into the kernel.
2097	 * When that happens, we should add the code below back:
2098 *
2099 * struct lc_watchdog *t_watchdog;
2100	 */
2101 /**
2102 * the svc this thread belonged to b=18582
2103 */
2104 struct ptlrpc_service_part *t_svcpt;
2105 wait_queue_head_t t_ctl_waitq;
2106 struct lu_env *t_env;
2107 char t_name[PTLRPC_THR_NAME_LEN];
2108};
2109
2110static inline int thread_is_init(struct ptlrpc_thread *thread)
2111{
2112 return thread->t_flags == 0;
2113}
2114
2115static inline int thread_is_stopped(struct ptlrpc_thread *thread)
2116{
2117 return !!(thread->t_flags & SVC_STOPPED);
2118}
2119
2120static inline int thread_is_stopping(struct ptlrpc_thread *thread)
2121{
2122 return !!(thread->t_flags & SVC_STOPPING);
2123}
2124
2125static inline int thread_is_starting(struct ptlrpc_thread *thread)
2126{
2127 return !!(thread->t_flags & SVC_STARTING);
2128}
2129
2130static inline int thread_is_running(struct ptlrpc_thread *thread)
2131{
2132 return !!(thread->t_flags & SVC_RUNNING);
2133}
2134
2135static inline int thread_is_event(struct ptlrpc_thread *thread)
2136{
2137 return !!(thread->t_flags & SVC_EVENT);
2138}
2139
2140static inline int thread_is_signal(struct ptlrpc_thread *thread)
2141{
2142 return !!(thread->t_flags & SVC_SIGNAL);
2143}
2144
2145static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
2146{
2147 thread->t_flags &= ~flags;
2148}
2149
2150static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
2151{
2152 thread->t_flags = flags;
2153}
2154
2155static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
2156{
2157 thread->t_flags |= flags;
2158}
2159
2160static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
2161 __u32 flags)
2162{
2163 if (thread->t_flags & flags) {
2164 thread->t_flags &= ~flags;
2165 return 1;
2166 }
2167 return 0;
2168}
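
/*
 * Illustrative sketch (hypothetical helper): the transition a service
 * thread typically makes once startup completes, built from the flag
 * helpers above.
 */
static inline void example_thread_started(struct ptlrpc_thread *thread)
{
	thread_clear_flags(thread, SVC_STARTING);
	thread_add_flags(thread, SVC_RUNNING);
	wake_up(&thread->t_ctl_waitq);
}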
2169
2170/**
2171 * Request buffer descriptor structure.
2172 * This is a structure that contains one posted request buffer for service.
2173 * Once data lands in a buffer, the event callback creates the actual request
2174 * and wakes one of the service threads to process the new incoming request.
2175 * More than one request can fit into the buffer.
2176 */
2177struct ptlrpc_request_buffer_desc {
2178 /** Link item for rqbds on a service */
2179 struct list_head rqbd_list;
2180 /** History of requests for this buffer */
2181 struct list_head rqbd_reqs;
2182 /** Back pointer to service for which this buffer is registered */
2183 struct ptlrpc_service_part *rqbd_svcpt;
2184 /** LNet descriptor */
2185 lnet_handle_md_t rqbd_md_h;
2186 int rqbd_refcount;
2187 /** The buffer itself */
2188 char *rqbd_buffer;
2189 struct ptlrpc_cb_id rqbd_cbid;
2190 /**
2191 * This "embedded" request structure is only used for the
2192 * last request to fit into the buffer
2193 */
2194 struct ptlrpc_request rqbd_req;
2195};
2196
2197typedef int (*svc_handler_t)(struct ptlrpc_request *req);
2198
2199struct ptlrpc_service_ops {
2200 /**
2201 * if non-NULL called during thread creation (ptlrpc_start_thread())
2202 * to initialize service specific per-thread state.
2203 */
2204 int (*so_thr_init)(struct ptlrpc_thread *thr);
2205 /**
2206 * if non-NULL called during thread shutdown (ptlrpc_main()) to
2207	 * destruct state created by ->so_thr_init().
2208 */
2209 void (*so_thr_done)(struct ptlrpc_thread *thr);
2210 /**
2211 * Handler function for incoming requests for this service
2212 */
2213 int (*so_req_handler)(struct ptlrpc_request *req);
2214 /**
2215 * function to determine priority of the request, it's called
2216 * on every new request
2217 */
2218 int (*so_hpreq_handler)(struct ptlrpc_request *);
2219 /**
2220 * service-specific print fn
2221 */
2222 void (*so_req_printer)(void *, struct ptlrpc_request *);
2223};
2224
2225#ifndef __cfs_cacheline_aligned
2226/* NB: put it here to reduce patch dependence */
2227# define __cfs_cacheline_aligned
2228#endif
2229
2230/**
2231 * How many high priority requests to serve before serving one normal
2232 * priority request
2233 */
2234#define PTLRPC_SVC_HP_RATIO 10
2235
2236/**
2237 * Definition of PortalRPC service.
2238 * The service is listening on a particular portal (like tcp port)
2239 * and perform actions for a specific server like IO service for OST
2240 * or general metadata service for MDS.
2241 */
2242struct ptlrpc_service {
2243 /** serialize /proc operations */
2244 spinlock_t srv_lock;
2245 /** most often accessed fields */
2246 /** chain thru all services */
2247 struct list_head srv_list;
2248 /** service operations table */
2249 struct ptlrpc_service_ops srv_ops;
2250 /** only statically allocated strings here; we don't clean them */
2251 char *srv_name;
2252 /** only statically allocated strings here; we don't clean them */
2253 char *srv_thread_name;
2254 /** service thread list */
2255 struct list_head srv_threads;
2256 /** threads # should be created for each partition on initializing */
2257 int srv_nthrs_cpt_init;
2258 /** limit of threads number for each partition */
2259 int srv_nthrs_cpt_limit;
2260 /** Root of /proc dir tree for this service */
2261	struct proc_dir_entry		*srv_procroot;
2262 /** Pointer to statistic data for this service */
2263 struct lprocfs_stats *srv_stats;
2264 /** # hp per lp reqs to handle */
2265 int srv_hpreq_ratio;
2266 /** biggest request to receive */
2267 int srv_max_req_size;
2268 /** biggest reply to send */
2269 int srv_max_reply_size;
2270 /** size of individual buffers */
2271 int srv_buf_size;
2272 /** # buffers to allocate in 1 group */
2273 int srv_nbuf_per_group;
2274 /** Local portal on which to receive requests */
2275 __u32 srv_req_portal;
2276 /** Portal on the client to send replies to */
2277 __u32 srv_rep_portal;
2278 /**
2279 * Tags for lu_context associated with this thread, see struct
2280 * lu_context.
2281 */
2282 __u32 srv_ctx_tags;
2283 /** soft watchdog timeout multiplier */
2284 int srv_watchdog_factor;
2285 /** under unregister_service */
2286 unsigned srv_is_stopping:1;
2287
2288 /** max # request buffers in history per partition */
2289 int srv_hist_nrqbds_cpt_max;
2290 /** number of CPTs this service bound on */
2291 int srv_ncpts;
2292 /** CPTs array this service bound on */
2293 __u32 *srv_cpts;
2294	/** 2^srv_cpt_bits >= cfs_cpt_number(srv_cptable) */
2295 int srv_cpt_bits;
2296 /** CPT table this service is running over */
2297 struct cfs_cpt_table *srv_cptable;
2298 /**
2299 * partition data for ptlrpc service
2300 */
2301 struct ptlrpc_service_part *srv_parts[0];
2302};
2303
2304/**
2305 * Definition of PortalRPC service partition data.
2306 * Although a service only has one instance of it right now, we
2307 * will have multiple instances very soon (instance per CPT).
2308 *
2309 * it has four locks:
2310 * \a scp_lock
2311 * serialize operations on rqbd and requests waiting for preprocess
2312 * \a scp_req_lock
2313 *	serialize operations on active requests sent to this portal
2314 * \a scp_at_lock
2315 * serialize adaptive timeout stuff
2316 * \a scp_rep_lock
2317 * serialize operations on RS list (reply states)
2318 *
2319 * We don't have any use-case to take two or more locks at the same time
2320 * for now, so there is no lock order issue.
2321 */
2322struct ptlrpc_service_part {
2323 /** back reference to owner */
2324 struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
2325 /* CPT id, reserved */
2326 int scp_cpt;
2327 /** always increasing number */
2328 int scp_thr_nextid;
2329 /** # of starting threads */
2330 int scp_nthrs_starting;
2331 /** # of stopping threads, reserved for shrinking threads */
2332 int scp_nthrs_stopping;
2333 /** # running threads */
2334 int scp_nthrs_running;
2335 /** service threads list */
2336 struct list_head scp_threads;
2337
2338 /**
2339 * serialize the following fields, used for protecting
2340 * rqbd list and incoming requests waiting for preprocess,
2341 * threads starting & stopping are also protected by this lock.
2342 */
2343 spinlock_t scp_lock __cfs_cacheline_aligned;
2344 /** total # req buffer descs allocated */
2345 int scp_nrqbds_total;
2346 /** # posted request buffers for receiving */
2347 int scp_nrqbds_posted;
2348 /** in progress of allocating rqbd */
2349 int scp_rqbd_allocating;
2350 /** # incoming reqs */
2351 int scp_nreqs_incoming;
2352 /** request buffers to be reposted */
2353 struct list_head scp_rqbd_idle;
2354 /** req buffers receiving */
2355 struct list_head scp_rqbd_posted;
2356 /** incoming reqs */
2357 struct list_head scp_req_incoming;
2358	/** timeout before re-posting reqs, in ticks */
2359 cfs_duration_t scp_rqbd_timeout;
2360 /**
2361	 * all threads sleep on this. This wait-queue is signalled when a new
2362	 * incoming request arrives and when a difficult reply has to be handled.
2363 */
2364 wait_queue_head_t scp_waitq;
2365
2366 /** request history */
2367 struct list_head scp_hist_reqs;
2368 /** request buffer history */
2369 struct list_head scp_hist_rqbds;
2370 /** # request buffers in history */
2371 int scp_hist_nrqbds;
2372 /** sequence number for request */
2373 __u64 scp_hist_seq;
2374 /** highest seq culled from history */
2375 __u64 scp_hist_seq_culled;
2376
2377 /**
2378 * serialize the following fields, used for processing requests
2379 * sent to this portal
2380 */
2381 spinlock_t scp_req_lock __cfs_cacheline_aligned;
2382 /** # reqs in either of the NRS heads below */
2383 /** # reqs being served */
2384 int scp_nreqs_active;
2385 /** # HPreqs being served */
2386 int scp_nhreqs_active;
2387 /** # hp requests handled */
2388 int scp_hreq_count;
2389
2390 /** NRS head for regular requests */
2391 struct ptlrpc_nrs scp_nrs_reg;
2392 /** NRS head for HP requests; this is only valid for services that can
2393 * handle HP requests */
2394 struct ptlrpc_nrs *scp_nrs_hp;
2395
2396 /** AT stuff */
2397 /** @{ */
2398 /**
2399 * serialize the following fields, used for changes on
2400 * adaptive timeout
2401 */
2402 spinlock_t scp_at_lock __cfs_cacheline_aligned;
2403 /** estimated rpc service time */
2404 struct adaptive_timeout scp_at_estimate;
2405 /** reqs waiting for replies */
2406 struct ptlrpc_at_array scp_at_array;
2407 /** early reply timer */
2408	struct timer_list		scp_at_timer;
2409 /** debug */
2410 cfs_time_t scp_at_checktime;
2411 /** check early replies */
2412 unsigned scp_at_check;
2413 /** @} */
2414
2415 /**
2416 * serialize the following fields, used for processing
2417 * replies for this portal
2418 */
2419 spinlock_t scp_rep_lock __cfs_cacheline_aligned;
2420 /** all the active replies */
2421 struct list_head scp_rep_active;
2422 /** List of free reply_states */
2423 struct list_head scp_rep_idle;
2424 /** waitq to run, when adding stuff to srv_free_rs_list */
2425 wait_queue_head_t scp_rep_waitq;
2426 /** # 'difficult' replies */
2427 atomic_t scp_nreps_difficult;
2428};
2429
2430#define ptlrpc_service_for_each_part(part, i, svc) \
2431 for (i = 0; \
2432 i < (svc)->srv_ncpts && \
2433 (svc)->srv_parts != NULL && \
2434 ((part) = (svc)->srv_parts[i]) != NULL; i++)
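
/*
 * Illustrative sketch (hypothetical helper): iterating every partition
 * of a service with the macro above, here to sum active requests.
 */
static inline int example_count_active_reqs(struct ptlrpc_service *svc)
{
	struct ptlrpc_service_part *part;
	int i;
	int total = 0;

	ptlrpc_service_for_each_part(part, i, svc)
		total += part->scp_nreqs_active;
	return total;
}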
2435
2436/**
2437 * Declaration of ptlrpcd control structure
2438 */
2439struct ptlrpcd_ctl {
2440 /**
2441 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
2442 */
2443 unsigned long pc_flags;
2444 /**
2445 * Thread lock protecting structure fields.
2446 */
2447 spinlock_t pc_lock;
2448 /**
2449 * Start completion.
2450 */
2451 struct completion pc_starting;
2452 /**
2453 * Stop completion.
2454 */
2455 struct completion pc_finishing;
2456 /**
2457 * Thread requests set.
2458 */
2459 struct ptlrpc_request_set *pc_set;
2460 /**
2461 * Thread name used in cfs_daemonize()
2462 */
2463 char pc_name[16];
2464 /**
2465 * Environment for request interpreters to run in.
2466 */
2467 struct lu_env pc_env;
2468 /**
2469 * Index of ptlrpcd thread in the array.
2470 */
2471 int pc_index;
2472 /**
2473 * Number of the ptlrpcd's partners.
2474 */
2475 int pc_npartners;
2476 /**
2477 * Pointer to the array of partners' ptlrpcd_ctl structure.
2478 */
2479 struct ptlrpcd_ctl **pc_partners;
2480 /**
2481 * Record the partner index to be processed next.
2482 */
2483 int pc_cursor;
2484};
2485
2486/* Bits for pc_flags */
2487enum ptlrpcd_ctl_flags {
2488 /**
2489 * Ptlrpc thread start flag.
2490 */
2491 LIOD_START = 1 << 0,
2492 /**
2493 * Ptlrpc thread stop flag.
2494 */
2495 LIOD_STOP = 1 << 1,
2496 /**
2497 * Ptlrpc thread force flag (only stop force so far).
2498 * This will cause aborting any inflight rpcs handled
2499 * by thread if LIOD_STOP is specified.
2500 */
2501 LIOD_FORCE = 1 << 2,
2502 /**
2503 * This is a recovery ptlrpc thread.
2504 */
2505 LIOD_RECOVERY = 1 << 3,
2506 /**
2507 * The ptlrpcd is bound to some CPU core.
2508 */
2509 LIOD_BIND = 1 << 4,
2510};
2511
2512/**
2513 * \addtogroup nrs
2514 * @{
2515 *
2516 * Service compatibility function; the policy is compatible with all services.
2517 *
2518 * \param[in] svc The service the policy is attempting to register with.
2519 * \param[in] desc The policy descriptor
2520 *
2521 * \retval true The policy is compatible with the service
2522 *
2523 * \see ptlrpc_nrs_pol_desc::pd_compat()
2524 */
2525static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
2526 const struct ptlrpc_nrs_pol_desc *desc)
2527{
2528 return true;
2529}
2530
2531/**
2532 * Service compatibility function; the policy is compatible with only a specific
2533 * service which is identified by its human-readable name at
2534 * ptlrpc_service::srv_name.
2535 *
2536 * \param[in] svc The service the policy is attempting to register with.
2537 * \param[in] desc The policy descriptor
2538 *
2539 * \retval false The policy is not compatible with the service
2540 * \retval true The policy is compatible with the service
2541 *
2542 * \see ptlrpc_nrs_pol_desc::pd_compat()
2543 */
2544static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
2545 const struct ptlrpc_nrs_pol_desc *desc)
2546{
2547 LASSERT(desc->pd_compat_svc_name != NULL);
2548 return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
2549}
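
/*
 * Illustrative sketch (hypothetical function): a compatibility check
 * pinned to a single service by name, mirroring what
 * nrs_policy_compat_one() does via pd_compat_svc_name.  "ost_io" is
 * only an example service name.
 */
static inline bool example_nrs_compat_ost_io(const struct ptlrpc_service *svc,
					     const struct ptlrpc_nrs_pol_desc *desc)
{
	return strcmp(svc->srv_name, "ost_io") == 0;
}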
2550
2551/** @} nrs */
2552
2553/* ptlrpc/events.c */
2554extern lnet_handle_eq_t ptlrpc_eq_h;
2555extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
2556 lnet_process_id_t *peer, lnet_nid_t *self);
2557/**
2558 * These callbacks are invoked by LNet when something happened to
2559 * underlying buffer
2560 * @{
2561 */
2562extern void request_out_callback(lnet_event_t *ev);
2563extern void reply_in_callback(lnet_event_t *ev);
2564extern void client_bulk_callback(lnet_event_t *ev);
2565extern void request_in_callback(lnet_event_t *ev);
2566extern void reply_out_callback(lnet_event_t *ev);
2567/** @} */
2568
2569/* ptlrpc/connection.c */
2570struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
2571 lnet_nid_t self,
2572 struct obd_uuid *uuid);
2573int ptlrpc_connection_put(struct ptlrpc_connection *c);
2574struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
2575int ptlrpc_connection_init(void);
2576void ptlrpc_connection_fini(void);
2577extern lnet_pid_t ptl_get_pid(void);
2578
2579/* ptlrpc/niobuf.c */
2580/**
2581 * Actual interfacing with LNet to put/get/register/unregister stuff
2582 * @{
2583 */
2584
2585int ptlrpc_register_bulk(struct ptlrpc_request *req);
2586int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
2587
2588static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
2589{
2590 struct ptlrpc_bulk_desc *desc;
2591 int rc;
2592
2593 LASSERT(req != NULL);
2594 desc = req->rq_bulk;
2595
2596 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
2597 req->rq_bulk_deadline > cfs_time_current_sec())
2598 return 1;
2599
2600 if (!desc)
2601 return 0;
2602
2603 spin_lock(&desc->bd_lock);
2604 rc = desc->bd_md_count;
2605 spin_unlock(&desc->bd_lock);
2606 return rc;
2607}
2608
2609#define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
2610#define PTLRPC_REPLY_EARLY 0x02
2611int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
2612int ptlrpc_reply(struct ptlrpc_request *req);
2613int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
2614int ptlrpc_error(struct ptlrpc_request *req);
2615void ptlrpc_resend_req(struct ptlrpc_request *request);
2616int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
2617int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
2618int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
2619/** @} */
2620
2621/* ptlrpc/client.c */
2622/**
2623 * Client-side portals API. Everything to send requests, receive replies,
2624 * request queues, request management, etc.
2625 * @{
2626 */
2627void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
2628
2629void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
2630 struct ptlrpc_client *);
2631void ptlrpc_cleanup_client(struct obd_import *imp);
2632struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
2633
2634int ptlrpc_queue_wait(struct ptlrpc_request *req);
2635int ptlrpc_replay_req(struct ptlrpc_request *req);
2636int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
2637void ptlrpc_restart_req(struct ptlrpc_request *req);
2638void ptlrpc_abort_inflight(struct obd_import *imp);
2639void ptlrpc_cleanup_imp(struct obd_import *imp);
2640void ptlrpc_abort_set(struct ptlrpc_request_set *set);
2641
2642struct ptlrpc_request_set *ptlrpc_prep_set(void);
2643struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
2644 void *arg);
2645int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
2646 set_interpreter_func fn, void *data);
2647int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
2648int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
2649int ptlrpc_set_wait(struct ptlrpc_request_set *);
2650int ptlrpc_expired_set(void *data);
2651void ptlrpc_interrupted_set(void *data);
2652void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
2653void ptlrpc_set_destroy(struct ptlrpc_request_set *);
2654void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
2655void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
2656 struct ptlrpc_request *req);
2657
2658void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
2659void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
2660
2661struct ptlrpc_request_pool *
2662ptlrpc_init_rq_pool(int, int,
2663 void (*populate_pool)(struct ptlrpc_request_pool *, int));
2664
2665void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
2666struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
2667 const struct req_format *format);
2668struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
2669 struct ptlrpc_request_pool *,
2670 const struct req_format *format);
2671void ptlrpc_request_free(struct ptlrpc_request *request);
2672int ptlrpc_request_pack(struct ptlrpc_request *request,
2673 __u32 version, int opcode);
2674struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
2675 const struct req_format *format,
2676 __u32 version, int opcode);
2677int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
2678 __u32 version, int opcode, char **bufs,
2679 struct ptlrpc_cli_ctx *ctx);
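
/*
 * Illustrative sketch (hypothetical helper): the common
 * allocate-and-pack pattern using ptlrpc_request_alloc_pack() above.
 * RQF_OBD_PING, LUSTRE_OBD_VERSION and OBD_PING are assumed to come
 * from the req_layout/idl headers included at the top of this file; a
 * real caller would also set the expected reply size (e.g. via
 * ptlrpc_request_set_replen(), declared further below) before sending.
 */
static inline struct ptlrpc_request *example_alloc_ping(struct obd_import *imp)
{
	struct ptlrpc_request *req;

	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
					LUSTRE_OBD_VERSION, OBD_PING);
	return req;	/* NULL on allocation/pack failure */
}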
2680struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
2681 int opcode, int count, __u32 *lengths,
2682 char **bufs);
2683struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
2684 __u32 version, int opcode,
2685 int count, __u32 *lengths, char **bufs,
2686 struct ptlrpc_request_pool *pool);
2687void ptlrpc_req_finished(struct ptlrpc_request *request);
2688void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
2689struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
2690struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
2691 unsigned npages, unsigned max_brw,
2692 unsigned type, unsigned portal);
2693void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
2694static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
2695{
2696 __ptlrpc_free_bulk(bulk, 1);
2697}
2698static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
2699{
2700 __ptlrpc_free_bulk(bulk, 0);
2701}
2702void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
2703 struct page *page, int pageoffset, int len, int);
2704static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
2705 struct page *page, int pageoffset,
2706 int len)
2707{
2708 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
2709}
2710
2711static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
2712 struct page *page, int pageoffset,
2713 int len)
2714{
2715 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
2716}
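
/*
 * Illustrative sketch (hypothetical helper): client-side setup of a
 * read bulk, where the server PUTs data into our pages (we are the
 * sink).  Pages are pinned for the lifetime of the transfer.
 */
static inline struct ptlrpc_bulk_desc *
example_prep_read_bulk(struct ptlrpc_request *req, struct page **pages,
		       int npages, int pagelen, unsigned portal)
{
	struct ptlrpc_bulk_desc *desc;
	int i;

	desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_PUT_SINK, portal);
	if (desc == NULL)
		return NULL;

	for (i = 0; i < npages; i++)
		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, pagelen);
	return desc;
}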
2717
2718void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
2719 struct obd_import *imp);
2720__u64 ptlrpc_next_xid(void);
2721__u64 ptlrpc_sample_next_xid(void);
2722__u64 ptlrpc_req_xid(struct ptlrpc_request *request);
2723
2724/* Set of routines to run a function in ptlrpcd context */
2725void *ptlrpcd_alloc_work(struct obd_import *imp,
2726 int (*cb)(const struct lu_env *, void *), void *data);
2727void ptlrpcd_destroy_work(void *handler);
2728int ptlrpcd_queue_work(void *handler);
2729
2730/** @} */
2731struct ptlrpc_service_buf_conf {
2732 /* nbufs is buffers # to allocate when growing the pool */
2733 unsigned int bc_nbufs;
2734 /* buffer size to post */
2735 unsigned int bc_buf_size;
2736	/* portal to listen for requests on */
2737 unsigned int bc_req_portal;
2738	/* portal where replies are sent */
2739 unsigned int bc_rep_portal;
2740 /* maximum request size to be accepted for this service */
2741 unsigned int bc_req_max_size;
2742 /* maximum reply size this service can ever send */
2743 unsigned int bc_rep_max_size;
2744};
2745
2746struct ptlrpc_service_thr_conf {
2747 /* threadname should be 8 characters or less - 6 will be added on */
2748 char *tc_thr_name;
2749 /* threads increasing factor for each CPU */
2750 unsigned int tc_thr_factor;
2751 /* service threads # to start on each partition while initializing */
2752 unsigned int tc_nthrs_init;
2753 /*
2754	 * low-water mark for the per-partition thread upper limit while running;
2755	 * service availability may be impacted if the thread count is lower
2756 * than this value. It can be ZERO if the service doesn't require
2757 * CPU affinity or there is only one partition.
2758 */
2759 unsigned int tc_nthrs_base;
2760 /* "soft" limit for total threads number */
2761 unsigned int tc_nthrs_max;
2762	/* user-specified thread count; it is validated against the
2763	 * other members of this structure. */
2764 unsigned int tc_nthrs_user;
2765 /* set NUMA node affinity for service threads */
2766 unsigned int tc_cpu_affinity;
2767 /* Tags for lu_context associated with service thread */
2768 __u32 tc_ctx_tags;
2769};
2770
2771struct ptlrpc_service_cpt_conf {
2772 struct cfs_cpt_table *cc_cptable;
2773 /* string pattern to describe CPTs for a service */
2774 char *cc_pattern;
2775};
2776
2777struct ptlrpc_service_conf {
2778 /* service name */
2779 char *psc_name;
2780	/* soft watchdog timeout multiplier to print stuck service traces */
2781 unsigned int psc_watchdog_factor;
2782 /* buffer information */
2783 struct ptlrpc_service_buf_conf psc_buf;
2784 /* thread information */
2785 struct ptlrpc_service_thr_conf psc_thr;
2786 /* CPU partition information */
2787 struct ptlrpc_service_cpt_conf psc_cpt;
2788 /* function table */
2789 struct ptlrpc_service_ops psc_ops;
2790};
2791
2792/* ptlrpc/service.c */
2793/**
2794 * Server-side services API. Register/unregister service, request state
2795 * management, service thread management
2796 *
2797 * @{
2798 */
2799void ptlrpc_save_lock(struct ptlrpc_request *req,
2800 struct lustre_handle *lock, int mode, int no_ack);
2801void ptlrpc_commit_replies(struct obd_export *exp);
2802void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
2803void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
2804int ptlrpc_hpreq_handler(struct ptlrpc_request *req);
2805struct ptlrpc_service *ptlrpc_register_service(
2806 struct ptlrpc_service_conf *conf,
2807 struct proc_dir_entry *proc_entry);
2808void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
2809
2810int ptlrpc_start_threads(struct ptlrpc_service *svc);
2811int ptlrpc_unregister_service(struct ptlrpc_service *service);
2812int liblustre_check_services(void *arg);
2813void ptlrpc_daemonize(char *name);
2814int ptlrpc_service_health_check(struct ptlrpc_service *);
2815void ptlrpc_server_drop_request(struct ptlrpc_request *req);
2816void ptlrpc_request_change_export(struct ptlrpc_request *req,
2817 struct obd_export *export);
2818
2819int ptlrpc_hr_init(void);
2820void ptlrpc_hr_fini(void);
2821
2822/** @} */
2823
2824/* ptlrpc/import.c */
2825/**
2826 * Import API
2827 * @{
2828 */
2829int ptlrpc_connect_import(struct obd_import *imp);
2830int ptlrpc_init_import(struct obd_import *imp);
2831int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
2832int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
2833void deuuidify(char *uuid, const char *prefix, char **uuid_start,
2834 int *uuid_len);
2835
2836/* ptlrpc/pack_generic.c */
2837int ptlrpc_reconnect_import(struct obd_import *imp);
2838/** @} */
2839
2840/**
2841 * ptlrpc msg buffer and swab interface
2842 *
2843 * @{
2844 */
2845int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
2846 int index);
2847void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
2848 int index);
2849int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
2850int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
2851
2852int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
2853void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
2854 char **bufs);
2855int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
2856 __u32 *lens, char **bufs);
2857int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
2858 char **bufs);
2859int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
2860 __u32 *lens, char **bufs, int flags);
2861#define LPRFL_EARLY_REPLY 1
2862int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
2863 char **bufs, int flags);
2864int lustre_shrink_msg(struct lustre_msg *msg, int segment,
2865 unsigned int newlen, int move_data);
2866void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
2867int __lustre_unpack_msg(struct lustre_msg *m, int len);
2868int lustre_msg_hdr_size(__u32 magic, int count);
2869int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
2870int lustre_msg_size_v2(int count, __u32 *lengths);
2871int lustre_packed_msg_size(struct lustre_msg *msg);
2872int lustre_msg_early_size(void);
2873void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
2874void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
2875int lustre_msg_buflen(struct lustre_msg *m, int n);
2876void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
2877int lustre_msg_bufcount(struct lustre_msg *m);
2878char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
2879__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
2880void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
2881__u32 lustre_msg_get_flags(struct lustre_msg *msg);
2882void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
2883void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
2884void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
2885__u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
2886void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
2887void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
2888struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
2889__u32 lustre_msg_get_type(struct lustre_msg *msg);
2890__u32 lustre_msg_get_version(struct lustre_msg *msg);
2891void lustre_msg_add_version(struct lustre_msg *msg, int version);
2892__u32 lustre_msg_get_opc(struct lustre_msg *msg);
2893__u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
2894__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
2895__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
2896__u64 lustre_msg_get_transno(struct lustre_msg *msg);
2897__u64 lustre_msg_get_slv(struct lustre_msg *msg);
2898__u32 lustre_msg_get_limit(struct lustre_msg *msg);
2899void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
2900void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
2901int lustre_msg_get_status(struct lustre_msg *msg);
2902__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
2903int lustre_msg_is_v1(struct lustre_msg *msg);
2904__u32 lustre_msg_get_magic(struct lustre_msg *msg);
2905__u32 lustre_msg_get_timeout(struct lustre_msg *msg);
2906__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
2907char *lustre_msg_get_jobid(struct lustre_msg *msg);
2908__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
2909#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
2910__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
2911#else
2912# warning "remove checksum compatibility support for b1_8"
2913__u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
2914#endif
2915void lustre_msg_set_handle(struct lustre_msg *msg, struct lustre_handle *handle);
2916void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
2917void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
2918void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
2919void lustre_msg_set_last_committed(struct lustre_msg *msg, __u64 last_committed);
2920void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
2921void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
2922void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
2923void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
2924void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
2925void ptlrpc_request_set_replen(struct ptlrpc_request *req);
2926void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
2927void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
2928void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
2929void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
2930
2931static inline void
2932lustre_shrink_reply(struct ptlrpc_request *req, int segment,
2933 unsigned int newlen, int move_data)
2934{
2935 LASSERT(req->rq_reply_state);
2936 LASSERT(req->rq_repmsg);
2937 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
2938 newlen, move_data);
2939}
2940
2941#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
2942
2943static inline int ptlrpc_status_hton(int h)
2944{
2945 /*
2946 * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
2947 * ELDLM_LOCK_ABORTED, etc.
2948 */
2949 if (h < 0)
2950 return -lustre_errno_hton(-h);
2951 else
2952 return h;
2953}
2954
2955static inline int ptlrpc_status_ntoh(int n)
2956{
2957 /*
2958 * See the comment in ptlrpc_status_hton().
2959 */
2960 if (n < 0)
2961 return -lustre_errno_ntoh(-n);
2962 else
2963 return n;
2964}
2965
2966#else
2967
2968#define ptlrpc_status_hton(h) (h)
2969#define ptlrpc_status_ntoh(n) (n)
2970
2971#endif
2972/** @} */
2973
2974/** Change request phase of \a req to \a new_phase */
2975static inline void
2976ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
2977{
2978 if (req->rq_phase == new_phase)
2979 return;
2980
2981 if (new_phase == RQ_PHASE_UNREGISTERING) {
2982 req->rq_next_phase = req->rq_phase;
2983 if (req->rq_import)
2984 atomic_inc(&req->rq_import->imp_unregistering);
2985 }
2986
2987 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
2988 if (req->rq_import)
2989 atomic_dec(&req->rq_import->imp_unregistering);
2990 }
2991
2992 DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
2993 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
2994
2995 req->rq_phase = new_phase;
2996}
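
/*
 * Illustrative sketch (hypothetical helper): moving into
 * RQ_PHASE_UNREGISTERING stashes the current phase in rq_next_phase;
 * once the network buffers are unlinked the request resumes there.
 */
static inline void example_park_for_unregister(struct ptlrpc_request *req)
{
	ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
	/* ... wait for reply/bulk MDs to be unlinked ... */
	ptlrpc_rqphase_move(req, req->rq_next_phase);
}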
2997
2998/**
2999 * Returns true if request \a req got early reply and hard deadline is not met
3000 */
3001static inline int
3002ptlrpc_client_early(struct ptlrpc_request *req)
3003{
3004 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3005 req->rq_reply_deadline > cfs_time_current_sec())
3006 return 0;
3007 return req->rq_early;
3008}
3009
3010/**
3011 * Returns true if we got real reply from server for this request
3012 */
3013static inline int
3014ptlrpc_client_replied(struct ptlrpc_request *req)
3015{
3016 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3017 req->rq_reply_deadline > cfs_time_current_sec())
3018 return 0;
3019 return req->rq_replied;
3020}
3021
3022/** Returns true if request \a req is in process of receiving server reply */
3023static inline int
3024ptlrpc_client_recv(struct ptlrpc_request *req)
3025{
3026 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3027 req->rq_reply_deadline > cfs_time_current_sec())
3028 return 1;
3029 return req->rq_receiving_reply;
3030}
3031
3032static inline int
3033ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
3034{
3035 int rc;
3036
3037 spin_lock(&req->rq_lock);
3038 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
3039 req->rq_reply_deadline > cfs_time_current_sec()) {
3040 spin_unlock(&req->rq_lock);
3041 return 1;
3042 }
3043 rc = req->rq_receiving_reply;
3044 rc = rc || req->rq_req_unlink || req->rq_reply_unlink;
3045 spin_unlock(&req->rq_lock);
3046 return rc;
3047}
3048
3049static inline void
3050ptlrpc_client_wake_req(struct ptlrpc_request *req)
3051{
3052 if (req->rq_set == NULL)
3053 wake_up(&req->rq_reply_waitq);
3054 else
3055 wake_up(&req->rq_set->set_waitq);
3056}
3057
3058static inline void
3059ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
3060{
3061 LASSERT(atomic_read(&rs->rs_refcount) > 0);
3062 atomic_inc(&rs->rs_refcount);
3063}
3064
3065static inline void
3066ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
3067{
3068 LASSERT(atomic_read(&rs->rs_refcount) > 0);
3069 if (atomic_dec_and_test(&rs->rs_refcount))
3070 lustre_free_reply_state(rs);
3071}
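
/*
 * Illustrative sketch (hypothetical helper): reply states follow the
 * usual take-a-reference-before-sharing pattern; the final
 * ptlrpc_rs_decref() frees the state via lustre_free_reply_state().
 */
static inline void example_rs_borrow(struct ptlrpc_reply_state *rs)
{
	ptlrpc_rs_addref(rs);		/* pin while we work on it */
	/* ... read or update the reply state ... */
	ptlrpc_rs_decref(rs);		/* may free rs */
}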
3072
3073/* Should only be called once per req */
3074static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
3075{
3076 if (req->rq_reply_state == NULL)
3077 return; /* shouldn't occur */
3078 ptlrpc_rs_decref(req->rq_reply_state);
3079 req->rq_reply_state = NULL;
3080 req->rq_repmsg = NULL;
3081}
3082
3083static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
3084{
3085 return lustre_msg_get_magic(req->rq_reqmsg);
3086}
3087
3088static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
3089{
3090 switch (req->rq_reqmsg->lm_magic) {
3091 case LUSTRE_MSG_MAGIC_V2:
3092 return req->rq_reqmsg->lm_repsize;
3093 default:
3094 LASSERTF(0, "incorrect message magic: %08x\n",
3095 req->rq_reqmsg->lm_magic);
3096 return -EFAULT;
3097 }
3098}
3099
3100static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
3101{
3102 if (req->rq_delay_limit != 0 &&
3103 cfs_time_before(cfs_time_add(req->rq_queued_time,
3104 cfs_time_seconds(req->rq_delay_limit)),
3105 cfs_time_current())) {
3106 return 1;
3107 }
3108 return 0;
3109}
3110
3111static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
3112{
3113 if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
3114 spin_lock(&req->rq_lock);
3115 req->rq_no_resend = 1;
3116 spin_unlock(&req->rq_lock);
3117 }
3118 return req->rq_no_resend;
3119}
3120
3121static inline int
3122ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
3123{
3124 int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);
3125
3126 return svcpt->scp_service->srv_watchdog_factor *
3127 max_t(int, at, obd_timeout);
3128}
3129
3130static inline struct ptlrpc_service *
3131ptlrpc_req2svc(struct ptlrpc_request *req)
3132{
3133 LASSERT(req->rq_rqbd != NULL);
3134 return req->rq_rqbd->rqbd_svcpt->scp_service;
3135}
3136
3137/* ldlm/ldlm_lib.c */
3138/**
3139 * Target client logic
3140 * @{
3141 */
3142int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
3143int client_obd_cleanup(struct obd_device *obddev);
3144int client_connect_import(const struct lu_env *env,
3145 struct obd_export **exp, struct obd_device *obd,
3146 struct obd_uuid *cluuid, struct obd_connect_data *,
3147 void *localdata);
3148int client_disconnect_export(struct obd_export *exp);
3149int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
3150 int priority);
3151int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
3152int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
3153 struct obd_uuid *uuid);
3154int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
3155void client_destroy_import(struct obd_import *imp);
3156/** @} */
3157
3158
3159/* ptlrpc/pinger.c */
3160/**
3161 * Pinger API (client side only)
3162 * @{
3163 */
3164enum timeout_event {
3165 TIMEOUT_GRANT = 1
3166};
3167struct timeout_item;
3168typedef int (*timeout_cb_t)(struct timeout_item *, void *);
3169int ptlrpc_pinger_add_import(struct obd_import *imp);
3170int ptlrpc_pinger_del_import(struct obd_import *imp);
3171int ptlrpc_add_timeout_client(int time, enum timeout_event event,
3172 timeout_cb_t cb, void *data,
3173 struct list_head *obd_list);
3174int ptlrpc_del_timeout_client(struct list_head *obd_list,
3175 enum timeout_event event);
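
/*
 * Illustrative sketch (hypothetical callback and helper): registering
 * a periodic client timeout with the pinger.  Only TIMEOUT_GRANT, the
 * timeout_cb_t signature and ptlrpc_add_timeout_client() come from the
 * declarations above; the 30 second interval is arbitrary.
 */
static inline int example_grant_cb(struct timeout_item *item, void *data)
{
	/* refresh grants for the obd passed in \a data */
	return 0;
}

static inline int example_register_grant_timeout(void *obd_data,
						 struct list_head *obd_list)
{
	return ptlrpc_add_timeout_client(30, TIMEOUT_GRANT, example_grant_cb,
					 obd_data, obd_list);
}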
3176struct ptlrpc_request *ptlrpc_prep_ping(struct obd_import *imp);
3177int ptlrpc_obd_ping(struct obd_device *obd);
3178void ping_evictor_start(void);
3179void ping_evictor_stop(void);
3180void ptlrpc_pinger_ir_up(void);
3181void ptlrpc_pinger_ir_down(void);
3182/** @} */
3183int ptlrpc_pinger_suppress_pings(void);
3184
3185/* ptlrpc daemon bind policy */
3186typedef enum {
3187 /* all ptlrpcd threads are free mode */
3188 PDB_POLICY_NONE = 1,
3189 /* all ptlrpcd threads are bound mode */
3190 PDB_POLICY_FULL = 2,
3191 /* <free1 bound1> <free2 bound2> ... <freeN boundN> */
3192 PDB_POLICY_PAIR = 3,
3193 /* <free1 bound1> <bound1 free2> ... <freeN boundN> <boundN free1>,
3194 * means each ptlrpcd[X] has two partners: thread[X-1] and thread[X+1].
3195 * If the kernel supports NUMA, ptlrpcd threads are bound and
3196 * grouped by NUMA node */
3197 PDB_POLICY_NEIGHBOR = 4,
3198} pdb_policy_t;
3199
3200/* ptlrpc daemon load policy
3201 * It is the caller's duty to specify how to push the async RPC into some
3202 * ptlrpcd queue, but this is not enforced; it is affected by
3203 * "ptlrpcd_bind_policy". If that is "PDB_POLICY_FULL", the RPC will be
3204 * processed by the selected ptlrpcd. Otherwise the RPC may be processed by
3205 * the selected ptlrpcd or its partner, whichever is scheduled first, to
3205 * accelerate RPC processing. */
3206typedef enum {
3207 /* on the same CPU core as the caller */
3208 PDL_POLICY_SAME = 1,
3209 /* within the same CPU partition, but not the same core as the caller */
3210 PDL_POLICY_LOCAL = 2,
3211 /* round-robin on all CPU cores, but not the same core as the caller */
3212 PDL_POLICY_ROUND = 3,
3213 /* the specified CPU core is preferred, but not enforced */
3214 PDL_POLICY_PREFERRED = 4,
3215} pdl_policy_t;
3216
3217/* ptlrpc/ptlrpcd.c */
3218void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
3219void ptlrpcd_free(struct ptlrpcd_ctl *pc);
3220void ptlrpcd_wake(struct ptlrpc_request *req);
3221void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx);
3222void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
3223int ptlrpcd_addref(void);
3224void ptlrpcd_decref(void);
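
/*
 * Illustrative sketch (hypothetical helper): queueing an async RPC
 * through ptlrpcd with round-robin placement.  Passing -1 as the index
 * (meaning "no preferred thread") is an assumption about the calling
 * convention, not stated in this header.
 */
static inline void example_queue_async_req(struct ptlrpc_request *req)
{
	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
}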
3225
3226/* ptlrpc/lproc_ptlrpc.c */
3227/**
3228 * procfs output related functions
3229 * @{
3230 */
3231const char *ll_opcode2str(__u32 opcode);
3232#ifdef LPROCFS
3233void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
3234void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
3235void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
3236#else
3237static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {}
3238static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {}
3239static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {}
3240#endif
3241/** @} */
3242
3243/* ptlrpc/llog_client.c */
3244extern struct llog_operations llog_client_ops;
3245
3246/** @} net */
3247
3248#endif
3249/** @} PtlRPC */