/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/** \defgroup PtlRPC Portal RPC and networking module.
 *
 * PortalRPC is the layer used by the rest of the Lustre code to achieve
 * network communication: it establishes connections with corresponding
 * export and import states, listens for a service, and sends and receives
 * RPCs.
 * PortalRPC also includes the base recovery framework: packet resending and
 * replaying, reconnections, and the pinger.
 *
 * PortalRPC uses LNet as its transport layer.
 *
 * @{
 */

#ifndef _LUSTRE_NET_H
#define _LUSTRE_NET_H

/** \defgroup net net
 *
 * @{
 */

#include "../../include/linux/libcfs/libcfs.h"
// #include <obd.h>
#include "../../include/linux/lnet/lnet.h"
#include "lustre/lustre_idl.h"
#include "lustre_ha.h"
#include "lustre_sec.h"
#include "lustre_import.h"
#include "lprocfs_status.h"
#include "lu_object.h"
#include "lustre_req_layout.h"

#include "obd_support.h"
#include "lustre_ver.h"

/* MD flags we _always_ use */
#define PTLRPC_MD_OPTIONS  0

/**
 * Max # of bulk operations in one request.
 * In order for the client and server to properly negotiate the maximum
 * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
 * value. The client is free to limit the actual RPC size for any bulk
 * transfer via cl_max_pages_per_rpc to some non-power-of-two value. */
#define PTLRPC_BULK_OPS_BITS	2
#define PTLRPC_BULK_OPS_COUNT	(1U << PTLRPC_BULK_OPS_BITS)
/**
 * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
 * should not be used on the server at all. Otherwise, it imposes a
 * protocol limitation on the maximum RPC size that can be used by any
 * RPC sent to that server in the future. Instead, the server should
 * use the negotiated per-client ocd_brw_size to determine the bulk
 * RPC count. */
#define PTLRPC_BULK_OPS_MASK	(~((__u64)PTLRPC_BULK_OPS_COUNT - 1))

/**
 * Define maxima for bulk I/O.
 *
 * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
 * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
 * currently supported maximum between peers at connect via ocd_brw_size.
 */
#define PTLRPC_MAX_BRW_BITS	(LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
#define PTLRPC_MAX_BRW_SIZE	(1 << PTLRPC_MAX_BRW_BITS)
#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)

#define ONE_MB_BRW_SIZE		(1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE		(1 << LNET_MTU_BITS)
#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define DT_MAX_BRW_SIZE		PTLRPC_MAX_BRW_SIZE
#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define OFD_MAX_BRW_SIZE	(1 << LNET_MTU_BITS)
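
/*
 * Worked example (illustrative, assuming LNET_MTU_BITS is 20, i.e. a 1 MiB
 * LNet MTU): PTLRPC_MAX_BRW_BITS = 20 + 2 = 22, so PTLRPC_MAX_BRW_SIZE is
 * 4 MiB, carried as up to PTLRPC_BULK_OPS_COUNT (4) LNET_MTU-sized RDMA
 * fragments.
 */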

/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
#  error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
#  error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
#  error "PTLRPC_MAX_BRW_SIZE too big"
# endif
# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
#  error "PTLRPC_MAX_BRW_PAGES too big"
# endif

#define PTLRPC_NTHRS_INIT	2

/**
 * Buffer Constants
 *
 * Constants determine how memory is used to buffer incoming service requests.
 *
 * ?_NBUFS	  # buffers to allocate when growing the pool
 * ?_BUFSIZE	  # bytes in a single request buffer
 * ?_MAXREQSIZE   # maximum request service will receive
 *
 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
 * of ?_NBUFS is added to the pool.
 *
 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
 * considered full when less than ?_MAXREQSIZE is left in them.
 */
/**
 * Thread Constants
 *
 * Constants determine how threads are created for a ptlrpc service.
 *
 * ?_NTHRS_INIT	   # threads to create for each service partition at
 *		   initialization. If it's a non-affinity service and
 *		   there is only one partition, it's the overall number
 *		   of threads for the service at initialization.
 * ?_NTHRS_BASE	   minimum # threads to create for each ptlrpc partition
 *		   to keep the service healthy. It's the low-water mark
 *		   of the threads upper-limit for each partition.
 * ?_THR_FACTOR	   # threads that can be added to the threads upper-limit
 *		   per CPU core. This factor is only for reference; we
 *		   might decrease it if the number of cores per CPT is
 *		   above a limit.
 * ?_NTHRS_MAX	   overall # threads that can be created for a service.
 *		   It's a soft limit because if the service is running on
 *		   a machine with hundreds of cores and tens of CPU
 *		   partitions, we need to guarantee that each partition
 *		   has ?_NTHRS_BASE threads, which means the total will be
 *		   ?_NTHRS_BASE * number_of_cpts and can exceed
 *		   ?_NTHRS_MAX.
 *
 * Examples
 *
 * #define MDS_NTHRS_INIT	2
 * #define MDS_NTHRS_BASE	64
 * #define MDS_NTHRS_FACTOR	8
 * #define MDS_NTHRS_MAX	1024
 *
 * Example 1):
 * ---------------------------------------------------------------------
 * Server (A) has 16 cores and the user configured it with 4 partitions,
 * so each partition has 4 cores. The actual number of service threads on
 * each partition is:
 *	MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
 *
 * The total number of threads for the service is:
 *	96 * partitions(4) = 384
 *
 * Example 2):
 * ---------------------------------------------------------------------
 * Server (B) has 32 cores and the user configured it with 4 partitions,
 * so each partition has 8 cores. The actual number of service threads on
 * each partition is:
 *	MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
 *
 * The total number of threads for the service is:
 *	128 * partitions(4) = 512
 *
 * Example 3):
 * ---------------------------------------------------------------------
 * Server (B) has 96 cores and the user configured it with 8 partitions,
 * so each partition has 12 cores. The actual number of service threads on
 * each partition would be:
 *	MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
 *
 * The total number of threads for the service would be:
 *	160 * partitions(8) = 1280
 *
 * However, this is above the soft limit MDS_NTHRS_MAX, so we choose this
 * number as the upper limit of threads for each partition:
 *	MDS_NTHRS_MAX(1024) / partitions(8) = 128
 *
 * Example 4):
 * ---------------------------------------------------------------------
 * Server (C) has a thousand cores and the user configured it with 32
 * partitions:
 *	MDS_NTHRS_BASE(64) * 32 = 2048
 *
 * which is already above the soft limit MDS_NTHRS_MAX(1024), but we still
 * need to guarantee that each partition has at least MDS_NTHRS_BASE(64)
 * threads to keep the service healthy, so the total number of threads will
 * just be 2048.
 *
 * NB: we don't suggest choosing a server with that many cores because the
 * backend filesystem itself, the buffer cache, or the underlying network
 * stack might have SMP scalability issues at that large scale.
 *
 * If the user already has a fat machine with hundreds or thousands of
 * cores, there are two configuration choices:
 * a) create a CPU table from a subset of all CPUs and run Lustre on
 *    top of this subset
 * b) bind service threads to a few partitions; see the module parameters
 *    of the MDS and OSS for details
 *
 * NB: these calculations (and the examples below) are simplified to help
 * understanding; the real implementation is a little more complex, see
 * ptlrpc_server_nthreads_check() for details. A simplified sketch of the
 * rule follows this comment.
 */

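/*
 * A minimal sketch (illustrative only, not used by the code below; the
 * function name is hypothetical) of the simplified per-partition
 * thread-count rule described above. The real logic lives in
 * ptlrpc_server_nthreads_check() and is more involved.
 */
static inline int ptlrpc_example_nthrs_per_cpt(int nthrs_base, int thr_factor,
					       int nthrs_max, int ncores,
					       int ncpts)
{
	int nthrs = nthrs_base + (ncores / ncpts) * thr_factor;

	/* Respect the soft overall limit when possible... */
	if (nthrs * ncpts > nthrs_max)
		nthrs = nthrs_max / ncpts;
	/* ...but never drop below the per-partition low-water mark. */
	return nthrs < nthrs_base ? nthrs_base : nthrs;
}
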
/*
 * LDLM threads constants:
 *
 * Given 8 as the factor and 24 as the base threads number
 *
 * example 1)
 * On a 4-core machine we will have 24 + 8 * 4 = 56 threads.
 *
 * example 2)
 * On an 8-core machine with 2 partitions we will have 24 + 4 * 8 = 56
 * threads for each partition, and the total threads number will be 112.
 *
 * example 3)
 * On a 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
 * threads for each partition to keep the service healthy, so the total
 * threads number should be 24 * 8 = 192.
 *
 * So with these constants, the threads number will be at a similar level
 * to old versions, unless the target machine has over a hundred cores
 */
#define LDLM_THR_FACTOR		8
#define LDLM_NTHRS_INIT		PTLRPC_NTHRS_INIT
#define LDLM_NTHRS_BASE		24
#define LDLM_NTHRS_MAX		(num_online_cpus() == 1 ? 64 : 128)

#define LDLM_BL_THREADS	  LDLM_NTHRS_AUTO_INIT
#define LDLM_CLIENT_NBUFS 1
#define LDLM_SERVER_NBUFS 64
#define LDLM_BUFSIZE	  (8 * 1024)
#define LDLM_MAXREQSIZE	  (5 * 1024)
#define LDLM_MAXREPSIZE	  (1024)

#define MDS_MAXREQSIZE	(5 * 1024)	/* >= 4736 */

#define OST_MAXREQSIZE	(5 * 1024)

/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)

/**
 * Structure to define a single portal connection.
 */
struct ptlrpc_connection {
	/** linkage for connections hash table */
	struct hlist_node	c_hash;
	/** Our own lnet nid for this connection */
	lnet_nid_t		c_self;
	/** Remote side nid for this connection */
	lnet_process_id_t	c_peer;
	/** UUID of the other side */
	struct obd_uuid		c_remote_uuid;
	/** reference counter for this connection */
	atomic_t		c_refcount;
};

/** Client definition for PortalRPC */
struct ptlrpc_client {
	/** What lnet portal does this client send messages to by default */
	__u32	cli_request_portal;
	/** What portal do we expect replies on */
	__u32	cli_reply_portal;
	/** Name of the client */
	char	*cli_name;
};

/** state flags of requests */
/* XXX only ones left are those used by the bulk descs as well! */
#define PTL_RPC_FL_INTR	   (1 << 0)  /* reply wait was interrupted by user */
#define PTL_RPC_FL_TIMEOUT (1 << 7)  /* request timed out waiting for reply */

#define REQ_MAX_ACK_LOCKS 8

union ptlrpc_async_args {
	/**
	 * Scratchpad for passing args to completion interpreter. Users
	 * cast to the struct of their choosing, and CLASSERT that this is
	 * big enough. For _tons_ of context, OBD_ALLOC a struct and store
	 * a pointer to it here. The pointer_arg ensures this struct is at
	 * least big enough for that.
	 */
	void	*pointer_arg[11];
	__u64	space[7];
};
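
/*
 * A hedged usage sketch of the scratchpad pattern described above;
 * "my_async_args" and its fields are hypothetical caller-side names,
 * not part of this header:
 *
 *	struct my_async_args {
 *		struct obd_export *aa_exp;
 *		int		   aa_flags;
 *	};
 *
 *	CLASSERT(sizeof(struct my_async_args) <=
 *		 sizeof(union ptlrpc_async_args));
 *	aa = ptlrpc_req_async_args(req);
 *	aa->aa_exp = exp;
 */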

struct ptlrpc_request_set;
typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);

/**
 * Definition of request set structure.
 * A request set is a list of requests (not necessarily to the same target)
 * that, once populated with RPCs, can be sent in parallel.
 * There are two kinds of request sets: general purpose, and with a
 * dedicated serving thread. An example of the latter is the ptlrpcd set.
 * For general purpose sets, once the set has started sending it is
 * impossible to add new requests to it.
 * Provides a way to call "completion callbacks" when all requests in the
 * set have returned.
 */
struct ptlrpc_request_set {
	atomic_t		set_refcount;
	/** number of in queue requests */
	atomic_t		set_new_count;
	/** number of uncompleted requests */
	atomic_t		set_remaining;
	/** wait queue to wait on for request events */
	wait_queue_head_t	set_waitq;
	wait_queue_head_t      *set_wakeup_ptr;
	/** List of requests in the set */
	struct list_head	set_requests;
	/**
	 * List of completion callbacks to be called when the set is
	 * completed. This is only used if \a set_interpret is NULL.
	 * Links struct ptlrpc_set_cbdata.
	 */
	struct list_head	set_cblist;
	/** Completion callback, if only one. */
	set_interpreter_func	set_interpret;
	/** opaque argument passed to completion \a set_interpret callback. */
	void		       *set_arg;
	/**
	 * Lock for \a set_new_requests manipulations
	 * locked so that any old caller can communicate requests to
	 * the set holder who can then fold them into the lock-free set
	 */
	spinlock_t		set_new_req_lock;
	/** List of new yet unsent requests. Only used with ptlrpcd now. */
	struct list_head	set_new_requests;

	/** rq_status of requests that have been freed already */
	int			set_rc;
	/** Additional fields used by the flow control extension */
	/** Maximum number of RPCs in flight */
	int			set_max_inflight;
	/** Callback function used to generate RPCs */
	set_producer_func	set_producer;
	/** opaque argument passed to the producer callback */
	void		       *set_producer_arg;
};
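
/*
 * Hedged usage sketch for a general-purpose request set. The function names
 * below are the ptlrpc helpers this writer believes exist (ptlrpc_prep_set,
 * ptlrpc_set_add_req, ptlrpc_set_wait, ptlrpc_set_destroy); treat the whole
 * sequence as illustrative:
 *
 *	struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 *	ptlrpc_set_add_req(set, req1);
 *	ptlrpc_set_add_req(set, req2);
 *	rc = ptlrpc_set_wait(set);	(sends in parallel, waits for all)
 *	ptlrpc_set_destroy(set);
 */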

/**
 * Description of a single ptlrpc_set callback
 */
struct ptlrpc_set_cbdata {
	/** List linkage item */
	struct list_head	psc_item;
	/** Pointer to interpreting function */
	set_interpreter_func	psc_interpret;
	/** Opaque argument to pass to the callback */
	void		       *psc_data;
};

struct ptlrpc_bulk_desc;
struct ptlrpc_service_part;
struct ptlrpc_service;

/**
 * ptlrpc callback & work item stuff
 */
struct ptlrpc_cb_id {
	void (*cbid_fn)(lnet_event_t *ev);	/* specific callback fn */
	void *cbid_arg;				/* additional arg */
};

/** Maximum number of locks to fit into reply state */
#define RS_MAX_LOCKS 8
#define RS_DEBUG     0

/**
 * Structure to define reply state on the server
 * Reply state holds various reply message information. Also for "difficult"
 * replies (rep-ack case) we store the state after sending the reply and wait
 * for the client to acknowledge its reception. In these cases locks can be
 * added to the state for replay/failover consistency guarantees.
 */
struct ptlrpc_reply_state {
	/** Callback description */
	struct ptlrpc_cb_id	rs_cb_id;
	/** Linkage for list of all reply states in a system */
	struct list_head	rs_list;
	/** Linkage for list of all reply states on same export */
	struct list_head	rs_exp_list;
	/** Linkage for list of all reply states for same obd */
	struct list_head	rs_obd_list;
#if RS_DEBUG
	struct list_head	rs_debug_list;
#endif
	/** A spinlock to protect the reply state flags */
	spinlock_t		rs_lock;
	/** Reply state flags */
	unsigned long		rs_difficult:1;	/* ACK/commit stuff */
	unsigned long		rs_no_ack:1;	/* no ACK, even for
						   difficult requests */
	unsigned long		rs_scheduled:1;	/* being handled? */
	unsigned long		rs_scheduled_ever:1; /* any schedule attempts? */
	unsigned long		rs_handled:1;	/* been handled yet? */
	unsigned long		rs_on_net:1;	/* reply_out_callback pending? */
	unsigned long		rs_prealloc:1;	/* rs from prealloc list */
	unsigned long		rs_committed:1;	/* the transaction was committed
						   and the rs was dispatched
						   by ptlrpc_commit_replies */
	/** Size of the state */
	int			rs_size;
	/** opcode */
	__u32			rs_opc;
	/** Transaction number */
	__u64			rs_transno;
	/** xid */
	__u64			rs_xid;
	struct obd_export      *rs_export;
	struct ptlrpc_service_part *rs_svcpt;
	/** Lnet metadata handle for the reply */
	lnet_handle_md_t	rs_md_h;
	atomic_t		rs_refcount;

	/** Context for the service thread */
	struct ptlrpc_svc_ctx  *rs_svc_ctx;
	/** Reply buffer (actually sent to the client), encoded if needed */
	struct lustre_msg      *rs_repbuf;	/* wrapper */
	/** Size of the reply buffer */
	int			rs_repbuf_len;	/* wrapper buf length */
	/** Size of the reply message */
	int			rs_repdata_len;	/* wrapper msg length */
	/**
	 * Actual reply message. Its content is encrypted (if needed) to
	 * produce the reply buffer for actual sending. In the simple case
	 * of no network encryption we just set \a rs_repbuf to \a rs_msg
	 */
	struct lustre_msg      *rs_msg;		/* reply message */

	/** Number of locks awaiting client ACK */
	int			rs_nlocks;
	/** Handles of locks awaiting client reply ACK */
	struct lustre_handle	rs_locks[RS_MAX_LOCKS];
	/** Lock modes of locks in \a rs_locks */
	ldlm_mode_t		rs_modes[RS_MAX_LOCKS];
};

struct ptlrpc_thread;

/** RPC stages */
enum rq_phase {
	RQ_PHASE_NEW		= 0xebc0de00,
	RQ_PHASE_RPC		= 0xebc0de01,
	RQ_PHASE_BULK		= 0xebc0de02,
	RQ_PHASE_INTERPRET	= 0xebc0de03,
	RQ_PHASE_COMPLETE	= 0xebc0de04,
	RQ_PHASE_UNREGISTERING	= 0xebc0de05,
	RQ_PHASE_UNDEFINED	= 0xebc0de06
};

/** Type of request interpreter call-back */
typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
				    struct ptlrpc_request *req,
				    void *arg, int rc);

/**
 * Definition of request pool structure.
 * The pool is used to store empty preallocated requests for the case
 * when we would actually need to send something without performing
 * any allocations (to avoid e.g. OOM).
 */
struct ptlrpc_request_pool {
	/** Locks the list */
	spinlock_t		prp_lock;
	/** list of ptlrpc_request structs */
	struct list_head	prp_req_list;
	/** Maximum message size that would fit into a request from this pool */
	int			prp_rq_size;
	/** Function to allocate more requests for this pool */
	void (*prp_populate)(struct ptlrpc_request_pool *, int);
};
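
/*
 * A hedged sketch (in comment form, since struct ptlrpc_request is only
 * defined further below) of a prp_populate callback with a hypothetical
 * name: preallocate @num requests and put them on the pool's list. The
 * real populate logic also preallocates a message buffer of prp_rq_size
 * per request, which this sketch omits for brevity.
 *
 *	static void example_pool_populate(struct ptlrpc_request_pool *pool,
 *					  int num)
 *	{
 *		while (num-- > 0) {
 *			struct ptlrpc_request *req;
 *
 *			req = kzalloc(sizeof(*req), GFP_NOFS);
 *			if (req == NULL)
 *				return;
 *			spin_lock(&pool->prp_lock);
 *			list_add_tail(&req->rq_list, &pool->prp_req_list);
 *			spin_unlock(&pool->prp_lock);
 *		}
 *	}
 */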

struct lu_context;
struct lu_env;

struct ldlm_lock;

/**
 * \defgroup nrs Network Request Scheduler
 * @{
 */
struct ptlrpc_nrs_policy;
struct ptlrpc_nrs_resource;
struct ptlrpc_nrs_request;

/**
 * NRS control operations.
 *
 * These are common for all policies.
 */
enum ptlrpc_nrs_ctl {
	/**
	 * Not a valid opcode.
	 */
	PTLRPC_NRS_CTL_INVALID,
	/**
	 * Activate the policy.
	 */
	PTLRPC_NRS_CTL_START,
	/**
	 * Reserved for multiple primary policies, which may be a possibility
	 * in the future.
	 */
	PTLRPC_NRS_CTL_STOP,
	/**
	 * Policies can start using opcodes from this value and onwards for
	 * their own purposes; the assigned value itself is arbitrary.
	 */
	PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
};

/**
 * ORR policy operations
 */
enum nrs_ctl_orr {
	NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
	NRS_CTL_ORR_WR_QUANTUM,
	NRS_CTL_ORR_RD_OFF_TYPE,
	NRS_CTL_ORR_WR_OFF_TYPE,
	NRS_CTL_ORR_RD_SUPP_REQ,
	NRS_CTL_ORR_WR_SUPP_REQ,
};

/**
 * NRS policy operations.
 *
 * These determine the behaviour of a policy, and are called in response to
 * NRS core events.
 */
struct ptlrpc_nrs_pol_ops {
	/**
	 * Called during policy registration; this operation is optional.
	 *
	 * \param[in,out] policy The policy being initialized
	 */
	int	(*op_policy_init) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Called during policy unregistration; this operation is optional.
	 *
	 * \param[in,out] policy The policy being unregistered/finalized
	 */
	void	(*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Called when activating a policy via lprocfs; policies allocate and
	 * initialize their resources here; this operation is optional.
	 *
	 * \param[in,out] policy The policy being started
	 *
	 * \see nrs_policy_start_locked()
	 */
	int	(*op_policy_start) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Called when deactivating a policy via lprocfs; policies deallocate
	 * their resources here; this operation is optional
	 *
	 * \param[in,out] policy The policy being stopped
	 *
	 * \see nrs_policy_stop0()
	 */
	void	(*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
	/**
	 * Used for policy-specific operations; i.e. not generic ones like
	 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
	 * to an ioctl; this operation is optional.
	 *
	 * \param[in,out] policy The policy carrying out operation \a opc
	 * \param[in]	  opc	 The command operation being carried out
	 * \param[in,out] arg	 A generic buffer for communication between
	 *			 the user and the control operation
	 *
	 * \retval -ve error
	 * \retval   0 success
	 *
	 * \see ptlrpc_nrs_policy_control()
	 */
	int	(*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
				  enum ptlrpc_nrs_ctl opc, void *arg);

	/**
	 * Called when obtaining references to the resources of the resource
	 * hierarchy for a request that has arrived for handling at the PTLRPC
	 * service. Policies should return -ve for requests they do not wish
	 * to handle. This operation is mandatory.
	 *
	 * \param[in,out] policy	The policy we're getting resources for.
	 * \param[in,out] nrq		The request we are getting resources for.
	 * \param[in]	  parent	The parent resource of the resource being
	 *				requested; set to NULL if none.
	 * \param[out]	  resp		The resource is to be returned here; the
	 *				fallback policy in an NRS head should
	 *				\e always return a non-NULL pointer value.
	 * \param[in]	  moving_req	When set, signifies that this is an
	 *				attempt to obtain resources for a request
	 *				being moved to the high-priority NRS head
	 *				by ldlm_lock_reorder_req().
	 *				This implies two things:
	 *				1. We are under obd_export::exp_rpc_lock
	 *				and so should not sleep.
	 *				2. We should not perform non-idempotent
	 *				operations, and may skip idempotent
	 *				operations that were already carried out
	 *				when resources were first taken for the
	 *				request as it was initialized in
	 *				ptlrpc_nrs_req_initialize().
	 *
	 * \retval 0, +ve The level of the returned resource in the resource
	 *		  hierarchy; currently only 0 (for a non-leaf resource)
	 *		  and 1 (for a leaf resource) are supported by the
	 *		  framework.
	 * \retval -ve	  error
	 *
	 * \see ptlrpc_nrs_req_initialize()
	 * \see ptlrpc_nrs_hpreq_add_nolock()
	 * \see ptlrpc_nrs_req_hp_move()
	 */
	int	(*op_res_get) (struct ptlrpc_nrs_policy *policy,
			       struct ptlrpc_nrs_request *nrq,
			       const struct ptlrpc_nrs_resource *parent,
			       struct ptlrpc_nrs_resource **resp,
			       bool moving_req);
	/**
	 * Called when releasing references taken for resources in the resource
	 * hierarchy for the request; this operation is optional.
	 *
	 * \param[in,out] policy The policy the resource belongs to
	 * \param[in]	  res	 The resource to be freed
	 *
	 * \see ptlrpc_nrs_req_finalize()
	 * \see ptlrpc_nrs_hpreq_add_nolock()
	 * \see ptlrpc_nrs_req_hp_move()
	 */
	void	(*op_res_put) (struct ptlrpc_nrs_policy *policy,
			       const struct ptlrpc_nrs_resource *res);

	/**
	 * Obtains a request for handling from the policy, and optionally
	 * removes the request from the policy; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy to poll
	 * \param[in]	  peek	 When set, signifies that we just want to
	 *			 examine the request, and not handle it, so the
	 *			 request is not removed from the policy.
	 * \param[in]	  force	 When set, it will force a policy to return a
	 *			 request if it has one queued.
	 *
	 * \retval NULL		 No request available for handling
	 * \retval valid-pointer The request polled for handling
	 *
	 * \see ptlrpc_nrs_req_get_nolock()
	 */
	struct ptlrpc_nrs_request *
		(*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
			       bool force);
	/**
	 * Called when attempting to add a request to a policy for later
	 * handling; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy on which to enqueue \a nrq
	 * \param[in,out] nrq	 The request to enqueue
	 *
	 * \retval 0	success
	 * \retval != 0	error
	 *
	 * \see ptlrpc_nrs_req_add_nolock()
	 */
	int	(*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
				   struct ptlrpc_nrs_request *nrq);
	/**
	 * Removes a request from the policy's set of pending requests. Normally
	 * called after a request has been polled successfully from the policy
	 * for handling; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy the request \a nrq belongs to
	 * \param[in,out] nrq	 The request to dequeue
	 *
	 * \see ptlrpc_nrs_req_del_nolock()
	 */
	void	(*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
				   struct ptlrpc_nrs_request *nrq);
	/**
	 * Called after the request has been handled. Could be used for
	 * job/resource control; this operation is optional.
	 *
	 * \param[in,out] policy The policy that is finishing handling
	 *			 request \a nrq
	 * \param[in,out] nrq	 The request
	 *
	 * \pre assert_spin_locked(&svcpt->scp_req_lock)
	 *
	 * \see ptlrpc_nrs_req_stop_nolock()
	 */
	void	(*op_req_stop) (struct ptlrpc_nrs_policy *policy,
				struct ptlrpc_nrs_request *nrq);
	/**
	 * Registers the policy's lprocfs interface with a PTLRPC service.
	 *
	 * \param[in] svc The service
	 *
	 * \retval 0	success
	 * \retval != 0	error
	 */
	int	(*op_lprocfs_init) (struct ptlrpc_service *svc);
	/**
	 * Unregisters the policy's lprocfs interface from a PTLRPC service.
	 *
	 * In cases of failed policy registration in
	 * \e ptlrpc_nrs_policy_register(), this function may be called for a
	 * service which has not registered the policy successfully, so
	 * implementations of this method should make sure their operations are
	 * safe in such cases.
	 *
	 * \param[in] svc The service
	 */
	void	(*op_lprocfs_fini) (struct ptlrpc_service *svc);
};

/**
 * Policy flags
 */
enum nrs_policy_flags {
	/**
	 * Fallback policy; use this flag only on a single supported policy per
	 * service. The flag cannot be used on policies that use
	 * \e PTLRPC_NRS_FL_REG_EXTERN
	 */
	PTLRPC_NRS_FL_FALLBACK		= (1 << 0),
	/**
	 * Start policy immediately after registering.
	 */
	PTLRPC_NRS_FL_REG_START		= (1 << 1),
	/**
	 * This is a policy registering from a module different to the one the
	 * NRS core ships in (currently ptlrpc).
	 */
	PTLRPC_NRS_FL_REG_EXTERN	= (1 << 2),
};

/**
 * NRS queue type.
 *
 * Denotes whether an NRS instance is for handling normal or high-priority
 * RPCs, or whether an operation pertains to one or both of the NRS instances
 * in a service.
 */
enum ptlrpc_nrs_queue_type {
	PTLRPC_NRS_QUEUE_REG	= (1 << 0),
	PTLRPC_NRS_QUEUE_HP	= (1 << 1),
	PTLRPC_NRS_QUEUE_BOTH	= (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
};
783/**
784 * NRS head
785 *
786 * A PTLRPC service has at least one NRS head instance for handling normal
787 * priority RPCs, and may optionally have a second NRS head instance for
788 * handling high-priority RPCs. Each NRS head maintains a list of available
789 * policies, of which one and only one policy is acting as the fallback policy,
790 * and optionally a different policy may be acting as the primary policy. For
791 * all RPCs handled by this NRS head instance, NRS core will first attempt to
792 * enqueue the RPC using the primary policy (if any). The fallback policy is
793 * used in the following cases:
794 * - when there was no primary policy in the
795 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
796 * was initialized.
797 * - when the primary policy that was at the
798 * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
799 * RPC was initialized, denoted it did not wish, or for some other reason was
800 * not able to handle the request, by returning a non-valid NRS resource
801 * reference.
802 * - when the primary policy that was at the
803 * ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
804 * RPC was initialized, fails later during the request enqueueing stage.
805 *
806 * \see nrs_resource_get_safe()
807 * \see nrs_request_enqueue()
808 */
struct ptlrpc_nrs {
	spinlock_t			nrs_lock;
	/** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
	/**
	 * List of registered policies
	 */
	struct list_head		nrs_policy_list;
	/**
	 * List of policies with queued requests. Policies that have any
	 * outstanding requests are queued here, and this list is queried
	 * in a round-robin manner from NRS core when obtaining a request
	 * for handling. This ensures that requests from policies that at some
	 * point transition away from the
	 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
	 */
	struct list_head		nrs_policy_queued;
	/**
	 * Service partition for this NRS head
	 */
	struct ptlrpc_service_part     *nrs_svcpt;
	/**
	 * Primary policy, which is the preferred policy for handling RPCs
	 */
	struct ptlrpc_nrs_policy       *nrs_policy_primary;
	/**
	 * Fallback policy, which is the backup policy for handling RPCs
	 */
	struct ptlrpc_nrs_policy       *nrs_policy_fallback;
	/**
	 * This NRS head handles either HP or regular requests
	 */
	enum ptlrpc_nrs_queue_type	nrs_queue_type;
	/**
	 * # queued requests from all policies in this NRS head
	 */
	unsigned long			nrs_req_queued;
	/**
	 * # scheduled requests from all policies in this NRS head
	 */
	unsigned long			nrs_req_started;
	/**
	 * # policies on this NRS
	 */
	unsigned			nrs_num_pols;
	/**
	 * This NRS head is in the process of starting a policy
	 */
	unsigned			nrs_policy_starting:1;
	/**
	 * In the process of shutting down the whole NRS head; used during
	 * unregistration
	 */
	unsigned			nrs_stopping:1;
};

#define NRS_POL_NAME_MAX 16

struct ptlrpc_nrs_pol_desc;

/**
 * Service compatibility predicate; this determines whether a policy is
 * adequate for handling RPCs of a particular PTLRPC service.
 *
 * XXX: This should give the same result during policy registration and
 * unregistration, and for all partitions of a service; so the result should
 * not depend on temporal service or other properties that may influence the
 * result.
 */
typedef bool (*nrs_pol_desc_compat_t) (const struct ptlrpc_service *svc,
				       const struct ptlrpc_nrs_pol_desc *desc);

struct ptlrpc_nrs_pol_conf {
	/**
	 * Human-readable policy name
	 */
	char				 nc_name[NRS_POL_NAME_MAX];
	/**
	 * NRS operations for this policy
	 */
	const struct ptlrpc_nrs_pol_ops *nc_ops;
	/**
	 * Service compatibility predicate
	 */
	nrs_pol_desc_compat_t		 nc_compat;
	/**
	 * Set for policies that support a single ptlrpc service, i.e. ones
	 * that have \a pd_compat set to nrs_policy_compat_one(). The variable
	 * value depicts the name of the single service that such policies are
	 * compatible with.
	 */
	const char			*nc_compat_svc_name;
	/**
	 * Owner module for this policy descriptor; policies registering from
	 * a module different to the one the NRS framework is held within
	 * (currently ptlrpc) should set this field to THIS_MODULE.
	 */
	struct module			*nc_owner;
	/**
	 * Policy registration flags; a bitmask of \e nrs_policy_flags
	 */
	unsigned			 nc_flags;
};
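
/*
 * Hedged sketch of registering an external NRS policy from a module's init
 * function; "my_pol_ops" is a hypothetical struct ptlrpc_nrs_pol_ops
 * instance, and the compatibility predicate name is illustrative rather
 * than guaranteed to be exported here:
 *
 *	static struct ptlrpc_nrs_pol_conf my_conf = {
 *		.nc_name	= "example",
 *		.nc_ops		= &my_pol_ops,
 *		.nc_compat	= nrs_policy_compat_all,
 *		.nc_owner	= THIS_MODULE,
 *		.nc_flags	= PTLRPC_NRS_FL_REG_EXTERN,
 *	};
 *
 *	rc = ptlrpc_nrs_policy_register(&my_conf);
 */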

/**
 * NRS policy registering descriptor
 *
 * Is used to hold a description of a policy that can be passed to NRS core in
 * order to register the policy with NRS heads in different PTLRPC services.
 */
struct ptlrpc_nrs_pol_desc {
	/**
	 * Human-readable policy name
	 */
	char				 pd_name[NRS_POL_NAME_MAX];
	/**
	 * Link into nrs_core::nrs_policies
	 */
	struct list_head		 pd_list;
	/**
	 * NRS operations for this policy
	 */
	const struct ptlrpc_nrs_pol_ops *pd_ops;
	/**
	 * Service compatibility predicate
	 */
	nrs_pol_desc_compat_t		 pd_compat;
	/**
	 * Set for policies that are compatible with only one PTLRPC service.
	 *
	 * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
	 */
	const char			*pd_compat_svc_name;
	/**
	 * Owner module for this policy descriptor.
	 *
	 * We need to hold a reference to the module whenever we might make use
	 * of any of the module's contents, i.e.
	 * - If one or more instances of the policy are at a state where they
	 *   might be handling a request, i.e.
	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
	 *   call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
	 *   is taken on the module when
	 *   \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
	 *   becomes 0, so that we hold only one reference to the module maximum
	 *   at any time.
	 *
	 *   We do not need to hold a reference to the module, even though we
	 *   might use code and data from the module, in the following cases:
	 * - During external policy registration, because this should happen in
	 *   the module's init() function, in which case the module is safe from
	 *   removal because a reference is being held on the module by the
	 *   kernel, and iirc kmod (and I guess module-init-tools also) will
	 *   serialize any racing processes properly anyway.
	 * - During external policy unregistration, because this should happen
	 *   in a module's exit() function, and any attempts to start a policy
	 *   instance would need to take a reference on the module, and this is
	 *   not possible once we have reached the point where the exit()
	 *   handler is called.
	 * - During service registration and unregistration, as service setup
	 *   and cleanup, and policy registration, unregistration and policy
	 *   instance starting, are serialized by \e nrs_core::nrs_mutex, so
	 *   as long as users adhere to the convention of registering policies
	 *   in init() and unregistering them in module exit() functions, there
	 *   should not be a race between these operations.
	 * - During any policy-specific lprocfs operations, because a reference
	 *   is held by the kernel on a proc entry that has been entered by a
	 *   syscall, so as long as proc entries are removed at unregistration
	 *   time, unregistration and lprocfs operations will be properly
	 *   serialized.
	 */
	struct module			*pd_owner;
	/**
	 * Bitmask of \e nrs_policy_flags
	 */
	unsigned			 pd_flags;
	/**
	 * # of references on this descriptor
	 */
	atomic_t			 pd_refs;
};

/**
 * NRS policy state
 *
 * Policies transition from one state to the other during their lifetime
 */
enum ptlrpc_nrs_pol_state {
	/**
	 * Not a valid policy state.
	 */
	NRS_POL_STATE_INVALID,
	/**
	 * Policies are in this state either at the start of their life, or
	 * transition here when the user selects a different policy to act
	 * as the primary one.
	 */
	NRS_POL_STATE_STOPPED,
	/**
	 * Policy is in the process of stopping
	 */
	NRS_POL_STATE_STOPPING,
	/**
	 * Policy is in the process of starting
	 */
	NRS_POL_STATE_STARTING,
	/**
	 * A policy is in this state in two cases:
	 * - it is the fallback policy, which is always in this state.
	 * - it has been activated by the user; i.e. it is the primary policy,
	 */
	NRS_POL_STATE_STARTED,
};

/**
 * NRS policy information
 *
 * Used for obtaining information on the status of a policy via lprocfs
 */
struct ptlrpc_nrs_pol_info {
	/**
	 * Policy name
	 */
	char				pi_name[NRS_POL_NAME_MAX];
	/**
	 * Current policy state
	 */
	enum ptlrpc_nrs_pol_state	pi_state;
	/**
	 * # RPCs enqueued for later dispatching by the policy
	 */
	long				pi_req_queued;
	/**
	 * # RPCs started for dispatch by the policy
	 */
	long				pi_req_started;
	/**
	 * Is this a fallback policy?
	 */
	unsigned			pi_fallback:1;
};

/**
 * NRS policy
 *
 * There is one instance of this for each policy in each NRS head of each
 * PTLRPC service partition.
 */
struct ptlrpc_nrs_policy {
	/**
	 * Linkage into the NRS head's list of policies,
	 * ptlrpc_nrs:nrs_policy_list
	 */
	struct list_head		pol_list;
	/**
	 * Linkage into the NRS head's list of policies with enqueued
	 * requests, ptlrpc_nrs:nrs_policy_queued
	 */
	struct list_head		pol_list_queued;
	/**
	 * Current state of this policy
	 */
	enum ptlrpc_nrs_pol_state	pol_state;
	/**
	 * Bitmask of nrs_policy_flags
	 */
	unsigned			pol_flags;
	/**
	 * # RPCs enqueued for later dispatching by the policy
	 */
	long				pol_req_queued;
	/**
	 * # RPCs started for dispatch by the policy
	 */
	long				pol_req_started;
	/**
	 * Usage reference count taken on the policy instance
	 */
	long				pol_ref;
	/**
	 * The NRS head this policy has been created at
	 */
	struct ptlrpc_nrs	       *pol_nrs;
	/**
	 * Private policy data; varies by policy type
	 */
	void			       *pol_private;
	/**
	 * Policy descriptor for this policy instance.
	 */
	struct ptlrpc_nrs_pol_desc     *pol_desc;
};

/**
 * NRS resource
 *
 * Resources are embedded into two types of NRS entities:
 * - Inside NRS policies, in the policy's private data in
 *   ptlrpc_nrs_policy::pol_private
 * - In objects that act as prime-level scheduling entities in different NRS
 *   policies; e.g. on a policy that performs round robin or similar order
 *   scheduling across client NIDs, there would be one NRS resource per unique
 *   client NID. On a policy which performs round robin scheduling across
 *   backend filesystem objects, there would be one resource associated with
 *   each of the backend filesystem objects partaking in the scheduling
 *   performed by the policy.
 *
 * NRS resources share a parent-child relationship, in which resources embedded
 * in policy instances are the parent entities, with all scheduling entities
 * a policy schedules across being the children, thus forming a simple resource
 * hierarchy. This hierarchy may be extended with one or more levels in the
 * future if the ability to have more than one primary policy is added.
 *
 * Upon request initialization, references to the then active NRS policies are
 * taken and used to later handle the dispatching of the request with one of
 * these policies.
 *
 * \see nrs_resource_get_safe()
 * \see ptlrpc_nrs_req_add()
 */
struct ptlrpc_nrs_resource {
	/**
	 * This NRS resource's parent; is NULL for resources embedded in NRS
	 * policy instances; i.e. those are top-level ones.
	 */
	struct ptlrpc_nrs_resource     *res_parent;
	/**
	 * The policy associated with this resource.
	 */
	struct ptlrpc_nrs_policy       *res_policy;
};

enum {
	NRS_RES_FALLBACK,
	NRS_RES_PRIMARY,
	NRS_RES_MAX
};

/* \name fifo
 *
 * FIFO policy
 *
 * This policy is a logical wrapper around previous, non-NRS functionality.
 * It dispatches RPCs in the same order as they arrive from the network. This
 * policy is currently used as the fallback policy, and the only enabled policy
 * on all NRS heads of all PTLRPC service partitions.
 * @{
 */

/**
 * Private data structure for the FIFO policy
 */
struct nrs_fifo_head {
	/**
	 * Resource object for policy instance.
	 */
	struct ptlrpc_nrs_resource	fh_res;
	/**
	 * List of queued requests.
	 */
	struct list_head		fh_list;
	/**
	 * For debugging purposes.
	 */
	__u64				fh_sequence;
};

struct nrs_fifo_req {
	struct list_head	fr_list;
	__u64			fr_sequence;
};

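/*
 * A hedged sketch of how a FIFO-style policy might implement the mandatory
 * enqueue operation of struct ptlrpc_nrs_pol_ops; the real implementation
 * lives in the ptlrpc NRS FIFO code and differs in detail. Comment form is
 * used since struct ptlrpc_nrs_request is only defined further below.
 *
 *	static int example_fifo_req_enqueue(struct ptlrpc_nrs_policy *policy,
 *					    struct ptlrpc_nrs_request *nrq)
 *	{
 *		struct nrs_fifo_head *head = policy->pol_private;
 *
 *		nrq->nr_u.fifo.fr_sequence = head->fh_sequence++;
 *		list_add_tail(&nrq->nr_u.fifo.fr_list, &head->fh_list);
 *		return 0;
 *	}
 */
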
/** @} fifo */

/**
 * NRS request
 *
 * Instances of this object exist embedded within ptlrpc_request; the main
 * purpose of this object is to hold references to the request's resources
 * for the lifetime of the request, and to hold properties that policies
 * use for determining the request's scheduling priority.
 */
struct ptlrpc_nrs_request {
	/**
	 * The request's resource hierarchy.
	 */
	struct ptlrpc_nrs_resource     *nr_res_ptrs[NRS_RES_MAX];
	/**
	 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
	 * policy that was used to enqueue the request.
	 *
	 * \see nrs_request_enqueue()
	 */
	unsigned			nr_res_idx;
	unsigned			nr_initialized:1;
	unsigned			nr_enqueued:1;
	unsigned			nr_started:1;
	unsigned			nr_finalized:1;

	/**
	 * Policy-specific fields, used for determining a request's scheduling
	 * priority, and other supporting functionality.
	 */
	union {
		/**
		 * Fields for the FIFO policy
		 */
		struct nrs_fifo_req	fifo;
	} nr_u;
	/**
	 * Externally-registering policies may want to use this to allocate
	 * their own request properties.
	 */
	void			       *ext;
};

/** @} nrs */

/**
 * Basic request prioritization operations structure.
 * The whole idea is centered around locks and RPCs that might affect locks.
 * When a lock is contended we try to give priority to RPCs that might lead
 * to the fastest release of that lock.
 * Currently implemented only for OSTs, in a way that gives all IO and
 * truncate RPCs coming from a locked region where a lock is contended
 * priority over other requests.
 */
struct ptlrpc_hpreq_ops {
	/**
	 * Check if the lock handle of the given lock is the same as
	 * taken from the request.
	 */
	int  (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
	/**
	 * Check if the request is a high priority one.
	 */
	int  (*hpreq_check)(struct ptlrpc_request *);
	/**
	 * Called after the request has been handled.
	 */
	void (*hpreq_fini)(struct ptlrpc_request *);
};

/**
 * Represents a remote procedure call.
 *
 * This is a staple structure used by everybody wanting to send a request
 * in Lustre.
 */
struct ptlrpc_request {
	/* Request type: one of PTL_RPC_MSG_* */
	int			 rq_type;
	/** Result of request processing */
	int			 rq_status;
	/**
	 * Linkage item through which this request is included into
	 * sending/delayed lists on client and into rqbd list on server
	 */
	struct list_head	 rq_list;
	/**
	 * Server side list of incoming unserved requests sorted by arrival
	 * time. Traversed from time to time to notice about-to-expire
	 * requests and send back "early replies" to clients to let them
	 * know the server is alive and well, just too busy to service their
	 * requests in time
	 */
	struct list_head	 rq_timed_list;
	/** server-side history, used for debugging purposes. */
	struct list_head	 rq_history_list;
	/** server-side per-export list */
	struct list_head	 rq_exp_list;
	/** server-side hp handlers */
	struct ptlrpc_hpreq_ops	*rq_ops;

	/** initial thread servicing this request */
	struct ptlrpc_thread	*rq_svc_thread;

	/** history sequence # */
	__u64			 rq_history_seq;
	/** \addtogroup nrs
	 * @{
	 */
	/** stub for NRS request */
	struct ptlrpc_nrs_request rq_nrq;
	/** @} nrs */
	/** the index of service's srv_at_array into which request is linked */
	time_t			 rq_at_index;
	/** Lock to protect request flags and some other important bits, like
	 * rq_list
	 */
	spinlock_t		 rq_lock;
	/** client-side flags are serialized by rq_lock */
	unsigned int rq_intr:1, rq_replied:1, rq_err:1,
		rq_timedout:1, rq_resend:1, rq_restart:1,
		/**
		 * when ->rq_replay is set, request is kept by the client even
		 * after server commits corresponding transaction. This is
		 * used for operations that require sequence of multiple
		 * requests to be replayed. The only example currently is file
		 * open/close. When last request in such a sequence is
		 * committed, ->rq_replay is cleared on all requests in the
		 * sequence.
		 */
		rq_replay:1,
		rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
		rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
		rq_early:1,
		rq_req_unlink:1, rq_reply_unlink:1,
		rq_memalloc:1,	    /* req originated from "kswapd" */
		/* server-side flags */
		rq_packed_final:1,  /* packed final reply */
		rq_hp:1,	    /* high priority RPC */
		rq_at_linked:1,	    /* link into service's srv_at_array */
		rq_reply_truncate:1,
		rq_committed:1,
		/* whether the "rq_set" is a valid one */
		rq_invalid_rqset:1,
		rq_generation_set:1,
		/* do not resend request on -EINPROGRESS */
		rq_no_retry_einprogress:1,
		/* allow the req to be sent if the import is in recovery
		 * status */
		rq_allow_replay:1;

	unsigned int rq_nr_resend;

	enum rq_phase rq_phase;	     /* one of RQ_PHASE_* */
	enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
	atomic_t rq_refcount;	/* client-side refcount for SENT race,
				   server-side refcount for multiple replies */

	/** Portal to which this request would be sent */
	short rq_request_portal; /* XXX FIXME bug 249 */
	/** Portal where to wait for reply and where reply would be sent */
	short rq_reply_portal;	 /* XXX FIXME bug 249 */

	/**
	 * client-side:
	 * !rq_truncate : # reply bytes actually received,
	 *  rq_truncate : required repbuf_len for resend
	 */
	int rq_nob_received;
	/** Request length */
	int rq_reqlen;
	/** Reply length */
	int rq_replen;
	/** Request message - what client sent */
	struct lustre_msg *rq_reqmsg;
	/** Reply message - server response */
	struct lustre_msg *rq_repmsg;
	/** Transaction number */
	__u64 rq_transno;
	/** xid */
	__u64 rq_xid;
	/**
	 * List item for the replay list. Not yet committed requests get linked
	 * there.
	 * Also see \a rq_replay comment above.
	 */
	struct list_head rq_replay_list;

	/**
	 * security and encryption data
	 * @{ */
	struct ptlrpc_cli_ctx   *rq_cli_ctx;	/**< client's half ctx */
	struct ptlrpc_svc_ctx   *rq_svc_ctx;	/**< server's half ctx */
	struct list_head	 rq_ctx_chain;	/**< link to waited ctx */

	struct sptlrpc_flavor	 rq_flvr;	/**< for client & server */
	enum lustre_sec_part	 rq_sp_from;

	/* client/server security flags */
	unsigned int
				 rq_ctx_init:1,	     /* context initiation */
				 rq_ctx_fini:1,	     /* context destroy */
				 rq_bulk_read:1,     /* request bulk read */
				 rq_bulk_write:1,    /* request bulk write */
				 /* server authentication flags */
				 rq_auth_gss:1,	     /* authenticated by gss */
				 rq_auth_remote:1,   /* authed as remote user */
				 rq_auth_usr_root:1, /* authed as root */
				 rq_auth_usr_mdt:1,  /* authed as mdt */
				 rq_auth_usr_ost:1,  /* authed as ost */
				 /* security tfm flags */
				 rq_pack_udesc:1,
				 rq_pack_bulk:1,
				 /* doesn't expect reply FIXME */
				 rq_no_reply:1,
				 rq_pill_init:1;     /* pill initialized */

	uid_t			 rq_auth_uid;	     /* authed uid */
	uid_t			 rq_auth_mapped_uid; /* authed uid mapped to */

	/* (server side), pointed directly into req buffer */
	struct ptlrpc_user_desc *rq_user_desc;

	/* various buffer pointers */
	struct lustre_msg	*rq_reqbuf;	 /* req wrapper */
	char			*rq_repbuf;	 /* rep buffer */
	struct lustre_msg	*rq_repdata;	 /* rep wrapper msg */
	struct lustre_msg	*rq_clrbuf;	 /* only in priv mode */
	int			 rq_reqbuf_len;	 /* req wrapper buf len */
	int			 rq_reqdata_len; /* req wrapper msg len */
	int			 rq_repbuf_len;	 /* rep buffer len */
	int			 rq_repdata_len; /* rep wrapper msg len */
	int			 rq_clrbuf_len;	 /* only in priv mode */
	int			 rq_clrdata_len; /* only in priv mode */

	/** early replies go to offset 0, regular replies go after that */
	unsigned int		 rq_reply_off;

	/** @} */

	/** Fields that help to see if request and reply were swabbed or not */
	__u32 rq_req_swab_mask;
	__u32 rq_rep_swab_mask;

	/** What was import generation when this request was sent */
	int rq_import_generation;
	enum lustre_imp_state rq_send_state;

	/** how many early replies (for stats) */
	int rq_early_count;

	/** client+server request */
	lnet_handle_md_t rq_req_md_h;
	struct ptlrpc_cb_id rq_req_cbid;
	/** optional time limit for send attempts */
	long rq_delay_limit;
	/** time request was first queued */
	unsigned long rq_queued_time;

	/* server-side... */
	/** request arrival time */
	struct timeval rq_arrival_time;
	/** separated reply state */
	struct ptlrpc_reply_state *rq_reply_state;
	/** incoming request buffer */
	struct ptlrpc_request_buffer_desc *rq_rqbd;

	/** client-only incoming reply */
	lnet_handle_md_t rq_reply_md_h;
	wait_queue_head_t rq_reply_waitq;
	struct ptlrpc_cb_id rq_reply_cbid;

	/** our LNet NID */
	lnet_nid_t rq_self;
	/** Peer description (the other side) */
	lnet_process_id_t rq_peer;
	/** Server-side, export on which request was received */
	struct obd_export *rq_export;
	/** Client side, import where request is being sent */
	struct obd_import *rq_import;

	/** Replay callback, called after request is replayed at recovery */
	void (*rq_replay_cb)(struct ptlrpc_request *);
	/**
	 * Commit callback, called when request is committed and about to be
	 * freed.
	 */
	void (*rq_commit_cb)(struct ptlrpc_request *);
	/** Opaque data for replay and commit callbacks. */
	void *rq_cb_data;

	/** For bulk requests on client only: bulk descriptor */
	struct ptlrpc_bulk_desc *rq_bulk;

	/** client outgoing req */
	/**
	 * when request/reply sent (secs), or time when request should be sent
	 */
	time_t rq_sent;
	/** time for request really sent out */
	time_t rq_real_sent;

	/** when request must finish. volatile
	 * so that servers' early reply updates to the deadline aren't
	 * kept in per-cpu cache */
	volatile time_t rq_deadline;
	/** when req reply unlink must finish. */
	time_t rq_reply_deadline;
	/** when req bulk unlink must finish. */
	time_t rq_bulk_deadline;
	/**
	 * service time estimate (secs)
	 * If the request is not served by this time, it is marked as timed
	 * out.
	 */
	int rq_timeout;

	/** Multi-rpc bits */
	/** Per-request waitq introduced by bug 21938 for recovery waiting */
	wait_queue_head_t rq_set_waitq;
	/** Link item for request set lists */
	struct list_head rq_set_chain;
	/** Link back to the request set */
	struct ptlrpc_request_set *rq_set;
	/** Async completion handler, called when reply is received */
	ptlrpc_interpterer_t rq_interpret_reply;
	/** Async completion context */
	union ptlrpc_async_args rq_async_args;

	/** Pool if request is from preallocated list */
	struct ptlrpc_request_pool *rq_pool;

	struct lu_context rq_session;
	struct lu_context rq_recov_session;

	/** request format description */
	struct req_capsule rq_pill;
};

/**
 * Call the completion handler for the rpc, if any, and return its status,
 * or the original rc if there was no handler defined for this request.
 */
static inline int ptlrpc_req_interpret(const struct lu_env *env,
				       struct ptlrpc_request *req, int rc)
{
	if (req->rq_interpret_reply != NULL) {
		req->rq_status = req->rq_interpret_reply(env, req,
							 &req->rq_async_args,
							 rc);
		return req->rq_status;
	}
	return rc;
}
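
/*
 * Hedged usage sketch: a caller typically attaches the async handler before
 * sending; "my_interpret" is a hypothetical ptlrpc_interpterer_t:
 *
 *	req->rq_interpret_reply = my_interpret;
 *	ptlrpc_set_add_req(set, req);
 */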

/** \addtogroup nrs
 * @{
 */
int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf);
int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf);
void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req);
void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
				struct ptlrpc_nrs_pol_info *info);

/*
 * Can the request be moved from the regular NRS head to the high-priority NRS
 * head (of the same PTLRPC service partition), if any?
 *
 * For a reliable result, this should be checked under the
 * svcpt->scp_req_lock.
 */
static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
{
	struct ptlrpc_nrs_request *nrq = &req->rq_nrq;

	/**
	 * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
	 * request has been enqueued first, and ptlrpc_nrs_request::nr_started
	 * to make sure it has not been scheduled yet (analogous to the
	 * previous (non-NRS) checking of
	 * !list_empty(&ptlrpc_request::rq_list)).
	 */
	return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
}
/** @} nrs */

/**
 * Returns 1 if request buffer at offset \a index was already swabbed
 */
static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
{
	LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
	return req->rq_req_swab_mask & (1 << index);
}

/**
 * Returns 1 if request reply buffer at offset \a index was already swabbed
 */
static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
{
	LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
	return req->rq_rep_swab_mask & (1 << index);
}

/**
 * Returns 1 if request needs to be swabbed into local cpu byteorder
 */
static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
{
	return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
}

/**
 * Returns 1 if request reply needs to be swabbed into local cpu byteorder
 */
static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
{
	return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
}

/**
 * Mark request buffer at offset \a index that it was already swabbed
 */
static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
{
	LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
	LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
	req->rq_req_swab_mask |= 1 << index;
}

/**
 * Mark request reply buffer at offset \a index that it was already swabbed
 */
static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
{
	LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
	LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
	req->rq_rep_swab_mask |= 1 << index;
}
1618
1619/**
1620 * Convert numerical request phase value \a phase into text string description
1621 */
1622static inline const char *
1623ptlrpc_phase2str(enum rq_phase phase)
1624{
1625 switch (phase) {
1626 case RQ_PHASE_NEW:
1627 return "New";
1628 case RQ_PHASE_RPC:
1629 return "Rpc";
1630 case RQ_PHASE_BULK:
1631 return "Bulk";
1632 case RQ_PHASE_INTERPRET:
1633 return "Interpret";
1634 case RQ_PHASE_COMPLETE:
1635 return "Complete";
1636 case RQ_PHASE_UNREGISTERING:
1637 return "Unregistering";
1638 default:
1639 return "?Phase?";
1640 }
1641}
1642
1643/**
 1644 * Convert numerical request phase of the request \a req into text string
1645 * description
1646 */
1647static inline const char *
1648ptlrpc_rqphase2str(struct ptlrpc_request *req)
1649{
1650 return ptlrpc_phase2str(req->rq_phase);
1651}
1652
1653/**
1654 * Debugging functions and helpers to print request structure into debug log
1655 * @{
1656 */
1657/* Spare the preprocessor, spoil the bugs. */
1658#define FLAG(field, str) (field ? str : "")
1659
1660/** Convert bit flags into a string */
1661#define DEBUG_REQ_FLAGS(req) \
1662 ptlrpc_rqphase2str(req), \
1663 FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"), \
1664 FLAG(req->rq_err, "E"), \
1665 FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
1666 FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \
1667 FLAG(req->rq_no_resend, "N"), \
1668 FLAG(req->rq_waiting, "W"), \
1669 FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \
1670 FLAG(req->rq_committed, "M")
1671
1672#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
1673
1674void _debug_req(struct ptlrpc_request *req,
1675 struct libcfs_debug_msg_data *data, const char *fmt, ...)
 1676 __printf(3, 4);
1677
1678/**
 1679 * Helper that decides if we need to print request according to current debug
 1680 * level settings
1681 */
1682#define debug_req(msgdata, mask, cdls, req, fmt, a...) \
1683do { \
1684 CFS_CHECK_STACK(msgdata, mask, cdls); \
1685 \
1686 if (((mask) & D_CANTMASK) != 0 || \
1687 ((libcfs_debug & (mask)) != 0 && \
1688 (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) \
1689 _debug_req((req), msgdata, fmt, ##a); \
 1690} while (0)
1691
1692/**
 1693 * This is the debug print function you need to use to print request structure
 1694 * content into the lustre debug log.
 1695 * For most callers (level is a constant) this is resolved at compile time. */
1696#define DEBUG_REQ(level, req, fmt, args...) \
1697do { \
1698 if ((level) & (D_ERROR | D_WARNING)) { \
 1699 static struct cfs_debug_limit_state cdls; \
1700 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls); \
1701 debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
1702 } else { \
1703 LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL); \
1704 debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
1705 } \
1706} while (0)
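
/*
 * Example usage (editor's note): with a constant level the branch above is
 * resolved at compile time, e.g.:
 *
 *	DEBUG_REQ(D_ERROR, req, "send failed: rc = %d", rc);
 *	DEBUG_REQ(D_RPCTRACE, req, "retained for replay");
 */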
1707/** @} */
1708
1709/**
1710 * Structure that defines a single page of a bulk transfer
1711 */
1712struct ptlrpc_bulk_page {
1713 /** Linkage to list of pages in a bulk */
1714 struct list_head bp_link;
1715 /**
1716 * Number of bytes in a page to transfer starting from \a bp_pageoffset
1717 */
1718 int bp_buflen;
1719 /** offset within a page */
1720 int bp_pageoffset;
1721 /** The page itself */
1722 struct page *bp_page;
1723};
1724
1725#define BULK_GET_SOURCE 0
1726#define BULK_PUT_SINK 1
1727#define BULK_GET_SINK 2
1728#define BULK_PUT_SOURCE 3
1729
1730/**
1731 * Definition of bulk descriptor.
 1732 * Bulks are special "two phase" RPCs where the initial request message
 1733 * is sent first and is followed by a transfer (or receipt) of a large
 1734 * amount of data to be settled into pages referenced from the bulk descriptors.
 1735 * Bulk transfers (the actual data following the small requests) are done
1736 * on separate LNet portals.
1737 * In lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
1738 * Another user is readpage for MDT.
1739 */
1740struct ptlrpc_bulk_desc {
1741 /** completed with failure */
1742 unsigned long bd_failure:1;
1743 /** {put,get}{source,sink} */
1744 unsigned long bd_type:2;
1745 /** client side */
1746 unsigned long bd_registered:1;
1747 /** For serialization with callback */
1748 spinlock_t bd_lock;
1749 /** Import generation when request for this bulk was sent */
1750 int bd_import_generation;
1751 /** LNet portal for this bulk */
1752 __u32 bd_portal;
1753 /** Server side - export this bulk created for */
1754 struct obd_export *bd_export;
1755 /** Client side - import this bulk was sent on */
1756 struct obd_import *bd_import;
1757 /** Back pointer to the request */
1758 struct ptlrpc_request *bd_req;
1759 wait_queue_head_t bd_waitq; /* server side only WQ */
1760 int bd_iov_count; /* # entries in bd_iov */
1761 int bd_max_iov; /* allocated size of bd_iov */
1762 int bd_nob; /* # bytes covered */
1763 int bd_nob_transferred; /* # bytes GOT/PUT */
1764
1765 __u64 bd_last_xid;
1766
1767 struct ptlrpc_cb_id bd_cbid; /* network callback info */
1768 lnet_nid_t bd_sender; /* stash event::sender */
1769 int bd_md_count; /* # valid entries in bd_mds */
1770 int bd_md_max_brw; /* max entries in bd_mds */
1771 /** array of associated MDs */
1772 lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];
1773
1774 /*
1775 * encrypt iov, size is either 0 or bd_iov_count.
1776 */
1777 lnet_kiov_t *bd_enc_iov;
1778
1779 lnet_kiov_t bd_iov[0];
1780};
1781
1782enum {
1783 SVC_STOPPED = 1 << 0,
1784 SVC_STOPPING = 1 << 1,
1785 SVC_STARTING = 1 << 2,
1786 SVC_RUNNING = 1 << 3,
1787 SVC_EVENT = 1 << 4,
1788 SVC_SIGNAL = 1 << 5,
1789};
1790
1791#define PTLRPC_THR_NAME_LEN 32
1792/**
1793 * Definition of server service thread structure
1794 */
1795struct ptlrpc_thread {
1796 /**
1797 * List of active threads in svc->srv_threads
1798 */
1799 struct list_head t_link;
1800 /**
1801 * thread-private data (preallocated memory)
1802 */
1803 void *t_data;
1804 __u32 t_flags;
1805 /**
1806 * service thread index, from ptlrpc_start_threads
1807 */
1808 unsigned int t_id;
1809 /**
1810 * service thread pid
1811 */
1812 pid_t t_pid;
1813 /**
1814 * put watchdog in the structure per thread b=14840
 1815 *
 1816 * The Lustre watchdog is removed for the client in the hope
 1817 * that a generic watchdog can be merged into the kernel.
 1818 * When that happens, we should add the below back:
 1819 *
 1820 * struct lc_watchdog *t_watchdog;
 1821 */
1822 /**
 1823 * the svc this thread belongs to b=18582
1824 */
1825 struct ptlrpc_service_part *t_svcpt;
1826 wait_queue_head_t t_ctl_waitq;
1827 struct lu_env *t_env;
1828 char t_name[PTLRPC_THR_NAME_LEN];
1829};
1830
1831static inline int thread_is_init(struct ptlrpc_thread *thread)
1832{
1833 return thread->t_flags == 0;
1834}
1835
1836static inline int thread_is_stopped(struct ptlrpc_thread *thread)
1837{
1838 return !!(thread->t_flags & SVC_STOPPED);
1839}
1840
1841static inline int thread_is_stopping(struct ptlrpc_thread *thread)
1842{
1843 return !!(thread->t_flags & SVC_STOPPING);
1844}
1845
1846static inline int thread_is_starting(struct ptlrpc_thread *thread)
1847{
1848 return !!(thread->t_flags & SVC_STARTING);
1849}
1850
1851static inline int thread_is_running(struct ptlrpc_thread *thread)
1852{
1853 return !!(thread->t_flags & SVC_RUNNING);
1854}
1855
1856static inline int thread_is_event(struct ptlrpc_thread *thread)
1857{
1858 return !!(thread->t_flags & SVC_EVENT);
1859}
1860
1861static inline int thread_is_signal(struct ptlrpc_thread *thread)
1862{
1863 return !!(thread->t_flags & SVC_SIGNAL);
1864}
1865
1866static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
1867{
1868 thread->t_flags &= ~flags;
1869}
1870
1871static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
1872{
1873 thread->t_flags = flags;
1874}
1875
1876static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
1877{
1878 thread->t_flags |= flags;
1879}
1880
1881static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
1882 __u32 flags)
1883{
1884 if (thread->t_flags & flags) {
1885 thread->t_flags &= ~flags;
1886 return 1;
1887 }
1888 return 0;
1889}
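
/*
 * Example (editor's sketch): how a service main loop might consume the
 * helpers above. SVC_EVENT is consumed (cleared) per wakeup while
 * SVC_STOPPING stays set; the function name is hypothetical.
 */
static inline int example_thread_should_work(struct ptlrpc_thread *thread)
{
	if (thread_is_stopping(thread))
		return 0;	/* shut down instead of handling work */
	return thread_test_and_clear_flags(thread, SVC_EVENT);
}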
1890
1891/**
1892 * Request buffer descriptor structure.
1893 * This is a structure that contains one posted request buffer for service.
 1894 * Once data lands in a buffer, the event callback creates the actual request
 1895 * and wakes one of the service threads to process the new incoming request.
1896 * More than one request can fit into the buffer.
1897 */
1898struct ptlrpc_request_buffer_desc {
1899 /** Link item for rqbds on a service */
1900 struct list_head rqbd_list;
1901 /** History of requests for this buffer */
1902 struct list_head rqbd_reqs;
1903 /** Back pointer to service for which this buffer is registered */
1904 struct ptlrpc_service_part *rqbd_svcpt;
1905 /** LNet descriptor */
1906 lnet_handle_md_t rqbd_md_h;
1907 int rqbd_refcount;
1908 /** The buffer itself */
1909 char *rqbd_buffer;
1910 struct ptlrpc_cb_id rqbd_cbid;
1911 /**
1912 * This "embedded" request structure is only used for the
1913 * last request to fit into the buffer
1914 */
1915 struct ptlrpc_request rqbd_req;
1916};
1917
1918typedef int (*svc_handler_t)(struct ptlrpc_request *req);
1919
1920struct ptlrpc_service_ops {
1921 /**
1922 * if non-NULL called during thread creation (ptlrpc_start_thread())
1923 * to initialize service specific per-thread state.
1924 */
1925 int (*so_thr_init)(struct ptlrpc_thread *thr);
1926 /**
1927 * if non-NULL called during thread shutdown (ptlrpc_main()) to
1928 * destruct state created by ->srv_init().
1929 */
1930 void (*so_thr_done)(struct ptlrpc_thread *thr);
1931 /**
1932 * Handler function for incoming requests for this service
1933 */
1934 int (*so_req_handler)(struct ptlrpc_request *req);
1935 /**
1936 * function to determine priority of the request, it's called
1937 * on every new request
1938 */
1939 int (*so_hpreq_handler)(struct ptlrpc_request *);
1940 /**
1941 * service-specific print fn
1942 */
1943 void (*so_req_printer)(void *, struct ptlrpc_request *);
1944};
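
/*
 * Example (editor's sketch): a minimal ops table. Only so_req_handler is
 * required for a service to do anything useful; the other callbacks may
 * stay NULL. The handler name is hypothetical.
 */
static int example_req_handler(struct ptlrpc_request *req)
{
	return 0;	/* pretend every request is handled successfully */
}

static const struct ptlrpc_service_ops example_svc_ops = {
	.so_req_handler = example_req_handler,
};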
1945
1946#ifndef __cfs_cacheline_aligned
 1947/* NB: put it here to reduce patch dependence */
1948# define __cfs_cacheline_aligned
1949#endif
1950
1951/**
1952 * How many high priority requests to serve before serving one normal
1953 * priority request
1954 */
1955#define PTLRPC_SVC_HP_RATIO 10
1956
1957/**
1958 * Definition of PortalRPC service.
 1959 * The service listens on a particular portal (like a tcp port)
 1960 * and performs actions for a specific server, like IO service for OST
1961 * or general metadata service for MDS.
1962 */
1963struct ptlrpc_service {
1964 /** serialize /proc operations */
1965 spinlock_t srv_lock;
1966 /** most often accessed fields */
1967 /** chain thru all services */
1968 struct list_head srv_list;
1969 /** service operations table */
1970 struct ptlrpc_service_ops srv_ops;
1971 /** only statically allocated strings here; we don't clean them */
1972 char *srv_name;
1973 /** only statically allocated strings here; we don't clean them */
1974 char *srv_thread_name;
1975 /** service thread list */
1976 struct list_head srv_threads;
 1977 /** # of threads to create for each partition at initialization */
1978 int srv_nthrs_cpt_init;
1979 /** limit of threads number for each partition */
1980 int srv_nthrs_cpt_limit;
1981 /** Root of debugfs dir tree for this service */
1982 struct dentry *srv_debugfs_entry;
1983 /** Pointer to statistic data for this service */
1984 struct lprocfs_stats *srv_stats;
1985 /** # hp per lp reqs to handle */
1986 int srv_hpreq_ratio;
1987 /** biggest request to receive */
1988 int srv_max_req_size;
1989 /** biggest reply to send */
1990 int srv_max_reply_size;
1991 /** size of individual buffers */
1992 int srv_buf_size;
1993 /** # buffers to allocate in 1 group */
1994 int srv_nbuf_per_group;
1995 /** Local portal on which to receive requests */
1996 __u32 srv_req_portal;
1997 /** Portal on the client to send replies to */
1998 __u32 srv_rep_portal;
1999 /**
2000 * Tags for lu_context associated with this thread, see struct
2001 * lu_context.
2002 */
2003 __u32 srv_ctx_tags;
2004 /** soft watchdog timeout multiplier */
2005 int srv_watchdog_factor;
2006 /** under unregister_service */
2007 unsigned srv_is_stopping:1;
2008
2009 /** max # request buffers in history per partition */
2010 int srv_hist_nrqbds_cpt_max;
2011 /** number of CPTs this service bound on */
2012 int srv_ncpts;
2013 /** CPTs array this service bound on */
2014 __u32 *srv_cpts;
 2015 /** 2^srv_cpt_bits >= cfs_cpt_number(srv_cptable) */
2016 int srv_cpt_bits;
2017 /** CPT table this service is running over */
2018 struct cfs_cpt_table *srv_cptable;
2019
2020 /* sysfs object */
2021 struct kobject srv_kobj;
2022 struct completion srv_kobj_unregister;
2023 /**
2024 * partition data for ptlrpc service
2025 */
2026 struct ptlrpc_service_part *srv_parts[0];
2027};
2028
2029/**
2030 * Definition of PortalRPC service partition data.
 2031 * Although a service only has one instance of it right now, we
 2032 * will have multiple instances very soon (one instance per CPT).
2033 *
2034 * it has four locks:
2035 * \a scp_lock
2036 * serialize operations on rqbd and requests waiting for preprocess
2037 * \a scp_req_lock
 2038 * serialize operations on active requests sent to this portal
2039 * \a scp_at_lock
2040 * serialize adaptive timeout stuff
2041 * \a scp_rep_lock
2042 * serialize operations on RS list (reply states)
2043 *
2044 * We don't have any use-case to take two or more locks at the same time
2045 * for now, so there is no lock order issue.
2046 */
2047struct ptlrpc_service_part {
2048 /** back reference to owner */
2049 struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
2050 /* CPT id, reserved */
2051 int scp_cpt;
2052 /** always increasing number */
2053 int scp_thr_nextid;
2054 /** # of starting threads */
2055 int scp_nthrs_starting;
2056 /** # of stopping threads, reserved for shrinking threads */
2057 int scp_nthrs_stopping;
2058 /** # running threads */
2059 int scp_nthrs_running;
2060 /** service threads list */
2061 struct list_head scp_threads;
2062
2063 /**
2064 * serialize the following fields, used for protecting
2065 * rqbd list and incoming requests waiting for preprocess,
2066 * threads starting & stopping are also protected by this lock.
2067 */
2068 spinlock_t scp_lock __cfs_cacheline_aligned;
2069 /** total # req buffer descs allocated */
2070 int scp_nrqbds_total;
2071 /** # posted request buffers for receiving */
2072 int scp_nrqbds_posted;
2073 /** in progress of allocating rqbd */
2074 int scp_rqbd_allocating;
2075 /** # incoming reqs */
2076 int scp_nreqs_incoming;
2077 /** request buffers to be reposted */
2078 struct list_head scp_rqbd_idle;
2079 /** req buffers receiving */
2080 struct list_head scp_rqbd_posted;
2081 /** incoming reqs */
2082 struct list_head scp_req_incoming;
 2083 /** timeout before re-posting reqs, in ticks */
 2084 long scp_rqbd_timeout;
2085 /**
 2086 * all threads sleep on this. This wait-queue is signalled when a new
 2087 * incoming request arrives and when a difficult reply has to be handled.
2088 */
2089 wait_queue_head_t scp_waitq;
2090
2091 /** request history */
2092 struct list_head scp_hist_reqs;
2093 /** request buffer history */
2094 struct list_head scp_hist_rqbds;
2095 /** # request buffers in history */
2096 int scp_hist_nrqbds;
2097 /** sequence number for request */
2098 __u64 scp_hist_seq;
2099 /** highest seq culled from history */
2100 __u64 scp_hist_seq_culled;
2101
2102 /**
2103 * serialize the following fields, used for processing requests
2104 * sent to this portal
2105 */
2106 spinlock_t scp_req_lock __cfs_cacheline_aligned;
2107 /** # reqs in either of the NRS heads below */
2108 /** # reqs being served */
2109 int scp_nreqs_active;
2110 /** # HPreqs being served */
2111 int scp_nhreqs_active;
2112 /** # hp requests handled */
2113 int scp_hreq_count;
2114
2115 /** NRS head for regular requests */
2116 struct ptlrpc_nrs scp_nrs_reg;
2117 /** NRS head for HP requests; this is only valid for services that can
2118 * handle HP requests */
2119 struct ptlrpc_nrs *scp_nrs_hp;
2120
2121 /** AT stuff */
2122 /** @{ */
2123 /**
2124 * serialize the following fields, used for changes on
2125 * adaptive timeout
2126 */
2127 spinlock_t scp_at_lock __cfs_cacheline_aligned;
2128 /** estimated rpc service time */
2129 struct adaptive_timeout scp_at_estimate;
2130 /** reqs waiting for replies */
2131 struct ptlrpc_at_array scp_at_array;
2132 /** early reply timer */
 2133 struct timer_list scp_at_timer;
 2134 /** debug */
 2135 unsigned long scp_at_checktime;
2136 /** check early replies */
2137 unsigned scp_at_check;
2138 /** @} */
2139
2140 /**
2141 * serialize the following fields, used for processing
2142 * replies for this portal
2143 */
2144 spinlock_t scp_rep_lock __cfs_cacheline_aligned;
2145 /** all the active replies */
2146 struct list_head scp_rep_active;
2147 /** List of free reply_states */
2148 struct list_head scp_rep_idle;
2149 /** waitq to run, when adding stuff to srv_free_rs_list */
2150 wait_queue_head_t scp_rep_waitq;
2151 /** # 'difficult' replies */
2152 atomic_t scp_nreps_difficult;
2153};
2154
2155#define ptlrpc_service_for_each_part(part, i, svc) \
2156 for (i = 0; \
2157 i < (svc)->srv_ncpts && \
2158 (svc)->srv_parts != NULL && \
2159 ((part) = (svc)->srv_parts[i]) != NULL; i++)
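
/*
 * Example usage (editor's note): summing a per-partition counter across all
 * partitions of a service with the macro above:
 *
 *	struct ptlrpc_service_part *svcpt;
 *	int i, nposted = 0;
 *
 *	ptlrpc_service_for_each_part(svcpt, i, svc)
 *		nposted += svcpt->scp_nrqbds_posted;
 */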
2160
2161/**
2162 * Declaration of ptlrpcd control structure
2163 */
2164struct ptlrpcd_ctl {
2165 /**
2166 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
2167 */
2168 unsigned long pc_flags;
2169 /**
2170 * Thread lock protecting structure fields.
2171 */
2172 spinlock_t pc_lock;
2173 /**
2174 * Start completion.
2175 */
2176 struct completion pc_starting;
2177 /**
2178 * Stop completion.
2179 */
2180 struct completion pc_finishing;
2181 /**
2182 * Thread requests set.
2183 */
2184 struct ptlrpc_request_set *pc_set;
2185 /**
2186 * Thread name used in cfs_daemonize()
2187 */
2188 char pc_name[16];
2189 /**
2190 * Environment for request interpreters to run in.
2191 */
2192 struct lu_env pc_env;
2193 /**
2194 * Index of ptlrpcd thread in the array.
2195 */
2196 int pc_index;
2197 /**
2198 * Number of the ptlrpcd's partners.
2199 */
2200 int pc_npartners;
2201 /**
2202 * Pointer to the array of partners' ptlrpcd_ctl structure.
2203 */
2204 struct ptlrpcd_ctl **pc_partners;
2205 /**
2206 * Record the partner index to be processed next.
2207 */
2208 int pc_cursor;
2209};
2210
2211/* Bits for pc_flags */
2212enum ptlrpcd_ctl_flags {
2213 /**
2214 * Ptlrpc thread start flag.
2215 */
2216 LIOD_START = 1 << 0,
2217 /**
2218 * Ptlrpc thread stop flag.
2219 */
2220 LIOD_STOP = 1 << 1,
2221 /**
2222 * Ptlrpc thread force flag (only stop force so far).
2223 * This will cause aborting any inflight rpcs handled
2224 * by thread if LIOD_STOP is specified.
2225 */
2226 LIOD_FORCE = 1 << 2,
2227 /**
2228 * This is a recovery ptlrpc thread.
2229 */
2230 LIOD_RECOVERY = 1 << 3,
2231 /**
2232 * The ptlrpcd is bound to some CPU core.
2233 */
2234 LIOD_BIND = 1 << 4,
2235};
2236
2237/**
2238 * \addtogroup nrs
2239 * @{
2240 *
2241 * Service compatibility function; the policy is compatible with all services.
2242 *
2243 * \param[in] svc The service the policy is attempting to register with.
2244 * \param[in] desc The policy descriptor
2245 *
2246 * \retval true The policy is compatible with the service
2247 *
2248 * \see ptlrpc_nrs_pol_desc::pd_compat()
2249 */
2250static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
2251 const struct ptlrpc_nrs_pol_desc *desc)
2252{
2253 return true;
2254}
2255
2256/**
 2257 * Service compatibility function; the policy is compatible only with a
 2258 * specific service, identified by its human-readable name at
2259 * ptlrpc_service::srv_name.
2260 *
2261 * \param[in] svc The service the policy is attempting to register with.
2262 * \param[in] desc The policy descriptor
2263 *
2264 * \retval false The policy is not compatible with the service
2265 * \retval true The policy is compatible with the service
2266 *
2267 * \see ptlrpc_nrs_pol_desc::pd_compat()
2268 */
2269static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
2270 const struct ptlrpc_nrs_pol_desc *desc)
2271{
2272 LASSERT(desc->pd_compat_svc_name != NULL);
2273 return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
2274}
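
/*
 * Example (editor's sketch): wiring one of the compatibility callbacks
 * above into a policy configuration, in the style of the FIFO policy.
 * The policy name is illustrative and pc_ops is omitted for brevity;
 * a real policy must supply an ops table, and the pc_* field names are
 * assumptions taken from struct ptlrpc_nrs_pol_conf.
 */
static struct ptlrpc_nrs_pol_conf example_nrs_conf = {
	.pc_name	= "example",
	.pc_compat	= nrs_policy_compat_all,
	/* to restrict the policy to a single service instead:
	 *	.pc_compat		= nrs_policy_compat_one,
	 *	.pc_compat_svc_name	= "ldlm_canceld",
	 */
};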
2275
2276/** @} nrs */
2277
2278/* ptlrpc/events.c */
2279extern lnet_handle_eq_t ptlrpc_eq_h;
2280extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
2281 lnet_process_id_t *peer, lnet_nid_t *self);
2282/**
2283 * These callbacks are invoked by LNet when something happened to
2284 * underlying buffer
2285 * @{
2286 */
2287extern void request_out_callback(lnet_event_t *ev);
2288extern void reply_in_callback(lnet_event_t *ev);
2289extern void client_bulk_callback(lnet_event_t *ev);
2290extern void request_in_callback(lnet_event_t *ev);
2291extern void reply_out_callback(lnet_event_t *ev);
2292/** @} */
2293
2294/* ptlrpc/connection.c */
2295struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
2296 lnet_nid_t self,
2297 struct obd_uuid *uuid);
2298int ptlrpc_connection_put(struct ptlrpc_connection *c);
2299struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
2300int ptlrpc_connection_init(void);
2301void ptlrpc_connection_fini(void);
2302extern lnet_pid_t ptl_get_pid(void);
2303
2304/* ptlrpc/niobuf.c */
2305/**
2306 * Actual interfacing with LNet to put/get/register/unregister stuff
2307 * @{
2308 */
2309
2310int ptlrpc_register_bulk(struct ptlrpc_request *req);
2311int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
2312
2313static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
2314{
2315 struct ptlrpc_bulk_desc *desc;
2316 int rc;
2317
2318 LASSERT(req != NULL);
2319 desc = req->rq_bulk;
2320
2321 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
 2322 req->rq_bulk_deadline > get_seconds())
2323 return 1;
2324
2325 if (!desc)
2326 return 0;
2327
2328 spin_lock(&desc->bd_lock);
2329 rc = desc->bd_md_count;
2330 spin_unlock(&desc->bd_lock);
2331 return rc;
2332}
2333
2334#define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
2335#define PTLRPC_REPLY_EARLY 0x02
2336int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
2337int ptlrpc_reply(struct ptlrpc_request *req);
2338int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
2339int ptlrpc_error(struct ptlrpc_request *req);
2340void ptlrpc_resend_req(struct ptlrpc_request *request);
2341int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
2342int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
2343int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
2344/** @} */
2345
2346/* ptlrpc/client.c */
2347/**
2348 * Client-side portals API. Everything to send requests, receive replies,
2349 * request queues, request management, etc.
2350 * @{
2351 */
2352void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
2353
2354void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
2355 struct ptlrpc_client *);
2356void ptlrpc_cleanup_client(struct obd_import *imp);
2357struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);
2358
2359int ptlrpc_queue_wait(struct ptlrpc_request *req);
2360int ptlrpc_replay_req(struct ptlrpc_request *req);
2361int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
2362void ptlrpc_restart_req(struct ptlrpc_request *req);
2363void ptlrpc_abort_inflight(struct obd_import *imp);
2364void ptlrpc_cleanup_imp(struct obd_import *imp);
2365void ptlrpc_abort_set(struct ptlrpc_request_set *set);
2366
2367struct ptlrpc_request_set *ptlrpc_prep_set(void);
2368struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
2369 void *arg);
2370int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
2371 set_interpreter_func fn, void *data);
2372int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
2373int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
2374int ptlrpc_set_wait(struct ptlrpc_request_set *);
2375int ptlrpc_expired_set(void *data);
2376void ptlrpc_interrupted_set(void *data);
2377void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
2378void ptlrpc_set_destroy(struct ptlrpc_request_set *);
2379void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
2380void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
2381 struct ptlrpc_request *req);
2382
2383void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
2384void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
2385
2386struct ptlrpc_request_pool *
2387ptlrpc_init_rq_pool(int, int,
2388 void (*populate_pool)(struct ptlrpc_request_pool *, int));
2389
2390void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
2391struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
2392 const struct req_format *format);
2393struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
2394 struct ptlrpc_request_pool *,
2395 const struct req_format *format);
2396void ptlrpc_request_free(struct ptlrpc_request *request);
2397int ptlrpc_request_pack(struct ptlrpc_request *request,
2398 __u32 version, int opcode);
2399struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
2400 const struct req_format *format,
2401 __u32 version, int opcode);
2402int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
2403 __u32 version, int opcode, char **bufs,
2404 struct ptlrpc_cli_ctx *ctx);
2405struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
2406 int opcode, int count, __u32 *lengths,
2407 char **bufs);
2408struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp,
2409 __u32 version, int opcode,
2410 int count, __u32 *lengths, char **bufs,
2411 struct ptlrpc_request_pool *pool);
2412void ptlrpc_req_finished(struct ptlrpc_request *request);
2413void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
2414struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
2415struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
2416 unsigned npages, unsigned max_brw,
2417 unsigned type, unsigned portal);
2418void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
2419static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
2420{
2421 __ptlrpc_free_bulk(bulk, 1);
2422}
2423static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
2424{
2425 __ptlrpc_free_bulk(bulk, 0);
2426}
2427void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
2428 struct page *page, int pageoffset, int len, int);
2429static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
2430 struct page *page, int pageoffset,
2431 int len)
2432{
2433 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
2434}
2435
2436static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
2437 struct page *page, int pageoffset,
2438 int len)
2439{
2440 __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
2441}
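
/*
 * Example (editor's sketch): a client preparing a single-page read bulk
 * with the helpers above. BULK_PUT_SINK is what a reader uses (the server
 * PUTs data into the client's pages); OST_BULK_PORTAL and PAGE_CACHE_SIZE
 * are assumptions borrowed from the OSC read path.
 */
static inline struct ptlrpc_bulk_desc *
example_prep_read_bulk(struct ptlrpc_request *req, struct page *page)
{
	struct ptlrpc_bulk_desc *desc;

	desc = ptlrpc_prep_bulk_imp(req, 1, 1, BULK_PUT_SINK,
				    OST_BULK_PORTAL);
	if (desc != NULL)
		ptlrpc_prep_bulk_page_pin(desc, page, 0, PAGE_CACHE_SIZE);
	return desc;
}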
2442
2443void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
2444 struct obd_import *imp);
2445__u64 ptlrpc_next_xid(void);
2446__u64 ptlrpc_sample_next_xid(void);
2447__u64 ptlrpc_req_xid(struct ptlrpc_request *request);
2448
2449/* Set of routines to run a function in ptlrpcd context */
2450void *ptlrpcd_alloc_work(struct obd_import *imp,
2451 int (*cb)(const struct lu_env *, void *), void *data);
2452void ptlrpcd_destroy_work(void *handler);
2453int ptlrpcd_queue_work(void *handler);
2454
2455/** @} */
2456struct ptlrpc_service_buf_conf {
 2457 /* nbufs is the # of buffers to allocate when growing the pool */
2458 unsigned int bc_nbufs;
2459 /* buffer size to post */
2460 unsigned int bc_buf_size;
 2461 /* portal to listen for requests on */
2462 unsigned int bc_req_portal;
 2463 /* portal to send replies to */
2464 unsigned int bc_rep_portal;
2465 /* maximum request size to be accepted for this service */
2466 unsigned int bc_req_max_size;
2467 /* maximum reply size this service can ever send */
2468 unsigned int bc_rep_max_size;
2469};
2470
2471struct ptlrpc_service_thr_conf {
2472 /* threadname should be 8 characters or less - 6 will be added on */
2473 char *tc_thr_name;
2474 /* threads increasing factor for each CPU */
2475 unsigned int tc_thr_factor;
2476 /* service threads # to start on each partition while initializing */
2477 unsigned int tc_nthrs_init;
2478 /*
 2479 * low-water mark of the per-partition thread upper limit while running;
 2480 * service availability may be impacted if the thread count is lower
 2481 * than this value. It can be ZERO if the service doesn't require
2482 * CPU affinity or there is only one partition.
2483 */
2484 unsigned int tc_nthrs_base;
2485 /* "soft" limit for total threads number */
2486 unsigned int tc_nthrs_max;
 2487 /* user-specified thread count; it will be validated against the
 2488 * other members of this structure. */
2489 unsigned int tc_nthrs_user;
2490 /* set NUMA node affinity for service threads */
2491 unsigned int tc_cpu_affinity;
2492 /* Tags for lu_context associated with service thread */
2493 __u32 tc_ctx_tags;
2494};
2495
2496struct ptlrpc_service_cpt_conf {
2497 struct cfs_cpt_table *cc_cptable;
2498 /* string pattern to describe CPTs for a service */
2499 char *cc_pattern;
2500};
2501
2502struct ptlrpc_service_conf {
2503 /* service name */
2504 char *psc_name;
 2505 /* soft watchdog timeout multiplier to print stuck service traces */
2506 unsigned int psc_watchdog_factor;
2507 /* buffer information */
2508 struct ptlrpc_service_buf_conf psc_buf;
2509 /* thread information */
2510 struct ptlrpc_service_thr_conf psc_thr;
2511 /* CPU partition information */
2512 struct ptlrpc_service_cpt_conf psc_cpt;
2513 /* function table */
2514 struct ptlrpc_service_ops psc_ops;
2515};
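
/*
 * Example (editor's sketch): filling a ptlrpc_service_conf and handing it
 * to ptlrpc_register_service(), declared below. Every size, count and
 * portal here is a placeholder, and example_req_handler refers to the
 * hypothetical handler sketched earlier in this header.
 */
static inline struct ptlrpc_service *
example_start_service(struct kset *parent, struct dentry *debugfs_entry)
{
	struct ptlrpc_service_conf conf = {
		.psc_name		= "example",
		.psc_watchdog_factor	= 2,
		.psc_buf = {
			.bc_nbufs	 = 64,
			.bc_buf_size	 = 4096,
			.bc_req_max_size = 4096,
			.bc_rep_max_size = 4096,
			.bc_req_portal	 = 0,	/* placeholder portal */
			.bc_rep_portal	 = 0,	/* placeholder portal */
		},
		.psc_thr = {
			.tc_thr_name	= "ex",
			.tc_nthrs_init	= 2,
			.tc_nthrs_max	= 8,
		},
		.psc_ops = {
			.so_req_handler	= example_req_handler,
		},
	};

	return ptlrpc_register_service(&conf, parent, debugfs_entry);
}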
2516
2517/* ptlrpc/service.c */
2518/**
2519 * Server-side services API. Register/unregister service, request state
2520 * management, service thread management
2521 *
2522 * @{
2523 */
2524void ptlrpc_save_lock(struct ptlrpc_request *req,
2525 struct lustre_handle *lock, int mode, int no_ack);
2526void ptlrpc_commit_replies(struct obd_export *exp);
2527void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
2528void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
2529int ptlrpc_hpreq_handler(struct ptlrpc_request *req);
2530struct ptlrpc_service *ptlrpc_register_service(
2531 struct ptlrpc_service_conf *conf,
 2532 struct kset *parent,
 2533 struct dentry *debugfs_entry);
2534void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
2535
2536int ptlrpc_start_threads(struct ptlrpc_service *svc);
2537int ptlrpc_unregister_service(struct ptlrpc_service *service);
2538int liblustre_check_services(void *arg);
2539void ptlrpc_daemonize(char *name);
2540int ptlrpc_service_health_check(struct ptlrpc_service *);
2541void ptlrpc_server_drop_request(struct ptlrpc_request *req);
2542void ptlrpc_request_change_export(struct ptlrpc_request *req,
2543 struct obd_export *export);
2544
2545int ptlrpc_hr_init(void);
2546void ptlrpc_hr_fini(void);
2547
2548/** @} */
2549
2550/* ptlrpc/import.c */
2551/**
2552 * Import API
2553 * @{
2554 */
2555int ptlrpc_connect_import(struct obd_import *imp);
2556int ptlrpc_init_import(struct obd_import *imp);
2557int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
2558int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
2559void deuuidify(char *uuid, const char *prefix, char **uuid_start,
2560 int *uuid_len);
2561
2562/* ptlrpc/pack_generic.c */
2563int ptlrpc_reconnect_import(struct obd_import *imp);
2564/** @} */
2565
2566/**
2567 * ptlrpc msg buffer and swab interface
2568 *
2569 * @{
2570 */
2571int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
2572 int index);
2573void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
2574 int index);
2575int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
2576int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);
2577
2578int lustre_msg_check_version(struct lustre_msg *msg, __u32 version);
2579void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
2580 char **bufs);
2581int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
2582 __u32 *lens, char **bufs);
2583int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
2584 char **bufs);
2585int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
2586 __u32 *lens, char **bufs, int flags);
2587#define LPRFL_EARLY_REPLY 1
2588int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
2589 char **bufs, int flags);
2590int lustre_shrink_msg(struct lustre_msg *msg, int segment,
2591 unsigned int newlen, int move_data);
2592void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
2593int __lustre_unpack_msg(struct lustre_msg *m, int len);
2594int lustre_msg_hdr_size(__u32 magic, int count);
2595int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
2596int lustre_msg_size_v2(int count, __u32 *lengths);
2597int lustre_packed_msg_size(struct lustre_msg *msg);
2598int lustre_msg_early_size(void);
2599void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
2600void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
2601int lustre_msg_buflen(struct lustre_msg *m, int n);
2602void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
2603int lustre_msg_bufcount(struct lustre_msg *m);
2604char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
2605__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
2606void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
2607__u32 lustre_msg_get_flags(struct lustre_msg *msg);
2608void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
2609void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
2610void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
2611__u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
2612void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
2613void lustre_msg_set_op_flags(struct lustre_msg *msg, int flags);
2614struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
2615__u32 lustre_msg_get_type(struct lustre_msg *msg);
2616__u32 lustre_msg_get_version(struct lustre_msg *msg);
2617void lustre_msg_add_version(struct lustre_msg *msg, int version);
2618__u32 lustre_msg_get_opc(struct lustre_msg *msg);
2619__u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
2620__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
2621__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
2622__u64 lustre_msg_get_transno(struct lustre_msg *msg);
2623__u64 lustre_msg_get_slv(struct lustre_msg *msg);
2624__u32 lustre_msg_get_limit(struct lustre_msg *msg);
2625void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
2626void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
2627int lustre_msg_get_status(struct lustre_msg *msg);
2628__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
2629int lustre_msg_is_v1(struct lustre_msg *msg);
2630__u32 lustre_msg_get_magic(struct lustre_msg *msg);
2631__u32 lustre_msg_get_timeout(struct lustre_msg *msg);
2632__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
2633char *lustre_msg_get_jobid(struct lustre_msg *msg);
2634__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
 2635 __u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
 2636void lustre_msg_set_handle(struct lustre_msg *msg,
 2637 struct lustre_handle *handle);
 2638void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
 2639void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
 2640void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
 2641void lustre_msg_set_last_committed(struct lustre_msg *msg,
 2642 __u64 last_committed);
2643void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
2644void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
2645void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
2646void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
2647void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes);
2648void ptlrpc_request_set_replen(struct ptlrpc_request *req);
2649void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
2650void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
2651void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
2652void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
2653
2654static inline void
2655lustre_shrink_reply(struct ptlrpc_request *req, int segment,
2656 unsigned int newlen, int move_data)
2657{
2658 LASSERT(req->rq_reply_state);
2659 LASSERT(req->rq_repmsg);
2660 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
2661 newlen, move_data);
2662}
2663
2664#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
2665
2666static inline int ptlrpc_status_hton(int h)
2667{
2668 /*
2669 * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
2670 * ELDLM_LOCK_ABORTED, etc.
2671 */
2672 if (h < 0)
2673 return -lustre_errno_hton(-h);
2674 else
2675 return h;
2676}
2677
2678static inline int ptlrpc_status_ntoh(int n)
2679{
2680 /*
2681 * See the comment in ptlrpc_status_hton().
2682 */
2683 if (n < 0)
2684 return -lustre_errno_ntoh(-n);
2685 else
2686 return n;
2687}
2688
2689#else
2690
2691#define ptlrpc_status_hton(h) (h)
2692#define ptlrpc_status_ntoh(n) (n)
2693
2694#endif
2695/** @} */
2696
2697/** Change request phase of \a req to \a new_phase */
2698static inline void
2699ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
2700{
2701 if (req->rq_phase == new_phase)
2702 return;
2703
2704 if (new_phase == RQ_PHASE_UNREGISTERING) {
2705 req->rq_next_phase = req->rq_phase;
2706 if (req->rq_import)
2707 atomic_inc(&req->rq_import->imp_unregistering);
2708 }
2709
2710 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
2711 if (req->rq_import)
2712 atomic_dec(&req->rq_import->imp_unregistering);
2713 }
2714
2715 DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
2716 ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
2717
2718 req->rq_phase = new_phase;
2719}
2720
2721/**
2722 * Returns true if request \a req got early reply and hard deadline is not met
2723 */
2724static inline int
2725ptlrpc_client_early(struct ptlrpc_request *req)
2726{
2727 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
 2728 req->rq_reply_deadline > get_seconds())
2729 return 0;
2730 return req->rq_early;
2731}
2732
2733/**
2734 * Returns true if we got real reply from server for this request
2735 */
2736static inline int
2737ptlrpc_client_replied(struct ptlrpc_request *req)
2738{
2739 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
 2740 req->rq_reply_deadline > get_seconds())
2741 return 0;
2742 return req->rq_replied;
2743}
2744
2745/** Returns true if request \a req is in process of receiving server reply */
2746static inline int
2747ptlrpc_client_recv(struct ptlrpc_request *req)
2748{
2749 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
 2750 req->rq_reply_deadline > get_seconds())
2751 return 1;
2752 return req->rq_receiving_reply;
2753}
2754
2755static inline int
2756ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
2757{
2758 int rc;
2759
2760 spin_lock(&req->rq_lock);
2761 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
 2762 req->rq_reply_deadline > get_seconds()) {
2763 spin_unlock(&req->rq_lock);
2764 return 1;
2765 }
2766 rc = req->rq_receiving_reply;
2767 rc = rc || req->rq_req_unlink || req->rq_reply_unlink;
2768 spin_unlock(&req->rq_lock);
2769 return rc;
2770}
2771
2772static inline void
2773ptlrpc_client_wake_req(struct ptlrpc_request *req)
2774{
2775 if (req->rq_set == NULL)
2776 wake_up(&req->rq_reply_waitq);
2777 else
2778 wake_up(&req->rq_set->set_waitq);
2779}
2780
2781static inline void
2782ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
2783{
2784 LASSERT(atomic_read(&rs->rs_refcount) > 0);
2785 atomic_inc(&rs->rs_refcount);
2786}
2787
2788static inline void
2789ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
2790{
2791 LASSERT(atomic_read(&rs->rs_refcount) > 0);
2792 if (atomic_dec_and_test(&rs->rs_refcount))
2793 lustre_free_reply_state(rs);
2794}
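
/*
 * Example usage (editor's note): a difficult reply typically takes an
 * extra reference while it is queued and drops it when the ack arrives;
 * the final ptlrpc_rs_decref() may call lustre_free_reply_state():
 *
 *	ptlrpc_rs_addref(rs);
 *	...
 *	ptlrpc_rs_decref(rs);
 */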
2795
2796/* Should only be called once per req */
2797static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
2798{
2799 if (req->rq_reply_state == NULL)
2800 return; /* shouldn't occur */
2801 ptlrpc_rs_decref(req->rq_reply_state);
2802 req->rq_reply_state = NULL;
2803 req->rq_repmsg = NULL;
2804}
2805
2806static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
2807{
2808 return lustre_msg_get_magic(req->rq_reqmsg);
2809}
2810
2811static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
2812{
2813 switch (req->rq_reqmsg->lm_magic) {
2814 case LUSTRE_MSG_MAGIC_V2:
2815 return req->rq_reqmsg->lm_repsize;
2816 default:
2817 LASSERTF(0, "incorrect message magic: %08x\n",
2818 req->rq_reqmsg->lm_magic);
2819 return -EFAULT;
2820 }
2821}
2822
2823static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
2824{
2825 if (req->rq_delay_limit != 0 &&
2826 time_before(cfs_time_add(req->rq_queued_time,
2827 cfs_time_seconds(req->rq_delay_limit)),
2828 cfs_time_current())) {
2829 return 1;
2830 }
2831 return 0;
2832}
2833
2834static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
2835{
2836 if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
2837 spin_lock(&req->rq_lock);
2838 req->rq_no_resend = 1;
2839 spin_unlock(&req->rq_lock);
2840 }
2841 return req->rq_no_resend;
2842}
2843
2844static inline int
2845ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
2846{
2847 int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);
2848
2849 return svcpt->scp_service->srv_watchdog_factor *
2850 max_t(int, at, obd_timeout);
2851}
2852
2853static inline struct ptlrpc_service *
2854ptlrpc_req2svc(struct ptlrpc_request *req)
2855{
2856 LASSERT(req->rq_rqbd != NULL);
2857 return req->rq_rqbd->rqbd_svcpt->scp_service;
2858}
2859
2860/* ldlm/ldlm_lib.c */
2861/**
2862 * Target client logic
2863 * @{
2864 */
2865int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
2866int client_obd_cleanup(struct obd_device *obddev);
2867int client_connect_import(const struct lu_env *env,
2868 struct obd_export **exp, struct obd_device *obd,
2869 struct obd_uuid *cluuid, struct obd_connect_data *,
2870 void *localdata);
2871int client_disconnect_export(struct obd_export *exp);
2872int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
2873 int priority);
2874int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
2875int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
2876 struct obd_uuid *uuid);
2877int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
2878void client_destroy_import(struct obd_import *imp);
2879/** @} */
2880
2881
2882/* ptlrpc/pinger.c */
2883/**
2884 * Pinger API (client side only)
2885 * @{
2886 */
2887enum timeout_event {
2888 TIMEOUT_GRANT = 1
2889};
2890struct timeout_item;
2891typedef int (*timeout_cb_t)(struct timeout_item *, void *);
2892int ptlrpc_pinger_add_import(struct obd_import *imp);
2893int ptlrpc_pinger_del_import(struct obd_import *imp);
2894int ptlrpc_add_timeout_client(int time, enum timeout_event event,
2895 timeout_cb_t cb, void *data,
2896 struct list_head *obd_list);
2897int ptlrpc_del_timeout_client(struct list_head *obd_list,
2898 enum timeout_event event);
 2899struct ptlrpc_request *ptlrpc_prep_ping(struct obd_import *imp);
 2900int ptlrpc_obd_ping(struct obd_device *obd);
 2901void ping_evictor_start(void);
 2902void ping_evictor_stop(void);
2903void ptlrpc_pinger_ir_up(void);
2904void ptlrpc_pinger_ir_down(void);
2905/** @} */
2906int ptlrpc_pinger_suppress_pings(void);
2907
2908/* ptlrpc daemon bind policy */
2909typedef enum {
2910 /* all ptlrpcd threads are free mode */
2911 PDB_POLICY_NONE = 1,
2912 /* all ptlrpcd threads are bound mode */
2913 PDB_POLICY_FULL = 2,
2914 /* <free1 bound1> <free2 bound2> ... <freeN boundN> */
2915 PDB_POLICY_PAIR = 3,
2916 /* <free1 bound1> <bound1 free2> ... <freeN boundN> <boundN free1>,
2917 * means each ptlrpcd[X] has two partners: thread[X-1] and thread[X+1].
 2918 * If the kernel supports NUMA, ptlrpcd threads are bound and
2919 * grouped by NUMA node */
2920 PDB_POLICY_NEIGHBOR = 4,
2921} pdb_policy_t;
2922
2923/* ptlrpc daemon load policy
 2924 * It is the caller's duty to specify how to push an async RPC into some
 2925 * ptlrpcd queue, but this is not enforced and is affected by
 2926 * "ptlrpcd_bind_policy". If it is "PDB_POLICY_FULL", the RPC will be
 2927 * processed by the selected ptlrpcd; otherwise it may be processed by the
 2928 * selected ptlrpcd or its partner, whichever is scheduled first, to
 * accelerate RPC processing. */
2929typedef enum {
2930 /* on the same CPU core as the caller */
2931 PDL_POLICY_SAME = 1,
2932 /* within the same CPU partition, but not the same core as the caller */
2933 PDL_POLICY_LOCAL = 2,
2934 /* round-robin on all CPU cores, but not the same core as the caller */
2935 PDL_POLICY_ROUND = 3,
2936 /* the specified CPU core is preferred, but not enforced */
2937 PDL_POLICY_PREFERRED = 4,
2938} pdl_policy_t;
2939
2940/* ptlrpc/ptlrpcd.c */
2941void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
2942void ptlrpcd_free(struct ptlrpcd_ctl *pc);
2943void ptlrpcd_wake(struct ptlrpc_request *req);
2944void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx);
2945void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
2946int ptlrpcd_addref(void);
2947void ptlrpcd_decref(void);
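
/*
 * Example usage (editor's note): queueing an async request onto a ptlrpcd
 * thread with one of the load policies above; an idx of -1 lets the
 * daemon pick a thread:
 *
 *	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
 */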
2948
2949/* ptlrpc/lproc_ptlrpc.c */
2950/**
2951 * procfs output related functions
2952 * @{
2953 */
 2954const char *ll_opcode2str(__u32 opcode);
2955void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
2956void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
2957void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
2958/** @} */
2959
2960/* ptlrpc/llog_client.c */
2961extern struct llog_operations llog_client_ops;
2962
2963/** @} net */
2964
2965#endif
2966/** @} PtlRPC */