/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

/** Implementation of client-side PortalRPC interfaces */

#define DEBUG_SUBSYSTEM S_RPC

#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <lustre_ha.h>
#include <lustre_import.h>
#include <lustre_req_layout.h>

#include "ptlrpc_internal.h"

static int ptlrpc_send_new_req(struct ptlrpc_request *req);

/**
 * Initialize passed in client structure \a cl.
 */
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal   = rep_portal;
        cl->cli_name           = name;
}
EXPORT_SYMBOL(ptlrpc_init_client);
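
/*
 * Usage sketch (illustrative, not part of the original file): an OBD client
 * typically initializes its ptlrpc_client once at setup time, e.g.
 *
 *      static struct ptlrpc_client my_client;
 *
 *      ptlrpc_init_client(OST_REQUEST_PORTAL, OSC_REPLY_PORTAL,
 *                         "my-osc", &my_client);
 *
 * The portal constants are illustrative; real callers use the request/reply
 * portals defined for their service.
 */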

/**
 * Return PortalRPC connection for remote uuid \a uuid
 */
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
        struct ptlrpc_connection *c;
        lnet_nid_t                self;
        lnet_process_id_t         peer;
        int                       err;

        /* ptlrpc_uuid_to_peer() initializes its 2nd parameter
         * before accessing its values. */
        /* coverity[uninit_use_in_call] */
        err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
        if (err != 0) {
                CNETERR("cannot find peer %s!\n", uuid->uuid);
                return NULL;
        }

        c = ptlrpc_connection_get(peer, self, uuid);
        if (c) {
                memcpy(c->c_remote_uuid.uuid,
                       uuid->uuid, sizeof(c->c_remote_uuid.uuid));
        }

        CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);

        return c;
}
EXPORT_SYMBOL(ptlrpc_uuid_to_connection);

/**
 * Allocate and initialize new bulk descriptor on the sender.
 * Returns pointer to the descriptor or NULL on error.
 */
struct ptlrpc_bulk_desc *ptlrpc_new_bulk(unsigned npages, unsigned max_brw,
                                         unsigned type, unsigned portal)
{
        struct ptlrpc_bulk_desc *desc;
        int i;

        OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]));
        if (!desc)
                return NULL;

        spin_lock_init(&desc->bd_lock);
        init_waitqueue_head(&desc->bd_waitq);
        desc->bd_max_iov = npages;
        desc->bd_iov_count = 0;
        desc->bd_portal = portal;
        desc->bd_type = type;
        desc->bd_md_count = 0;
        LASSERT(max_brw > 0);
        desc->bd_md_max_brw = min(max_brw, PTLRPC_BULK_OPS_COUNT);
        /* PTLRPC_BULK_OPS_COUNT is the compile-time transfer limit for this
         * node. Negotiated ocd_brw_size will always be <= this number. */
        for (i = 0; i < PTLRPC_BULK_OPS_COUNT; i++)
                LNetInvalidateHandle(&desc->bd_mds[i]);

        return desc;
}
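
/*
 * Note on the allocation above (explanatory, not in the original): the
 * descriptor ends in a flexible array of bd_iov entries, so a single
 * OBD_ALLOC of offsetof(struct ptlrpc_bulk_desc, bd_iov[npages]) sizes the
 * structure and its page vector together; __ptlrpc_free_bulk() below frees
 * it with the matching offsetof(..., bd_iov[desc->bd_max_iov]) size.
 */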

/**
 * Prepare bulk descriptor for specified outgoing request \a req that
 * can fit \a npages * pages. \a type is bulk type. \a portal is where
 * the bulk is to be sent. Used on client-side.
 * Returns pointer to newly allocated initialized bulk descriptor or NULL on
 * error.
 */
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
                                              unsigned npages, unsigned max_brw,
                                              unsigned type, unsigned portal)
{
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_bulk_desc *desc;

        ENTRY;
        LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
        desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_import_generation = req->rq_import_generation;
        desc->bd_import = class_import_get(imp);
        desc->bd_req = req;

        desc->bd_cbid.cbid_fn  = client_bulk_callback;
        desc->bd_cbid.cbid_arg = desc;

        /* This makes req own desc, and free it when she frees herself */
        req->rq_bulk = desc;

        return desc;
}
EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);

/**
 * Add a page \a page to the bulk descriptor \a desc.
 * Data to transfer in the page starts at offset \a pageoffset and
 * amount of data to transfer from the page is \a len
 */
void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
                             struct page *page, int pageoffset, int len,
                             int pin)
{
        LASSERT(desc->bd_iov_count < desc->bd_max_iov);
        LASSERT(page != NULL);
        LASSERT(pageoffset >= 0);
        LASSERT(len > 0);
        LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);

        desc->bd_nob += len;

        if (pin)
                page_cache_get(page);

        ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
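
/*
 * Usage sketch (illustrative): callers queue each page of an I/O before
 * sending the request, e.g.
 *
 *      for (i = 0; i < page_count; i++)
 *              ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
 *
 * ptlrpc_prep_bulk_page_pin() is assumed here to be the pinning wrapper
 * around __ptlrpc_prep_bulk_page(); the exact wrapper names are an
 * assumption of this sketch.
 */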

/**
 * Uninitialize and free bulk descriptor \a desc.
 * Works on bulk descriptors both from server and client side.
 */
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
{
        int i;
        ENTRY;

        LASSERT(desc != NULL);
        LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
        LASSERT(desc->bd_md_count == 0);          /* network hands off */
        LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));

        sptlrpc_enc_pool_put_pages(desc);

        if (desc->bd_export)
                class_export_put(desc->bd_export);
        else
                class_import_put(desc->bd_import);

        if (unpin) {
                for (i = 0; i < desc->bd_iov_count; i++)
                        page_cache_release(desc->bd_iov[i].kiov_page);
        }

        OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
                                bd_iov[desc->bd_max_iov]));
        EXIT;
}
EXPORT_SYMBOL(__ptlrpc_free_bulk);

/**
 * Set server timelimit for this req, i.e. how long are we willing to wait
 * for reply before timing out this request.
 */
void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
{
        __u32 serv_est;
        int idx;
        struct imp_at *at;

        LASSERT(req->rq_import);

        if (AT_OFF) {
                /* non-AT settings */
                /*
                 * \a imp_server_timeout means this is reverse import and
                 * we send (currently only) ASTs to the client and cannot afford
                 * to wait too long for the reply, otherwise the other client
                 * (because of which we are sending this request) would
                 * timeout waiting for us
                 */
                req->rq_timeout = req->rq_import->imp_server_timeout ?
                                  obd_timeout / 2 : obd_timeout;
        } else {
                at = &req->rq_import->imp_at;
                idx = import_at_get_index(req->rq_import,
                                          req->rq_request_portal);
                serv_est = at_get(&at->iat_service_estimate[idx]);
                req->rq_timeout = at_est2timeout(serv_est);
        }
        /* We could get even fancier here, using history to predict increased
           loading... */

        /* Let the server know what this RPC timeout is by putting it in the
           reqmsg */
        lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
}
EXPORT_SYMBOL(ptlrpc_at_set_req_timeout);
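
/*
 * Worked example (illustrative): with adaptive timeouts enabled and a
 * service estimate of 40s for the target portal, at_est2timeout() yields
 * roughly 40 + 40/4 + 5 = 55s (125% of the estimate plus 5s, per the
 * resend comment in after_reply() below), and that value is both stored
 * in rq_timeout and advertised to the server in the request message.
 */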

/* Adjust max service estimate based on server value */
static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
                                  unsigned int serv_est)
{
        int idx;
        unsigned int oldse;
        struct imp_at *at;

        LASSERT(req->rq_import);
        at = &req->rq_import->imp_at;

        idx = import_at_get_index(req->rq_import, req->rq_request_portal);
        /* max service estimates are tracked on the server side,
           so just keep minimal history here */
        oldse = at_measured(&at->iat_service_estimate[idx], serv_est);
        if (oldse != 0)
                CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d "
                       "has changed from %d to %d\n",
                       req->rq_import->imp_obd->obd_name,
                       req->rq_request_portal,
                       oldse, at_get(&at->iat_service_estimate[idx]));
}

/* Expected network latency per remote node (secs) */
int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
{
        return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
}

/* Adjust expected network latency */
static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
                                      unsigned int service_time)
{
        unsigned int nl, oldnl;
        struct imp_at *at;
        time_t now = cfs_time_current_sec();

        LASSERT(req->rq_import);
        at = &req->rq_import->imp_at;

        /* Network latency is total time less server processing time */
        nl = max_t(int, now - req->rq_sent - service_time, 0) + 1/*st rounding*/;
        if (service_time > now - req->rq_sent + 3 /* bz16408 */)
                CWARN("Reported service time %u > total measured time "
                      CFS_DURATION_T"\n", service_time,
                      cfs_time_sub(now, req->rq_sent));

        oldnl = at_measured(&at->iat_net_latency, nl);
        if (oldnl != 0)
                CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) "
                       "has changed from %d to %d\n",
                       req->rq_import->imp_obd->obd_name,
                       obd_uuid2str(
                               &req->rq_import->imp_connection->c_remote_uuid),
                       oldnl, at_get(&at->iat_net_latency));
}

static int unpack_reply(struct ptlrpc_request *req)
{
        int rc;

        if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) {
                rc = ptlrpc_unpack_rep_msg(req, req->rq_replen);
                if (rc) {
                        DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
                        return -EPROTO;
                }
        }

        rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
                return -EPROTO;
        }
        return 0;
}

/**
 * Handle an early reply message, called with the rq_lock held.
 * If anything goes wrong just ignore it - same as if it never happened
 */
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_request *early_req;
        time_t                 olddl;
        int                    rc;
        ENTRY;

        req->rq_early = 0;
        spin_unlock(&req->rq_lock);

        rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
        if (rc) {
                spin_lock(&req->rq_lock);
                RETURN(rc);
        }

        rc = unpack_reply(early_req);
        if (rc == 0) {
                /* Expecting to increase the service time estimate here */
                ptlrpc_at_adj_service(req,
                        lustre_msg_get_timeout(early_req->rq_repmsg));
                ptlrpc_at_adj_net_latency(req,
                        lustre_msg_get_service_time(early_req->rq_repmsg));
        }

        sptlrpc_cli_finish_early_reply(early_req);

        if (rc != 0) {
                spin_lock(&req->rq_lock);
                RETURN(rc);
        }

        /* Adjust the local timeout for this req */
        ptlrpc_at_set_req_timeout(req);

        spin_lock(&req->rq_lock);
        olddl = req->rq_deadline;
        /* server assumes it now has rq_timeout from when it sent the
         * early reply, so client should give it at least that long. */
        req->rq_deadline = cfs_time_current_sec() + req->rq_timeout +
                           ptlrpc_at_get_net_latency(req);

        DEBUG_REQ(D_ADAPTTO, req,
                  "Early reply #%d, new deadline in "CFS_DURATION_T"s "
                  "("CFS_DURATION_T"s)", req->rq_early_count,
                  cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
                  cfs_time_sub(req->rq_deadline, olddl));

        RETURN(rc);
}

/**
 * Wind down request pool \a pool.
 * Frees all requests from the pool too
 */
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
        struct list_head *l, *tmp;
        struct ptlrpc_request *req;

        LASSERT(pool != NULL);

        spin_lock(&pool->prp_lock);
        list_for_each_safe(l, tmp, &pool->prp_req_list) {
                req = list_entry(l, struct ptlrpc_request, rq_list);
                list_del(&req->rq_list);
                LASSERT(req->rq_reqbuf);
                LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
                OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
                OBD_FREE(req, sizeof(*req));
        }
        spin_unlock(&pool->prp_lock);
        OBD_FREE(pool, sizeof(*pool));
}
EXPORT_SYMBOL(ptlrpc_free_rq_pool);

/**
 * Allocates, initializes and adds \a num_rq requests to the pool \a pool
 */
void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
{
        int i;
        int size = 1;

        while (size < pool->prp_rq_size)
                size <<= 1;

        LASSERTF(list_empty(&pool->prp_req_list) ||
                 size == pool->prp_rq_size,
                 "Trying to change pool size with nonempty pool "
                 "from %d to %d bytes\n", pool->prp_rq_size, size);

        spin_lock(&pool->prp_lock);
        pool->prp_rq_size = size;
        for (i = 0; i < num_rq; i++) {
                struct ptlrpc_request *req;
                struct lustre_msg *msg;

                spin_unlock(&pool->prp_lock);
                OBD_ALLOC(req, sizeof(struct ptlrpc_request));
                if (!req)
                        return;
                OBD_ALLOC_LARGE(msg, size);
                if (!msg) {
                        OBD_FREE(req, sizeof(struct ptlrpc_request));
                        return;
                }
                req->rq_reqbuf = msg;
                req->rq_reqbuf_len = size;
                req->rq_pool = pool;
                spin_lock(&pool->prp_lock);
                list_add_tail(&req->rq_list, &pool->prp_req_list);
        }
        spin_unlock(&pool->prp_lock);
}
EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);

/**
 * Create and initialize new request pool with given attributes:
 * \a num_rq - initial number of requests to create for the pool
 * \a msgsize - maximum message size possible for requests in this pool
 * \a populate_pool - function to be called when more requests need to be added
 *                    to the pool
 * Returns pointer to newly created pool or NULL on error.
 */
struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int num_rq, int msgsize,
                    void (*populate_pool)(struct ptlrpc_request_pool *, int))
{
        struct ptlrpc_request_pool *pool;

        OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
        if (!pool)
                return NULL;

        /* Request next power of two for the allocation, because internally
           kernel would do exactly this */

        spin_lock_init(&pool->prp_lock);
        INIT_LIST_HEAD(&pool->prp_req_list);
        pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
        pool->prp_populate = populate_pool;

        populate_pool(pool, num_rq);

        if (list_empty(&pool->prp_req_list)) {
                /* have not allocated a single request for the pool */
                OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
                pool = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(ptlrpc_init_rq_pool);
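
/*
 * Usage sketch (illustrative): a writeout path that must make progress
 * under memory pressure can pre-allocate a pool and fall back to it when
 * regular allocation fails, e.g.
 *
 *      struct ptlrpc_request_pool *pool;
 *
 *      pool = ptlrpc_init_rq_pool(4, OST_MAXREQSIZE,
 *                                 ptlrpc_add_rqs_to_pool);
 *      ...
 *      req = ptlrpc_request_alloc_pool(imp, pool, &RQF_OST_BRW_WRITE);
 *
 * The size and format shown are placeholders; real callers (e.g. osc) pick
 * values appropriate to their own RPCs.
 */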

/**
 * Fetches one request from pool \a pool
 */
static struct ptlrpc_request *
ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request;
        struct lustre_msg *reqbuf;

        if (!pool)
                return NULL;

        spin_lock(&pool->prp_lock);

        /* See if we have anything in a pool, and bail out if nothing,
         * in writeout path, where this matters, this is safe to do, because
         * nothing is lost in this case, and when some in-flight requests
         * complete, this code will be called again. */
        if (unlikely(list_empty(&pool->prp_req_list))) {
                spin_unlock(&pool->prp_lock);
                return NULL;
        }

        request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
                             rq_list);
        list_del_init(&request->rq_list);
        spin_unlock(&pool->prp_lock);

        LASSERT(request->rq_reqbuf);
        LASSERT(request->rq_pool);

        reqbuf = request->rq_reqbuf;
        memset(request, 0, sizeof(*request));
        request->rq_reqbuf = reqbuf;
        request->rq_reqbuf_len = pool->prp_rq_size;
        request->rq_pool = pool;

        return request;
}

/**
 * Returns freed \a request to pool.
 */
static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
        struct ptlrpc_request_pool *pool = request->rq_pool;

        spin_lock(&pool->prp_lock);
        LASSERT(list_empty(&request->rq_list));
        LASSERT(!request->rq_receiving_reply);
        list_add_tail(&request->rq_list, &pool->prp_req_list);
        spin_unlock(&pool->prp_lock);
}

static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
                                      __u32 version, int opcode,
                                      int count, __u32 *lengths, char **bufs,
                                      struct ptlrpc_cli_ctx *ctx)
{
        struct obd_import *imp = request->rq_import;
        int rc;
        ENTRY;

        if (unlikely(ctx)) {
                request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
        } else {
                rc = sptlrpc_req_get_ctx(request);
                if (rc)
                        GOTO(out_free, rc);
        }

        sptlrpc_req_set_flavor(request, opcode);

        rc = lustre_pack_request(request, imp->imp_msg_magic, count,
                                 lengths, bufs);
        if (rc) {
                LASSERT(!request->rq_pool);
                GOTO(out_ctx, rc);
        }

        lustre_msg_add_version(request->rq_reqmsg, version);
        request->rq_send_state = LUSTRE_IMP_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_export = NULL;

        request->rq_req_cbid.cbid_fn  = request_out_callback;
        request->rq_req_cbid.cbid_arg = request;

        request->rq_reply_cbid.cbid_fn  = reply_in_callback;
        request->rq_reply_cbid.cbid_arg = request;

        request->rq_reply_deadline = 0;
        request->rq_phase = RQ_PHASE_NEW;
        request->rq_next_phase = RQ_PHASE_UNDEFINED;

        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        ptlrpc_at_set_req_timeout(request);

        spin_lock_init(&request->rq_lock);
        INIT_LIST_HEAD(&request->rq_list);
        INIT_LIST_HEAD(&request->rq_timed_list);
        INIT_LIST_HEAD(&request->rq_replay_list);
        INIT_LIST_HEAD(&request->rq_ctx_chain);
        INIT_LIST_HEAD(&request->rq_set_chain);
        INIT_LIST_HEAD(&request->rq_history_list);
        INIT_LIST_HEAD(&request->rq_exp_list);
        init_waitqueue_head(&request->rq_reply_waitq);
        init_waitqueue_head(&request->rq_set_waitq);
        request->rq_xid = ptlrpc_next_xid();
        atomic_set(&request->rq_refcount, 1);

        lustre_msg_set_opc(request->rq_reqmsg, opcode);

        RETURN(0);
out_ctx:
        sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
        class_import_put(imp);
        return rc;
}

int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
                             __u32 version, int opcode, char **bufs,
                             struct ptlrpc_cli_ctx *ctx)
{
        int count;

        count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
        return __ptlrpc_request_bufs_pack(request, version, opcode, count,
                                          request->rq_pill.rc_area[RCL_CLIENT],
                                          bufs, ctx);
}
EXPORT_SYMBOL(ptlrpc_request_bufs_pack);

/**
 * Pack request buffers for network transfer, performing encryption steps
 * if necessary.
 */
int ptlrpc_request_pack(struct ptlrpc_request *request,
                        __u32 version, int opcode)
{
        int rc;

        rc = ptlrpc_request_bufs_pack(request, version, opcode, NULL, NULL);
        if (rc)
                return rc;

        /* For some old 1.8 clients (< 1.8.7), they will LASSERT the size of
         * ptlrpc_body sent from server equal to local ptlrpc_body size, so we
         * have to send old ptlrpc_body to keep interoperability with these
         * clients.
         *
         * Only three kinds of server->client RPCs so far:
         *  - LDLM_BL_CALLBACK
         *  - LDLM_CP_CALLBACK
         *  - LDLM_GL_CALLBACK
         *
         * XXX This should be removed whenever we drop the interoperability
         * with these old clients.
         */
        if (opcode == LDLM_BL_CALLBACK || opcode == LDLM_CP_CALLBACK ||
            opcode == LDLM_GL_CALLBACK)
                req_capsule_shrink(&request->rq_pill, &RMF_PTLRPC_BODY,
                                   sizeof(struct ptlrpc_body_v2), RCL_CLIENT);

        return rc;
}
EXPORT_SYMBOL(ptlrpc_request_pack);

/**
 * Helper function to allocate new request on import \a imp
 * and possibly using existing request from pool \a pool if provided.
 * Returns allocated request structure with import field filled or
 * NULL on error.
 */
static struct ptlrpc_request *
__ptlrpc_request_alloc(struct obd_import *imp,
                       struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request = NULL;

        if (pool)
                request = ptlrpc_prep_req_from_pool(pool);

        if (!request)
                OBD_ALLOC_PTR(request);

        if (request) {
                LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
                LASSERT(imp != LP_POISON);
                LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p",
                         imp->imp_client);
                LASSERT(imp->imp_client != LP_POISON);

                request->rq_import = class_import_get(imp);
        } else {
                CERROR("request allocation out of memory\n");
        }

        return request;
}

/**
 * Helper function for creating a request.
 * Calls __ptlrpc_request_alloc to allocate new request structure and inits
 * buffer structures according to capsule template \a format.
 * Returns allocated request structure pointer or NULL on error.
 */
static struct ptlrpc_request *
ptlrpc_request_alloc_internal(struct obd_import *imp,
                              struct ptlrpc_request_pool *pool,
                              const struct req_format *format)
{
        struct ptlrpc_request *request;

        request = __ptlrpc_request_alloc(imp, pool);
        if (request == NULL)
                return NULL;

        req_capsule_init(&request->rq_pill, request, RCL_CLIENT);
        req_capsule_set(&request->rq_pill, format);
        return request;
}

/**
 * Allocate new request structure for import \a imp and initialize its
 * buffer structure according to capsule template \a format.
 */
struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
                                            const struct req_format *format)
{
        return ptlrpc_request_alloc_internal(imp, NULL, format);
}
EXPORT_SYMBOL(ptlrpc_request_alloc);

/**
 * Allocate new request structure for import \a imp from pool \a pool and
 * initialize its buffer structure according to capsule template \a format.
 */
struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
                                                 struct ptlrpc_request_pool *pool,
                                                 const struct req_format *format)
{
        return ptlrpc_request_alloc_internal(imp, pool, format);
}
EXPORT_SYMBOL(ptlrpc_request_alloc_pool);

/**
 * For requests not from pool, free memory of the request structure.
 * For requests obtained from a pool earlier, return request back to pool.
 */
void ptlrpc_request_free(struct ptlrpc_request *request)
{
        if (request->rq_pool)
                __ptlrpc_free_req_to_pool(request);
        else
                OBD_FREE_PTR(request);
}
EXPORT_SYMBOL(ptlrpc_request_free);

/**
 * Allocate new request for operation \a opcode and immediately pack it for
 * network transfer.
 * Only used for simple requests like OBD_PING where the only important
 * part of the request is operation itself.
 * Returns allocated request or NULL on error.
 */
struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp,
                                                 const struct req_format *format,
                                                 __u32 version, int opcode)
{
        struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format);
        int rc;

        if (req) {
                rc = ptlrpc_request_pack(req, version, opcode);
                if (rc) {
                        ptlrpc_request_free(req);
                        req = NULL;
                }
        }
        return req;
}
EXPORT_SYMBOL(ptlrpc_request_alloc_pack);
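
/*
 * Usage sketch (illustrative): the pinger builds its OBD_PING request with
 * this helper, roughly
 *
 *      req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *                                      LUSTRE_OBD_VERSION, OBD_PING);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      ptlrpc_request_set_replen(req);
 *
 * after which the request is queued on a set or handed to ptlrpcd.
 */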

/**
 * Prepare request (fetched from pool \a pool if not NULL) on import \a imp
 * for operation \a opcode. Request would contain \a count buffers.
 * Sizes of buffers are described in array \a lengths and buffers themselves
 * are provided by a pointer \a bufs.
 * Returns prepared request structure pointer or NULL on error.
 */
struct ptlrpc_request *
ptlrpc_prep_req_pool(struct obd_import *imp,
                     __u32 version, int opcode,
                     int count, __u32 *lengths, char **bufs,
                     struct ptlrpc_request_pool *pool)
{
        struct ptlrpc_request *request;
        int rc;

        request = __ptlrpc_request_alloc(imp, pool);
        if (!request)
                return NULL;

        rc = __ptlrpc_request_bufs_pack(request, version, opcode, count,
                                        lengths, bufs, NULL);
        if (rc) {
                ptlrpc_request_free(request);
                request = NULL;
        }
        return request;
}
EXPORT_SYMBOL(ptlrpc_prep_req_pool);

/**
 * Same as ptlrpc_prep_req_pool, but without pool
 */
struct ptlrpc_request *
ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
                __u32 *lengths, char **bufs)
{
        return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
                                    NULL);
}
EXPORT_SYMBOL(ptlrpc_prep_req);

/**
 * Allocate and initialize new request set structure.
 * Returns a pointer to the newly allocated set structure or NULL on error.
 */
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
        struct ptlrpc_request_set *set;
        ENTRY;

        OBD_ALLOC(set, sizeof *set);
        if (!set)
                RETURN(NULL);
        atomic_set(&set->set_refcount, 1);
        INIT_LIST_HEAD(&set->set_requests);
        init_waitqueue_head(&set->set_waitq);
        atomic_set(&set->set_new_count, 0);
        atomic_set(&set->set_remaining, 0);
        spin_lock_init(&set->set_new_req_lock);
        INIT_LIST_HEAD(&set->set_new_requests);
        INIT_LIST_HEAD(&set->set_cblist);
        set->set_max_inflight = UINT_MAX;
        set->set_producer     = NULL;
        set->set_producer_arg = NULL;
        set->set_rc           = 0;

        RETURN(set);
}
EXPORT_SYMBOL(ptlrpc_prep_set);
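
/*
 * Usage sketch (illustrative): the typical synchronous multi-RPC pattern
 * built on request sets is
 *
 *      struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 *      if (set == NULL)
 *              return -ENOMEM;
 *      ptlrpc_set_add_req(set, req1);
 *      ptlrpc_set_add_req(set, req2);
 *      rc = ptlrpc_set_wait(set);
 *      ptlrpc_set_destroy(set);
 *
 * Each request added donates the caller's reference to the set, so the
 * caller does not also call ptlrpc_req_finished() on queued requests.
 */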

/**
 * Allocate and initialize new request set structure with flow control
 * extension. This extension allows controlling the number of requests
 * in-flight for the whole set. A callback function to generate requests
 * must be provided and the request set will keep the number of requests
 * sent over the wire to \a max.
 * Returns a pointer to the newly allocated set structure or NULL on error.
 */
struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
                                             void *arg)
{
        struct ptlrpc_request_set *set;

        set = ptlrpc_prep_set();
        if (!set)
                RETURN(NULL);

        set->set_max_inflight  = max;
        set->set_producer      = func;
        set->set_producer_arg  = arg;

        RETURN(set);
}
EXPORT_SYMBOL(ptlrpc_prep_fcset);
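
/*
 * Sketch of a producer callback (hypothetical, not from this file): the
 * callback is invoked while the set is below its in-flight limit and must
 * either queue one more request or return -ENOENT when done, matching the
 * contract checked in ptlrpc_set_producer() below:
 *
 *      static int my_producer(struct ptlrpc_request_set *set, void *arg)
 *      {
 *              struct ptlrpc_request *req = build_next_rpc(arg);
 *
 *              if (req == NULL)
 *                      return -ENOENT;
 *              ptlrpc_set_add_req(set, req);
 *              return 0;
 *      }
 *
 * build_next_rpc() is a hypothetical helper standing in for whatever
 * generates the caller's RPCs.
 */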

/**
 * Wind down and free request set structure previously allocated with
 * ptlrpc_prep_set.
 * Ensures that all requests on the set have completed and removes
 * all requests from the request list in a set.
 * If any unsent request happen to be on the list, pretends that they got
 * an error in flight and calls their completion handler.
 */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
        struct list_head *tmp;
        struct list_head *next;
        int expected_phase;
        int n = 0;
        ENTRY;

        /* Requests on the set should either all be completed, or all be new */
        expected_phase = (atomic_read(&set->set_remaining) == 0) ?
                         RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request,
                                   rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);
                n++;
        }

        LASSERTF(atomic_read(&set->set_remaining) == 0 ||
                 atomic_read(&set->set_remaining) == n, "%d / %d\n",
                 atomic_read(&set->set_remaining), n);

        list_for_each_safe(tmp, next, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request,
                                   rq_set_chain);
                list_del_init(&req->rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);

                if (req->rq_phase == RQ_PHASE_NEW) {
                        ptlrpc_req_interpret(NULL, req, -EBADR);
                        atomic_dec(&set->set_remaining);
                }

                spin_lock(&req->rq_lock);
                req->rq_set = NULL;
                req->rq_invalid_rqset = 0;
                spin_unlock(&req->rq_lock);

                ptlrpc_req_finished(req);
        }

        LASSERT(atomic_read(&set->set_remaining) == 0);

        ptlrpc_reqset_put(set);
        EXIT;
}
EXPORT_SYMBOL(ptlrpc_set_destroy);

/**
 * Add a callback function \a fn to the set.
 * This function would be called when all requests on this set are completed.
 * The function will be passed \a data argument.
 */
int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
                      set_interpreter_func fn, void *data)
{
        struct ptlrpc_set_cbdata *cbdata;

        OBD_ALLOC_PTR(cbdata);
        if (cbdata == NULL)
                RETURN(-ENOMEM);

        cbdata->psc_interpret = fn;
        cbdata->psc_data = data;
        list_add_tail(&cbdata->psc_item, &set->set_cblist);

        RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_set_add_cb);

/**
 * Add a new request to the general purpose request set.
 * Assumes request reference from the caller.
 */
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
                        struct ptlrpc_request *req)
{
        LASSERT(list_empty(&req->rq_set_chain));

        /* The set takes over the caller's request reference */
        list_add_tail(&req->rq_set_chain, &set->set_requests);
        req->rq_set = set;
        atomic_inc(&set->set_remaining);
        req->rq_queued_time = cfs_time_current();

        if (req->rq_reqmsg != NULL)
                lustre_msg_set_jobid(req->rq_reqmsg, NULL);

        if (set->set_producer != NULL)
                /* If the request set has a producer callback, the RPC must be
                 * sent straight away */
                ptlrpc_send_new_req(req);
}
EXPORT_SYMBOL(ptlrpc_set_add_req);

/**
 * Add a request to a request set with a dedicated server thread
 * and wake the thread to do any necessary processing.
 * Currently only used for ptlrpcd.
 */
void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
                            struct ptlrpc_request *req)
{
        struct ptlrpc_request_set *set = pc->pc_set;
        int count, i;

        LASSERT(req->rq_set == NULL);
        LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);

        spin_lock(&set->set_new_req_lock);
        /*
         * The set takes over the caller's request reference.
         */
        req->rq_set = set;
        req->rq_queued_time = cfs_time_current();
        list_add_tail(&req->rq_set_chain, &set->set_new_requests);
        count = atomic_inc_return(&set->set_new_count);
        spin_unlock(&set->set_new_req_lock);

        /* Only need to call wakeup once for the first entry. */
        if (count == 1) {
                wake_up(&set->set_waitq);

                /* XXX: It maybe unnecessary to wakeup all the partners. But to
                 *      guarantee the async RPC can be processed ASAP, we have
                 *      no other better choice. It maybe fixed in future. */
                for (i = 0; i < pc->pc_npartners; i++)
                        wake_up(&pc->pc_partners[i]->pc_set->set_waitq);
        }
}
EXPORT_SYMBOL(ptlrpc_set_add_new_req);

/**
 * Based on the current state of the import, determine if the request
 * can be sent, is an error, or should be delayed.
 *
 * Returns true if this request should be delayed. If false, and
 * *status is set, then the request can not be sent and *status is the
 * error code. If false and status is 0, then request can be sent.
 *
 * The imp->imp_lock must be held.
 */
static int ptlrpc_import_delay_req(struct obd_import *imp,
                                   struct ptlrpc_request *req, int *status)
{
        int delay = 0;
        ENTRY;

        LASSERT(status != NULL);
        *status = 0;

        if (req->rq_ctx_init || req->rq_ctx_fini) {
                /* always allow ctx init/fini rpc go through */
        } else if (imp->imp_state == LUSTRE_IMP_NEW) {
                DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
                *status = -EIO;
        } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
                /* pings may safely race with umount */
                DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ?
                          D_HA : D_ERROR, req, "IMP_CLOSED ");
                *status = -EIO;
        } else if (ptlrpc_send_limit_expired(req)) {
                /* probably doesn't need to be a D_ERROR after initial testing */
                DEBUG_REQ(D_ERROR, req, "send limit expired ");
                *status = -EIO;
        } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
                   imp->imp_state == LUSTRE_IMP_CONNECTING) {
                /* allow CONNECT even if import is invalid */ ;
                if (atomic_read(&imp->imp_inval_count) != 0) {
                        DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                        *status = -EIO;
                }
        } else if (imp->imp_invalid || imp->imp_obd->obd_no_recov) {
                if (!imp->imp_deactive)
                        DEBUG_REQ(D_NET, req, "IMP_INVALID");
                *status = -ESHUTDOWN; /* bz 12940 */
        } else if (req->rq_import_generation != imp->imp_generation) {
                DEBUG_REQ(D_ERROR, req, "req wrong generation:");
                *status = -EIO;
        } else if (req->rq_send_state != imp->imp_state) {
                /* invalidate in progress - any requests should be dropped */
                if (atomic_read(&imp->imp_inval_count) != 0) {
                        DEBUG_REQ(D_ERROR, req, "invalidate in flight");
                        *status = -EIO;
                } else if (imp->imp_dlm_fake || req->rq_no_delay) {
                        *status = -EWOULDBLOCK;
                } else if (req->rq_allow_replay &&
                          (imp->imp_state == LUSTRE_IMP_REPLAY ||
                           imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS ||
                           imp->imp_state == LUSTRE_IMP_REPLAY_WAIT ||
                           imp->imp_state == LUSTRE_IMP_RECOVER)) {
                        DEBUG_REQ(D_HA, req, "allow during recovery.\n");
                } else {
                        delay = 1;
                }
        }

        RETURN(delay);
}

/**
 * Decide if the error message regarding provided request \a req
 * should be printed to the console or not.
 * Makes its decision on request status and other properties.
 * Returns 1 to print error on the system console or 0 if not.
 */
static int ptlrpc_console_allow(struct ptlrpc_request *req)
{
        __u32 opc;
        int err;

        LASSERT(req->rq_reqmsg != NULL);
        opc = lustre_msg_get_opc(req->rq_reqmsg);

        /* Suppress particular reconnect errors which are to be expected. No
         * errors are suppressed for the initial connection on an import */
        if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
            (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {

                /* Suppress timed out reconnect requests */
                if (req->rq_timedout)
                        return 0;

                /* Suppress unavailable/again reconnect requests */
                err = lustre_msg_get_status(req->rq_repmsg);
                if (err == -ENODEV || err == -EAGAIN)
                        return 0;
        }

        return 1;
}

/**
 * Check request processing status.
 * Returns the status.
 */
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
        int err;
        ENTRY;

        err = lustre_msg_get_status(req->rq_repmsg);
        if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
                struct obd_import *imp = req->rq_import;
                __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);

                if (ptlrpc_console_allow(req))
                        LCONSOLE_ERROR_MSG(0x011, "%s: Communicating with %s,"
                                           " operation %s failed with %d.\n",
                                           imp->imp_obd->obd_name,
                                           libcfs_nid2str(
                                               imp->imp_connection->c_peer.nid),
                                           ll_opcode2str(opc), err);
                RETURN(err < 0 ? err : -EINVAL);
        }

        if (err < 0) {
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        } else if (err > 0) {
                /* XXX: translate this error from net to host */
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        }

        RETURN(err);
}

/**
 * save pre-versions of objects into request for replay.
 * Versions are obtained from server reply.
 */
static void ptlrpc_save_versions(struct ptlrpc_request *req)
{
        struct lustre_msg *repmsg = req->rq_repmsg;
        struct lustre_msg *reqmsg = req->rq_reqmsg;
        __u64 *versions = lustre_msg_get_versions(repmsg);
        ENTRY;

        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
                return;

        LASSERT(versions);
        lustre_msg_set_versions(reqmsg, versions);
        CDEBUG(D_INFO, "Client save versions ["LPX64"/"LPX64"]\n",
               versions[0], versions[1]);
        EXIT;
}

/**
 * Callback function called when client receives RPC reply for \a req.
 * Returns 0 on success or error code.
 * The return value would be assigned to req->rq_status by the caller
 * as request processing status.
 * This function also decides if the request needs to be saved for later replay.
 */
static int after_reply(struct ptlrpc_request *req)
{
        struct obd_import *imp = req->rq_import;
        struct obd_device *obd = req->rq_import->imp_obd;
        int rc;
        struct timeval work_start;
        long timediff;
        ENTRY;

        LASSERT(obd != NULL);
        /* repbuf must be unlinked */
        LASSERT(!req->rq_receiving_reply && !req->rq_must_unlink);

        if (req->rq_reply_truncate) {
                if (ptlrpc_no_resend(req)) {
                        DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
                                  " expected: %d, actual size: %d",
                                  req->rq_nob_received, req->rq_repbuf_len);
                        RETURN(-EOVERFLOW);
                }

                sptlrpc_cli_free_repbuf(req);
                /* Pass the required reply buffer size (include
                 * space for early reply).
                 * NB: no need to roundup because alloc_repbuf
                 * will roundup it */
                req->rq_replen       = req->rq_nob_received;
                req->rq_nob_received = 0;
                req->rq_resend       = 1;
                RETURN(0);
        }

        /*
         * NB Until this point, the whole of the incoming message,
         * including buflens, status etc is in the sender's byte order.
         */
        rc = sptlrpc_cli_unwrap_reply(req);
        if (rc) {
                DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
                RETURN(rc);
        }

        /*
         * Security layer unwrap might ask resend this request.
         */
        if (req->rq_resend)
                RETURN(0);

        rc = unpack_reply(req);
        if (rc)
                RETURN(rc);

        /* retry indefinitely on EINPROGRESS */
        if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
            ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
                time_t now = cfs_time_current_sec();

                DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
                req->rq_resend = 1;
                req->rq_nr_resend++;

                /* allocate new xid to avoid reply reconstruction */
                if (!req->rq_bulk) {
                        /* new xid is already allocated for bulk in
                         * ptlrpc_check_set() */
                        req->rq_xid = ptlrpc_next_xid();
                        DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for "
                                  "resend on EINPROGRESS");
                }

                /* Readjust the timeout for current conditions */
                ptlrpc_at_set_req_timeout(req);
                /* delay resend to give a chance to the server to get ready.
                 * The delay is increased by 1s on every resend and is capped to
                 * the current request timeout (i.e. obd_timeout if AT is off,
                 * or AT service time x 125% + 5s, see at_est2timeout) */
                if (req->rq_nr_resend > req->rq_timeout)
                        req->rq_sent = now + req->rq_timeout;
                else
                        req->rq_sent = now + req->rq_nr_resend;

                RETURN(0);
        }

        do_gettimeofday(&work_start);
        timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
        if (obd->obd_svc_stats != NULL) {
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
                                    timediff);
                ptlrpc_lprocfs_rpc_sent(req, timediff);
        }

        if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
            lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
                DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
                          lustre_msg_get_type(req->rq_repmsg));
                RETURN(-EPROTO);
        }

        if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
                CFS_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, cfs_fail_val);
        ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
        ptlrpc_at_adj_net_latency(req,
                                  lustre_msg_get_service_time(req->rq_repmsg));

        rc = ptlrpc_check_status(req);
        imp->imp_connect_error = rc;

        if (rc) {
                /*
                 * Either we've been evicted, or the server has failed for
                 * some reason. Try to reconnect, and if that fails, punt to
                 * the upcall.
                 */
                if (ll_rpc_recoverable_error(rc)) {
                        if (req->rq_send_state != LUSTRE_IMP_FULL ||
                            imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
                                RETURN(rc);
                        }
                        ptlrpc_request_handle_notconn(req);
                        RETURN(rc);
                }
        } else {
                /*
                 * Let's look if server sent slv. Do it only for RPC with
                 * rc == 0.
                 */
                ldlm_cli_update_pool(req);
        }

        /*
         * Store transno in reqmsg for replay.
         */
        if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
                req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
                lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
        }

        if (imp->imp_replayable) {
                spin_lock(&imp->imp_lock);
                /*
                 * No point in adding already-committed requests to the replay
                 * list, we will just remove them immediately. b=9829
                 */
                if (req->rq_transno != 0 &&
                    (req->rq_transno >
                     lustre_msg_get_last_committed(req->rq_repmsg) ||
                     req->rq_replay)) {
                        /** version recovery */
                        ptlrpc_save_versions(req);
                        ptlrpc_retain_replayable_request(req, imp);
                } else if (req->rq_commit_cb != NULL) {
                        spin_unlock(&imp->imp_lock);
                        req->rq_commit_cb(req);
                        spin_lock(&imp->imp_lock);
                }

                /*
                 * Replay-enabled imports return commit-status information.
                 */
                if (lustre_msg_get_last_committed(req->rq_repmsg)) {
                        imp->imp_peer_committed_transno =
                                lustre_msg_get_last_committed(req->rq_repmsg);
                }

                ptlrpc_free_committed(imp);

                if (!list_empty(&imp->imp_replay_list)) {
                        struct ptlrpc_request *last;

                        last = list_entry(imp->imp_replay_list.prev,
                                          struct ptlrpc_request,
                                          rq_replay_list);
                        /*
                         * Requests with rq_replay stay on the list even if no
                         * commit is expected.
                         */
                        if (last->rq_transno > imp->imp_peer_committed_transno)
                                ptlrpc_pinger_commit_expected(imp);
                }

                spin_unlock(&imp->imp_lock);
        }

        RETURN(rc);
}

/**
 * Helper function to send request \a req over the network for the first time
 * Also adjusts request phase.
 * Returns 0 on success or error code.
 */
static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
        struct obd_import *imp = req->rq_import;
        int rc;
        ENTRY;

        LASSERT(req->rq_phase == RQ_PHASE_NEW);
        if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
            (!req->rq_generation_set ||
             req->rq_import_generation == imp->imp_generation))
                RETURN(0);

        ptlrpc_rqphase_move(req, RQ_PHASE_RPC);

        spin_lock(&imp->imp_lock);

        if (!req->rq_generation_set)
                req->rq_import_generation = imp->imp_generation;

        if (ptlrpc_import_delay_req(imp, req, &rc)) {
                spin_lock(&req->rq_lock);
                req->rq_waiting = 1;
                spin_unlock(&req->rq_lock);

                DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
                          "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
                          ptlrpc_import_state_name(req->rq_send_state),
                          ptlrpc_import_state_name(imp->imp_state));
                LASSERT(list_empty(&req->rq_list));
                list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                atomic_inc(&req->rq_import->imp_inflight);
                spin_unlock(&imp->imp_lock);
                RETURN(0);
        }

        if (rc != 0) {
                spin_unlock(&imp->imp_lock);
                req->rq_status = rc;
                ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
                RETURN(rc);
        }

        LASSERT(list_empty(&req->rq_list));
        list_add_tail(&req->rq_list, &imp->imp_sending_list);
        atomic_inc(&req->rq_import->imp_inflight);
        spin_unlock(&imp->imp_lock);

        lustre_msg_set_status(req->rq_reqmsg, current_pid());

        rc = sptlrpc_req_refresh_ctx(req, -1);
        if (rc) {
                if (req->rq_err) {
                        req->rq_status = rc;
                        RETURN(1);
                } else {
                        spin_lock(&req->rq_lock);
                        req->rq_wait_ctx = 1;
                        spin_unlock(&req->rq_lock);
                        RETURN(0);
                }
        }

        CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
               " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
               imp->imp_obd->obd_uuid.uuid,
               lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
               libcfs_nid2str(imp->imp_connection->c_peer.nid),
               lustre_msg_get_opc(req->rq_reqmsg));

        rc = ptl_send_rpc(req, 0);
        if (rc) {
                DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
                req->rq_net_err = 1;
                RETURN(rc);
        }
        RETURN(0);
}

static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
{
        int remaining, rc;
        ENTRY;

        LASSERT(set->set_producer != NULL);

        remaining = atomic_read(&set->set_remaining);

        /* populate the ->set_requests list with requests until we
         * reach the maximum number of RPCs in flight for this set */
        while (atomic_read(&set->set_remaining) < set->set_max_inflight) {
                rc = set->set_producer(set, set->set_producer_arg);
                if (rc == -ENOENT) {
                        /* no more RPC to produce */
                        set->set_producer     = NULL;
                        set->set_producer_arg = NULL;
                        RETURN(0);
                }
        }

        RETURN((atomic_read(&set->set_remaining) - remaining));
}

/**
 * this sends any unsent RPCs in \a set and returns 1 if all are sent
 * and no more replies are expected.
 * (it is possible to get less replies than requests sent e.g. due to timed out
 * requests or requests that we had trouble to send out)
 */
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
        struct list_head *tmp, *next;
        int force_timer_recalc = 0;
        ENTRY;

        if (atomic_read(&set->set_remaining) == 0)
                RETURN(1);

        list_for_each_safe(tmp, next, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request,
                                   rq_set_chain);
                struct obd_import *imp = req->rq_import;
                int unregistered = 0;
                int rc = 0;

                if (req->rq_phase == RQ_PHASE_NEW &&
                    ptlrpc_send_new_req(req)) {
                        force_timer_recalc = 1;
                }

                /* delayed send - skip */
                if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
                        continue;

                /* delayed resend - skip */
                if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
                    req->rq_sent > cfs_time_current_sec())
                        continue;

                if (!(req->rq_phase == RQ_PHASE_RPC ||
                      req->rq_phase == RQ_PHASE_BULK ||
                      req->rq_phase == RQ_PHASE_INTERPRET ||
                      req->rq_phase == RQ_PHASE_UNREGISTERING ||
                      req->rq_phase == RQ_PHASE_COMPLETE)) {
                        DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
                        LBUG();
                }

                if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
                        LASSERT(req->rq_next_phase != req->rq_phase);
                        LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);

                        /*
                         * Skip processing until reply is unlinked. We
                         * can't return to pool before that and we can't
                         * call interpret before that. We need to make
                         * sure that all rdma transfers finished and will
                         * not corrupt any data.
                         */
                        if (ptlrpc_client_recv_or_unlink(req) ||
                            ptlrpc_client_bulk_active(req))
                                continue;

                        /*
                         * Turn fail_loc off to prevent it from looping
                         * forever.
                         */
                        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
                                OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK,
                                                     OBD_FAIL_ONCE);
                        }
                        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
                                OBD_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK,
                                                     OBD_FAIL_ONCE);
                        }

                        /*
                         * Move to next phase if reply was successfully
                         * unlinked.
                         */
                        ptlrpc_rqphase_move(req, req->rq_next_phase);
                }

                if (req->rq_phase == RQ_PHASE_COMPLETE)
                        continue;

                if (req->rq_phase == RQ_PHASE_INTERPRET)
                        GOTO(interpret, req->rq_status);

                /*
                 * Note that this also will start async reply unlink.
                 */
                if (req->rq_net_err && !req->rq_timedout) {
                        ptlrpc_expire_one_request(req, 1);

                        /*
                         * Check if we still need to wait for unlink.
                         */
                        if (ptlrpc_client_recv_or_unlink(req) ||
                            ptlrpc_client_bulk_active(req))
                                continue;
                        /* If there is no need to resend, fail it now. */
                        if (req->rq_no_resend) {
                                if (req->rq_status == 0)
                                        req->rq_status = -EIO;
                                ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
                                GOTO(interpret, req->rq_status);
                        } else {
                                continue;
                        }
                }

                if (req->rq_err) {
                        spin_lock(&req->rq_lock);
                        req->rq_replied = 0;
                        spin_unlock(&req->rq_lock);
                        if (req->rq_status == 0)
                                req->rq_status = -EIO;
                        ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
                        GOTO(interpret, req->rq_status);
                }

                /* ptlrpc_set_wait->l_wait_event sets lwi_allow_intr
                 * so it sets rq_intr regardless of individual rpc
                 * timeouts. The synchronous IO waiting path sets
                 * rq_intr irrespective of whether ptlrpcd
                 * has seen a timeout. Our policy is to only interpret
                 * interrupted rpcs after they have timed out, so we
                 * need to enforce that here.
                 */

                if (req->rq_intr && (req->rq_timedout || req->rq_waiting ||
                                     req->rq_wait_ctx)) {
                        req->rq_status = -EINTR;
                        ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
                        GOTO(interpret, req->rq_status);
                }

                if (req->rq_phase == RQ_PHASE_RPC) {
                        if (req->rq_timedout || req->rq_resend ||
                            req->rq_waiting || req->rq_wait_ctx) {
                                int status;

                                if (!ptlrpc_unregister_reply(req, 1))
                                        continue;

                                spin_lock(&imp->imp_lock);
                                if (ptlrpc_import_delay_req(imp, req, &status)){
                                        /* put on delay list - only if we wait
                                         * recovery finished - before send */
                                        list_del_init(&req->rq_list);
                                        list_add_tail(&req->rq_list,
                                                      &imp->imp_delayed_list);
                                        spin_unlock(&imp->imp_lock);
                                        continue;
                                }

                                if (status != 0) {
                                        req->rq_status = status;
                                        ptlrpc_rqphase_move(req,
                                                            RQ_PHASE_INTERPRET);
                                        spin_unlock(&imp->imp_lock);
                                        GOTO(interpret, req->rq_status);
                                }
                                if (ptlrpc_no_resend(req) &&
                                    !req->rq_wait_ctx) {
                                        req->rq_status = -ENOTCONN;
                                        ptlrpc_rqphase_move(req,
                                                            RQ_PHASE_INTERPRET);
                                        spin_unlock(&imp->imp_lock);
                                        GOTO(interpret, req->rq_status);
                                }

                                list_del_init(&req->rq_list);
                                list_add_tail(&req->rq_list,
                                              &imp->imp_sending_list);

                                spin_unlock(&imp->imp_lock);

                                spin_lock(&req->rq_lock);
                                req->rq_waiting = 0;
                                spin_unlock(&req->rq_lock);

                                if (req->rq_timedout || req->rq_resend) {
                                        /* This is re-sending anyways,
                                         * let's mark req as resend. */
                                        spin_lock(&req->rq_lock);
                                        req->rq_resend = 1;
                                        spin_unlock(&req->rq_lock);
                                        if (req->rq_bulk) {
                                                __u64 old_xid;

                                                if (!ptlrpc_unregister_bulk(req, 1))
                                                        continue;

                                                /* ensure previous bulk fails */
                                                old_xid = req->rq_xid;
                                                req->rq_xid = ptlrpc_next_xid();
                                                CDEBUG(D_HA, "resend bulk "
                                                       "old x"LPU64
                                                       " new x"LPU64"\n",
                                                       old_xid, req->rq_xid);
                                        }
                                }
                                /*
                                 * rq_wait_ctx is only touched by ptlrpcd,
                                 * so no lock is needed here.
                                 */
                                status = sptlrpc_req_refresh_ctx(req, -1);
                                if (status) {
                                        if (req->rq_err) {
                                                req->rq_status = status;
                                                spin_lock(&req->rq_lock);
                                                req->rq_wait_ctx = 0;
                                                spin_unlock(&req->rq_lock);
                                                force_timer_recalc = 1;
                                        } else {
                                                spin_lock(&req->rq_lock);
                                                req->rq_wait_ctx = 1;
                                                spin_unlock(&req->rq_lock);
                                        }

                                        continue;
                                } else {
                                        spin_lock(&req->rq_lock);
                                        req->rq_wait_ctx = 0;
                                        spin_unlock(&req->rq_lock);
                                }

                                rc = ptl_send_rpc(req, 0);
                                if (rc) {
                                        DEBUG_REQ(D_HA, req,
                                                  "send failed: rc = %d", rc);
                                        force_timer_recalc = 1;
                                        spin_lock(&req->rq_lock);
                                        req->rq_net_err = 1;
                                        spin_unlock(&req->rq_lock);
                                }
                                /* need to reset the timeout */
                                force_timer_recalc = 1;
                        }

                        spin_lock(&req->rq_lock);

                        if (ptlrpc_client_early(req)) {
                                ptlrpc_at_recv_early_reply(req);
                                spin_unlock(&req->rq_lock);
                                continue;
                        }

                        /* Still waiting for a reply? */
                        if (ptlrpc_client_recv(req)) {
                                spin_unlock(&req->rq_lock);
                                continue;
                        }

                        /* Did we actually receive a reply? */
                        if (!ptlrpc_client_replied(req)) {
                                spin_unlock(&req->rq_lock);
                                continue;
                        }

                        spin_unlock(&req->rq_lock);

                        /* unlink from net because we are going to
                         * swab in-place of reply buffer */
                        unregistered = ptlrpc_unregister_reply(req, 1);
                        if (!unregistered)
                                continue;

                        req->rq_status = after_reply(req);
                        if (req->rq_resend)
                                continue;

                        /* If there is no bulk associated with this request,
                         * then we're done and should let the interpreter
                         * process the reply. Similarly if the RPC returned
                         * an error, and therefore the bulk will never arrive.
                         */
                        if (req->rq_bulk == NULL || req->rq_status < 0) {
                                ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
                                GOTO(interpret, req->rq_status);
                        }

                        ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
                }

                LASSERT(req->rq_phase == RQ_PHASE_BULK);
                if (ptlrpc_client_bulk_active(req))
                        continue;

                if (req->rq_bulk->bd_failure) {
                        /* The RPC reply arrived OK, but the bulk screwed
                         * up! Dead weird since the server told us the RPC
                         * was good after getting the REPLY for her GET or
                         * the ACK for her PUT. */
                        DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
                        req->rq_status = -EIO;
                }

                ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);

        interpret:
                LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);

                /* This moves to "unregistering" phase we need to wait for
                 * reply unlink. */
                if (!unregistered && !ptlrpc_unregister_reply(req, 1)) {
                        /* start async bulk unlink too */
                        ptlrpc_unregister_bulk(req, 1);
                        continue;
                }

                if (!ptlrpc_unregister_bulk(req, 1))
                        continue;

                /* When calling interpret receiving already should be
                 * finished. */
                LASSERT(!req->rq_receiving_reply);

                ptlrpc_req_interpret(env, req, req->rq_status);

                ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);

                CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
                       "Completed RPC pname:cluuid:pid:xid:nid:"
                       "opc %s:%s:%d:"LPU64":%s:%d\n",
                       current_comm(), imp->imp_obd->obd_uuid.uuid,
                       lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
                       libcfs_nid2str(imp->imp_connection->c_peer.nid),
                       lustre_msg_get_opc(req->rq_reqmsg));

                spin_lock(&imp->imp_lock);
                /* Request already may be not on sending or delaying list. This
                 * may happen in the case of marking it erroneous for the case
                 * ptlrpc_import_delay_req(req, status) find it impossible to
                 * allow sending this rpc and returns *status != 0. */
                if (!list_empty(&req->rq_list)) {
                        list_del_init(&req->rq_list);
                        atomic_dec(&imp->imp_inflight);
                }
                spin_unlock(&imp->imp_lock);

                atomic_dec(&set->set_remaining);
                wake_up_all(&imp->imp_recovery_waitq);

                if (set->set_producer) {
                        /* produce a new request if possible */
                        if (ptlrpc_set_producer(set) > 0)
                                force_timer_recalc = 1;

                        /* free the request that has just been completed
                         * in order not to pollute set->set_requests */
                        list_del_init(&req->rq_set_chain);
                        spin_lock(&req->rq_lock);
                        req->rq_set = NULL;
                        req->rq_invalid_rqset = 0;
                        spin_unlock(&req->rq_lock);

                        /* record rq_status to compute the final status later */
                        if (req->rq_status != 0)
                                set->set_rc = req->rq_status;
                        ptlrpc_req_finished(req);
                }
        }

        /* If we hit an error, we want to recover promptly. */
        RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
}
EXPORT_SYMBOL(ptlrpc_check_set);

/**
 * Time out request \a req. If \a async_unlink is set, that means do not wait
 * until LNet actually confirms network buffer unlinking.
 * Return 1 if we should give up further retrying attempts or 0 otherwise.
 */
int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
{
        struct obd_import *imp = req->rq_import;
        int rc = 0;
        ENTRY;

        spin_lock(&req->rq_lock);
        req->rq_timedout = 1;
        spin_unlock(&req->rq_lock);

        DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
                  "/real "CFS_DURATION_T"]",
                  req->rq_net_err ? "failed due to network error" :
                     ((req->rq_real_sent == 0 ||
                       cfs_time_before(req->rq_real_sent, req->rq_sent) ||
                       cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
                      "timed out for sent delay" : "timed out for slow reply"),
                  req->rq_sent, req->rq_real_sent);

        if (imp != NULL && obd_debug_peer_on_timeout)
                LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);

        ptlrpc_unregister_reply(req, async_unlink);
        ptlrpc_unregister_bulk(req, async_unlink);

        if (obd_dump_on_timeout)
                libcfs_debug_dumplog();

        if (imp == NULL) {
                DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
                RETURN(1);
        }

        atomic_inc(&imp->imp_timeouts);

        /* The DLM server doesn't want recovery run on its imports. */
        if (imp->imp_dlm_fake)
                RETURN(1);

        /* If this request is for recovery or other primordial tasks,
         * then error it out here. */
        if (req->rq_ctx_init || req->rq_ctx_fini ||
            req->rq_send_state != LUSTRE_IMP_FULL ||
            imp->imp_obd->obd_no_recov) {
                DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
                          ptlrpc_import_state_name(req->rq_send_state),
                          ptlrpc_import_state_name(imp->imp_state));
                spin_lock(&req->rq_lock);
                req->rq_status = -ETIMEDOUT;
                req->rq_err = 1;
                spin_unlock(&req->rq_lock);
                RETURN(1);
        }

        /* if a request can't be resent we can't wait for an answer after
           the timeout */
        if (ptlrpc_no_resend(req)) {
                DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
                rc = 1;
        }

        ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));

        RETURN(rc);
}

/**
 * Time out all uncompleted requests in request set pointed by \a data
 * Callback used when waiting on sets with l_wait_event.
 */
int ptlrpc_expired_set(void *data)
{
        struct ptlrpc_request_set *set = data;
        struct list_head *tmp;
        time_t now = cfs_time_current_sec();
        ENTRY;

        LASSERT(set != NULL);

        /*
         * A timeout expired. See which reqs it applies to...
         */
        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request,
                                   rq_set_chain);

                /* don't expire request waiting for context */
                if (req->rq_wait_ctx)
                        continue;

                /* Request in-flight? */
                if (!((req->rq_phase == RQ_PHASE_RPC &&
                       !req->rq_waiting && !req->rq_resend) ||
                      (req->rq_phase == RQ_PHASE_BULK)))
                        continue;

                if (req->rq_timedout ||     /* already dealt with */
                    req->rq_deadline > now) /* not expired */
                        continue;

                /* Deal with this guy. Do it asynchronously to not block
                 * ptlrpcd thread. */
                ptlrpc_expire_one_request(req, 1);
        }

        /*
         * When waiting for a whole set, we always break out of the
         * sleep so we can recalculate the timeout, or enable interrupts
         * if everyone's timed out.
         */
        RETURN(1);
}
EXPORT_SYMBOL(ptlrpc_expired_set);

/**
 * Sets rq_intr flag in \a req under spinlock.
 */
void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
{
        spin_lock(&req->rq_lock);
        req->rq_intr = 1;
        spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_mark_interrupted);

/**
 * Interrupts (sets interrupted flag) all uncompleted requests in
 * a set \a data. Callback for l_wait_event for interruptible waits.
 */
void ptlrpc_interrupted_set(void *data)
{
        struct ptlrpc_request_set *set = data;
        struct list_head *tmp;

        LASSERT(set != NULL);
        CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set);

        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request,
                                   rq_set_chain);

                if (req->rq_phase != RQ_PHASE_RPC &&
                    req->rq_phase != RQ_PHASE_UNREGISTERING)
                        continue;

                ptlrpc_mark_interrupted(req);
        }
}
EXPORT_SYMBOL(ptlrpc_interrupted_set);

/**
 * Get the smallest timeout in the set; this does NOT set a timeout.
 */
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
        struct list_head *tmp;
        time_t now = cfs_time_current_sec();
        int timeout = 0;
        struct ptlrpc_request *req;
        int deadline;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */

        list_for_each(tmp, &set->set_requests) {
                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                /*
                 * Request in-flight?
                 */
                if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
                      (req->rq_phase == RQ_PHASE_BULK) ||
                      (req->rq_phase == RQ_PHASE_NEW)))
                        continue;

                /*
                 * Already timed out.
                 */
                if (req->rq_timedout)
                        continue;

                /*
                 * Waiting for ctx.
                 */
                if (req->rq_wait_ctx)
                        continue;

                if (req->rq_phase == RQ_PHASE_NEW)
                        deadline = req->rq_sent;
                else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend)
                        deadline = req->rq_sent;
                else
                        deadline = req->rq_sent + req->rq_timeout;

                if (deadline <= now)    /* actually expired already */
                        timeout = 1;    /* ASAP */
                else if (timeout == 0 || timeout > deadline - now)
                        timeout = deadline - now;
        }
        RETURN(timeout);
}
EXPORT_SYMBOL(ptlrpc_set_next_timeout);
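
/*
 * Worked example (illustrative): with two in-flight requests sent at
 * t=100s carrying rq_timeout values of 30s and 50s, at now=120s their
 * deadlines are 130s and 150s, so this returns 130 - 120 = 10 seconds;
 * any request whose deadline has already passed makes it return 1 (ASAP).
 */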
/**
 * Send all unset request from the set and then wait until all
 * requests in the set complete (either get a reply, timeout, get an
 * error or otherwise be interrupted).
 * Returns 0 on success or error code otherwise.
 */
int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
	struct list_head *tmp;
	struct ptlrpc_request *req;
	struct l_wait_info lwi;
	int rc, timeout;

	if (set->set_producer)
		(void)ptlrpc_set_producer(set);
	else
		list_for_each(tmp, &set->set_requests) {
			req = list_entry(tmp, struct ptlrpc_request,
					 rq_set_chain);
			if (req->rq_phase == RQ_PHASE_NEW)
				(void)ptlrpc_send_new_req(req);
		}

	if (list_empty(&set->set_requests))
		RETURN(0);

	do {
		timeout = ptlrpc_set_next_timeout(set);

		/* wait until all complete, interrupted, or an in-flight
		 * req times out */
		CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
		       set, timeout);

		if (timeout == 0 && !cfs_signal_pending())
			/*
			 * No requests are in-flight (either timed out
			 * or delayed), so we can allow interrupts.
			 * We still want to block for a limited time,
			 * so we allow interrupts during the timeout.
			 */
			lwi = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(1),
						   ptlrpc_expired_set,
						   ptlrpc_interrupted_set, set);
		else
			/*
			 * At least one request is in flight, so no
			 * interrupts are allowed. Wait until all
			 * complete, or an in-flight req times out.
			 */
			lwi = LWI_TIMEOUT(cfs_time_seconds(timeout ? timeout : 1),
					  ptlrpc_expired_set, set);

		rc = l_wait_event(set->set_waitq, ptlrpc_check_set(NULL, set), &lwi);

		/* LU-769 - if we ignored the signal because it was already
		 * pending when we started, we need to handle it now or we risk
		 * it being ignored forever */
		if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
		    cfs_signal_pending()) {
			sigset_t blocked_sigs =
				cfs_block_sigsinv(LUSTRE_FATAL_SIGS);

			/* In fact we only interrupt for the "fatal" signals
			 * like SIGINT or SIGKILL. We still ignore less
			 * important signals since ptlrpc set is not easily
			 * reentrant from userspace again */
			if (cfs_signal_pending())
				ptlrpc_interrupted_set(set);
			cfs_restore_sigs(blocked_sigs);
		}

		LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);

		/* -EINTR => all requests have been flagged rq_intr so next
		 * check completes.
		 * -ETIMEDOUT => someone timed out.  When all reqs have
		 * timed out, signals are enabled allowing completion with
		 * EINTR.
		 * I don't really care if we go once more round the loop in
		 * the error cases -eeb. */
		if (rc == 0 && atomic_read(&set->set_remaining) == 0) {
			list_for_each(tmp, &set->set_requests) {
				req = list_entry(tmp, struct ptlrpc_request,
						 rq_set_chain);
				spin_lock(&req->rq_lock);
				req->rq_invalid_rqset = 1;
				spin_unlock(&req->rq_lock);
			}
		}
	} while (rc != 0 || atomic_read(&set->set_remaining) != 0);

	LASSERT(atomic_read(&set->set_remaining) == 0);

	rc = set->set_rc; /* rq_status of already freed requests if any */
	list_for_each(tmp, &set->set_requests) {
		req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

		LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
		if (req->rq_status != 0)
			rc = req->rq_status;
	}

	if (set->set_interpret != NULL) {
		int (*interpreter)(struct ptlrpc_request_set *set,
				   void *, int) =
			set->set_interpret;
		rc = interpreter(set, set->set_arg, rc);
	} else {
		struct ptlrpc_set_cbdata *cbdata, *n;
		int err;

		list_for_each_entry_safe(cbdata, n,
					 &set->set_cblist, psc_item) {
			list_del_init(&cbdata->psc_item);
			err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
			if (err && !rc)
				rc = err;
			OBD_FREE_PTR(cbdata);
		}
	}

	RETURN(rc);
}
EXPORT_SYMBOL(ptlrpc_set_wait);
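
/*
 * Usage sketch (illustrative only, not called from this file): a caller
 * that wants several RPCs serviced concurrently builds a set, adds the
 * prepared requests (prep_req1()/prep_req2() are hypothetical helpers)
 * and waits:
 *
 *	struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 *	if (set == NULL)
 *		return -ENOMEM;
 *	ptlrpc_set_add_req(set, prep_req1());
 *	ptlrpc_set_add_req(set, prep_req2());
 *	rc = ptlrpc_set_wait(set);	(blocks until all complete)
 *	ptlrpc_set_destroy(set);	(drops the set's references)
 */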
/**
 * Helper function for request freeing.
 * Called when request count reached zero and request needs to be freed.
 * Removes request from all sorts of sending/replay lists it might be on,
 * frees network buffers if any are present.
 * If \a locked is set, that means caller is already holding import imp_lock
 * and so we no longer need to reobtain it (for certain lists manipulations)
 */
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
	if (request == NULL)
		return;

	LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
	LASSERTF(request->rq_rqbd == NULL, "req %p\n", request);/* client-side */
	LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
	LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
	LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
	LASSERTF(!request->rq_replay, "req %p\n", request);

	req_capsule_fini(&request->rq_pill);

	/* We must take it off the imp_replay_list first.  Otherwise, we'll set
	 * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
	if (request->rq_import != NULL) {
		if (!locked)
			spin_lock(&request->rq_import->imp_lock);
		list_del_init(&request->rq_replay_list);
		if (!locked)
			spin_unlock(&request->rq_import->imp_lock);
	}
	LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);

	if (atomic_read(&request->rq_refcount) != 0) {
		DEBUG_REQ(D_ERROR, request,
			  "freeing request with nonzero refcount");
		LBUG();
	}

	if (request->rq_repbuf != NULL)
		sptlrpc_cli_free_repbuf(request);
	if (request->rq_export != NULL) {
		class_export_put(request->rq_export);
		request->rq_export = NULL;
	}
	if (request->rq_import != NULL) {
		class_import_put(request->rq_import);
		request->rq_import = NULL;
	}
	if (request->rq_bulk != NULL)
		ptlrpc_free_bulk_pin(request->rq_bulk);

	if (request->rq_reqbuf != NULL || request->rq_clrbuf != NULL)
		sptlrpc_cli_free_reqbuf(request);

	if (request->rq_cli_ctx)
		sptlrpc_req_put_ctx(request, !locked);

	if (request->rq_pool)
		__ptlrpc_free_req_to_pool(request);
	else
		OBD_FREE(request, sizeof(*request));
}
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);

/**
 * Drop one request reference. Must be called with import imp_lock held.
 * When reference count drops to zero, request is freed.
 */
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
	LASSERT(spin_is_locked(&request->rq_import->imp_lock));
	(void)__ptlrpc_req_finished(request, 1);
}
EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
/**
 * Drops one reference count for request \a request.
 * \a locked set indicates that caller holds import imp_lock.
 * Frees the request when reference count reaches zero.
 */
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
	if (request == NULL)
		RETURN(1);

	if (request == LP_POISON ||
	    request->rq_reqmsg == LP_POISON) {
		CERROR("dereferencing freed request (bug 575)\n");
		LBUG();
		RETURN(1);
	}

	DEBUG_REQ(D_INFO, request, "refcount now %u",
		  atomic_read(&request->rq_refcount) - 1);

	if (atomic_dec_and_test(&request->rq_refcount)) {
		__ptlrpc_free_req(request, locked);
		RETURN(1);
	}

	RETURN(0);
}
/**
 * Drops one reference count for a request.
 */
void ptlrpc_req_finished(struct ptlrpc_request *request)
{
	__ptlrpc_req_finished(request, 0);
}
EXPORT_SYMBOL(ptlrpc_req_finished);
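
/*
 * Reference-counting sketch (illustrative): a request is created with one
 * reference; every additional holder takes and drops its own:
 *
 *	req = ptlrpc_request_alloc(imp, &RQF_OBD_PING);    refcount == 1
 *	ptlrpc_request_addref(req);			   refcount == 2
 *	...
 *	ptlrpc_req_finished(req);			   refcount == 1
 *	ptlrpc_req_finished(req);    refcount == 0, __ptlrpc_free_req() runs
 */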
/**
 * Returns xid of a \a request
 */
__u64 ptlrpc_req_xid(struct ptlrpc_request *request)
{
	return request->rq_xid;
}
EXPORT_SYMBOL(ptlrpc_req_xid);
/**
 * Disengage the client's reply buffer from the network
 * NB does _NOT_ unregister any client-side bulk.
 * IDEMPOTENT, but _not_ safe against concurrent callers.
 * The request owner (i.e. the thread doing the I/O) must call...
 * Returns 0 on success or 1 if unregistering cannot be made.
 */
int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
{
	int rc;
	wait_queue_head_t *wq;
	struct l_wait_info lwi;

	/*
	 * Might sleep.
	 */
	LASSERT(!in_interrupt());

	/*
	 * Let's setup deadline for reply unlink.
	 */
	if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
	    async && request->rq_reply_deadline == 0)
		request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;

	/*
	 * Nothing left to do.
	 */
	if (!ptlrpc_client_recv_or_unlink(request))
		RETURN(1);

	LNetMDUnlink(request->rq_reply_md_h);

	/*
	 * Let's check it once again.
	 */
	if (!ptlrpc_client_recv_or_unlink(request))
		RETURN(1);

	/*
	 * Move to "Unregistering" phase as reply was not unlinked yet.
	 */
	ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);

	/*
	 * Do not wait for unlink to finish.
	 */
	if (async)
		RETURN(0);

	/*
	 * We have to l_wait_event() whatever the result, to give liblustre
	 * a chance to run reply_in_callback(), and to make sure we've
	 * unlinked before returning a req to the pool.
	 */
	if (request->rq_set != NULL)
		wq = &request->rq_set->set_waitq;
	else
		wq = &request->rq_reply_waitq;

	for (;;) {
		/* Network access will complete in finite time but the HUGE
		 * timeout lets us CWARN for visibility of sluggish NALs */
		lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
					   cfs_time_seconds(1), NULL, NULL);
		rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
				  &lwi);
		if (rc == 0) {
			ptlrpc_rqphase_move(request, request->rq_next_phase);
			RETURN(1);
		}

		LASSERT(rc == -ETIMEDOUT);
		DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout "
			  "rvcng=%d unlnk=%d", request->rq_receiving_reply,
			  request->rq_must_unlink);
	}
	RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_unregister_reply);
/**
 * Iterates through replay_list on import and prunes
 * all requests that have transno smaller than last_committed for the
 * import and don't have rq_replay set.
 * Since requests are sorted in transno order, stops when meeting the first
 * transno bigger than last_committed.
 * caller must hold imp->imp_lock
 */
void ptlrpc_free_committed(struct obd_import *imp)
{
	struct list_head *tmp, *saved;
	struct ptlrpc_request *req;
	struct ptlrpc_request *last_req = NULL; /* temporary fire escape */

	LASSERT(imp != NULL);

	LASSERT(spin_is_locked(&imp->imp_lock));

	if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
	    imp->imp_generation == imp->imp_last_generation_checked) {
		CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
		       imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
		return;
	}
	CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
	       imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
	       imp->imp_generation);
	imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
	imp->imp_last_generation_checked = imp->imp_generation;

	list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
		req = list_entry(tmp, struct ptlrpc_request,
				 rq_replay_list);

		/* XXX ok to remove when 1357 resolved - rread 05/29/03 */
		LASSERT(req != last_req);
		last_req = req;

		if (req->rq_transno == 0) {
			DEBUG_REQ(D_EMERG, req, "zero transno during replay");
			LBUG();
		}
		if (req->rq_import_generation < imp->imp_generation) {
			DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
			GOTO(free_req, 0);
		}

		if (req->rq_replay) {
			DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
			continue;
		}

		/* not yet committed */
		if (req->rq_transno > imp->imp_peer_committed_transno) {
			DEBUG_REQ(D_RPCTRACE, req, "stopping search");
			break;
		}

		DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
			  imp->imp_peer_committed_transno);
free_req:
		spin_lock(&req->rq_lock);
		req->rq_replay = 0;
		spin_unlock(&req->rq_lock);
		if (req->rq_commit_cb != NULL)
			req->rq_commit_cb(req);
		list_del_init(&req->rq_replay_list);
		__ptlrpc_req_finished(req, 1);
	}
}
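
/*
 * Worked example (illustrative): with replay-list transnos [5, 7, 9] and
 * imp_peer_committed_transno == 7, requests 5 and 7 are committed and are
 * freed here (unless they still carry rq_replay, e.g. an open), while the
 * walk stops at 9, which the server has not committed yet.
 */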
void ptlrpc_cleanup_client(struct obd_import *imp)
{
	return;
}
EXPORT_SYMBOL(ptlrpc_cleanup_client);
/**
 * Schedule previously sent request for resend.
 * For bulk requests we assign new xid (to avoid problems with
 * lost replies and therefore several transfers landing into same buffer
 * from different sending attempts).
 */
void ptlrpc_resend_req(struct ptlrpc_request *req)
{
	DEBUG_REQ(D_HA, req, "going to resend");
	lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
	req->rq_status = -EAGAIN;

	spin_lock(&req->rq_lock);
	req->rq_resend = 1;
	req->rq_net_err = 0;
	req->rq_timedout = 0;
	if (req->rq_bulk) {
		__u64 old_xid = req->rq_xid;

		/* ensure previous bulk fails */
		req->rq_xid = ptlrpc_next_xid();
		CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
		       old_xid, req->rq_xid);
	}
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_resend_req);
/* XXX: this function and rq_status are currently unused */
void ptlrpc_restart_req(struct ptlrpc_request *req)
{
	DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
	req->rq_status = -ERESTARTSYS;

	spin_lock(&req->rq_lock);
	req->rq_restart = 1;
	req->rq_timedout = 0;
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_restart_req);
/**
 * Grab additional reference on a request \a req
 */
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
	atomic_inc(&req->rq_refcount);
	RETURN(req);
}
EXPORT_SYMBOL(ptlrpc_request_addref);
/**
 * Add a request to import replay_list.
 * Must be called under imp_lock
 */
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
				      struct obd_import *imp)
{
	struct list_head *tmp;

	LASSERT(spin_is_locked(&imp->imp_lock));

	if (req->rq_transno == 0) {
		DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
		LBUG();
	}

	/* clear this for new requests that were resent as well
	   as resent replayed requests. */
	lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);

	/* don't re-add requests that have been replayed */
	if (!list_empty(&req->rq_replay_list))
		return;

	lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);

	LASSERT(imp->imp_replayable);
	/* Balanced in ptlrpc_free_committed, usually. */
	ptlrpc_request_addref(req);
	list_for_each_prev(tmp, &imp->imp_replay_list) {
		struct ptlrpc_request *iter =
			list_entry(tmp, struct ptlrpc_request,
				   rq_replay_list);

		/* We may have duplicate transnos if we create and then
		 * open a file, or for closes retained if to match creating
		 * opens, so use req->rq_xid as a secondary key.
		 * (See bugs 684, 685, and 428.)
		 * XXX no longer needed, but all opens need transnos!
		 */
		if (iter->rq_transno > req->rq_transno)
			continue;

		if (iter->rq_transno == req->rq_transno) {
			LASSERT(iter->rq_xid != req->rq_xid);
			if (iter->rq_xid > req->rq_xid)
				continue;
		}

		list_add(&req->rq_replay_list, &iter->rq_replay_list);
		return;
	}

	list_add(&req->rq_replay_list, &imp->imp_replay_list);
}
EXPORT_SYMBOL(ptlrpc_retain_replayable_request);
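
/*
 * Ordering example (illustrative, hypothetical numbers): for a replay list
 * with (transno, xid) pairs (3,10) (5,11) (5,14) (8,12), inserting a
 * request with transno 5 and xid 13 walks backwards past (8,12) and (5,14)
 * and lands after (5,11), keeping the list sorted by transno first and by
 * xid second: (3,10) (5,11) (5,13) (5,14) (8,12).
 */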
/**
 * Send request and wait until it completes.
 * Returns request processing status.
 */
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
	struct ptlrpc_request_set *set;
	int rc = 0;

	LASSERT(req->rq_set == NULL);
	LASSERT(!req->rq_receiving_reply);

	set = ptlrpc_prep_set();
	if (set == NULL) {
		CERROR("Unable to allocate ptlrpc set.");
		RETURN(-ENOMEM);
	}

	/* for distributed debugging */
	lustre_msg_set_status(req->rq_reqmsg, current_pid());

	/* add a ref for the set (see comment in ptlrpc_set_add_req) */
	ptlrpc_request_addref(req);
	ptlrpc_set_add_req(set, req);
	rc = ptlrpc_set_wait(set);
	ptlrpc_set_destroy(set);

	RETURN(rc);
}
EXPORT_SYMBOL(ptlrpc_queue_wait);
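
/*
 * Usage sketch (illustrative; mirrors the ping path, with error handling
 * simplified): a synchronous caller packs a request, queues it, blocks
 * until completion and then drops its reference:
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *					LUSTRE_OBD_VERSION, OBD_PING);
 *	if (req == NULL)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *	rc = ptlrpc_queue_wait(req);
 *	ptlrpc_req_finished(req);
 */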
struct ptlrpc_replay_async_args {
	__u64 praa_old_state;
	int praa_old_status;
};
/**
 * Callback used for replayed requests reply processing.
 * In case of successful reply calls registered request replay callback.
 * In case of error restart replay process.
 */
static int ptlrpc_replay_interpret(const struct lu_env *env,
				   struct ptlrpc_request *req,
				   void *data, int rc)
{
	struct ptlrpc_replay_async_args *aa = data;
	struct obd_import *imp = req->rq_import;

	atomic_dec(&imp->imp_replay_inflight);

	if (!ptlrpc_client_replied(req)) {
		CERROR("request replay timed out, restarting recovery\n");
		GOTO(out, rc = -ETIMEDOUT);
	}

	if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
	    (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
	     lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
		GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));

	/** VBR: check version failure */
	if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
		/** replay was failed due to version mismatch */
		DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
		spin_lock(&imp->imp_lock);
		imp->imp_vbr_failed = 1;
		imp->imp_no_lock_replay = 1;
		spin_unlock(&imp->imp_lock);
		lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
	} else {
		/** The transno had better not change over replay. */
		LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
			 lustre_msg_get_transno(req->rq_repmsg) ||
			 lustre_msg_get_transno(req->rq_repmsg) == 0,
			 LPX64"/"LPX64"\n",
			 lustre_msg_get_transno(req->rq_reqmsg),
			 lustre_msg_get_transno(req->rq_repmsg));
	}

	spin_lock(&imp->imp_lock);
	/** if replays by version then gap occur on server, no trust to locks */
	if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
		imp->imp_no_lock_replay = 1;
	imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
	spin_unlock(&imp->imp_lock);
	LASSERT(imp->imp_last_replay_transno);

	/* transaction number shouldn't be bigger than the latest replayed */
	if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) {
		DEBUG_REQ(D_ERROR, req,
			  "Reported transno "LPU64" is bigger than the "
			  "replayed one: "LPU64, req->rq_transno,
			  lustre_msg_get_transno(req->rq_reqmsg));
		GOTO(out, rc = -EINVAL);
	}

	DEBUG_REQ(D_HA, req, "got rep");

	/* let the callback do fixups, possibly including in the request */
	if (req->rq_replay_cb)
		req->rq_replay_cb(req);

	if (ptlrpc_client_replied(req) &&
	    lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
		DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
			  lustre_msg_get_status(req->rq_repmsg),
			  aa->praa_old_status);
	} else {
		/* Put it back for re-replay. */
		lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
	}

	/*
	 * Errors while replay can set transno to 0, but
	 * imp_last_replay_transno shouldn't be set to 0 anyway
	 */
	if (req->rq_transno == 0)
		CERROR("Transno is 0 during replay!\n");

	/* continue with recovery */
	rc = ptlrpc_import_recovery_state_machine(imp);
out:
	req->rq_send_state = aa->praa_old_state;

	if (rc != 0)
		/* this replay failed, so restart recovery */
		ptlrpc_connect_import(imp);

	RETURN(rc);
}
/**
 * Prepares and queues request for replay.
 * Adds it to ptlrpcd queue for actual sending.
 * Returns 0 on success.
 */
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
	struct ptlrpc_replay_async_args *aa;

	LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);

	LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
	aa = ptlrpc_req_async_args(req);
	memset(aa, 0, sizeof *aa);

	/* Prepare request to be resent with ptlrpcd */
	aa->praa_old_state = req->rq_send_state;
	req->rq_send_state = LUSTRE_IMP_REPLAY;
	req->rq_phase = RQ_PHASE_NEW;
	req->rq_next_phase = RQ_PHASE_UNDEFINED;

	aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);

	req->rq_interpret_reply = ptlrpc_replay_interpret;
	/* Readjust the timeout for current conditions */
	ptlrpc_at_set_req_timeout(req);

	/* Tell server the net_latency, so the server can calculate how long
	 * it should wait for next replay */
	lustre_msg_set_service_time(req->rq_reqmsg,
				    ptlrpc_at_get_net_latency(req));
	DEBUG_REQ(D_HA, req, "REPLAY");

	atomic_inc(&req->rq_import->imp_replay_inflight);
	ptlrpc_request_addref(req); /* ptlrpcd needs a ref */

	ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
	RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_replay_req);
/**
 * Aborts all in-flight request on import \a imp sending and delayed lists
 */
void ptlrpc_abort_inflight(struct obd_import *imp)
{
	struct list_head *tmp, *n;

	/* Make sure that no new requests get processed for this import.
	 * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
	 * this flag and then putting requests on sending_list or delayed_list.
	 */
	spin_lock(&imp->imp_lock);

	/* XXX locking?  Maybe we should remove each request with the list
	 * locked?  Also, how do we know if the requests on the list are
	 * being freed at this time?
	 */
	list_for_each_safe(tmp, n, &imp->imp_sending_list) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_list);

		DEBUG_REQ(D_RPCTRACE, req, "inflight");

		spin_lock(&req->rq_lock);
		if (req->rq_import_generation < imp->imp_generation) {
			req->rq_err = 1;
			req->rq_status = -EIO;
			ptlrpc_client_wake_req(req);
		}
		spin_unlock(&req->rq_lock);
	}

	list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
		struct ptlrpc_request *req =
			list_entry(tmp, struct ptlrpc_request, rq_list);

		DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");

		spin_lock(&req->rq_lock);
		if (req->rq_import_generation < imp->imp_generation) {
			req->rq_err = 1;
			req->rq_status = -EIO;
			ptlrpc_client_wake_req(req);
		}
		spin_unlock(&req->rq_lock);
	}

	/* Last chance to free reqs left on the replay list, but we
	 * will still leak reqs that haven't committed. */
	if (imp->imp_replayable)
		ptlrpc_free_committed(imp);

	spin_unlock(&imp->imp_lock);
}
EXPORT_SYMBOL(ptlrpc_abort_inflight);
/**
 * Abort all uncompleted requests in request set \a set
 */
void ptlrpc_abort_set(struct ptlrpc_request_set *set)
{
	struct list_head *tmp, *pos;

	LASSERT(set != NULL);

	list_for_each_safe(pos, tmp, &set->set_requests) {
		struct ptlrpc_request *req =
			list_entry(pos, struct ptlrpc_request,
				   rq_set_chain);

		spin_lock(&req->rq_lock);
		if (req->rq_phase != RQ_PHASE_RPC) {
			spin_unlock(&req->rq_lock);
			continue;
		}

		req->rq_err = 1;
		req->rq_status = -EINTR;
		ptlrpc_client_wake_req(req);
		spin_unlock(&req->rq_lock);
	}
}
static __u64 ptlrpc_last_xid;
static spinlock_t ptlrpc_last_xid_lock;

/**
 * Initialize the XID for the node.  This is common among all requests on
 * this node, and only requires the property that it is monotonically
 * increasing.  It does not need to be sequential.  Since this is also used
 * as the RDMA match bits, it is important that a single client NOT have
 * the same match bits for two different in-flight requests, hence we do
 * NOT want to have an XID per target or similar.
 *
 * To avoid an unlikely collision between match bits after a client reboot
 * (which would deliver old data into the wrong RDMA buffer) initialize
 * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
 * If the time is clearly incorrect, we instead use a 62-bit random number.
 * In the worst case the random number will overflow 1M RPCs per second in
 * 9133 years, or permutations thereof.
 */
#define YEAR_2004 (1ULL << 30)
void ptlrpc_init_xid(void)
{
	time_t now = cfs_time_current_sec();

	spin_lock_init(&ptlrpc_last_xid_lock);
	if (now < YEAR_2004) {
		cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
		ptlrpc_last_xid >>= 2;
		ptlrpc_last_xid |= (1ULL << 61);
	} else {
		ptlrpc_last_xid = (__u64)now << 20;
	}

	/* Need to always be aligned to a power-of-two for multi-bulk BRW */
	CLASSERT((PTLRPC_BULK_OPS_COUNT & (PTLRPC_BULK_OPS_COUNT - 1)) == 0);
	ptlrpc_last_xid &= PTLRPC_BULK_OPS_MASK;
}
/**
 * Increase xid and returns resulting new value to the caller.
 *
 * Multi-bulk BRW RPCs consume multiple XIDs for each bulk transfer, starting
 * at the returned xid, up to xid + PTLRPC_BULK_OPS_COUNT - 1. The BRW RPC
 * itself uses the last bulk xid needed, so the server can determine the
 * number of bulk transfers from the RPC XID and a bitmask. The starting
 * xid must align to a power-of-two value.
 *
 * This is assumed to be true due to the initial ptlrpc_last_xid
 * value also being initialized to a power-of-two value. LU-1431
 */
__u64 ptlrpc_next_xid(void)
{
	__u64 next;

	spin_lock(&ptlrpc_last_xid_lock);
	next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
	ptlrpc_last_xid = next;
	spin_unlock(&ptlrpc_last_xid_lock);

	return next;
}
EXPORT_SYMBOL(ptlrpc_next_xid);
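
/*
 * Worked example (illustrative; assumes PTLRPC_BULK_OPS_COUNT == 16 on
 * this build): successive calls return XIDs 16 apart, e.g. 0x...10,
 * 0x...20, 0x...30.  A BRW RPC needing 3 bulk transfers then owns match
 * bits xid, xid+1 and xid+2 and carries the last of them, so the server
 * can recover the transfer count from the low bits of the RPC XID.
 */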
/**
 * Get a glimpse at what next xid value might have been.
 * Returns possible next xid.
 */
__u64 ptlrpc_sample_next_xid(void)
{
#if BITS_PER_LONG == 32
	/* need to avoid possible word tearing on 32-bit systems */
	__u64 next;

	spin_lock(&ptlrpc_last_xid_lock);
	next = ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
	spin_unlock(&ptlrpc_last_xid_lock);

	return next;
#else
	/* No need to lock, since returned value is racy anyways */
	return ptlrpc_last_xid + PTLRPC_BULK_OPS_COUNT;
#endif
}
EXPORT_SYMBOL(ptlrpc_sample_next_xid);
/**
 * Functions for operating ptlrpc workers.
 *
 * A ptlrpc work is a function which will be running inside ptlrpc context.
 * The callback shouldn't sleep otherwise it will block that ptlrpcd thread.
 *
 * 1. after a work is created, it can be used many times, that is:
 *         handler = ptlrpcd_alloc_work();
 *         ptlrpcd_queue_work();
 *
 *    queue it again when necessary:
 *         ptlrpcd_queue_work();
 *         ptlrpcd_destroy_work();
 * 2. ptlrpcd_queue_work() can be called by multiple processes meanwhile, but
 *    it will only be queued once in any time. Also as its name implies, it may
 *    have delay before it really runs by ptlrpcd thread.
 */
struct ptlrpc_work_async_args {
	__u64 magic;
	int (*cb)(const struct lu_env *, void *);
	void *cbdata;
};

#define PTLRPC_WORK_MAGIC 0x6655436b676f4f44ULL /* magic code */
static int work_interpreter(const struct lu_env *env,
			    struct ptlrpc_request *req, void *data, int rc)
{
	struct ptlrpc_work_async_args *arg = data;

	LASSERT(arg->magic == PTLRPC_WORK_MAGIC);
	LASSERT(arg->cb != NULL);

	return arg->cb(env, arg->cbdata);
}
/**
 * Create a work for ptlrpc.
 */
void *ptlrpcd_alloc_work(struct obd_import *imp,
			 int (*cb)(const struct lu_env *, void *), void *cbdata)
{
	struct ptlrpc_request *req = NULL;
	struct ptlrpc_work_async_args *args;

	might_sleep();

	if (cb == NULL)
		RETURN(ERR_PTR(-EINVAL));

	/* copy some code from deprecated fakereq. */
	OBD_ALLOC_PTR(req);
	if (req == NULL) {
		CERROR("ptlrpc: run out of memory!\n");
		RETURN(ERR_PTR(-ENOMEM));
	}

	req->rq_send_state = LUSTRE_IMP_FULL;
	req->rq_type = PTL_RPC_MSG_REQUEST;
	req->rq_import = class_import_get(imp);
	req->rq_export = NULL;
	req->rq_interpret_reply = work_interpreter;
	/* don't want reply */
	req->rq_receiving_reply = 0;
	req->rq_must_unlink = 0;
	req->rq_no_delay = req->rq_no_resend = 1;

	spin_lock_init(&req->rq_lock);
	INIT_LIST_HEAD(&req->rq_list);
	INIT_LIST_HEAD(&req->rq_replay_list);
	INIT_LIST_HEAD(&req->rq_set_chain);
	INIT_LIST_HEAD(&req->rq_history_list);
	INIT_LIST_HEAD(&req->rq_exp_list);
	init_waitqueue_head(&req->rq_reply_waitq);
	init_waitqueue_head(&req->rq_set_waitq);
	atomic_set(&req->rq_refcount, 1);

	CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
	args = ptlrpc_req_async_args(req);
	args->magic = PTLRPC_WORK_MAGIC;
	args->cb = cb;
	args->cbdata = cbdata;

	RETURN(req);
}
EXPORT_SYMBOL(ptlrpcd_alloc_work);
void ptlrpcd_destroy_work(void *handler)
{
	struct ptlrpc_request *req = handler;

	if (req)
		ptlrpc_req_finished(req);
}
EXPORT_SYMBOL(ptlrpcd_destroy_work);
int ptlrpcd_queue_work(void *handler)
{
	struct ptlrpc_request *req = handler;

	/*
	 * Check if the req is already being queued.
	 *
	 * Here comes a trick: it lacks a way of checking if a req is being
	 * processed reliably in ptlrpc. Here I have to use refcount of req
	 * for this purpose. This is okay because the caller should use this
	 * req as opaque data. - Jinshan
	 */
	LASSERT(atomic_read(&req->rq_refcount) > 0);
	if (atomic_read(&req->rq_refcount) > 1)
		return -EBUSY;

	if (atomic_inc_return(&req->rq_refcount) > 2) { /* race */
		atomic_dec(&req->rq_refcount);
		return -EBUSY;
	}

	/* re-initialize the req */
	req->rq_timeout = obd_timeout;
	req->rq_sent = cfs_time_current_sec();
	req->rq_deadline = req->rq_sent + req->rq_timeout;
	req->rq_reply_deadline = req->rq_deadline;
	req->rq_phase = RQ_PHASE_INTERPRET;
	req->rq_next_phase = RQ_PHASE_COMPLETE;
	req->rq_xid = ptlrpc_next_xid();
	req->rq_import_generation = req->rq_import->imp_generation;

	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
	return 0;
}
EXPORT_SYMBOL(ptlrpcd_queue_work);
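
/*
 * Usage sketch (illustrative, hypothetical names): a callback that must run
 * in ptlrpc context is wrapped in a work item once and queued as needed:
 *
 *	static int my_work_cb(const struct lu_env *env, void *data)
 *	{
 *		(must not sleep: this runs inside a ptlrpcd thread)
 *		return 0;
 *	}
 *
 *	handler = ptlrpcd_alloc_work(imp, my_work_cb, my_data);
 *	if (IS_ERR(handler))
 *		return PTR_ERR(handler);
 *	rc = ptlrpcd_queue_work(handler);    (-EBUSY if already queued)
 *	...
 *	ptlrpcd_destroy_work(handler);	     (drops the final reference)
 */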