/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: Isaac Huang <isaac@clusterfs.com>
 *
 * 2012-05-13: Liang Zhen <liang@whamcloud.com>
 * - percpt data for service to improve smp performance
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "selftest.h"

static struct smoketest_rpc {
	spinlock_t	 rpc_glock;	/* global lock */
	srpc_service_t	*rpc_services[SRPC_SERVICE_MAX_ID + 1];
	lnet_handle_eq_t rpc_lnet_eq;	/* _the_ LNet event queue */
	srpc_state_t	 rpc_state;
	srpc_counters_t	 rpc_counters;
	__u64		 rpc_matchbits;	/* matchbits counter */
} srpc_data;

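/*
 * All shared state of this module lives in srpc_data and is serialized by
 * rpc_glock. rpc_matchbits seeds srpc_next_id() below; srpc_startup()
 * initializes it from the wall-clock time shifted into the top 16 bits, so
 * each incarnation of the module hands out a fresh range of match bits.
 */
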
static int
srpc_serv_portal(int svc_id)
{
	return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
	       SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
}

/* forward ref. */
int srpc_handle_rpc(swi_workitem_t *wi);

void srpc_get_counters(srpc_counters_t *cnt)
{
	spin_lock(&srpc_data.rpc_glock);
	*cnt = srpc_data.rpc_counters;
	spin_unlock(&srpc_data.rpc_glock);
}

void srpc_set_counters(const srpc_counters_t *cnt)
{
	spin_lock(&srpc_data.rpc_glock);
	srpc_data.rpc_counters = *cnt;
	spin_unlock(&srpc_data.rpc_glock);
}

static int
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
{
	nob = min_t(int, nob, PAGE_CACHE_SIZE);

	LASSERT(nob > 0);
	LASSERT(i >= 0 && i < bk->bk_niov);

	bk->bk_iovs[i].kiov_offset = 0;
	bk->bk_iovs[i].kiov_page = pg;
	bk->bk_iovs[i].kiov_len = nob;
	return nob;
}

void
srpc_free_bulk(srpc_bulk_t *bk)
{
	struct page *pg;
	int i;

	LASSERT(bk != NULL);

	for (i = 0; i < bk->bk_niov; i++) {
		pg = bk->bk_iovs[i].kiov_page;
		if (pg == NULL)
			break;

		__free_page(pg);
	}

	LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
}

srpc_bulk_t *
srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
{
	srpc_bulk_t *bk;
	int i;

	LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);

	LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
			 offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
	if (bk == NULL) {
		CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
		return NULL;
	}

	memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
	bk->bk_sink = sink;
	bk->bk_len = bulk_len;
	bk->bk_niov = bulk_npg;

	for (i = 0; i < bulk_npg; i++) {
		struct page *pg;
		int nob;

		/* GFP_KERNEL assumed here; the allocation flags were lost
		 * in this copy of the file */
		pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(),
							  cpt),
				      GFP_KERNEL, 0);
		if (pg == NULL) {
			CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
			srpc_free_bulk(bk);
			return NULL;
		}

		nob = srpc_add_bulk_page(bk, pg, i, bulk_len);
		bulk_len -= nob;
	}

	return bk;
}

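/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * allocate a two-page sink buffer on CPT 0, post it for RDMA, then
 * release it with srpc_free_bulk() once the transfer completes.
 *
 *	srpc_bulk_t *bk;
 *
 *	bk = srpc_alloc_bulk(0, 2, 2 * PAGE_CACHE_SIZE, 1);
 *	if (bk != NULL) {
 *		... hand bk to srpc_prepare_bulk()/srpc_do_bulk() ...
 *		srpc_free_bulk(bk);
 *	}
 */
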
static inline __u64
srpc_next_id(void)
{
	__u64 id;

	spin_lock(&srpc_data.rpc_glock);
	id = srpc_data.rpc_matchbits++;
	spin_unlock(&srpc_data.rpc_glock);
	return id;
}

static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
		     struct srpc_service_cd *scd,
		     struct srpc_buffer *buffer)
{
	memset(rpc, 0, sizeof(*rpc));
	swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc,
			  srpc_serv_is_framework(scd->scd_svc) ?
			  lst_sched_serial : lst_sched_test[scd->scd_cpt]);

	rpc->srpc_ev.ev_fired = 1; /* no event expected now */

	rpc->srpc_scd = scd;
	rpc->srpc_reqstbuf = buffer;
	rpc->srpc_peer = buffer->buf_peer;
	rpc->srpc_self = buffer->buf_self;
	LNetInvalidateHandle(&rpc->srpc_replymdh);
}

static void
srpc_service_fini(struct srpc_service *svc)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	struct srpc_buffer *buf;
	struct list_head *q;
	int i;

	if (svc->sv_cpt_data == NULL)
		return;

	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
		while (1) {
			if (!list_empty(&scd->scd_buf_posted))
				q = &scd->scd_buf_posted;
			else if (!list_empty(&scd->scd_buf_blocked))
				q = &scd->scd_buf_blocked;
			else
				break;

			while (!list_empty(q)) {
				buf = list_entry(q->next, struct srpc_buffer,
						 buf_list);
				list_del(&buf->buf_list);
				LIBCFS_FREE(buf, sizeof(*buf));
			}
		}

		LASSERT(list_empty(&scd->scd_rpc_active));

		while (!list_empty(&scd->scd_rpc_free)) {
			rpc = list_entry(scd->scd_rpc_free.next,
					 struct srpc_server_rpc,
					 srpc_list);
			list_del(&rpc->srpc_list);
			LIBCFS_FREE(rpc, sizeof(*rpc));
		}
	}

	cfs_percpt_free(svc->sv_cpt_data);
	svc->sv_cpt_data = NULL;
}

static int
srpc_service_nrpcs(struct srpc_service *svc)
{
	int nrpcs = svc->sv_wi_total / svc->sv_ncpts;

	return srpc_serv_is_framework(svc) ?
	       max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
}

int srpc_add_buffer(struct swi_workitem *wi);

static int
srpc_service_init(struct srpc_service *svc)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int nrpcs;
	int i;
	int j;

	svc->sv_shuttingdown = 0;

	svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
					    sizeof(struct srpc_service_cd));
	if (svc->sv_cpt_data == NULL)
		return -ENOMEM;

	svc->sv_ncpts = srpc_serv_is_framework(svc) ?
			1 : cfs_cpt_number(lnet_cpt_table());
	nrpcs = srpc_service_nrpcs(svc);

	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
		scd->scd_cpt = i;
		scd->scd_svc = svc;
		spin_lock_init(&scd->scd_lock);
		INIT_LIST_HEAD(&scd->scd_rpc_free);
		INIT_LIST_HEAD(&scd->scd_rpc_active);
		INIT_LIST_HEAD(&scd->scd_buf_posted);
		INIT_LIST_HEAD(&scd->scd_buf_blocked);

		scd->scd_ev.ev_data = scd;
		scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;

		/*
		 * NB: don't use lst_sched_serial for adding buffer,
		 * see details in srpc_service_add_buffers()
		 */
		swi_init_workitem(&scd->scd_buf_wi, scd,
				  srpc_add_buffer, lst_sched_test[i]);

		if (i != 0 && srpc_serv_is_framework(svc)) {
			/*
			 * NB: framework service only needs srpc_service_cd
			 * for one partition, but we allocate for all to
			 * make it easier to implement; it will waste a
			 * little memory but nobody should care about this
			 */
			continue;
		}

		for (j = 0; j < nrpcs; j++) {
			LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
					 i, sizeof(*rpc));
			if (rpc == NULL) {
				srpc_service_fini(svc);
				return -ENOMEM;
			}
			list_add(&rpc->srpc_list, &scd->scd_rpc_free);
		}
	}

	return 0;
}

int
srpc_add_service(struct srpc_service *sv)
{
	int id = sv->sv_id;

	LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);

	if (srpc_service_init(sv) != 0)
		return -ENOMEM;

	spin_lock(&srpc_data.rpc_glock);

	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

	if (srpc_data.rpc_services[id] != NULL) {
		spin_unlock(&srpc_data.rpc_glock);
		goto failed;
	}

	srpc_data.rpc_services[id] = sv;
	spin_unlock(&srpc_data.rpc_glock);

	CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
	return 0;

failed:
	srpc_service_fini(sv);
	return -EBUSY;
}

int
srpc_remove_service(srpc_service_t *sv)
{
	int id = sv->sv_id;

	spin_lock(&srpc_data.rpc_glock);

	if (srpc_data.rpc_services[id] != sv) {
		spin_unlock(&srpc_data.rpc_glock);
		return -ENOENT;
	}

	srpc_data.rpc_services[id] = NULL;
	spin_unlock(&srpc_data.rpc_glock);
	return 0;
}

static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
		       int len, int options, lnet_process_id_t peer,
		       lnet_handle_md_t *mdh, srpc_event_t *ev)
{
	int rc;
	lnet_md_t md;
	lnet_handle_me_t meh;

	rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
			  local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
	if (rc != 0) {
		CERROR("LNetMEAttach failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	md.threshold = 1;
	md.user_ptr = ev;
	md.start = buf;
	md.length = len;
	md.options = options;
	md.eq_handle = srpc_data.rpc_lnet_eq;

	rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
	if (rc != 0) {
		CERROR("LNetMDAttach failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);

		rc = LNetMEUnlink(meh);
		LASSERT(rc == 0);
		return -ENOMEM;
	}

	CDEBUG(D_NET, "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
	       libcfs_id2str(peer), portal, matchbits);
	return 0;
}

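/*
 * A passive buffer just sits on an ME/MD pair waiting for a peer to PUT
 * into it (or GET from it); completion is reported through the single
 * event queue, srpc_data.rpc_lnet_eq, whose user_ptr points back at the
 * srpc_event_t describing what the waiter expects.
 */
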
static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
		      int options, lnet_process_id_t peer, lnet_nid_t self,
		      lnet_handle_md_t *mdh, srpc_event_t *ev)
{
	int rc;
	lnet_md_t md;

	md.user_ptr = ev;
	md.start = buf;
	md.length = len;
	md.eq_handle = srpc_data.rpc_lnet_eq;
	md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
	md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);

	rc = LNetMDBind(md, LNET_UNLINK, mdh);
	if (rc != 0) {
		CERROR("LNetMDBind failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	/*
	 * this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
	 * they're only meaningful for MDs attached to an ME (i.e. passive
	 * buffers)
	 */
	if ((options & LNET_MD_OP_PUT) != 0) {
		rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
			     portal, matchbits, 0, 0);
	} else {
		LASSERT((options & LNET_MD_OP_GET) != 0);

		rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
	}

	if (rc != 0) {
		CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
		       ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
		       libcfs_id2str(peer), portal, matchbits, rc);

		/*
		 * The forthcoming unlink event will complete this operation
		 * with failure, so fall through and return success here.
		 */
		rc = LNetMDUnlink(*mdh);
		LASSERT(rc == 0);
	}

	CDEBUG(D_NET, "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
	       libcfs_id2str(peer), portal, matchbits);
	return 0;
}

static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
			 lnet_handle_md_t *mdh, srpc_event_t *ev)
{
	lnet_process_id_t any = {0};

	any.nid = LNET_NID_ANY;
	any.pid = LNET_PID_ANY;

	return srpc_post_passive_rdma(srpc_serv_portal(service),
				      local, service, buf, len,
				      LNET_MD_OP_PUT, any, mdh, ev);
}

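/*
 * Request buffers match the wildcard LNET_NID_ANY/LNET_PID_ANY pair with
 * the service id as matchbits, so any peer may land a request in them;
 * only the reply and bulk MDs use the per-RPC ids handed out by
 * srpc_next_id().
 */
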
static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_msg *msg = &buf->buf_msg;
	int rc;

	LNetInvalidateHandle(&buf->buf_mdh);
	list_add(&buf->buf_list, &scd->scd_buf_posted);
	scd->scd_buf_nposted++;
	spin_unlock(&scd->scd_lock);

	rc = srpc_post_passive_rqtbuf(sv->sv_id,
				      !srpc_serv_is_framework(sv),
				      msg, sizeof(*msg), &buf->buf_mdh,
				      &scd->scd_ev);

	/*
	 * At this point, a RPC (new or delayed) may have arrived in
	 * msg and its event handler has been called. So we must add
	 * buf to scd_buf_posted _before_ dropping scd_lock
	 */
	spin_lock(&scd->scd_lock);

	if (rc == 0) {
		if (!sv->sv_shuttingdown)
			return 0;

		spin_unlock(&scd->scd_lock);
		/*
		 * srpc_shutdown_service might have tried to unlink me
		 * when my buf_mdh was still invalid
		 */
		LNetMDUnlink(buf->buf_mdh);
		spin_lock(&scd->scd_lock);
		return 0;
	}

	scd->scd_buf_nposted--;
	if (sv->sv_shuttingdown)
		return rc; /* don't allow to change scd_buf_posted */

	list_del(&buf->buf_list);
	spin_unlock(&scd->scd_lock);

	LIBCFS_FREE(buf, sizeof(*buf));

	spin_lock(&scd->scd_lock);
	return rc;
}

int
srpc_add_buffer(struct swi_workitem *wi)
{
	struct srpc_service_cd *scd = wi->swi_workitem.wi_data;
	struct srpc_buffer *buf;
	int rc = 0;

	/*
	 * it's called by workitem scheduler threads; these threads
	 * should have been set CPT affinity, so buffers will be posted
	 * on the CPT local list of the Portal
	 */
	spin_lock(&scd->scd_lock);

	while (scd->scd_buf_adjust > 0 &&
	       !scd->scd_svc->sv_shuttingdown) {
		scd->scd_buf_adjust--; /* consume it */
		scd->scd_buf_posting++;

		spin_unlock(&scd->scd_lock);

		LIBCFS_ALLOC(buf, sizeof(*buf));
		if (buf == NULL) {
			CERROR("Failed to add new buf to service: %s\n",
			       scd->scd_svc->sv_name);
			spin_lock(&scd->scd_lock);
			rc = -ENOMEM;
			break;
		}

		spin_lock(&scd->scd_lock);
		if (scd->scd_svc->sv_shuttingdown) {
			spin_unlock(&scd->scd_lock);
			LIBCFS_FREE(buf, sizeof(*buf));

			spin_lock(&scd->scd_lock);
			rc = -ESHUTDOWN;
			break;
		}

		rc = srpc_service_post_buffer(scd, buf);
		if (rc != 0)
			break; /* buf has been freed inside */

		LASSERT(scd->scd_buf_posting > 0);
		scd->scd_buf_posting--;
		scd->scd_buf_total++;
		scd->scd_buf_low = max(2, scd->scd_buf_total / 4);
	}

	if (rc != 0) {
		scd->scd_buf_err_stamp = ktime_get_real_seconds();
		scd->scd_buf_err = rc;

		LASSERT(scd->scd_buf_posting > 0);
		scd->scd_buf_posting--;
	}

	spin_unlock(&scd->scd_lock);
	return 0;
}

int
srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
{
	struct srpc_service_cd *scd;
	int rc = 0;
	int i;

	LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		scd->scd_buf_err = 0;
		scd->scd_buf_err_stamp = 0;
		scd->scd_buf_posting = 0;
		scd->scd_buf_adjust = nbuffer;
		/* start to post buffers */
		swi_schedule_workitem(&scd->scd_buf_wi);
		spin_unlock(&scd->scd_lock);

		/* framework service only posts buffers for one partition */
		if (srpc_serv_is_framework(sv))
			break;
	}

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);
		/*
		 * NB: srpc_service_add_buffers() can be called inside the
		 * thread context of lst_sched_serial, and we don't normally
		 * allow sleeping inside the thread context of a WI scheduler
		 * because it will block the current scheduler thread from
		 * doing anything else; even worse, it could deadlock if it's
		 * waiting on the result of another WI of the same scheduler.
		 * However, it's safe here because scd_buf_wi is scheduled by
		 * a thread in a different WI scheduler (lst_sched_test), so
		 * we don't have any risk of deadlock, though this could
		 * block all WIs pending on lst_sched_serial for a moment,
		 * which is not good but not fatal.
		 */
		lst_wait_until(scd->scd_buf_err != 0 ||
			       (scd->scd_buf_adjust == 0 &&
				scd->scd_buf_posting == 0),
			       scd->scd_lock, "waiting for adding buffer\n");

		if (scd->scd_buf_err != 0 && rc == 0)
			rc = scd->scd_buf_err;

		spin_unlock(&scd->scd_lock);
	}

	return rc;
}

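/*
 * Callers therefore get synchronous semantics: by the time
 * srpc_service_add_buffers() returns, every partition has either posted
 * its share of request buffers or recorded the first error in
 * scd_buf_err, which is what gets propagated back here.
 */
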
void
srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
{
	struct srpc_service_cd *scd;
	int num;
	int i;

	LASSERT(!sv->sv_shuttingdown);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		num = scd->scd_buf_total + scd->scd_buf_posting;
		scd->scd_buf_adjust -= min(nbuffer, num);

		spin_unlock(&scd->scd_lock);
	}
}

/* returns 1 if sv has finished, otherwise 0 */
int
srpc_finish_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int i;

	LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);
		if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
			spin_unlock(&scd->scd_lock);
			return 0;
		}

		if (scd->scd_buf_nposted > 0) {
			CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
			       scd->scd_buf_nposted);
			spin_unlock(&scd->scd_lock);
			return 0;
		}

		if (list_empty(&scd->scd_rpc_active)) {
			spin_unlock(&scd->scd_lock);
			continue;
		}

		rpc = list_entry(scd->scd_rpc_active.next,
				 struct srpc_server_rpc, srpc_list);
		CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
			rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
			swi_state2str(rpc->srpc_wi.swi_state),
			rpc->srpc_wi.swi_workitem.wi_scheduled,
			rpc->srpc_wi.swi_workitem.wi_running,
			rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
			rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
		spin_unlock(&scd->scd_lock);
		return 0;
	}

	srpc_service_fini(sv); /* no lock needed from now on */
	return 1;
}

/* called with sv->sv_lock held */
static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
__must_hold(&scd->scd_lock)
{
	if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
		if (srpc_service_post_buffer(scd, buf) != 0) {
			CWARN("Failed to post %s buffer\n",
			      scd->scd_svc->sv_name);
		}
		return;
	}

	/* service is shutting down, or we want to recycle some buffers */
	scd->scd_buf_total--;

	if (scd->scd_buf_adjust < 0) {
		scd->scd_buf_adjust++;
		if (scd->scd_buf_adjust < 0 &&
		    scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
			CDEBUG(D_INFO,
			       "Try to recycle %d buffers but nothing left\n",
			       scd->scd_buf_adjust);
			scd->scd_buf_adjust = 0;
		}
	}

	spin_unlock(&scd->scd_lock);
	LIBCFS_FREE(buf, sizeof(*buf));
	spin_lock(&scd->scd_lock);
}

void
srpc_abort_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int i;

	CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
	       sv->sv_id, sv->sv_name);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		/*
		 * schedule in-flight RPCs to notice the abort, NB:
		 * racing with incoming RPCs; a complete fix should make
		 * test RPCs carry the session ID in their headers
		 */
		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
			rpc->srpc_aborted = 1;
			swi_schedule_workitem(&rpc->srpc_wi);
		}

		spin_unlock(&scd->scd_lock);
	}
}

void
srpc_shutdown_service(srpc_service_t *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	srpc_buffer_t *buf;
	int i;

	CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
	       sv->sv_id, sv->sv_name);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
		spin_lock(&scd->scd_lock);

	sv->sv_shuttingdown = 1; /* i.e. no new active RPC */

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
		spin_unlock(&scd->scd_lock);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		/* schedule in-flight RPCs to notice the shutdown */
		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
			swi_schedule_workitem(&rpc->srpc_wi);

		spin_unlock(&scd->scd_lock);

		/*
		 * OK to traverse scd_buf_posted without lock, since no one
		 * touches scd_buf_posted now
		 */
		list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
			LNetMDUnlink(buf->buf_mdh);
	}
}

static int
srpc_send_request(srpc_client_rpc_t *rpc)
{
	srpc_event_t *ev = &rpc->crpc_reqstev;
	int rc;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REQUEST_SENT;

	rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
				   rpc->crpc_service, &rpc->crpc_reqstmsg,
				   sizeof(srpc_msg_t), LNET_MD_OP_PUT,
				   rpc->crpc_dest, LNET_NID_ANY,
				   &rpc->crpc_reqstmdh, ev);
	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1; /* no more event expected */
	}
	return rc;
}

static int
srpc_prepare_reply(srpc_client_rpc_t *rpc)
{
	srpc_event_t *ev = &rpc->crpc_replyev;
	__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
	int rc;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REPLY_RCVD;

	*id = srpc_next_id();

	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
				    &rpc->crpc_replymsg, sizeof(srpc_msg_t),
				    LNET_MD_OP_PUT, rpc->crpc_dest,
				    &rpc->crpc_replymdh, ev);
	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1; /* no more event expected */
	}
	return rc;
}

static int
srpc_prepare_bulk(srpc_client_rpc_t *rpc)
{
	srpc_bulk_t *bk = &rpc->crpc_bulk;
	srpc_event_t *ev = &rpc->crpc_bulkev;
	__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
	int rc;
	int opt;

	LASSERT(bk->bk_niov <= LNET_MAX_IOV);

	if (bk->bk_niov == 0)
		return 0; /* nothing to do */

	opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
	opt |= LNET_MD_KIOV;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_BULK_REQ_RCVD;

	*id = srpc_next_id();

	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
				    &bk->bk_iovs[0], bk->bk_niov, opt,
				    rpc->crpc_dest, &bk->bk_mdh, ev);
	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1; /* no more event expected */
	}
	return rc;
}

static int
srpc_do_bulk(struct srpc_server_rpc *rpc)
{
	srpc_event_t *ev = &rpc->srpc_ev;
	srpc_bulk_t *bk = rpc->srpc_bulk;
	__u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
	int rc;
	int opt;

	LASSERT(bk != NULL);

	opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
	opt |= LNET_MD_KIOV;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;

	rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
				   &bk->bk_iovs[0], bk->bk_niov, opt,
				   rpc->srpc_peer, rpc->srpc_self,
				   &bk->bk_mdh, ev);
	if (rc != 0)
		ev->ev_fired = 1; /* no more event expected */
	return rc;
}

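/*
 * Transfer direction follows bk_sink: srpc_prepare_bulk() on the client
 * posts a sink buffer passively with LNET_MD_OP_PUT so the server can
 * write into it, while srpc_do_bulk() here actively GETs when the
 * server-side descriptor is a sink and PUTs otherwise; the two ends thus
 * always pick complementary operations for the same RPC.
 */
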
/* only called from srpc_handle_rpc */
static void
srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
{
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	srpc_buffer_t *buffer;

	LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);

	rpc->srpc_status = status;

	CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR,
		     "Server RPC %p done: service %s, peer %s, status %s:%d\n",
		     rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
		     swi_state2str(rpc->srpc_wi.swi_state), status);

	if (status != 0) {
		spin_lock(&srpc_data.rpc_glock);
		srpc_data.rpc_counters.rpcs_dropped++;
		spin_unlock(&srpc_data.rpc_glock);
	}

	if (rpc->srpc_done != NULL)
		(*rpc->srpc_done)(rpc);
	LASSERT(rpc->srpc_bulk == NULL);

	spin_lock(&scd->scd_lock);

	if (rpc->srpc_reqstbuf != NULL) {
		/*
		 * NB might drop sv_lock in srpc_service_recycle_buffer,
		 * but sv won't go away since scd_rpc_active must not be
		 * empty
		 */
		srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
		rpc->srpc_reqstbuf = NULL;
	}

	list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */

	/*
	 * No one can schedule me now since:
	 * - I'm not on scd_rpc_active.
	 * - all LNet events have been fired.
	 * Cancel pending schedules and prevent future schedule attempts:
	 */
	LASSERT(rpc->srpc_ev.ev_fired);
	swi_exit_workitem(&rpc->srpc_wi);

	if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
		buffer = list_entry(scd->scd_buf_blocked.next,
				    srpc_buffer_t, buf_list);
		list_del(&buffer->buf_list);

		srpc_init_server_rpc(rpc, scd, buffer);
		list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
		swi_schedule_workitem(&rpc->srpc_wi);
	} else {
		list_add(&rpc->srpc_list, &scd->scd_rpc_free);
	}

	spin_unlock(&scd->scd_lock);
}

/* handles an incoming RPC */
int
srpc_handle_rpc(swi_workitem_t *wi)
{
	struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	srpc_event_t *ev = &rpc->srpc_ev;
	int rc = 0;

	LASSERT(wi == &rpc->srpc_wi);

	spin_lock(&scd->scd_lock);

	if (sv->sv_shuttingdown || rpc->srpc_aborted) {
		spin_unlock(&scd->scd_lock);

		if (rpc->srpc_bulk != NULL)
			LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
		LNetMDUnlink(rpc->srpc_replymdh);

		if (ev->ev_fired) { /* no more event, OK to finish */
			srpc_server_rpc_done(rpc, -ESHUTDOWN);
			return 1;
		}
		return 0;
	}

	spin_unlock(&scd->scd_lock);

	switch (wi->swi_state) {
	default:
		LBUG();
	case SWI_STATE_NEWBORN: {
		srpc_msg_t *msg;
		srpc_generic_reply_t *reply;

		msg = &rpc->srpc_reqstbuf->buf_msg;
		reply = &rpc->srpc_replymsg.msg_body.reply;

		if (msg->msg_magic == 0) {
			/* moaned already in srpc_lnet_ev_handler */
			srpc_server_rpc_done(rpc, EBADMSG);
			return 1;
		}

		srpc_unpack_msg_hdr(msg);
		if (msg->msg_version != SRPC_MSG_VERSION) {
			CWARN("Version mismatch: %u, %u expected, from %s\n",
			      msg->msg_version, SRPC_MSG_VERSION,
			      libcfs_id2str(rpc->srpc_peer));
			reply->status = EPROTO;
			/* drop through and send reply */
		} else {
			reply->status = 0;
			rc = (*sv->sv_handler)(rpc);
			LASSERT(reply->status == 0 || !rpc->srpc_bulk);
			if (rc != 0) {
				srpc_server_rpc_done(rpc, rc);
				return 1;
			}
		}

		wi->swi_state = SWI_STATE_BULK_STARTED;

		if (rpc->srpc_bulk != NULL) {
			rc = srpc_do_bulk(rpc);
			if (rc == 0)
				return 0; /* wait for bulk */

			LASSERT(ev->ev_fired);
			ev->ev_status = rc;
		}
	}
	case SWI_STATE_BULK_STARTED:
		LASSERT(rpc->srpc_bulk == NULL || ev->ev_fired);

		if (rpc->srpc_bulk != NULL) {
			rc = ev->ev_status;

			if (sv->sv_bulk_ready != NULL)
				rc = (*sv->sv_bulk_ready)(rpc, rc);

			if (rc != 0) {
				srpc_server_rpc_done(rpc, rc);
				return 1;
			}
		}

		wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
		rc = srpc_send_reply(rpc);
		if (rc == 0)
			return 0; /* wait for reply */
		srpc_server_rpc_done(rpc, rc);
		return 1;

	case SWI_STATE_REPLY_SUBMITTED:
		if (!ev->ev_fired) {
			CERROR("RPC %p: bulk %p, service %d\n",
			       rpc, rpc->srpc_bulk, sv->sv_id);
			CERROR("Event: status %d, type %d, lnet %d\n",
			       ev->ev_status, ev->ev_type, ev->ev_lnet);
			LASSERT(ev->ev_fired);
		}

		wi->swi_state = SWI_STATE_DONE;
		srpc_server_rpc_done(rpc, ev->ev_status);
		return 1;
	}

	return 0;
}

static void
srpc_client_rpc_expired(void *data)
{
	srpc_client_rpc_t *rpc = data;

	CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
	      rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
	      rpc->crpc_timeout);

	spin_lock(&rpc->crpc_lock);

	rpc->crpc_timeout = 0;
	srpc_abort_rpc(rpc, -ETIMEDOUT);

	spin_unlock(&rpc->crpc_lock);

	spin_lock(&srpc_data.rpc_glock);
	srpc_data.rpc_counters.rpcs_expired++;
	spin_unlock(&srpc_data.rpc_glock);
}

static void
srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
{
	stt_timer_t *timer = &rpc->crpc_timer;

	if (rpc->crpc_timeout == 0)
		return;

	INIT_LIST_HEAD(&timer->stt_list);
	timer->stt_data = rpc;
	timer->stt_func = srpc_client_rpc_expired;
	timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout;
	stt_add_timer(timer);
}

/*
 * Called with rpc->crpc_lock held.
 *
 * Upon exit the RPC expiry timer is not queued and the handler is not
 * running on any CPU.
 */
static void
srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
{
	/* timer not planted or already exploded */
	if (rpc->crpc_timeout == 0)
		return;

	/* timer successfully defused */
	if (stt_del_timer(&rpc->crpc_timer))
		return;

	/* timer detonated, wait for it to explode */
	while (rpc->crpc_timeout != 0) {
		spin_unlock(&rpc->crpc_lock);

		schedule();

		spin_lock(&rpc->crpc_lock);
	}
}

static void
srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
{
	swi_workitem_t *wi = &rpc->crpc_wi;

	LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);

	spin_lock(&rpc->crpc_lock);

	rpc->crpc_closed = 1;
	if (rpc->crpc_status == 0)
		rpc->crpc_status = status;

	srpc_del_client_rpc_timer(rpc);

	CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR,
		     "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
		     rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
		     swi_state2str(wi->swi_state), rpc->crpc_aborted, status);

	/*
	 * No one can schedule me now since:
	 * - RPC timer has been defused.
	 * - all LNet events have been fired.
	 * - crpc_closed has been set, preventing srpc_abort_rpc from
	 *   scheduling me.
	 * Cancel pending schedules and prevent future schedule attempts:
	 */
	LASSERT(!srpc_event_pending(rpc));
	swi_exit_workitem(wi);

	spin_unlock(&rpc->crpc_lock);

	(*rpc->crpc_done)(rpc);
}

/* sends an outgoing RPC */
int
srpc_send_rpc(swi_workitem_t *wi)
{
	int rc = 0;
	srpc_client_rpc_t *rpc;
	srpc_msg_t *reply;
	int do_bulk;

	LASSERT(wi != NULL);

	rpc = wi->swi_workitem.wi_data;

	LASSERT(rpc != NULL);
	LASSERT(wi == &rpc->crpc_wi);

	reply = &rpc->crpc_replymsg;
	do_bulk = rpc->crpc_bulk.bk_niov > 0;

	spin_lock(&rpc->crpc_lock);

	if (rpc->crpc_aborted) {
		spin_unlock(&rpc->crpc_lock);
		goto abort;
	}

	spin_unlock(&rpc->crpc_lock);

	switch (wi->swi_state) {
	default:
		LBUG();
	case SWI_STATE_NEWBORN:
		LASSERT(!srpc_event_pending(rpc));

		rc = srpc_prepare_reply(rpc);
		if (rc != 0) {
			srpc_client_rpc_done(rpc, rc);
			return 1;
		}

		rc = srpc_prepare_bulk(rpc);
		if (rc != 0)
			break;

		wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
		rc = srpc_send_request(rpc);
		break;

	case SWI_STATE_REQUEST_SUBMITTED:
		/*
		 * CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
		 * order; however, they're processed in a strict order:
		 * rqt, rpy, and bulk.
		 */
		if (!rpc->crpc_reqstev.ev_fired)
			break;

		rc = rpc->crpc_reqstev.ev_status;
		if (rc != 0)
			break;

		wi->swi_state = SWI_STATE_REQUEST_SENT;
		/* perhaps more events, fall thru */
	case SWI_STATE_REQUEST_SENT: {
		srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);

		if (!rpc->crpc_replyev.ev_fired)
			break;

		rc = rpc->crpc_replyev.ev_status;
		if (rc != 0)
			break;

		srpc_unpack_msg_hdr(reply);
		if (reply->msg_type != type ||
		    (reply->msg_magic != SRPC_MSG_MAGIC &&
		     reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
			CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
			      libcfs_id2str(rpc->crpc_dest),
			      reply->msg_type, type,
			      reply->msg_magic, SRPC_MSG_MAGIC);
			rc = -EBADMSG;
			break;
		}

		if (do_bulk && reply->msg_body.reply.status != 0) {
			CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
			      reply->msg_body.reply.status,
			      libcfs_id2str(rpc->crpc_dest));
			LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
		}

		wi->swi_state = SWI_STATE_REPLY_RECEIVED;
	}
	case SWI_STATE_REPLY_RECEIVED:
		if (do_bulk && !rpc->crpc_bulkev.ev_fired)
			break;

		rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;

		/*
		 * Bulk buffer was unlinked due to remote error. Clear error
		 * since reply buffer still contains valid data.
		 * NB rpc->crpc_done shouldn't look into bulk data in case of
		 * remote error.
		 */
		if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
		    rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
			rc = 0;

		wi->swi_state = SWI_STATE_DONE;
		srpc_client_rpc_done(rpc, rc);
		return 1;
	}

	if (rc != 0) {
		spin_lock(&rpc->crpc_lock);
		srpc_abort_rpc(rpc, rc);
		spin_unlock(&rpc->crpc_lock);
	}

abort:
	if (rpc->crpc_aborted) {
		LNetMDUnlink(rpc->crpc_reqstmdh);
		LNetMDUnlink(rpc->crpc_replymdh);
		LNetMDUnlink(rpc->crpc_bulk.bk_mdh);

		if (!srpc_event_pending(rpc)) {
			srpc_client_rpc_done(rpc, -EINTR);
			return 1;
		}
	}
	return 0;
}

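/*
 * Client-side RPC life cycle mirrors the server one: SWI_STATE_NEWBORN
 * (post reply and bulk sinks, then send the request) ->
 * SWI_STATE_REQUEST_SUBMITTED/SENT (wait for the request and reply
 * events, in order) -> SWI_STATE_REPLY_RECEIVED (wait for bulk, if any)
 * -> SWI_STATE_DONE; any error along the way funnels through
 * srpc_abort_rpc() into the abort path above.
 */
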
srpc_client_rpc_t *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
		       int nbulkiov, int bulklen,
		       void (*rpc_done)(srpc_client_rpc_t *),
		       void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
{
	srpc_client_rpc_t *rpc;

	LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
				   crpc_bulk.bk_iovs[nbulkiov]));
	if (rpc == NULL)
		return NULL;

	srpc_init_client_rpc(rpc, peer, service, nbulkiov,
			     bulklen, rpc_done, rpc_fini, priv);
	return rpc;
}

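/*
 * Usage sketch (hypothetical names, not part of this file): a caller
 * builds an RPC with no bulk, fills in the request message, and posts
 * it; my_rpc_done() runs when the RPC completes or aborts. The service
 * id constant is assumed from the selftest headers.
 *
 *	rpc = srpc_create_client_rpc(peer, SRPC_SERVICE_PING, 0, 0,
 *				     my_rpc_done, my_rpc_fini, NULL);
 *	if (rpc != NULL) {
 *		... fill rpc->crpc_reqstmsg ...
 *		spin_lock(&rpc->crpc_lock);
 *		srpc_post_rpc(rpc);
 *		spin_unlock(&rpc->crpc_lock);
 *	}
 */
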
/* called with rpc->crpc_lock held */
void
srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
{
	LASSERT(why != 0);

	if (rpc->crpc_aborted ||	/* already aborted */
	    rpc->crpc_closed)		/* callback imminent */
		return;

	CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n",
	       rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
	       swi_state2str(rpc->crpc_wi.swi_state), why);

	rpc->crpc_aborted = 1;
	rpc->crpc_status = why;
	swi_schedule_workitem(&rpc->crpc_wi);
}

/* called with rpc->crpc_lock held */
void
srpc_post_rpc(srpc_client_rpc_t *rpc)
{
	LASSERT(!rpc->crpc_aborted);
	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

	CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
	       libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
	       rpc->crpc_timeout);

	srpc_add_client_rpc_timer(rpc);
	swi_schedule_workitem(&rpc->crpc_wi);
}

int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
	srpc_event_t *ev = &rpc->srpc_ev;
	struct srpc_msg *msg = &rpc->srpc_replymsg;
	struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	__u64 rpyid;
	int rc;

	LASSERT(buffer != NULL);
	rpyid = buffer->buf_msg.msg_body.reqst.rpyid;

	spin_lock(&scd->scd_lock);

	if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
		/*
		 * Repost buffer before replying since test client
		 * might send me another RPC once it gets the reply
		 */
		if (srpc_service_post_buffer(scd, buffer) != 0)
			CWARN("Failed to repost %s buffer\n", sv->sv_name);
		rpc->srpc_reqstbuf = NULL;
	}

	spin_unlock(&scd->scd_lock);

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REPLY_SENT;

	msg->msg_magic = SRPC_MSG_MAGIC;
	msg->msg_version = SRPC_MSG_VERSION;
	msg->msg_type = srpc_service2reply(sv->sv_id);

	rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
				   sizeof(*msg), LNET_MD_OP_PUT,
				   rpc->srpc_peer, rpc->srpc_self,
				   &rpc->srpc_replymdh, ev);
	if (rc != 0)
		ev->ev_fired = 1; /* no more event expected */
	return rc;
}

/* when in kernel always called with LNET_LOCK() held, and in thread context */
static void
srpc_lnet_ev_handler(lnet_event_t *ev)
{
	struct srpc_service_cd *scd;
	srpc_event_t *rpcev = ev->md.user_ptr;
	srpc_client_rpc_t *crpc;
	struct srpc_server_rpc *srpc;
	srpc_buffer_t *buffer;
	srpc_service_t *sv;
	srpc_msg_t *msg;
	srpc_msg_type_t type;

	LASSERT(!in_interrupt());

	if (ev->status != 0) {
		spin_lock(&srpc_data.rpc_glock);
		srpc_data.rpc_counters.errors++;
		spin_unlock(&srpc_data.rpc_glock);
	}

	rpcev->ev_lnet = ev->type;

	switch (rpcev->ev_type) {
	default:
		CERROR("Unknown event: status %d, type %d, lnet %d\n",
		       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
		LBUG();
	case SRPC_REQUEST_SENT:
		if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
			spin_lock(&srpc_data.rpc_glock);
			srpc_data.rpc_counters.rpcs_sent++;
			spin_unlock(&srpc_data.rpc_glock);
		}
	case SRPC_REPLY_RCVD:
	case SRPC_BULK_REQ_RCVD:
		crpc = rpcev->ev_data;

		if (rpcev != &crpc->crpc_reqstev &&
		    rpcev != &crpc->crpc_replyev &&
		    rpcev != &crpc->crpc_bulkev) {
			CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
			       rpcev, crpc, &crpc->crpc_reqstev,
			       &crpc->crpc_replyev, &crpc->crpc_bulkev);
			CERROR("Bad event: status %d, type %d, lnet %d\n",
			       rpcev->ev_status, rpcev->ev_type,
			       rpcev->ev_lnet);
			LBUG();
		}

		spin_lock(&crpc->crpc_lock);

		LASSERT(rpcev->ev_fired == 0);
		rpcev->ev_fired = 1;
		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
				   -EINTR : ev->status;
		swi_schedule_workitem(&crpc->crpc_wi);

		spin_unlock(&crpc->crpc_lock);
		break;

	case SRPC_REQUEST_RCVD:
		scd = rpcev->ev_data;
		sv = scd->scd_svc;

		LASSERT(rpcev == &scd->scd_ev);

		spin_lock(&scd->scd_lock);

		LASSERT(ev->unlinked);
		LASSERT(ev->type == LNET_EVENT_PUT ||
			ev->type == LNET_EVENT_UNLINK);
		LASSERT(ev->type != LNET_EVENT_UNLINK ||
			sv->sv_shuttingdown);

		buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
		buffer->buf_peer = ev->initiator;
		buffer->buf_self = ev->target.nid;

		LASSERT(scd->scd_buf_nposted > 0);
		scd->scd_buf_nposted--;

		if (sv->sv_shuttingdown) {
			/*
			 * Leave buffer on scd->scd_buf_nposted since
			 * srpc_finish_service needs to traverse it.
			 */
			spin_unlock(&scd->scd_lock);
			break;
		}

		if (scd->scd_buf_err_stamp != 0 &&
		    scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
			/* re-enable adding buffer */
			scd->scd_buf_err_stamp = 0;
			scd->scd_buf_err = 0;
		}

		if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
		    scd->scd_buf_adjust == 0 &&
		    scd->scd_buf_nposted < scd->scd_buf_low) {
			scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
						  SFW_TEST_WI_MIN);
			swi_schedule_workitem(&scd->scd_buf_wi);
		}

		list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
		msg = &buffer->buf_msg;
		type = srpc_service2request(sv->sv_id);

		if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
		    (msg->msg_type != type &&
		     msg->msg_type != __swab32(type)) ||
		    (msg->msg_magic != SRPC_MSG_MAGIC &&
		     msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
			CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
			       sv->sv_name, libcfs_id2str(ev->initiator),
			       ev->status, ev->mlength,
			       msg->msg_type, msg->msg_magic);

			/*
			 * NB can't call srpc_service_recycle_buffer here
			 * since it may call LNetM[DE]Attach. The invalid
			 * magic tells srpc_handle_rpc to drop this RPC
			 */
			msg->msg_magic = 0;
		}

		if (!list_empty(&scd->scd_rpc_free)) {
			srpc = list_entry(scd->scd_rpc_free.next,
					  struct srpc_server_rpc,
					  srpc_list);
			list_del(&srpc->srpc_list);

			srpc_init_server_rpc(srpc, scd, buffer);
			list_add_tail(&srpc->srpc_list,
				      &scd->scd_rpc_active);
			swi_schedule_workitem(&srpc->srpc_wi);
		} else {
			list_add_tail(&buffer->buf_list,
				      &scd->scd_buf_blocked);
		}

		spin_unlock(&scd->scd_lock);

		spin_lock(&srpc_data.rpc_glock);
		srpc_data.rpc_counters.rpcs_rcvd++;
		spin_unlock(&srpc_data.rpc_glock);
		break;

	case SRPC_BULK_GET_RPLD:
		LASSERT(ev->type == LNET_EVENT_SEND ||
			ev->type == LNET_EVENT_REPLY ||
			ev->type == LNET_EVENT_UNLINK);

		if (!ev->unlinked)
			break; /* wait for final event */

	case SRPC_BULK_PUT_SENT:
		if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
			spin_lock(&srpc_data.rpc_glock);

			if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
				srpc_data.rpc_counters.bulk_get += ev->mlength;
			else
				srpc_data.rpc_counters.bulk_put += ev->mlength;

			spin_unlock(&srpc_data.rpc_glock);
		}
	case SRPC_REPLY_SENT:
		srpc = rpcev->ev_data;
		scd = srpc->srpc_scd;

		LASSERT(rpcev == &srpc->srpc_ev);

		spin_lock(&scd->scd_lock);

		rpcev->ev_fired = 1;
		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
				   -EINTR : ev->status;
		swi_schedule_workitem(&srpc->srpc_wi);

		spin_unlock(&scd->scd_lock);
		break;
	}
}

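/*
 * In short, the handler only translates LNet events into srpc_event_t
 * state (ev_fired/ev_status) and reschedules the owning workitem; all
 * real processing happens later in srpc_handle_rpc() or srpc_send_rpc()
 * in thread context, which keeps this callback short and non-blocking.
 */
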
int
srpc_startup(void)
{
	int rc;

	memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
	spin_lock_init(&srpc_data.rpc_glock);

	/* 1 second pause to avoid timestamp reuse */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(cfs_time_seconds(1));
	srpc_data.rpc_matchbits = ((__u64)ktime_get_real_seconds()) << 48;

	srpc_data.rpc_state = SRPC_STATE_NONE;

	rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
	if (rc < 0) {
		CERROR("LNetNIInit() has failed: %d\n", rc);
		return rc;
	}

	srpc_data.rpc_state = SRPC_STATE_NI_INIT;

	LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
	rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
	if (rc != 0) {
		CERROR("LNetEQAlloc() has failed: %d\n", rc);
		goto bail;
	}

	rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
	LASSERT(rc == 0);
	rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
	LASSERT(rc == 0);

	srpc_data.rpc_state = SRPC_STATE_EQ_INIT;

	rc = stt_startup();

bail:
	if (rc != 0)
		srpc_shutdown();
	else
		srpc_data.rpc_state = SRPC_STATE_RUNNING;

	return rc;
}

void
srpc_shutdown(void)
{
	int i;
	int rc;
	int state;

	state = srpc_data.rpc_state;
	srpc_data.rpc_state = SRPC_STATE_STOPPING;

	switch (state) {
	default:
		LBUG();
	case SRPC_STATE_RUNNING:
		spin_lock(&srpc_data.rpc_glock);

		for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
			srpc_service_t *sv = srpc_data.rpc_services[i];

			LASSERTF(sv == NULL,
				 "service not empty: id %d, name %s\n",
				 i, sv->sv_name);
		}

		spin_unlock(&srpc_data.rpc_glock);

		stt_shutdown();

	case SRPC_STATE_EQ_INIT:
		rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
		rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
		LASSERT(rc == 0);
		rc = LNetEQFree(srpc_data.rpc_lnet_eq);
		LASSERT(rc == 0); /* the EQ should have no user by now */

	case SRPC_STATE_NI_INIT:
		LNetNIFini();
	}
}