/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/selftest/rpc.c
 *
 * Author: Isaac Huang <isaac@clusterfs.com>
 *
 * 2012-05-13: Liang Zhen <liang@whamcloud.com>
 * - percpt data for service to improve smp performance
 */
#define DEBUG_SUBSYSTEM S_LNET

#include "selftest.h"
struct smoketest_rpc {
	spinlock_t	 rpc_glock;	/* global lock */
	srpc_service_t	*rpc_services[SRPC_SERVICE_MAX_ID + 1];
	lnet_handle_eq_t rpc_lnet_eq;	/* _the_ LNet event queue */
	srpc_state_t	 rpc_state;
	srpc_counters_t	 rpc_counters;
	__u64		 rpc_matchbits;	/* matchbits counter */
} srpc_data;
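/* NB: srpc_data is the module-global singleton for the selftest RPC layer;
 * rpc_glock serializes the service table, the counters and the matchbits
 * generator. */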
static inline int
srpc_serv_portal(int svc_id)
{
	return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
	       SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
}
/* forward ref's */
int srpc_handle_rpc (swi_workitem_t *wi);
void srpc_get_counters (srpc_counters_t *cnt)
{
	spin_lock(&srpc_data.rpc_glock);
	*cnt = srpc_data.rpc_counters;
	spin_unlock(&srpc_data.rpc_glock);
}
void srpc_set_counters (const srpc_counters_t *cnt)
{
	spin_lock(&srpc_data.rpc_glock);
	srpc_data.rpc_counters = *cnt;
	spin_unlock(&srpc_data.rpc_glock);
}
static int
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
{
	nob = min(nob, (int)PAGE_CACHE_SIZE);

	LASSERT(nob > 0);
	LASSERT(i >= 0 && i < bk->bk_niov);

	bk->bk_iovs[i].kiov_offset = 0;
	bk->bk_iovs[i].kiov_page   = pg;
	bk->bk_iovs[i].kiov_len    = nob;
	return nob;
}
void
srpc_free_bulk (srpc_bulk_t *bk)
{
	int	     i;
	struct page *pg;

	LASSERT (bk != NULL);

	for (i = 0; i < bk->bk_niov; i++) {
		pg = bk->bk_iovs[i].kiov_page;
		if (pg == NULL)
			break;

		__free_page(pg);
	}

	LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
	return;
}
srpc_bulk_t *
srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
{
	srpc_bulk_t *bk;
	int	     i;

	LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);

	LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
			 offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
	if (bk == NULL) {
		CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
		return NULL;
	}

	memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
	bk->bk_sink = sink;
	bk->bk_len  = bulk_len;
	bk->bk_niov = bulk_npg;

	for (i = 0; i < bulk_npg; i++) {
		struct page *pg;
		int	     nob;

		pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(),
							  cpt),
				      GFP_IOFS, 0);
		if (pg == NULL) {
			CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
			srpc_free_bulk(bk);
			return NULL;
		}

		nob = srpc_add_bulk_page(bk, pg, i, bulk_len);
		bulk_len -= nob;
	}

	return bk;
}
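#if 0	/* Usage sketch (not part of the original source): how a caller might
	 * pair srpc_alloc_bulk()/srpc_free_bulk() for a two-page sink buffer
	 * on CPT 0; error handling and the actual transfer are elided. */
static void
example_bulk_roundtrip(void)
{
	/* 2 pages, 2 * PAGE_CACHE_SIZE bytes, sink (peer PUTs into it) */
	srpc_bulk_t *bk = srpc_alloc_bulk(0, 2, 2 * PAGE_CACHE_SIZE, 1);

	if (bk == NULL)
		return;

	/* ... attach to an RPC and run the transfer here ... */

	srpc_free_bulk(bk);
}
#endif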
static inline __u64
srpc_next_id (void)
{
	__u64 id;

	spin_lock(&srpc_data.rpc_glock);
	id = srpc_data.rpc_matchbits++;
	spin_unlock(&srpc_data.rpc_glock);
	return id;
}
static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
		     struct srpc_service_cd *scd,
		     struct srpc_buffer *buffer)
{
	memset(rpc, 0, sizeof(*rpc));
	swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc,
			  srpc_serv_is_framework(scd->scd_svc) ?
			  lst_sched_serial : lst_sched_test[scd->scd_cpt]);

	rpc->srpc_ev.ev_fired = 1; /* no event expected now */

	rpc->srpc_scd	   = scd;
	rpc->srpc_reqstbuf = buffer;
	rpc->srpc_peer	   = buffer->buf_peer;
	rpc->srpc_self	   = buffer->buf_self;
	LNetInvalidateHandle(&rpc->srpc_replymdh);
}
static void
srpc_service_fini(struct srpc_service *svc)
{
	struct srpc_service_cd	*scd;
	struct srpc_server_rpc	*rpc;
	struct srpc_buffer	*buf;
	struct list_head	*q;
	int			 i;

	if (svc->sv_cpt_data == NULL)
		return;

	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
		while (1) {
			if (!list_empty(&scd->scd_buf_posted))
				q = &scd->scd_buf_posted;
			else if (!list_empty(&scd->scd_buf_blocked))
				q = &scd->scd_buf_blocked;
			else
				break;

			while (!list_empty(q)) {
				buf = list_entry(q->next,
						 struct srpc_buffer,
						 buf_list);
				list_del(&buf->buf_list);
				LIBCFS_FREE(buf, sizeof(*buf));
			}
		}

		LASSERT(list_empty(&scd->scd_rpc_active));

		while (!list_empty(&scd->scd_rpc_free)) {
			rpc = list_entry(scd->scd_rpc_free.next,
					 struct srpc_server_rpc,
					 srpc_list);
			list_del(&rpc->srpc_list);
			LIBCFS_FREE(rpc, sizeof(*rpc));
		}
	}

	cfs_percpt_free(svc->sv_cpt_data);
	svc->sv_cpt_data = NULL;
}
static int
srpc_service_nrpcs(struct srpc_service *svc)
{
	int nrpcs = svc->sv_wi_total / svc->sv_ncpts;

	return srpc_serv_is_framework(svc) ?
	       max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
}
int srpc_add_buffer(struct swi_workitem *wi);
static int
srpc_service_init(struct srpc_service *svc)
{
	struct srpc_service_cd	*scd;
	struct srpc_server_rpc	*rpc;
	int			 nrpcs;
	int			 i;
	int			 j;

	svc->sv_shuttingdown = 0;

	svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
					    sizeof(struct srpc_service_cd));
	if (svc->sv_cpt_data == NULL)
		return -ENOMEM;

	svc->sv_ncpts = srpc_serv_is_framework(svc) ?
			1 : cfs_cpt_number(lnet_cpt_table());
	nrpcs = srpc_service_nrpcs(svc);

	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
		scd->scd_cpt = i;
		scd->scd_svc = svc;
		spin_lock_init(&scd->scd_lock);
		INIT_LIST_HEAD(&scd->scd_rpc_free);
		INIT_LIST_HEAD(&scd->scd_rpc_active);
		INIT_LIST_HEAD(&scd->scd_buf_posted);
		INIT_LIST_HEAD(&scd->scd_buf_blocked);

		scd->scd_ev.ev_data = scd;
		scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;

		/* NB: don't use lst_sched_serial for adding buffer,
		 * see details in srpc_service_add_buffers() */
		swi_init_workitem(&scd->scd_buf_wi, scd,
				  srpc_add_buffer, lst_sched_test[i]);

		if (i != 0 && srpc_serv_is_framework(svc)) {
			/* NB: framework service only needs srpc_service_cd
			 * for one partition, but we allocate for all to
			 * make it easier to implement; it wastes a little
			 * memory but nobody should care about this */
			continue;
		}

		for (j = 0; j < nrpcs; j++) {
			LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
					 i, sizeof(*rpc));
			if (rpc == NULL) {
				srpc_service_fini(svc);
				return -ENOMEM;
			}
			list_add(&rpc->srpc_list, &scd->scd_rpc_free);
		}
	}

	return 0;
}
int
srpc_add_service(struct srpc_service *sv)
{
	int id = sv->sv_id;

	LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);

	if (srpc_service_init(sv) != 0)
		return -ENOMEM;

	spin_lock(&srpc_data.rpc_glock);

	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

	if (srpc_data.rpc_services[id] != NULL) {
		spin_unlock(&srpc_data.rpc_glock);
		goto failed;
	}

	srpc_data.rpc_services[id] = sv;
	spin_unlock(&srpc_data.rpc_glock);

	CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
	return 0;

failed:
	srpc_service_fini(sv);
	return -EBUSY;
}
int
srpc_remove_service (srpc_service_t *sv)
{
	int id = sv->sv_id;

	spin_lock(&srpc_data.rpc_glock);

	if (srpc_data.rpc_services[id] != sv) {
		spin_unlock(&srpc_data.rpc_glock);
		return -ENOENT;
	}

	srpc_data.rpc_services[id] = NULL;
	spin_unlock(&srpc_data.rpc_glock);
	return 0;
}
static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
		       int len, int options, lnet_process_id_t peer,
		       lnet_handle_md_t *mdh, srpc_event_t *ev)
{
	int		 rc;
	lnet_md_t	 md;
	lnet_handle_me_t meh;

	rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
			  local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
	if (rc != 0) {
		CERROR ("LNetMEAttach failed: %d\n", rc);
		LASSERT (rc == -ENOMEM);
		return -ENOMEM;
	}

	md.threshold = 1;
	md.user_ptr  = ev;
	md.start     = buf;
	md.length    = len;
	md.options   = options;
	md.eq_handle = srpc_data.rpc_lnet_eq;

	rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
	if (rc != 0) {
		CERROR ("LNetMDAttach failed: %d\n", rc);
		LASSERT (rc == -ENOMEM);

		rc = LNetMEUnlink(meh);
		LASSERT (rc == 0);
		return -ENOMEM;
	}

	CDEBUG (D_NET,
		"Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
		libcfs_id2str(peer), portal, matchbits);
	return 0;
}
static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
		      int options, lnet_process_id_t peer, lnet_nid_t self,
		      lnet_handle_md_t *mdh, srpc_event_t *ev)
{
	int	  rc;
	lnet_md_t md;

	md.user_ptr  = ev;
	md.start     = buf;
	md.length    = len;
	md.eq_handle = srpc_data.rpc_lnet_eq;
	md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
	md.options   = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);

	rc = LNetMDBind(md, LNET_UNLINK, mdh);
	if (rc != 0) {
		CERROR ("LNetMDBind failed: %d\n", rc);
		LASSERT (rc == -ENOMEM);
		return -ENOMEM;
	}

	/* This is kind of an abuse of the LNET_MD_OP_{PUT,GET} options:
	 * they're only meaningful for MDs attached to an ME (i.e. passive
	 * buffers) */
	if ((options & LNET_MD_OP_PUT) != 0) {
		rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
			     portal, matchbits, 0, 0);
	} else {
		LASSERT ((options & LNET_MD_OP_GET) != 0);

		rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
	}

	if (rc != 0) {
		CERROR ("LNet%s(%s, %d, %lld) failed: %d\n",
			((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
			libcfs_id2str(peer), portal, matchbits, rc);

		/* The forthcoming unlink event will complete this operation
		 * with failure, so fall through and return success here */
		rc = LNetMDUnlink(*mdh);
		LASSERT (rc == 0);
	} else {
		CDEBUG (D_NET,
			"Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
			libcfs_id2str(peer), portal, matchbits);
	}
	return 0;
}
static int
srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
			int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
{
	return srpc_post_active_rdma(srpc_serv_portal(service), service,
				     buf, len, LNET_MD_OP_PUT, peer,
				     LNET_NID_ANY, mdh, ev);
}
static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
			 lnet_handle_md_t *mdh, srpc_event_t *ev)
{
	lnet_process_id_t any = {0};

	any.nid = LNET_NID_ANY;
	any.pid = LNET_PID_ANY;

	return srpc_post_passive_rdma(srpc_serv_portal(service),
				      local, service, buf, len,
				      LNET_MD_OP_PUT, any, mdh, ev);
}
static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
{
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_msg	    *msg = &buf->buf_msg;
	int		     rc;

	LNetInvalidateHandle(&buf->buf_mdh);
	list_add(&buf->buf_list, &scd->scd_buf_posted);
	scd->scd_buf_nposted++;
	spin_unlock(&scd->scd_lock);

	rc = srpc_post_passive_rqtbuf(sv->sv_id,
				      !srpc_serv_is_framework(sv),
				      msg, sizeof(*msg), &buf->buf_mdh,
				      &scd->scd_ev);

	/* At this point, an RPC (new or delayed) may have arrived in
	 * msg and its event handler has been called. So we must add
	 * buf to scd_buf_posted _before_ dropping scd_lock */

	spin_lock(&scd->scd_lock);

	if (rc == 0) {
		if (!sv->sv_shuttingdown)
			return 0;

		spin_unlock(&scd->scd_lock);
		/* srpc_shutdown_service might have tried to unlink me
		 * when my buf_mdh was still invalid */
		LNetMDUnlink(buf->buf_mdh);
		spin_lock(&scd->scd_lock);
		return 0;
	}

	scd->scd_buf_nposted--;
	if (sv->sv_shuttingdown)
		return rc; /* don't allow to change scd_buf_posted */

	list_del(&buf->buf_list);
	spin_unlock(&scd->scd_lock);

	LIBCFS_FREE(buf, sizeof(*buf));

	spin_lock(&scd->scd_lock);
	return rc;
}
int
srpc_add_buffer(struct swi_workitem *wi)
{
	struct srpc_service_cd *scd = wi->swi_workitem.wi_data;
	struct srpc_buffer     *buf;
	int			rc = 0;

	/* it's called by workitem scheduler threads; these threads
	 * should have been set CPT affinity, so buffers will be posted
	 * on CPT local list of Portal */
	spin_lock(&scd->scd_lock);

	while (scd->scd_buf_adjust > 0 &&
	       !scd->scd_svc->sv_shuttingdown) {
		scd->scd_buf_adjust--; /* consume it */
		scd->scd_buf_posting++;

		spin_unlock(&scd->scd_lock);

		LIBCFS_ALLOC(buf, sizeof(*buf));
		if (buf == NULL) {
			CERROR("Failed to add new buf to service: %s\n",
			       scd->scd_svc->sv_name);
			spin_lock(&scd->scd_lock);
			rc = -ENOMEM;
			break;
		}

		spin_lock(&scd->scd_lock);
		if (scd->scd_svc->sv_shuttingdown) {
			spin_unlock(&scd->scd_lock);
			LIBCFS_FREE(buf, sizeof(*buf));

			spin_lock(&scd->scd_lock);
			rc = -ESHUTDOWN;
			break;
		}

		rc = srpc_service_post_buffer(scd, buf);
		if (rc != 0)
			break; /* buf has been freed inside */

		LASSERT(scd->scd_buf_posting > 0);
		scd->scd_buf_posting--;
		scd->scd_buf_total++;
		scd->scd_buf_low = MAX(2, scd->scd_buf_total / 4);
	}

	if (rc != 0) {
		scd->scd_buf_err_stamp = get_seconds();
		scd->scd_buf_err = rc;

		LASSERT(scd->scd_buf_posting > 0);
		scd->scd_buf_posting--;
	}

	spin_unlock(&scd->scd_lock);
	return 0;
}
int
srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
{
	struct srpc_service_cd	*scd;
	int			 rc = 0;
	int			 i;

	LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		scd->scd_buf_err = 0;
		scd->scd_buf_err_stamp = 0;
		scd->scd_buf_posting = 0;
		scd->scd_buf_adjust = nbuffer;
		/* start to post buffers */
		swi_schedule_workitem(&scd->scd_buf_wi);
		spin_unlock(&scd->scd_lock);

		/* framework service only post buffer for one partition */
		if (srpc_serv_is_framework(sv))
			break;
	}

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);
		/*
		 * NB: srpc_service_add_buffers() can be called inside
		 * thread context of lst_sched_serial, and we don't normally
		 * allow to sleep inside thread context of WI scheduler
		 * because it will block current scheduler thread from doing
		 * anything else, even worse, it could deadlock if it's
		 * waiting on result from another WI of the same scheduler.
		 * However, it's safe at here because scd_buf_wi is scheduled
		 * by thread in a different WI scheduler (lst_sched_test),
		 * so we don't have any risk of deadlock, though this could
		 * block all WIs pending on lst_sched_serial for a moment
		 * which is not good but not fatal.
		 */
		lst_wait_until(scd->scd_buf_err != 0 ||
			       (scd->scd_buf_adjust == 0 &&
				scd->scd_buf_posting == 0),
			       scd->scd_lock, "waiting for adding buffer\n");

		if (scd->scd_buf_err != 0 && rc == 0)
			rc = scd->scd_buf_err;

		spin_unlock(&scd->scd_lock);
	}

	return rc;
}
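#if 0	/* Usage sketch (not part of the original source): registering a
	 * service and pre-posting request buffers is a two-step sequence;
	 * the buffer count (16 here) is illustrative only. */
static int
example_start_service(struct srpc_service *sv)
{
	int rc;

	rc = srpc_add_service(sv);	/* publish in srpc_data.rpc_services */
	if (rc != 0)
		return rc;

	rc = srpc_service_add_buffers(sv, 16);
	if (rc != 0)
		srpc_remove_service(sv);
	return rc;
}
#endif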
void
srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
{
	struct srpc_service_cd	*scd;
	int			 num;
	int			 i;

	LASSERT(!sv->sv_shuttingdown);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		num = scd->scd_buf_total + scd->scd_buf_posting;
		scd->scd_buf_adjust -= min(nbuffer, num);

		spin_unlock(&scd->scd_lock);
	}
}
/* returns 1 if sv has finished, otherwise 0 */
int
srpc_finish_service(struct srpc_service *sv)
{
	struct srpc_service_cd	*scd;
	struct srpc_server_rpc	*rpc;
	int			 i;

	LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);
		if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
			spin_unlock(&scd->scd_lock);
			return 0;
		}

		if (scd->scd_buf_nposted > 0) {
			CDEBUG(D_NET, "waiting for %d posted buffers to unlink",
			       scd->scd_buf_nposted);
			spin_unlock(&scd->scd_lock);
			return 0;
		}

		if (list_empty(&scd->scd_rpc_active)) {
			spin_unlock(&scd->scd_lock);
			continue;
		}

		rpc = list_entry(scd->scd_rpc_active.next,
				 struct srpc_server_rpc, srpc_list);
		CNETERR("Active RPC %p on shutdown: sv %s, peer %s, "
			"wi %s scheduled %d running %d, "
			"ev fired %d type %d status %d lnet %d\n",
			rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
			swi_state2str(rpc->srpc_wi.swi_state),
			rpc->srpc_wi.swi_workitem.wi_scheduled,
			rpc->srpc_wi.swi_workitem.wi_running,
			rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
			rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
		spin_unlock(&scd->scd_lock);
		return 0;
	}

	/* no lock needed from now on */
	srpc_service_fini(sv);
	return 1;
}
/* called with scd->scd_lock held */
static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
{
	if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
		if (srpc_service_post_buffer(scd, buf) != 0) {
			CWARN("Failed to post %s buffer\n",
			      scd->scd_svc->sv_name);
		}
		return;
	}

	/* service is shutting down, or we want to recycle some buffers */
	scd->scd_buf_total--;

	if (scd->scd_buf_adjust < 0) {
		scd->scd_buf_adjust++;
		if (scd->scd_buf_adjust < 0 &&
		    scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
			CDEBUG(D_INFO,
			       "Try to recycle %d buffers but nothing left\n",
			       scd->scd_buf_adjust);
			scd->scd_buf_adjust = 0;
		}
	}

	spin_unlock(&scd->scd_lock);
	LIBCFS_FREE(buf, sizeof(*buf));
	spin_lock(&scd->scd_lock);
}
void
srpc_abort_service(struct srpc_service *sv)
{
	struct srpc_service_cd	*scd;
	struct srpc_server_rpc	*rpc;
	int			 i;

	CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
	       sv->sv_id, sv->sv_name);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		/* schedule in-flight RPCs to notice the abort; NB:
		 * racing with incoming RPCs; complete fix should make test
		 * RPCs carry session ID in their headers */
		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
			rpc->srpc_aborted = 1;
			swi_schedule_workitem(&rpc->srpc_wi);
		}

		spin_unlock(&scd->scd_lock);
	}
}
void
srpc_shutdown_service(srpc_service_t *sv)
{
	struct srpc_service_cd	*scd;
	struct srpc_server_rpc	*rpc;
	srpc_buffer_t		*buf;
	int			 i;

	CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
	       sv->sv_id, sv->sv_name);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
		spin_lock(&scd->scd_lock);

	sv->sv_shuttingdown = 1; /* i.e. no new active RPC */

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
		spin_unlock(&scd->scd_lock);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		/* schedule in-flight RPCs to notice the shutdown */
		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
			swi_schedule_workitem(&rpc->srpc_wi);

		spin_unlock(&scd->scd_lock);

		/* OK to traverse scd_buf_posted without lock, since no one
		 * touches scd_buf_posted now */
		list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
			LNetMDUnlink(buf->buf_mdh);
	}
}
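#if 0	/* Usage sketch (not part of the original source): shutdown is a
	 * two-phase protocol - mark the service as shutting down, then poll
	 * srpc_finish_service() until posted buffers are unlinked and active
	 * RPCs have drained. */
static void
example_stop_service(struct srpc_service *sv)
{
	srpc_shutdown_service(sv);

	while (!srpc_finish_service(sv))
		schedule_timeout_uninterruptible(cfs_time_seconds(1));

	srpc_remove_service(sv);
}
#endif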
static int
srpc_send_request (srpc_client_rpc_t *rpc)
{
	srpc_event_t *ev = &rpc->crpc_reqstev;
	int	      rc;

	ev->ev_fired = 0;
	ev->ev_data  = rpc;
	ev->ev_type  = SRPC_REQUEST_SENT;

	rc = srpc_post_active_rqtbuf(rpc->crpc_dest, rpc->crpc_service,
				     &rpc->crpc_reqstmsg, sizeof(srpc_msg_t),
				     &rpc->crpc_reqstmdh, ev);
	if (rc != 0) {
		LASSERT (rc == -ENOMEM);
		ev->ev_fired = 1;  /* no more event expected */
	}
	return rc;
}
static int
srpc_prepare_reply (srpc_client_rpc_t *rpc)
{
	srpc_event_t *ev = &rpc->crpc_replyev;
	__u64	     *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
	int	      rc;

	ev->ev_fired = 0;
	ev->ev_data  = rpc;
	ev->ev_type  = SRPC_REPLY_RCVD;

	*id = srpc_next_id();

	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
				    &rpc->crpc_replymsg, sizeof(srpc_msg_t),
				    LNET_MD_OP_PUT, rpc->crpc_dest,
				    &rpc->crpc_replymdh, ev);
	if (rc != 0) {
		LASSERT (rc == -ENOMEM);
		ev->ev_fired = 1;  /* no more event expected */
	}
	return rc;
}
static int
srpc_prepare_bulk (srpc_client_rpc_t *rpc)
{
	srpc_bulk_t  *bk = &rpc->crpc_bulk;
	srpc_event_t *ev = &rpc->crpc_bulkev;
	__u64	     *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
	int	      rc;
	int	      opt;

	LASSERT (bk->bk_niov <= LNET_MAX_IOV);

	if (bk->bk_niov == 0) return 0; /* nothing to do */

	opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
	opt |= LNET_MD_KIOV;

	ev->ev_fired = 0;
	ev->ev_data  = rpc;
	ev->ev_type  = SRPC_BULK_REQ_RCVD;

	*id = srpc_next_id();

	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
				    &bk->bk_iovs[0], bk->bk_niov, opt,
				    rpc->crpc_dest, &bk->bk_mdh, ev);
	if (rc != 0) {
		LASSERT (rc == -ENOMEM);
		ev->ev_fired = 1;  /* no more event expected */
	}
	return rc;
}
static int
srpc_do_bulk (srpc_server_rpc_t *rpc)
{
	srpc_event_t *ev = &rpc->srpc_ev;
	srpc_bulk_t  *bk = rpc->srpc_bulk;
	__u64	      id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
	int	      rc;
	int	      opt;

	LASSERT (bk != NULL);

	opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
	opt |= LNET_MD_KIOV;

	ev->ev_fired = 0;
	ev->ev_data  = rpc;
	ev->ev_type  = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;

	rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
				   &bk->bk_iovs[0], bk->bk_niov, opt,
				   rpc->srpc_peer, rpc->srpc_self,
				   &bk->bk_mdh, ev);
	if (rc != 0)
		ev->ev_fired = 1;  /* no more event expected */
	return rc;
}
/* only called from srpc_handle_rpc */
static void
srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
{
	struct srpc_service_cd	*scd = rpc->srpc_scd;
	struct srpc_service	*sv  = scd->scd_svc;
	srpc_buffer_t		*buffer;

	LASSERT (status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);

	rpc->srpc_status = status;

	CDEBUG_LIMIT (status == 0 ? D_NET : D_NETERROR,
		"Server RPC %p done: service %s, peer %s, status %s:%d\n",
		rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
		swi_state2str(rpc->srpc_wi.swi_state), status);

	if (status != 0) {
		spin_lock(&srpc_data.rpc_glock);
		srpc_data.rpc_counters.rpcs_dropped++;
		spin_unlock(&srpc_data.rpc_glock);
	}

	if (rpc->srpc_done != NULL)
		(*rpc->srpc_done) (rpc);
	LASSERT(rpc->srpc_bulk == NULL);

	spin_lock(&scd->scd_lock);

	if (rpc->srpc_reqstbuf != NULL) {
		/* NB might drop scd_lock in srpc_service_recycle_buffer, but
		 * sv won't go away since scd_rpc_active must not be empty */
		srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
		rpc->srpc_reqstbuf = NULL;
	}

	list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */

	/*
	 * No one can schedule me now since:
	 * - I'm not on scd_rpc_active.
	 * - all LNet events have been fired.
	 * Cancel pending schedules and prevent future schedule attempts:
	 */
	LASSERT(rpc->srpc_ev.ev_fired);
	swi_exit_workitem(&rpc->srpc_wi);

	if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
		buffer = list_entry(scd->scd_buf_blocked.next,
				    srpc_buffer_t, buf_list);
		list_del(&buffer->buf_list);

		srpc_init_server_rpc(rpc, scd, buffer);
		list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
		swi_schedule_workitem(&rpc->srpc_wi);
	} else {
		list_add(&rpc->srpc_list, &scd->scd_rpc_free);
	}

	spin_unlock(&scd->scd_lock);
	return;
}
/* handles an incoming RPC */
int
srpc_handle_rpc(swi_workitem_t *wi)
{
	struct srpc_server_rpc	*rpc = wi->swi_workitem.wi_data;
	struct srpc_service_cd	*scd = rpc->srpc_scd;
	struct srpc_service	*sv  = scd->scd_svc;
	srpc_event_t		*ev  = &rpc->srpc_ev;
	int			 rc  = 0;

	LASSERT(wi == &rpc->srpc_wi);

	spin_lock(&scd->scd_lock);

	if (sv->sv_shuttingdown || rpc->srpc_aborted) {
		spin_unlock(&scd->scd_lock);

		if (rpc->srpc_bulk != NULL)
			LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
		LNetMDUnlink(rpc->srpc_replymdh);

		if (ev->ev_fired) { /* no more event, OK to finish */
			srpc_server_rpc_done(rpc, -ESHUTDOWN);
			return 1;
		}
		return 0;
	}

	spin_unlock(&scd->scd_lock);

	switch (wi->swi_state) {
	default:
		LBUG ();
	case SWI_STATE_NEWBORN: {
		srpc_msg_t	     *msg;
		srpc_generic_reply_t *reply;

		msg = &rpc->srpc_reqstbuf->buf_msg;
		reply = &rpc->srpc_replymsg.msg_body.reply;

		if (msg->msg_magic == 0) {
			/* moaned already in srpc_lnet_ev_handler */
			srpc_server_rpc_done(rpc, EBADMSG);
			return 1;
		}

		srpc_unpack_msg_hdr(msg);
		if (msg->msg_version != SRPC_MSG_VERSION) {
			CWARN("Version mismatch: %u, %u expected, from %s\n",
			      msg->msg_version, SRPC_MSG_VERSION,
			      libcfs_id2str(rpc->srpc_peer));
			reply->status = EPROTO;
			/* drop through and send reply */
		} else {
			reply->status = 0;
			rc = (*sv->sv_handler)(rpc);
			LASSERT(reply->status == 0 || !rpc->srpc_bulk);
			if (rc != 0) {
				srpc_server_rpc_done(rpc, rc);
				return 1;
			}
		}

		wi->swi_state = SWI_STATE_BULK_STARTED;

		if (rpc->srpc_bulk != NULL) {
			rc = srpc_do_bulk(rpc);
			if (rc == 0)
				return 0; /* wait for bulk */

			LASSERT (ev->ev_fired);
			ev->ev_status = rc;
		}
	}
	case SWI_STATE_BULK_STARTED:
		LASSERT (rpc->srpc_bulk == NULL || ev->ev_fired);

		if (rpc->srpc_bulk != NULL) {
			rc = ev->ev_status;

			if (sv->sv_bulk_ready != NULL)
				rc = (*sv->sv_bulk_ready) (rpc, rc);

			if (rc != 0) {
				srpc_server_rpc_done(rpc, rc);
				return 1;
			}
		}

		wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
		rc = srpc_send_reply(rpc);
		if (rc == 0)
			return 0; /* wait for reply */
		srpc_server_rpc_done(rpc, rc);
		return 1;

	case SWI_STATE_REPLY_SUBMITTED:
		if (!ev->ev_fired) {
			CERROR("RPC %p: bulk %p, service %d\n",
			       rpc, rpc->srpc_bulk, sv->sv_id);
			CERROR("Event: status %d, type %d, lnet %d\n",
			       ev->ev_status, ev->ev_type, ev->ev_lnet);
			LASSERT (ev->ev_fired);
		}

		wi->swi_state = SWI_STATE_DONE;
		srpc_server_rpc_done(rpc, ev->ev_status);
		return 1;
	}

	return 0;
}
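/*
 * Server-side state machine handled above, for reference:
 *
 *   SWI_STATE_NEWBORN --(optional bulk)--> SWI_STATE_BULK_STARTED
 *	--> SWI_STATE_REPLY_SUBMITTED --> SWI_STATE_DONE
 *
 * srpc_handle_rpc() returns 0 when it must be rescheduled by the next LNet
 * event, and 1 once srpc_server_rpc_done() has finalized the RPC.
 */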
static void
srpc_client_rpc_expired (void *data)
{
	srpc_client_rpc_t *rpc = data;

	CWARN ("Client RPC expired: service %d, peer %s, timeout %d.\n",
	       rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
	       rpc->crpc_timeout);

	spin_lock(&rpc->crpc_lock);

	rpc->crpc_timeout = 0;
	srpc_abort_rpc(rpc, -ETIMEDOUT);

	spin_unlock(&rpc->crpc_lock);

	spin_lock(&srpc_data.rpc_glock);
	srpc_data.rpc_counters.rpcs_expired++;
	spin_unlock(&srpc_data.rpc_glock);
}
inline void
srpc_add_client_rpc_timer (srpc_client_rpc_t *rpc)
{
	stt_timer_t *timer = &rpc->crpc_timer;

	if (rpc->crpc_timeout == 0) return;

	INIT_LIST_HEAD(&timer->stt_list);
	timer->stt_data    = rpc;
	timer->stt_func    = srpc_client_rpc_expired;
	timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
					  get_seconds());
	stt_add_timer(timer);
	return;
}
/*
 * Called with rpc->crpc_lock held.
 *
 * Upon exit the RPC expiry timer is not queued and the handler is not
 * running on any CPU. */
void
srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
{
	/* timer not planted or already exploded */
	if (rpc->crpc_timeout == 0)
		return;

	/* timer successfully defused */
	if (stt_del_timer(&rpc->crpc_timer))
		return;

	/* timer detonated, wait for it to explode */
	while (rpc->crpc_timeout != 0) {
		spin_unlock(&rpc->crpc_lock);

		schedule();

		spin_lock(&rpc->crpc_lock);
	}
}
static void
srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
{
	swi_workitem_t *wi = &rpc->crpc_wi;

	LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);

	spin_lock(&rpc->crpc_lock);

	rpc->crpc_closed = 1;
	if (rpc->crpc_status == 0)
		rpc->crpc_status = status;

	srpc_del_client_rpc_timer(rpc);

	CDEBUG_LIMIT ((status == 0) ? D_NET : D_NETERROR,
		"Client RPC done: service %d, peer %s, status %s:%d:%d\n",
		rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
		swi_state2str(wi->swi_state), rpc->crpc_aborted, status);

	/*
	 * No one can schedule me now since:
	 * - RPC timer has been defused.
	 * - all LNet events have been fired.
	 * - crpc_closed has been set, preventing srpc_abort_rpc from
	 *   scheduling me.
	 * Cancel pending schedules and prevent future schedule attempts:
	 */
	LASSERT (!srpc_event_pending(rpc));
	swi_exit_workitem(wi);

	spin_unlock(&rpc->crpc_lock);

	(*rpc->crpc_done)(rpc);
	return;
}
/* sends an outgoing RPC */
int
srpc_send_rpc (swi_workitem_t *wi)
{
	int		   rc = 0;
	srpc_client_rpc_t *rpc;
	srpc_msg_t	  *reply;
	int		   do_bulk;

	LASSERT(wi != NULL);

	rpc = wi->swi_workitem.wi_data;

	LASSERT (rpc != NULL);
	LASSERT (wi == &rpc->crpc_wi);

	reply = &rpc->crpc_replymsg;
	do_bulk = rpc->crpc_bulk.bk_niov > 0;

	spin_lock(&rpc->crpc_lock);

	if (rpc->crpc_aborted) {
		spin_unlock(&rpc->crpc_lock);
		goto abort;
	}

	spin_unlock(&rpc->crpc_lock);

	switch (wi->swi_state) {
	default:
		LBUG ();
	case SWI_STATE_NEWBORN:
		LASSERT (!srpc_event_pending(rpc));

		rc = srpc_prepare_reply(rpc);
		if (rc != 0) {
			srpc_client_rpc_done(rpc, rc);
			return 1;
		}

		rc = srpc_prepare_bulk(rpc);
		if (rc != 0) break;

		wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
		rc = srpc_send_request(rpc);
		break;

	case SWI_STATE_REQUEST_SUBMITTED:
		/* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
		 * order; however, they're processed in a strict order:
		 * rqt, rpy, and bulk. */
		if (!rpc->crpc_reqstev.ev_fired) break;

		rc = rpc->crpc_reqstev.ev_status;
		if (rc != 0) break;

		wi->swi_state = SWI_STATE_REQUEST_SENT;
		/* perhaps more events, fall thru */
	case SWI_STATE_REQUEST_SENT: {
		srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);

		if (!rpc->crpc_replyev.ev_fired) break;

		rc = rpc->crpc_replyev.ev_status;
		if (rc != 0) break;

		srpc_unpack_msg_hdr(reply);
		if (reply->msg_type != type ||
		    (reply->msg_magic != SRPC_MSG_MAGIC &&
		     reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
			CWARN ("Bad message from %s: type %u (%d expected),"
			       " magic %u (%d expected).\n",
			       libcfs_id2str(rpc->crpc_dest),
			       reply->msg_type, type,
			       reply->msg_magic, SRPC_MSG_MAGIC);
			rc = -EBADMSG;
			break;
		}

		if (do_bulk && reply->msg_body.reply.status != 0) {
			CWARN ("Remote error %d at %s, unlink bulk buffer in "
			       "case peer didn't initiate bulk transfer\n",
			       reply->msg_body.reply.status,
			       libcfs_id2str(rpc->crpc_dest));
			LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
		}

		wi->swi_state = SWI_STATE_REPLY_RECEIVED;
	}
	case SWI_STATE_REPLY_RECEIVED:
		if (do_bulk && !rpc->crpc_bulkev.ev_fired) break;

		rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;

		/* Bulk buffer was unlinked due to remote error. Clear error
		 * since reply buffer still contains valid data.
		 * NB rpc->crpc_done shouldn't look into bulk data in case of
		 * remote error */
		if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
		    rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
			rc = 0;

		wi->swi_state = SWI_STATE_DONE;
		srpc_client_rpc_done(rpc, rc);
		return 1;
	}

	if (rc != 0) {
		spin_lock(&rpc->crpc_lock);
		srpc_abort_rpc(rpc, rc);
		spin_unlock(&rpc->crpc_lock);
	}

abort:
	if (rpc->crpc_aborted) {
		LNetMDUnlink(rpc->crpc_reqstmdh);
		LNetMDUnlink(rpc->crpc_replymdh);
		LNetMDUnlink(rpc->crpc_bulk.bk_mdh);

		if (!srpc_event_pending(rpc)) {
			srpc_client_rpc_done(rpc, -EINTR);
			return 1;
		}
	}
	return 0;
}
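/*
 * Client-side state machine handled above, for reference:
 *
 *   SWI_STATE_NEWBORN --> SWI_STATE_REQUEST_SUBMITTED
 *	--> SWI_STATE_REQUEST_SENT --> SWI_STATE_REPLY_RECEIVED
 *	--> SWI_STATE_DONE
 *
 * rqtev, rpyev and bulkev may fire in any order, but srpc_send_rpc()
 * consumes them strictly as request, then reply, then bulk.
 */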
srpc_client_rpc_t *
srpc_create_client_rpc (lnet_process_id_t peer, int service,
			int nbulkiov, int bulklen,
			void (*rpc_done)(srpc_client_rpc_t *),
			void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
{
	srpc_client_rpc_t *rpc;

	LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
				   crpc_bulk.bk_iovs[nbulkiov]));
	if (rpc == NULL)
		return NULL;

	srpc_init_client_rpc(rpc, peer, service, nbulkiov,
			     bulklen, rpc_done, rpc_fini, priv);
	return rpc;
}
/* called with rpc->crpc_lock held */
void
srpc_abort_rpc (srpc_client_rpc_t *rpc, int why)
{
	LASSERT (why != 0);

	if (rpc->crpc_aborted || /* already aborted */
	    rpc->crpc_closed)	 /* callback imminent */
		return;

	CDEBUG (D_NET,
		"Aborting RPC: service %d, peer %s, state %s, why %d\n",
		rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
		swi_state2str(rpc->crpc_wi.swi_state), why);

	rpc->crpc_aborted = 1;
	rpc->crpc_status  = why;
	swi_schedule_workitem(&rpc->crpc_wi);
	return;
}
/* called with rpc->crpc_lock held */
void
srpc_post_rpc (srpc_client_rpc_t *rpc)
{
	LASSERT (!rpc->crpc_aborted);
	LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);

	CDEBUG (D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
		libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
		rpc->crpc_timeout);

	srpc_add_client_rpc_timer(rpc);
	swi_schedule_workitem(&rpc->crpc_wi);
	return;
}
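#if 0	/* Usage sketch (not part of the original source): building and
	 * posting a client RPC; SRPC_SERVICE_PING and the done callback are
	 * illustrative assumptions. */
static void
example_send_ping(lnet_process_id_t peer,
		  void (*done)(srpc_client_rpc_t *))
{
	srpc_client_rpc_t *rpc;

	/* no bulk: nbulkiov = 0, bulklen = 0 */
	rpc = srpc_create_client_rpc(peer, SRPC_SERVICE_PING, 0, 0,
				     done, NULL, NULL);
	if (rpc == NULL)
		return;

	spin_lock(&rpc->crpc_lock);
	srpc_post_rpc(rpc);	/* arms the expiry timer, schedules the WI */
	spin_unlock(&rpc->crpc_lock);
}
#endif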
int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
	srpc_event_t		*ev = &rpc->srpc_ev;
	struct srpc_msg		*msg = &rpc->srpc_replymsg;
	struct srpc_buffer	*buffer = rpc->srpc_reqstbuf;
	struct srpc_service_cd	*scd = rpc->srpc_scd;
	struct srpc_service	*sv = scd->scd_svc;
	__u64			 rpyid;
	int			 rc;

	LASSERT(buffer != NULL);
	rpyid = buffer->buf_msg.msg_body.reqst.rpyid;

	spin_lock(&scd->scd_lock);

	if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
		/* Repost buffer before replying since test client
		 * might send me another RPC once it gets the reply */
		if (srpc_service_post_buffer(scd, buffer) != 0)
			CWARN("Failed to repost %s buffer\n", sv->sv_name);
		rpc->srpc_reqstbuf = NULL;
	}

	spin_unlock(&scd->scd_lock);

	ev->ev_fired = 0;
	ev->ev_data  = rpc;
	ev->ev_type  = SRPC_REPLY_SENT;

	msg->msg_magic   = SRPC_MSG_MAGIC;
	msg->msg_version = SRPC_MSG_VERSION;
	msg->msg_type    = srpc_service2reply(sv->sv_id);

	rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
				   sizeof(*msg), LNET_MD_OP_PUT,
				   rpc->srpc_peer, rpc->srpc_self,
				   &rpc->srpc_replymdh, ev);
	if (rc != 0)
		ev->ev_fired = 1;  /* no more event expected */
	return rc;
}
/* when in kernel always called with LNET_LOCK() held, and in thread context */
void
srpc_lnet_ev_handler(lnet_event_t *ev)
{
	struct srpc_service_cd	*scd;
	srpc_event_t		*rpcev = ev->md.user_ptr;
	srpc_client_rpc_t	*crpc;
	srpc_server_rpc_t	*srpc;
	srpc_buffer_t		*buffer;
	srpc_service_t		*sv;
	srpc_msg_t		*msg;
	srpc_msg_type_t		 type;

	LASSERT (!in_interrupt());

	if (ev->status != 0) {
		spin_lock(&srpc_data.rpc_glock);
		srpc_data.rpc_counters.errors++;
		spin_unlock(&srpc_data.rpc_glock);
	}

	rpcev->ev_lnet = ev->type;

	switch (rpcev->ev_type) {
	default:
		CERROR("Unknown event: status %d, type %d, lnet %d\n",
		       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
		LBUG ();
	case SRPC_REQUEST_SENT:
		if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
			spin_lock(&srpc_data.rpc_glock);
			srpc_data.rpc_counters.rpcs_sent++;
			spin_unlock(&srpc_data.rpc_glock);
		}
	case SRPC_REPLY_RCVD:
	case SRPC_BULK_REQ_RCVD:
		crpc = rpcev->ev_data;

		if (rpcev != &crpc->crpc_reqstev &&
		    rpcev != &crpc->crpc_replyev &&
		    rpcev != &crpc->crpc_bulkev) {
			CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
			       rpcev, crpc, &crpc->crpc_reqstev,
			       &crpc->crpc_replyev, &crpc->crpc_bulkev);
			CERROR("Bad event: status %d, type %d, lnet %d\n",
			       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
			LBUG ();
		}

		spin_lock(&crpc->crpc_lock);

		LASSERT(rpcev->ev_fired == 0);
		rpcev->ev_fired  = 1;
		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
				   -EINTR : ev->status;
		swi_schedule_workitem(&crpc->crpc_wi);

		spin_unlock(&crpc->crpc_lock);
		break;

	case SRPC_REQUEST_RCVD:
		scd = rpcev->ev_data;
		sv = scd->scd_svc;

		LASSERT(rpcev == &scd->scd_ev);

		spin_lock(&scd->scd_lock);

		LASSERT (ev->unlinked);
		LASSERT (ev->type == LNET_EVENT_PUT ||
			 ev->type == LNET_EVENT_UNLINK);
		LASSERT (ev->type != LNET_EVENT_UNLINK ||
			 sv->sv_shuttingdown);

		buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
		buffer->buf_peer = ev->initiator;
		buffer->buf_self = ev->target.nid;

		LASSERT(scd->scd_buf_nposted > 0);
		scd->scd_buf_nposted--;

		if (sv->sv_shuttingdown) {
			/* Leave buffer on scd->scd_buf_nposted since
			 * srpc_finish_service needs to traverse it. */
			spin_unlock(&scd->scd_lock);
			break;
		}

		if (scd->scd_buf_err_stamp != 0 &&
		    scd->scd_buf_err_stamp < get_seconds()) {
			/* re-enable adding buffer */
			scd->scd_buf_err_stamp = 0;
			scd->scd_buf_err = 0;
		}

		if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
		    scd->scd_buf_adjust == 0 &&
		    scd->scd_buf_nposted < scd->scd_buf_low) {
			scd->scd_buf_adjust = MAX(scd->scd_buf_total / 2,
						  SFW_TEST_WI_MIN);
			swi_schedule_workitem(&scd->scd_buf_wi);
		}

		list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
		msg = &buffer->buf_msg;
		type = srpc_service2request(sv->sv_id);

		if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
		    (msg->msg_type != type &&
		     msg->msg_type != __swab32(type)) ||
		    (msg->msg_magic != SRPC_MSG_MAGIC &&
		     msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
			CERROR ("Dropping RPC (%s) from %s: "
				"status %d mlength %d type %u magic %u.\n",
				sv->sv_name, libcfs_id2str(ev->initiator),
				ev->status, ev->mlength,
				msg->msg_type, msg->msg_magic);

			/* NB can't call srpc_service_recycle_buffer here since
			 * it may call LNetM[DE]Attach. The invalid magic tells
			 * srpc_handle_rpc to drop this RPC */
			msg->msg_magic = 0;
		}

		if (!list_empty(&scd->scd_rpc_free)) {
			srpc = list_entry(scd->scd_rpc_free.next,
					  struct srpc_server_rpc,
					  srpc_list);
			list_del(&srpc->srpc_list);

			srpc_init_server_rpc(srpc, scd, buffer);
			list_add_tail(&srpc->srpc_list,
				      &scd->scd_rpc_active);
			swi_schedule_workitem(&srpc->srpc_wi);
		} else {
			list_add_tail(&buffer->buf_list,
				      &scd->scd_buf_blocked);
		}

		spin_unlock(&scd->scd_lock);

		spin_lock(&srpc_data.rpc_glock);
		srpc_data.rpc_counters.rpcs_rcvd++;
		spin_unlock(&srpc_data.rpc_glock);
		break;

	case SRPC_BULK_GET_RPLD:
		LASSERT (ev->type == LNET_EVENT_SEND ||
			 ev->type == LNET_EVENT_REPLY ||
			 ev->type == LNET_EVENT_UNLINK);

		if (!ev->unlinked)
			break; /* wait for final event */

	case SRPC_BULK_PUT_SENT:
		if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
			spin_lock(&srpc_data.rpc_glock);

			if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
				srpc_data.rpc_counters.bulk_get += ev->mlength;
			else
				srpc_data.rpc_counters.bulk_put += ev->mlength;

			spin_unlock(&srpc_data.rpc_glock);
		}
	case SRPC_REPLY_SENT:
		srpc = rpcev->ev_data;
		scd = srpc->srpc_scd;

		LASSERT(rpcev == &srpc->srpc_ev);

		spin_lock(&scd->scd_lock);

		rpcev->ev_fired  = 1;
		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
				   -EINTR : ev->status;
		swi_schedule_workitem(&srpc->srpc_wi);

		spin_unlock(&scd->scd_lock);
		break;
	}
}
int
srpc_startup (void)
{
	int rc;

	memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
	spin_lock_init(&srpc_data.rpc_glock);

	/* 1 second pause to avoid timestamp reuse */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(cfs_time_seconds(1));
	srpc_data.rpc_matchbits = ((__u64) get_seconds()) << 48;

	srpc_data.rpc_state = SRPC_STATE_NONE;

	rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
	if (rc < 0) {
		CERROR ("LNetNIInit() has failed: %d\n", rc);
		return rc;
	}

	srpc_data.rpc_state = SRPC_STATE_NI_INIT;

	LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
	rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
	if (rc != 0) {
		CERROR("LNetEQAlloc() has failed: %d\n", rc);
		goto bail;
	}

	rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
	LASSERT(rc == 0);
	rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
	LASSERT(rc == 0);

	srpc_data.rpc_state = SRPC_STATE_EQ_INIT;

	rc = stt_startup();

bail:
	if (rc != 0)
		srpc_shutdown();
	else
		srpc_data.rpc_state = SRPC_STATE_RUNNING;

	return rc;
}
void
srpc_shutdown (void)
{
	int i;
	int rc;
	int state;

	state = srpc_data.rpc_state;
	srpc_data.rpc_state = SRPC_STATE_STOPPING;

	switch (state) {
	default:
		LBUG ();
	case SRPC_STATE_RUNNING:
		spin_lock(&srpc_data.rpc_glock);

		for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
			srpc_service_t *sv = srpc_data.rpc_services[i];

			LASSERTF (sv == NULL,
				  "service not empty: id %d, name %s\n",
				  i, sv->sv_name);
		}

		spin_unlock(&srpc_data.rpc_glock);

		stt_shutdown();

	case SRPC_STATE_EQ_INIT:
		rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
		rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
		LASSERT (rc == 0);
		rc = LNetEQFree(srpc_data.rpc_lnet_eq);
		LASSERT (rc == 0); /* the EQ should have no user by now */

	case SRPC_STATE_NI_INIT:
		LNetNIFini();
	}

	return;
}