/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/selftest/rpc.c
 *
 * Author: Isaac Huang <isaac@clusterfs.com>
 *
 * 2012-05-13: Liang Zhen <liang@whamcloud.com>
 * - percpt data for service to improve smp performance
 */
#define DEBUG_SUBSYSTEM S_LNET

#include "selftest.h"
static struct smoketest_rpc {
	spinlock_t	rpc_glock;	/* global lock */
	struct srpc_service	*rpc_services[SRPC_SERVICE_MAX_ID + 1];
	lnet_handle_eq_t	rpc_lnet_eq;	/* _the_ LNet event queue */
	enum srpc_state		rpc_state;
	struct srpc_counters	rpc_counters;
	__u64			rpc_matchbits;	/* matchbits counter */
} srpc_data;
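/*
 * NB: rpc_services, rpc_counters and rpc_matchbits above are all guarded by
 * rpc_glock. rpc_matchbits is seeded from a wall-clock timestamp in
 * srpc_startup() and handed out by srpc_next_id(), so every passive RDMA
 * buffer gets a unique match value.
 */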
static int
srpc_serv_portal(int svc_id)
{
	return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
	       SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
}
/* forward ref's */
int srpc_handle_rpc(struct swi_workitem *wi);
void srpc_get_counters(struct srpc_counters *cnt)
{
	spin_lock(&srpc_data.rpc_glock);
	*cnt = srpc_data.rpc_counters;
	spin_unlock(&srpc_data.rpc_glock);
}
void srpc_set_counters(const struct srpc_counters *cnt)
{
	spin_lock(&srpc_data.rpc_glock);
	srpc_data.rpc_counters = *cnt;
	spin_unlock(&srpc_data.rpc_glock);
}
static int
srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off,
		   int nob)
{
	LASSERT(off < PAGE_SIZE);
	LASSERT(nob > 0 && nob <= PAGE_SIZE);

	bk->bk_iovs[i].bv_offset = off;
	bk->bk_iovs[i].bv_page = pg;
	bk->bk_iovs[i].bv_len = nob;
	return nob;
}
void
srpc_free_bulk(struct srpc_bulk *bk)
{
	struct page *pg;
	int i;

	LASSERT(bk);

	for (i = 0; i < bk->bk_niov; i++) {
		pg = bk->bk_iovs[i].bv_page;
		if (!pg)
			break;

		__free_page(pg);
	}

	LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
}
struct srpc_bulk *
srpc_alloc_bulk(int cpt, unsigned int bulk_off, unsigned int bulk_npg,
		unsigned int bulk_len, int sink)
{
	struct srpc_bulk *bk;
	int i;

	LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);

	LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
			 offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
	if (!bk) {
		CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
		return NULL;
	}

	memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
	bk->bk_sink = sink;
	bk->bk_len = bulk_len;
	bk->bk_niov = bulk_npg;

	for (i = 0; i < bulk_npg; i++) {
		struct page *pg;
		int nob;

		pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt),
				      GFP_KERNEL, 0);
		if (!pg) {
			CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
			srpc_free_bulk(bk);
			return NULL;
		}

		nob = min_t(unsigned int, bulk_off + bulk_len, PAGE_SIZE) -
		      bulk_off;

		srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
		bulk_len -= nob;
		bulk_off = 0;
	}

	return bk;
}
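/*
 * NB: in srpc_alloc_bulk() above, bulk_off applies only to the first page;
 * it is zeroed after the first iteration, so subsequent iovs start at
 * offset 0 and cover up to PAGE_SIZE (or whatever of bulk_len remains).
 */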
static inline __u64
srpc_next_id(void)
{
	__u64 id;

	spin_lock(&srpc_data.rpc_glock);
	id = srpc_data.rpc_matchbits++;
	spin_unlock(&srpc_data.rpc_glock);
	return id;
}
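/*
 * srpc_next_id() hands out 64-bit match bits: srpc_startup() seeds
 * rpc_matchbits with (seconds << 48), so the top bits change across
 * restarts while the low 48 bits count up, keeping ids unique within a run
 * and, practically, across runs.
 */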
static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
		     struct srpc_service_cd *scd,
		     struct srpc_buffer *buffer)
{
	memset(rpc, 0, sizeof(*rpc));
	swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc,
			  srpc_serv_is_framework(scd->scd_svc) ?
			  lst_sched_serial : lst_sched_test[scd->scd_cpt]);

	rpc->srpc_ev.ev_fired = 1; /* no event expected now */

	rpc->srpc_scd = scd;
	rpc->srpc_reqstbuf = buffer;
	rpc->srpc_peer = buffer->buf_peer;
	rpc->srpc_self = buffer->buf_self;
	LNetInvalidateHandle(&rpc->srpc_replymdh);
}
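/*
 * NB: srpc_init_server_rpc() binds the work item to lst_sched_serial for
 * framework services (serializing framework RPCs) and to the per-CPT
 * lst_sched_test scheduler otherwise, which is where the smp scalability
 * of test RPCs comes from (see the 2012-05-13 note in the file header).
 */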
static void
srpc_service_fini(struct srpc_service *svc)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	struct srpc_buffer *buf;
	struct list_head *q;
	int i;

	if (!svc->sv_cpt_data)
		return;

	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
		while (1) {
			if (!list_empty(&scd->scd_buf_posted))
				q = &scd->scd_buf_posted;
			else if (!list_empty(&scd->scd_buf_blocked))
				q = &scd->scd_buf_blocked;
			else
				break;

			while (!list_empty(q)) {
				buf = list_entry(q->next, struct srpc_buffer,
						 buf_list);
				list_del(&buf->buf_list);
				LIBCFS_FREE(buf, sizeof(*buf));
			}
		}

		LASSERT(list_empty(&scd->scd_rpc_active));

		while (!list_empty(&scd->scd_rpc_free)) {
			rpc = list_entry(scd->scd_rpc_free.next,
					 struct srpc_server_rpc,
					 srpc_list);
			list_del(&rpc->srpc_list);
			LIBCFS_FREE(rpc, sizeof(*rpc));
		}
	}

	cfs_percpt_free(svc->sv_cpt_data);
	svc->sv_cpt_data = NULL;
}
static int
srpc_service_nrpcs(struct srpc_service *svc)
{
	int nrpcs = svc->sv_wi_total / svc->sv_ncpts;

	return srpc_serv_is_framework(svc) ?
	       max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
}
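/*
 * Example (illustrative numbers only): with sv_wi_total = 1024 and
 * sv_ncpts = 4, each partition gets 1024 / 4 = 256 work items; a framework
 * service has sv_ncpts = 1 so it keeps all 1024. Either way the result is
 * clamped from below by SFW_FRWK_WI_MIN / SFW_TEST_WI_MIN.
 */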
int srpc_add_buffer(struct swi_workitem *wi);
static int
srpc_service_init(struct srpc_service *svc)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int nrpcs;
	int i;
	int j;

	svc->sv_shuttingdown = 0;

	svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
					    sizeof(**svc->sv_cpt_data));
	if (!svc->sv_cpt_data)
		return -ENOMEM;

	svc->sv_ncpts = srpc_serv_is_framework(svc) ?
			1 : cfs_cpt_number(lnet_cpt_table());
	nrpcs = srpc_service_nrpcs(svc);

	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
		scd->scd_cpt = i;
		scd->scd_svc = svc;
		spin_lock_init(&scd->scd_lock);
		INIT_LIST_HEAD(&scd->scd_rpc_free);
		INIT_LIST_HEAD(&scd->scd_rpc_active);
		INIT_LIST_HEAD(&scd->scd_buf_posted);
		INIT_LIST_HEAD(&scd->scd_buf_blocked);

		scd->scd_ev.ev_data = scd;
		scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;

		/*
		 * NB: don't use lst_sched_serial for adding buffer,
		 * see details in srpc_service_add_buffers()
		 */
		swi_init_workitem(&scd->scd_buf_wi, scd,
				  srpc_add_buffer, lst_sched_test[i]);

		if (i && srpc_serv_is_framework(svc)) {
			/*
			 * NB: framework service only needs srpc_service_cd for
			 * one partition, but we allocate for all to make
			 * it easier to implement, it will waste a little
			 * memory but nobody should care about this
			 */
			continue;
		}

		for (j = 0; j < nrpcs; j++) {
			LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
					 i, sizeof(*rpc));
			if (!rpc) {
				srpc_service_fini(svc);
				return -ENOMEM;
			}
			list_add(&rpc->srpc_list, &scd->scd_rpc_free);
		}
	}

	return 0;
}
int
srpc_add_service(struct srpc_service *sv)
{
	int id = sv->sv_id;

	LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);

	if (srpc_service_init(sv))
		return -ENOMEM;

	spin_lock(&srpc_data.rpc_glock);

	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

	if (srpc_data.rpc_services[id]) {
		spin_unlock(&srpc_data.rpc_glock);
		goto failed;
	}

	srpc_data.rpc_services[id] = sv;
	spin_unlock(&srpc_data.rpc_glock);

	CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
	return 0;

failed:
	srpc_service_fini(sv);
	return -EBUSY;
}
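/*
 * Registration sketch (illustrative only; the real callers live in the
 * selftest framework and test modules, and ping_handler below is a
 * hypothetical handler named just for this example):
 *
 *	static struct srpc_service ping_srv = {
 *		.sv_id      = SRPC_SERVICE_PING,
 *		.sv_name    = "ping_test",
 *		.sv_handler = ping_handler,
 *	};
 *
 *	rc = srpc_add_service(&ping_srv);
 *
 * srpc_service_init() then allocates the per-CPT data and pre-allocates
 * srpc_service_nrpcs() server RPC descriptors per partition.
 */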
int
srpc_remove_service(struct srpc_service *sv)
{
	int id = sv->sv_id;

	spin_lock(&srpc_data.rpc_glock);

	if (srpc_data.rpc_services[id] != sv) {
		spin_unlock(&srpc_data.rpc_glock);
		return -ENOENT;
	}

	srpc_data.rpc_services[id] = NULL;
	spin_unlock(&srpc_data.rpc_glock);
	return 0;
}
static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
		       int len, int options, lnet_process_id_t peer,
		       lnet_handle_md_t *mdh, struct srpc_event *ev)
{
	int rc;
	lnet_md_t md;
	lnet_handle_me_t meh;

	rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
			  local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
	if (rc) {
		CERROR("LNetMEAttach failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	md.threshold = 1;
	md.user_ptr = ev;
	md.start = buf;
	md.length = len;
	md.options = options;
	md.eq_handle = srpc_data.rpc_lnet_eq;

	rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
	if (rc) {
		CERROR("LNetMDAttach failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);

		rc = LNetMEUnlink(meh);
		LASSERT(!rc);
		return -ENOMEM;
	}

	CDEBUG(D_NET, "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
	       libcfs_id2str(peer), portal, matchbits);
	return 0;
}
static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
		      int options, lnet_process_id_t peer, lnet_nid_t self,
		      lnet_handle_md_t *mdh, struct srpc_event *ev)
{
	int rc;
	lnet_md_t md;

	md.user_ptr = ev;
	md.start = buf;
	md.length = len;
	md.eq_handle = srpc_data.rpc_lnet_eq;
	md.threshold = options & LNET_MD_OP_GET ? 2 : 1;
	md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);

	rc = LNetMDBind(md, LNET_UNLINK, mdh);
	if (rc) {
		CERROR("LNetMDBind failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	/*
	 * This is kind of an abuse of the LNET_MD_OP_{PUT,GET} options:
	 * they're only meaningful for MDs attached to an ME (i.e. passive
	 * buffers).
	 */
	if (options & LNET_MD_OP_PUT) {
		rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
			     portal, matchbits, 0, 0);
	} else {
		LASSERT(options & LNET_MD_OP_GET);

		rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
	}

	if (rc) {
		CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
		       options & LNET_MD_OP_PUT ? "Put" : "Get",
		       libcfs_id2str(peer), portal, matchbits, rc);

		/*
		 * The forthcoming unlink event will complete this operation
		 * with failure, so fall through and return success here.
		 */
		rc = LNetMDUnlink(*mdh);
		LASSERT(!rc);
	} else {
		CDEBUG(D_NET, "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
		       libcfs_id2str(peer), portal, matchbits);
	}
	return 0;
}
static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
			 lnet_handle_md_t *mdh, struct srpc_event *ev)
{
	lnet_process_id_t any = { 0 };

	any.nid = LNET_NID_ANY;
	any.pid = LNET_PID_ANY;

	return srpc_post_passive_rdma(srpc_serv_portal(service),
				      local, service, buf, len,
				      LNET_MD_OP_PUT, any, mdh, ev);
}
static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_msg *msg = &buf->buf_msg;
	int rc;

	LNetInvalidateHandle(&buf->buf_mdh);
	list_add(&buf->buf_list, &scd->scd_buf_posted);
	scd->scd_buf_nposted++;
	spin_unlock(&scd->scd_lock);

	rc = srpc_post_passive_rqtbuf(sv->sv_id,
				      !srpc_serv_is_framework(sv),
				      msg, sizeof(*msg), &buf->buf_mdh,
				      &scd->scd_ev);

	/*
	 * At this point, a RPC (new or delayed) may have arrived in
	 * msg and its event handler has been called. So we must add
	 * buf to scd_buf_posted _before_ dropping scd_lock
	 */
	spin_lock(&scd->scd_lock);

	if (!rc) {
		if (!sv->sv_shuttingdown)
			return 0;

		spin_unlock(&scd->scd_lock);
		/*
		 * srpc_shutdown_service might have tried to unlink me
		 * when my buf_mdh was still invalid
		 */
		LNetMDUnlink(buf->buf_mdh);
		spin_lock(&scd->scd_lock);
		return 0;
	}

	scd->scd_buf_nposted--;
	if (sv->sv_shuttingdown)
		return rc; /* don't allow to change scd_buf_posted */

	list_del(&buf->buf_list);
	spin_unlock(&scd->scd_lock);

	LIBCFS_FREE(buf, sizeof(*buf));

	spin_lock(&scd->scd_lock);
	return rc;
}
int
srpc_add_buffer(struct swi_workitem *wi)
{
	struct srpc_service_cd *scd = wi->swi_workitem.wi_data;
	struct srpc_buffer *buf;
	int rc = 0;

	/*
	 * it's called by workitem scheduler threads, these threads
	 * should have been set CPT affinity, so buffers will be posted
	 * on CPT local list of Portal
	 */
	spin_lock(&scd->scd_lock);

	while (scd->scd_buf_adjust > 0 &&
	       !scd->scd_svc->sv_shuttingdown) {
		scd->scd_buf_adjust--; /* consume it */
		scd->scd_buf_posting++;

		spin_unlock(&scd->scd_lock);

		LIBCFS_ALLOC(buf, sizeof(*buf));
		if (!buf) {
			CERROR("Failed to add new buf to service: %s\n",
			       scd->scd_svc->sv_name);
			spin_lock(&scd->scd_lock);
			rc = -ENOMEM;
			break;
		}

		spin_lock(&scd->scd_lock);
		if (scd->scd_svc->sv_shuttingdown) {
			spin_unlock(&scd->scd_lock);
			LIBCFS_FREE(buf, sizeof(*buf));

			spin_lock(&scd->scd_lock);
			rc = -ESHUTDOWN;
			break;
		}

		rc = srpc_service_post_buffer(scd, buf);
		if (rc)
			break; /* buf has been freed inside */

		LASSERT(scd->scd_buf_posting > 0);
		scd->scd_buf_posting--;
		scd->scd_buf_total++;
		scd->scd_buf_low = max(2, scd->scd_buf_total / 4);
	}

	if (rc) {
		scd->scd_buf_err_stamp = ktime_get_real_seconds();
		scd->scd_buf_err = rc;

		LASSERT(scd->scd_buf_posting > 0);
		scd->scd_buf_posting--;
	}

	spin_unlock(&scd->scd_lock);
	return 0;
}
int
srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
{
	struct srpc_service_cd *scd;
	int rc = 0;
	int i;

	LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		scd->scd_buf_err = 0;
		scd->scd_buf_err_stamp = 0;
		scd->scd_buf_posting = 0;
		scd->scd_buf_adjust = nbuffer;
		/* start to post buffers */
		swi_schedule_workitem(&scd->scd_buf_wi);
		spin_unlock(&scd->scd_lock);

		/* framework service only post buffer for one partition */
		if (srpc_serv_is_framework(sv))
			break;
	}

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);
		/*
		 * NB: srpc_service_add_buffers() can be called inside
		 * thread context of lst_sched_serial, and we don't normally
		 * allow to sleep inside thread context of WI scheduler
		 * because it will block current scheduler thread from doing
		 * anything else, even worse, it could deadlock if it's
		 * waiting on result from another WI of the same scheduler.
		 * However, it's safe at here because scd_buf_wi is scheduled
		 * by thread in a different WI scheduler (lst_sched_test),
		 * so we don't have any risk of deadlock, though this could
		 * block all WIs pending on lst_sched_serial for a moment
		 * which is not good but not fatal.
		 */
		lst_wait_until(scd->scd_buf_err ||
			       (!scd->scd_buf_adjust &&
				!scd->scd_buf_posting),
			       scd->scd_lock, "waiting for adding buffer\n");

		if (scd->scd_buf_err && !rc)
			rc = scd->scd_buf_err;

		spin_unlock(&scd->scd_lock);
	}

	return rc;
}
void
srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
{
	struct srpc_service_cd *scd;
	int num;
	int i;

	LASSERT(!sv->sv_shuttingdown);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		num = scd->scd_buf_total + scd->scd_buf_posting;
		scd->scd_buf_adjust -= min(nbuffer, num);

		spin_unlock(&scd->scd_lock);
	}
}
/* returns 1 if sv has finished, otherwise 0 */
int
srpc_finish_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int i;

	LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);
		if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
			spin_unlock(&scd->scd_lock);
			return 0;
		}

		if (scd->scd_buf_nposted > 0) {
			CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
			       scd->scd_buf_nposted);
			spin_unlock(&scd->scd_lock);
			return 0;
		}

		if (list_empty(&scd->scd_rpc_active)) {
			spin_unlock(&scd->scd_lock);
			continue;
		}

		rpc = list_entry(scd->scd_rpc_active.next,
				 struct srpc_server_rpc, srpc_list);
		CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
			rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
			swi_state2str(rpc->srpc_wi.swi_state),
			rpc->srpc_wi.swi_workitem.wi_scheduled,
			rpc->srpc_wi.swi_workitem.wi_running,
			rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
			rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
		spin_unlock(&scd->scd_lock);
		return 0;
	}

	/* no lock needed from now on */
	srpc_service_fini(sv);
	return 1;
}
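/*
 * A typical teardown sequence (a sketch; actual callers are in the
 * framework code): call srpc_shutdown_service(sv) first, then poll
 * srpc_finish_service(sv) until it returns 1, i.e. until all posted
 * buffers have been unlinked and no RPC is active, at which point
 * srpc_service_fini() has already freed sv_cpt_data.
 */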
/* called with sv->sv_lock held */
static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd,
			    struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
	if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
		if (srpc_service_post_buffer(scd, buf)) {
			CWARN("Failed to post %s buffer\n",
			      scd->scd_svc->sv_name);
		}
		return;
	}

	/* service is shutting down, or we want to recycle some buffers */
	scd->scd_buf_total--;

	if (scd->scd_buf_adjust < 0) {
		scd->scd_buf_adjust++;
		if (scd->scd_buf_adjust < 0 &&
		    !scd->scd_buf_total && !scd->scd_buf_posting) {
			CDEBUG(D_INFO,
			       "Try to recycle %d buffers but nothing left\n",
			       scd->scd_buf_adjust);
			scd->scd_buf_adjust = 0;
		}
	}

	spin_unlock(&scd->scd_lock);
	LIBCFS_FREE(buf, sizeof(*buf));
	spin_lock(&scd->scd_lock);
}
void
srpc_abort_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int i;

	CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
	       sv->sv_id, sv->sv_name);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		/*
		 * schedule in-flight RPCs to notice the abort, NB:
		 * racing with incoming RPCs; complete fix should make test
		 * RPCs carry session ID in its headers
		 */
		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
			rpc->srpc_aborted = 1;
			swi_schedule_workitem(&rpc->srpc_wi);
		}

		spin_unlock(&scd->scd_lock);
	}
}
void
srpc_shutdown_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	struct srpc_buffer *buf;
	int i;

	CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
	       sv->sv_id, sv->sv_name);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
		spin_lock(&scd->scd_lock);

	sv->sv_shuttingdown = 1; /* i.e. no new active RPC */

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
		spin_unlock(&scd->scd_lock);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		/* schedule in-flight RPCs to notice the shutdown */
		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
			swi_schedule_workitem(&rpc->srpc_wi);

		spin_unlock(&scd->scd_lock);

		/*
		 * OK to traverse scd_buf_posted without lock, since no one
		 * touches scd_buf_posted now
		 */
		list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
			LNetMDUnlink(buf->buf_mdh);
	}
}
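/*
 * NB: srpc_shutdown_service() takes every partition's scd_lock before
 * raising sv_shuttingdown, so no CPT can start a new active RPC while the
 * flag is being set; only after all locks are dropped are in-flight RPCs
 * kicked and the posted buffers unlinked.
 */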
static int
srpc_send_request(struct srpc_client_rpc *rpc)
{
	struct srpc_event *ev = &rpc->crpc_reqstev;
	int rc;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REQUEST_SENT;

	rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
				   rpc->crpc_service, &rpc->crpc_reqstmsg,
				   sizeof(struct srpc_msg), LNET_MD_OP_PUT,
				   rpc->crpc_dest, LNET_NID_ANY,
				   &rpc->crpc_reqstmdh, ev);
	if (rc) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1; /* no more event expected */
	}
	return rc;
}
static int
srpc_prepare_reply(struct srpc_client_rpc *rpc)
{
	struct srpc_event *ev = &rpc->crpc_replyev;
	__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
	int rc;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REPLY_RCVD;

	*id = srpc_next_id();

	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
				    &rpc->crpc_replymsg,
				    sizeof(struct srpc_msg),
				    LNET_MD_OP_PUT, rpc->crpc_dest,
				    &rpc->crpc_replymdh, ev);
	if (rc) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1; /* no more event expected */
	}
	return rc;
}
static int
srpc_prepare_bulk(struct srpc_client_rpc *rpc)
{
	struct srpc_bulk *bk = &rpc->crpc_bulk;
	struct srpc_event *ev = &rpc->crpc_bulkev;
	__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
	int rc;
	int opt;

	LASSERT(bk->bk_niov <= LNET_MAX_IOV);

	if (!bk->bk_niov)
		return 0; /* nothing to do */

	opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
	opt |= LNET_MD_KIOV;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_BULK_REQ_RCVD;

	*id = srpc_next_id();

	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
				    &bk->bk_iovs[0], bk->bk_niov, opt,
				    rpc->crpc_dest, &bk->bk_mdh, ev);
	if (rc) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1; /* no more event expected */
	}
	return rc;
}
static int
srpc_do_bulk(struct srpc_server_rpc *rpc)
{
	struct srpc_event *ev = &rpc->srpc_ev;
	struct srpc_bulk *bk = rpc->srpc_bulk;
	__u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
	int rc;
	int opt;

	LASSERT(bk);

	opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
	opt |= LNET_MD_KIOV;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;

	rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
				   &bk->bk_iovs[0], bk->bk_niov, opt,
				   rpc->srpc_peer, rpc->srpc_self,
				   &bk->bk_mdh, ev);
	if (rc)
		ev->ev_fired = 1; /* no more event expected */
	return rc;
}
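/*
 * NB the bulk direction pairing: on the client, srpc_prepare_bulk() posts a
 * passive buffer accepting PUT when the client is the data sink and GET
 * when it is the source; here the server issues the complementary active
 * operation, GET when the server side is the sink (pulling from the client)
 * and PUT when it is the source.
 */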
/* only called from srpc_handle_rpc */
static void
srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
{
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_buffer *buffer;

	LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE);

	rpc->srpc_status = status;

	CDEBUG_LIMIT(!status ? D_NET : D_NETERROR,
		     "Server RPC %p done: service %s, peer %s, status %s:%d\n",
		     rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
		     swi_state2str(rpc->srpc_wi.swi_state), status);

	if (status) {
		spin_lock(&srpc_data.rpc_glock);
		srpc_data.rpc_counters.rpcs_dropped++;
		spin_unlock(&srpc_data.rpc_glock);
	}

	if (rpc->srpc_done)
		(*rpc->srpc_done) (rpc);
	LASSERT(!rpc->srpc_bulk);

	spin_lock(&scd->scd_lock);

	if (rpc->srpc_reqstbuf) {
		/*
		 * NB might drop sv_lock in srpc_service_recycle_buffer, but
		 * sv won't go away for scd_rpc_active must not be empty
		 */
		srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
		rpc->srpc_reqstbuf = NULL;
	}

	list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */

	/*
	 * No one can schedule me now since:
	 * - I'm not on scd_rpc_active.
	 * - all LNet events have been fired.
	 * Cancel pending schedules and prevent future schedule attempts:
	 */
	LASSERT(rpc->srpc_ev.ev_fired);
	swi_exit_workitem(&rpc->srpc_wi);

	if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
		buffer = list_entry(scd->scd_buf_blocked.next,
				    struct srpc_buffer, buf_list);
		list_del(&buffer->buf_list);

		srpc_init_server_rpc(rpc, scd, buffer);
		list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
		swi_schedule_workitem(&rpc->srpc_wi);
	} else {
		list_add(&rpc->srpc_list, &scd->scd_rpc_free);
	}

	spin_unlock(&scd->scd_lock);
}
/* handles an incoming RPC */
int
srpc_handle_rpc(struct swi_workitem *wi)
{
	struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_event *ev = &rpc->srpc_ev;
	int rc = 0;

	LASSERT(wi == &rpc->srpc_wi);

	spin_lock(&scd->scd_lock);

	if (sv->sv_shuttingdown || rpc->srpc_aborted) {
		spin_unlock(&scd->scd_lock);

		if (rpc->srpc_bulk)
			LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
		LNetMDUnlink(rpc->srpc_replymdh);

		if (ev->ev_fired) { /* no more event, OK to finish */
			srpc_server_rpc_done(rpc, -ESHUTDOWN);
			return 1;
		}
		return 0;
	}

	spin_unlock(&scd->scd_lock);

	switch (wi->swi_state) {
	default:
		LBUG();
	case SWI_STATE_NEWBORN: {
		struct srpc_msg *msg;
		struct srpc_generic_reply *reply;

		msg = &rpc->srpc_reqstbuf->buf_msg;
		reply = &rpc->srpc_replymsg.msg_body.reply;

		if (!msg->msg_magic) {
			/* moaned already in srpc_lnet_ev_handler */
			srpc_server_rpc_done(rpc, EBADMSG);
			return 1;
		}

		srpc_unpack_msg_hdr(msg);
		if (msg->msg_version != SRPC_MSG_VERSION) {
			CWARN("Version mismatch: %u, %u expected, from %s\n",
			      msg->msg_version, SRPC_MSG_VERSION,
			      libcfs_id2str(rpc->srpc_peer));
			reply->status = EPROTO;
			/* drop through and send reply */
		} else {
			reply->status = 0;
			rc = (*sv->sv_handler)(rpc);
			LASSERT(!reply->status || !rpc->srpc_bulk);
			if (rc) {
				srpc_server_rpc_done(rpc, rc);
				return 1;
			}
		}

		wi->swi_state = SWI_STATE_BULK_STARTED;

		if (rpc->srpc_bulk) {
			rc = srpc_do_bulk(rpc);
			if (!rc)
				return 0; /* wait for bulk */

			LASSERT(ev->ev_fired);
			ev->ev_status = rc;
		}
	}
	case SWI_STATE_BULK_STARTED:
		LASSERT(!rpc->srpc_bulk || ev->ev_fired);

		if (rpc->srpc_bulk) {
			rc = ev->ev_status;

			if (sv->sv_bulk_ready)
				rc = (*sv->sv_bulk_ready) (rpc, rc);

			if (rc) {
				srpc_server_rpc_done(rpc, rc);
				return 1;
			}
		}

		wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
		rc = srpc_send_reply(rpc);
		if (!rc)
			return 0; /* wait for reply */
		srpc_server_rpc_done(rpc, rc);
		return 1;

	case SWI_STATE_REPLY_SUBMITTED:
		if (!ev->ev_fired) {
			CERROR("RPC %p: bulk %p, service %d\n",
			       rpc, rpc->srpc_bulk, sv->sv_id);
			CERROR("Event: status %d, type %d, lnet %d\n",
			       ev->ev_status, ev->ev_type, ev->ev_lnet);
			LASSERT(ev->ev_fired);
		}

		wi->swi_state = SWI_STATE_DONE;
		srpc_server_rpc_done(rpc, ev->ev_status);
		return 1;
	}

	return 0;
}
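/*
 * Server RPC state machine driven above:
 *	SWI_STATE_NEWBORN -> unpack header, dispatch to sv_handler, start bulk
 *	SWI_STATE_BULK_STARTED -> bulk completed, submit the reply
 *	SWI_STATE_REPLY_SUBMITTED -> reply event fired, SWI_STATE_DONE
 * Each wait point returns 0 so the work item can be rescheduled by the LNet
 * event handler; returning 1 tells the scheduler this work item is finished.
 */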
static void
srpc_client_rpc_expired(void *data)
{
	struct srpc_client_rpc *rpc = data;

	CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
	      rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
	      rpc->crpc_timeout);

	spin_lock(&rpc->crpc_lock);

	rpc->crpc_timeout = 0;
	srpc_abort_rpc(rpc, -ETIMEDOUT);

	spin_unlock(&rpc->crpc_lock);

	spin_lock(&srpc_data.rpc_glock);
	srpc_data.rpc_counters.rpcs_expired++;
	spin_unlock(&srpc_data.rpc_glock);
}
static void
srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
{
	struct stt_timer *timer = &rpc->crpc_timer;

	if (!rpc->crpc_timeout)
		return;

	INIT_LIST_HEAD(&timer->stt_list);
	timer->stt_data = rpc;
	timer->stt_func = srpc_client_rpc_expired;
	timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout;
	stt_add_timer(timer);
}
/*
 * Called with rpc->crpc_lock held.
 *
 * Upon exit the RPC expiry timer is not queued and the handler is not
 * running on any CPU.
 */
static void
srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
{
	/* timer not planted or already exploded */
	if (!rpc->crpc_timeout)
		return;

	/* timer successfully defused */
	if (stt_del_timer(&rpc->crpc_timer))
		return;

	/* timer detonated, wait for it to explode */
	while (rpc->crpc_timeout) {
		spin_unlock(&rpc->crpc_lock);

		schedule();

		spin_lock(&rpc->crpc_lock);
	}
}
static void
srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
{
	struct swi_workitem *wi = &rpc->crpc_wi;

	LASSERT(status || wi->swi_state == SWI_STATE_DONE);

	spin_lock(&rpc->crpc_lock);

	rpc->crpc_closed = 1;
	if (!rpc->crpc_status)
		rpc->crpc_status = status;

	srpc_del_client_rpc_timer(rpc);

	CDEBUG_LIMIT(!status ? D_NET : D_NETERROR,
		     "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
		     rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
		     swi_state2str(wi->swi_state), rpc->crpc_aborted, status);

	/*
	 * No one can schedule me now since:
	 * - RPC timer has been defused.
	 * - all LNet events have been fired.
	 * - crpc_closed has been set, preventing srpc_abort_rpc from
	 *   scheduling me.
	 * Cancel pending schedules and prevent future schedule attempts:
	 */
	LASSERT(!srpc_event_pending(rpc));
	swi_exit_workitem(wi);

	spin_unlock(&rpc->crpc_lock);

	(*rpc->crpc_done)(rpc);
}
/* sends an outgoing RPC */
int
srpc_send_rpc(struct swi_workitem *wi)
{
	int rc = 0;
	struct srpc_client_rpc *rpc;
	struct srpc_msg *reply;
	int do_bulk;

	LASSERT(wi);

	rpc = wi->swi_workitem.wi_data;

	LASSERT(rpc);
	LASSERT(wi == &rpc->crpc_wi);

	reply = &rpc->crpc_replymsg;
	do_bulk = rpc->crpc_bulk.bk_niov > 0;

	spin_lock(&rpc->crpc_lock);

	if (rpc->crpc_aborted) {
		spin_unlock(&rpc->crpc_lock);
		goto abort;
	}

	spin_unlock(&rpc->crpc_lock);

	switch (wi->swi_state) {
	default:
		LBUG();
	case SWI_STATE_NEWBORN:
		LASSERT(!srpc_event_pending(rpc));

		rc = srpc_prepare_reply(rpc);
		if (rc) {
			srpc_client_rpc_done(rpc, rc);
			return 1;
		}

		rc = srpc_prepare_bulk(rpc);
		if (rc)
			break;

		wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
		rc = srpc_send_request(rpc);
		break;

	case SWI_STATE_REQUEST_SUBMITTED:
		/*
		 * CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
		 * order; however, they're processed in a strict order:
		 * rqt, rpy, and bulk.
		 */
		if (!rpc->crpc_reqstev.ev_fired)
			break;

		rc = rpc->crpc_reqstev.ev_status;
		if (rc)
			break;

		wi->swi_state = SWI_STATE_REQUEST_SENT;
		/* perhaps more events, fall thru */
	case SWI_STATE_REQUEST_SENT: {
		enum srpc_msg_type type = srpc_service2reply(rpc->crpc_service);

		if (!rpc->crpc_replyev.ev_fired)
			break;

		rc = rpc->crpc_replyev.ev_status;
		if (rc)
			break;

		srpc_unpack_msg_hdr(reply);
		if (reply->msg_type != type ||
		    (reply->msg_magic != SRPC_MSG_MAGIC &&
		     reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
			CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
			      libcfs_id2str(rpc->crpc_dest),
			      reply->msg_type, type,
			      reply->msg_magic, SRPC_MSG_MAGIC);
			rc = -EBADMSG;
			break;
		}

		if (do_bulk && reply->msg_body.reply.status) {
			CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
			      reply->msg_body.reply.status,
			      libcfs_id2str(rpc->crpc_dest));
			LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
		}

		wi->swi_state = SWI_STATE_REPLY_RECEIVED;
	}
	case SWI_STATE_REPLY_RECEIVED:
		if (do_bulk && !rpc->crpc_bulkev.ev_fired)
			break;

		rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;

		/*
		 * Bulk buffer was unlinked due to remote error. Clear error
		 * since reply buffer still contains valid data.
		 * NB rpc->crpc_done shouldn't look into bulk data in case of
		 * remote error.
		 */
		if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
		    !rpc->crpc_status && reply->msg_body.reply.status)
			rc = 0;

		wi->swi_state = SWI_STATE_DONE;
		srpc_client_rpc_done(rpc, rc);
		return 1;
	}

	if (rc) {
		spin_lock(&rpc->crpc_lock);
		srpc_abort_rpc(rpc, rc);
		spin_unlock(&rpc->crpc_lock);
	}

abort:
	if (rpc->crpc_aborted) {
		LNetMDUnlink(rpc->crpc_reqstmdh);
		LNetMDUnlink(rpc->crpc_replymdh);
		LNetMDUnlink(rpc->crpc_bulk.bk_mdh);

		if (!srpc_event_pending(rpc)) {
			srpc_client_rpc_done(rpc, -EINTR);
			return 1;
		}
	}
	return 0;
}
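/*
 * Client RPC state machine driven above:
 *	SWI_STATE_NEWBORN -> post reply/bulk buffers, send the request
 *	SWI_STATE_REQUEST_SUBMITTED -> request send event fired
 *	SWI_STATE_REQUEST_SENT -> reply received and sanity-checked
 *	SWI_STATE_REPLY_RECEIVED -> bulk (if any) completed, SWI_STATE_DONE
 * Note that the passive reply and bulk buffers are posted *before* the
 * request is sent, so the peer can respond immediately.
 */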
struct srpc_client_rpc *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
		       int nbulkiov, int bulklen,
		       void (*rpc_done)(struct srpc_client_rpc *),
		       void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
	struct srpc_client_rpc *rpc;

	LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
				   crpc_bulk.bk_iovs[nbulkiov]));
	if (!rpc)
		return NULL;

	srpc_init_client_rpc(rpc, peer, service, nbulkiov,
			     bulklen, rpc_done, rpc_fini, priv);
	return rpc;
}
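/*
 * A minimal client-side usage sketch (illustrative only; ping_done is a
 * hypothetical completion callback, real callers live in the framework):
 *
 *	rpc = srpc_create_client_rpc(peer, SRPC_SERVICE_PING, 0, 0,
 *				     ping_done, NULL, NULL);
 *	if (rpc) {
 *		spin_lock(&rpc->crpc_lock);
 *		srpc_post_rpc(rpc);
 *		spin_unlock(&rpc->crpc_lock);
 *	}
 *
 * srpc_post_rpc() arms the expiry timer and schedules crpc_wi, whose
 * handler (srpc_send_rpc) drives the RPC through its states.
 */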
/* called with rpc->crpc_lock held */
void
srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
{
	LASSERT(why);

	if (rpc->crpc_aborted ||	/* already aborted */
	    rpc->crpc_closed)		/* callback imminent */
		return;

	CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n",
	       rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
	       swi_state2str(rpc->crpc_wi.swi_state), why);

	rpc->crpc_aborted = 1;
	rpc->crpc_status = why;
	swi_schedule_workitem(&rpc->crpc_wi);
}
/* called with rpc->crpc_lock held */
void
srpc_post_rpc(struct srpc_client_rpc *rpc)
{
	LASSERT(!rpc->crpc_aborted);
	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

	CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
	       libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
	       rpc->crpc_timeout);

	srpc_add_client_rpc_timer(rpc);
	swi_schedule_workitem(&rpc->crpc_wi);
}
int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
	struct srpc_event *ev = &rpc->srpc_ev;
	struct srpc_msg *msg = &rpc->srpc_replymsg;
	struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	__u64 rpyid;
	int rc;

	LASSERT(buffer);
	rpyid = buffer->buf_msg.msg_body.reqst.rpyid;

	spin_lock(&scd->scd_lock);

	if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
		/*
		 * Repost buffer before replying since test client
		 * might send me another RPC once it gets the reply
		 */
		if (srpc_service_post_buffer(scd, buffer))
			CWARN("Failed to repost %s buffer\n", sv->sv_name);
		rpc->srpc_reqstbuf = NULL;
	}

	spin_unlock(&scd->scd_lock);

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REPLY_SENT;

	msg->msg_magic = SRPC_MSG_MAGIC;
	msg->msg_version = SRPC_MSG_VERSION;
	msg->msg_type = srpc_service2reply(sv->sv_id);

	rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
				   sizeof(*msg), LNET_MD_OP_PUT,
				   rpc->srpc_peer, rpc->srpc_self,
				   &rpc->srpc_replymdh, ev);
	if (rc)
		ev->ev_fired = 1; /* no more event expected */
	return rc;
}
/* when in kernel always called with LNET_LOCK() held, and in thread context */
static void
srpc_lnet_ev_handler(lnet_event_t *ev)
{
	struct srpc_service_cd *scd;
	struct srpc_event *rpcev = ev->md.user_ptr;
	struct srpc_client_rpc *crpc;
	struct srpc_server_rpc *srpc;
	struct srpc_buffer *buffer;
	struct srpc_service *sv;
	struct srpc_msg *msg;
	enum srpc_msg_type type;

	LASSERT(!in_interrupt());

	if (ev->status) {
		__u32 errors;

		spin_lock(&srpc_data.rpc_glock);
		if (ev->status != -ECANCELED) /* cancellation is not error */
			srpc_data.rpc_counters.errors++;
		errors = srpc_data.rpc_counters.errors;
		spin_unlock(&srpc_data.rpc_glock);

		CNETERR("LNet event status %d type %d, RPC errors %u\n",
			ev->status, ev->type, errors);
	}

	rpcev->ev_lnet = ev->type;

	switch (rpcev->ev_type) {
	default:
		CERROR("Unknown event: status %d, type %d, lnet %d\n",
		       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
		LBUG();
	case SRPC_REQUEST_SENT:
		if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
			spin_lock(&srpc_data.rpc_glock);
			srpc_data.rpc_counters.rpcs_sent++;
			spin_unlock(&srpc_data.rpc_glock);
		}
	case SRPC_REPLY_RCVD:
	case SRPC_BULK_REQ_RCVD:
		crpc = rpcev->ev_data;

		if (rpcev != &crpc->crpc_reqstev &&
		    rpcev != &crpc->crpc_replyev &&
		    rpcev != &crpc->crpc_bulkev) {
			CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
			       rpcev, crpc, &crpc->crpc_reqstev,
			       &crpc->crpc_replyev, &crpc->crpc_bulkev);
			CERROR("Bad event: status %d, type %d, lnet %d\n",
			       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
			LBUG();
		}

		spin_lock(&crpc->crpc_lock);

		LASSERT(!rpcev->ev_fired);
		rpcev->ev_fired = 1;
		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
				   -EINTR : ev->status;
		swi_schedule_workitem(&crpc->crpc_wi);

		spin_unlock(&crpc->crpc_lock);
		break;

	case SRPC_REQUEST_RCVD:
		scd = rpcev->ev_data;
		sv = scd->scd_svc;

		LASSERT(rpcev == &scd->scd_ev);

		spin_lock(&scd->scd_lock);

		LASSERT(ev->unlinked);
		LASSERT(ev->type == LNET_EVENT_PUT ||
			ev->type == LNET_EVENT_UNLINK);
		LASSERT(ev->type != LNET_EVENT_UNLINK ||
			sv->sv_shuttingdown);

		buffer = container_of(ev->md.start, struct srpc_buffer, buf_msg);
		buffer->buf_peer = ev->initiator;
		buffer->buf_self = ev->target.nid;

		LASSERT(scd->scd_buf_nposted > 0);
		scd->scd_buf_nposted--;

		if (sv->sv_shuttingdown) {
			/*
			 * Leave buffer on scd->scd_buf_nposted since
			 * srpc_finish_service needs to traverse it.
			 */
			spin_unlock(&scd->scd_lock);
			break;
		}

		if (scd->scd_buf_err_stamp &&
		    scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
			/* re-enable adding buffer */
			scd->scd_buf_err_stamp = 0;
			scd->scd_buf_err = 0;
		}

		if (!scd->scd_buf_err &&	/* adding buffer is enabled */
		    !scd->scd_buf_adjust &&
		    scd->scd_buf_nposted < scd->scd_buf_low) {
			scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
						  SFW_TEST_WI_MIN);
			swi_schedule_workitem(&scd->scd_buf_wi);
		}

		list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
		msg = &buffer->buf_msg;
		type = srpc_service2request(sv->sv_id);

		if (ev->status || ev->mlength != sizeof(*msg) ||
		    (msg->msg_type != type &&
		     msg->msg_type != __swab32(type)) ||
		    (msg->msg_magic != SRPC_MSG_MAGIC &&
		     msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
			CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
			       sv->sv_name, libcfs_id2str(ev->initiator),
			       ev->status, ev->mlength,
			       msg->msg_type, msg->msg_magic);

			/*
			 * NB can't call srpc_service_recycle_buffer here since
			 * it may call LNetM[DE]Attach. The invalid magic tells
			 * srpc_handle_rpc to drop this RPC
			 */
			msg->msg_magic = 0;
		}

		if (!list_empty(&scd->scd_rpc_free)) {
			srpc = list_entry(scd->scd_rpc_free.next,
					  struct srpc_server_rpc,
					  srpc_list);
			list_del(&srpc->srpc_list);

			srpc_init_server_rpc(srpc, scd, buffer);
			list_add_tail(&srpc->srpc_list,
				      &scd->scd_rpc_active);
			swi_schedule_workitem(&srpc->srpc_wi);
		} else {
			list_add_tail(&buffer->buf_list,
				      &scd->scd_buf_blocked);
		}

		spin_unlock(&scd->scd_lock);

		spin_lock(&srpc_data.rpc_glock);
		srpc_data.rpc_counters.rpcs_rcvd++;
		spin_unlock(&srpc_data.rpc_glock);
		break;

	case SRPC_BULK_GET_RPLD:
		LASSERT(ev->type == LNET_EVENT_SEND ||
			ev->type == LNET_EVENT_REPLY ||
			ev->type == LNET_EVENT_UNLINK);

		if (!ev->unlinked)
			break; /* wait for final event */

	case SRPC_BULK_PUT_SENT:
		if (!ev->status && ev->type != LNET_EVENT_UNLINK) {
			spin_lock(&srpc_data.rpc_glock);

			if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
				srpc_data.rpc_counters.bulk_get += ev->mlength;
			else
				srpc_data.rpc_counters.bulk_put += ev->mlength;

			spin_unlock(&srpc_data.rpc_glock);
		}
	case SRPC_REPLY_SENT:
		srpc = rpcev->ev_data;
		scd = srpc->srpc_scd;

		LASSERT(rpcev == &srpc->srpc_ev);

		spin_lock(&scd->scd_lock);

		rpcev->ev_fired = 1;
		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
				   -EINTR : ev->status;
		swi_schedule_workitem(&srpc->srpc_wi);

		spin_unlock(&scd->scd_lock);
		break;
	}
}
int
srpc_startup(void)
{
	int rc;

	memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
	spin_lock_init(&srpc_data.rpc_glock);

	/* 1 second pause to avoid timestamp reuse */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(cfs_time_seconds(1));
	srpc_data.rpc_matchbits = ((__u64)ktime_get_real_seconds()) << 48;

	srpc_data.rpc_state = SRPC_STATE_NONE;

	rc = LNetNIInit(LNET_PID_LUSTRE);
	if (rc < 0) {
		CERROR("LNetNIInit() has failed: %d\n", rc);
		return rc;
	}

	srpc_data.rpc_state = SRPC_STATE_NI_INIT;

	LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
	rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
	if (rc) {
		CERROR("LNetEQAlloc() has failed: %d\n", rc);
		goto bail;
	}

	rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
	LASSERT(!rc);
	rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
	LASSERT(!rc);

	srpc_data.rpc_state = SRPC_STATE_EQ_INIT;

	rc = stt_startup();

bail:
	if (rc)
		srpc_shutdown();
	else
		srpc_data.rpc_state = SRPC_STATE_RUNNING;

	return rc;
}
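/*
 * NB: the one second pause above plus the 48-bit shift make the match bits
 * seed differ across a quick module unload/reload, so stale buffers from a
 * previous incarnation should not match new RPCs.
 */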
void
srpc_shutdown(void)
{
	int i;
	int rc;
	int state;

	state = srpc_data.rpc_state;
	srpc_data.rpc_state = SRPC_STATE_STOPPING;

	switch (state) {
	default:
		LBUG();
	case SRPC_STATE_RUNNING:
		spin_lock(&srpc_data.rpc_glock);

		for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
			struct srpc_service *sv = srpc_data.rpc_services[i];

			LASSERTF(!sv, "service not empty: id %d, name %s\n",
				 i, sv->sv_name);
		}

		spin_unlock(&srpc_data.rpc_glock);

		stt_shutdown();

	case SRPC_STATE_EQ_INIT:
		rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
		rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
		LASSERT(!rc);
		rc = LNetEQFree(srpc_data.rpc_lnet_eq);
		LASSERT(!rc); /* the EQ should have no user by now */

	case SRPC_STATE_NI_INIT:
		LNetNIFini();
	}
}